diff --git a/.github/workflows/containers/github-action-ci/stage1.Dockerfile b/.github/workflows/containers/github-action-ci/stage1.Dockerfile
index 73828cc05736e6..3e2c1ab11d58bf 100644
--- a/.github/workflows/containers/github-action-ci/stage1.Dockerfile
+++ b/.github/workflows/containers/github-action-ci/stage1.Dockerfile
@@ -2,7 +2,7 @@ FROM docker.io/library/ubuntu:22.04 as base
 ENV LLVM_SYSROOT=/opt/llvm
 
 FROM base as stage1-toolchain
-ENV LLVM_VERSION=18.1.8
+ENV LLVM_VERSION=19.1.2
 
 RUN apt-get update && \
     apt-get install -y \
diff --git a/clang-tools-extra/clangd/Config.h b/clang-tools-extra/clangd/Config.h
index 8fcbc5c33469fa..e174f7fabe344e 100644
--- a/clang-tools-extra/clangd/Config.h
+++ b/clang-tools-extra/clangd/Config.h
@@ -162,6 +162,7 @@ struct Config {
     bool DeducedTypes = true;
     bool Designators = true;
     bool BlockEnd = false;
+    bool DefaultArguments = false;
     // Limit the length of type names in inlay hints. (0 means no limit)
     uint32_t TypeNameLimit = 32;
   } InlayHints;
diff --git a/clang-tools-extra/clangd/ConfigCompile.cpp b/clang-tools-extra/clangd/ConfigCompile.cpp
index 58610a5b87922d..fb7692998d05c7 100644
--- a/clang-tools-extra/clangd/ConfigCompile.cpp
+++ b/clang-tools-extra/clangd/ConfigCompile.cpp
@@ -43,7 +43,6 @@
 #include "llvm/Support/Regex.h"
 #include "llvm/Support/SMLoc.h"
 #include "llvm/Support/SourceMgr.h"
-#include
 #include
 #include
 #include
@@ -669,6 +668,11 @@ struct FragmentCompiler {
       Out.Apply.push_back([Value(**F.BlockEnd)](const Params &, Config &C) {
         C.InlayHints.BlockEnd = Value;
       });
+    if (F.DefaultArguments)
+      Out.Apply.push_back(
+          [Value(**F.DefaultArguments)](const Params &, Config &C) {
+            C.InlayHints.DefaultArguments = Value;
+          });
     if (F.TypeNameLimit)
       Out.Apply.push_back(
           [Value(**F.TypeNameLimit)](const Params &, Config &C) {
diff --git a/clang-tools-extra/clangd/ConfigFragment.h b/clang-tools-extra/clangd/ConfigFragment.h
index fc1b45f5d4c3e9..36f7d04231c414 100644
--- a/clang-tools-extra/clangd/ConfigFragment.h
+++ b/clang-tools-extra/clangd/ConfigFragment.h
@@ -339,6 +339,9 @@ struct Fragment {
     std::optional<Located<bool>> Designators;
     /// Show defined symbol names at the end of a definition block.
     std::optional<Located<bool>> BlockEnd;
+    /// Show parameter names and default values of default arguments after all
+    /// of the explicit arguments.
+    std::optional<Located<bool>> DefaultArguments;
     /// Limit the length of type name hints. (0 means no limit)
     std::optional<Located<uint32_t>> TypeNameLimit;
   };
diff --git a/clang-tools-extra/clangd/ConfigYAML.cpp b/clang-tools-extra/clangd/ConfigYAML.cpp
index bcdda99eeed67a..32e028981d4244 100644
--- a/clang-tools-extra/clangd/ConfigYAML.cpp
+++ b/clang-tools-extra/clangd/ConfigYAML.cpp
@@ -14,7 +14,6 @@
 #include "llvm/Support/YAMLParser.h"
 #include
 #include
-#include
 
 namespace clang {
 namespace clangd {
@@ -268,6 +267,10 @@ class Parser {
       if (auto Value = boolValue(N, "BlockEnd"))
         F.BlockEnd = *Value;
     });
+    Dict.handle("DefaultArguments", [&](Node &N) {
+      if (auto Value = boolValue(N, "DefaultArguments"))
+        F.DefaultArguments = *Value;
+    });
     Dict.handle("TypeNameLimit", [&](Node &N) {
       if (auto Value = uint32Value(N, "TypeNameLimit"))
         F.TypeNameLimit = *Value;
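With the plumbing above in place, the new hint kind can be toggled from a clangd config fragment like any other inlay-hint key. A minimal sketch (the key names are the ones registered in ConfigYAML.cpp above; the enclosing InlayHints section is the existing one):

    InlayHints:
      DefaultArguments: Yes

And with both Parameters and DefaultArguments enabled, a call site would render roughly like this (illustrative C++; `add` and its signature are made up, and defaults longer than TypeNameLimit get abbreviated to "..."):

    int add(int a, int b = 40, bool wrap = false);
    int x = add(2);    // rendered as: add(a: 2, b: 40, wrap: false)
    int y = add(2, 3); // rendered as: add(a: 2, b: 3, wrap: false)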
diff --git a/clang-tools-extra/clangd/InlayHints.cpp b/clang-tools-extra/clangd/InlayHints.cpp
index cd4f1931b3ce1d..c4053fced81d6f 100644
--- a/clang-tools-extra/clangd/InlayHints.cpp
+++ b/clang-tools-extra/clangd/InlayHints.cpp
@@ -11,9 +11,11 @@
 #include "Config.h"
 #include "HeuristicResolver.h"
 #include "ParsedAST.h"
+#include "Protocol.h"
 #include "SourceCode.h"
 #include "clang/AST/ASTDiagnostic.h"
 #include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
 #include "clang/AST/DeclarationName.h"
 #include "clang/AST/Expr.h"
 #include "clang/AST/ExprCXX.h"
@@ -23,15 +25,22 @@
 #include "clang/AST/Type.h"
 #include "clang/Basic/Builtins.h"
 #include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/SourceLocation.h"
 #include "clang/Basic/SourceManager.h"
 #include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
 #include "llvm/Support/SaveAndRestore.h"
 #include "llvm/Support/ScopedPrinter.h"
 #include "llvm/Support/raw_ostream.h"
+#include
+#include
 #include
 #include
 
@@ -372,6 +381,23 @@ maybeDropCxxExplicitObjectParameters(ArrayRef<const ParmVarDecl *> Params) {
   return Params;
 }
 
+template <typename R>
+std::string joinAndTruncate(const R &Range, size_t MaxLength) {
+  std::string Out;
+  llvm::raw_string_ostream OS(Out);
+  llvm::ListSeparator Sep(", ");
+  for (auto &&Element : Range) {
+    OS << Sep;
+    if (Out.size() + Element.size() >= MaxLength) {
+      OS << "...";
+      break;
+    }
+    OS << Element;
+  }
+  OS.flush();
+  return Out;
+}
+
 struct Callee {
   // Only one of Decl or Loc is set.
   // Loc is for calls through function pointers.
@@ -422,7 +448,8 @@ class InlayHintVisitor : public RecursiveASTVisitor<InlayHintVisitor> {
     Callee.Decl = E->getConstructor();
     if (!Callee.Decl)
       return true;
-    processCall(Callee, {E->getArgs(), E->getNumArgs()});
+    processCall(Callee, E->getParenOrBraceRange().getEnd(),
+                {E->getArgs(), E->getNumArgs()});
     return true;
   }
 
@@ -495,7 +522,7 @@ class InlayHintVisitor : public RecursiveASTVisitor<InlayHintVisitor> {
             dyn_cast_or_null<CXXMethodDecl>(Callee.Decl))
       if (IsFunctor || Method->hasCXXExplicitFunctionObjectParameter())
         Args = Args.drop_front(1);
-    processCall(Callee, Args);
+    processCall(Callee, E->getRParenLoc(), Args);
     return true;
   }
 
@@ -709,10 +736,12 @@ class InlayHintVisitor : public RecursiveASTVisitor<InlayHintVisitor> {
 private:
   using NameVec = SmallVector<StringRef, 8>;
 
-  void processCall(Callee Callee, llvm::ArrayRef<const Expr *> Args) {
+  void processCall(Callee Callee, SourceLocation RParenOrBraceLoc,
+                   llvm::ArrayRef<const Expr *> Args) {
     assert(Callee.Decl || Callee.Loc);
 
-    if (!Cfg.InlayHints.Parameters || Args.size() == 0)
+    if ((!Cfg.InlayHints.Parameters && !Cfg.InlayHints.DefaultArguments) ||
+        Args.size() == 0)
       return;
 
     // The parameter name of a move or copy constructor is not very interesting.
@@ -721,6 +750,9 @@ class InlayHintVisitor : public RecursiveASTVisitor<InlayHintVisitor> {
       if (Ctor->isCopyOrMoveConstructor())
         return;
 
+    SmallVector<std::string> FormattedDefaultArgs;
+    bool HasNonDefaultArgs = false;
+
     ArrayRef<const ParmVarDecl *> Params, ForwardedParams;
     // Resolve parameter packs to their forwarded parameter
     SmallVector<const ParmVarDecl *> ForwardedParamsStorage;
@@ -752,15 +784,44 @@
       }
 
       StringRef Name = ParameterNames[I];
-      bool NameHint = shouldHintName(Args[I], Name);
-      bool ReferenceHint = shouldHintReference(Params[I], ForwardedParams[I]);
-
-      if (NameHint || ReferenceHint) {
+      const bool NameHint =
+          shouldHintName(Args[I], Name) && Cfg.InlayHints.Parameters;
+      const bool ReferenceHint =
+          shouldHintReference(Params[I], ForwardedParams[I]) &&
+          Cfg.InlayHints.Parameters;
+
+      const bool IsDefault = isa<CXXDefaultArgExpr>(Args[I]);
+      HasNonDefaultArgs |= !IsDefault;
+      if (IsDefault) {
+        if (Cfg.InlayHints.DefaultArguments) {
+          const auto SourceText = Lexer::getSourceText(
+              CharSourceRange::getTokenRange(Params[I]->getDefaultArgRange()),
+              AST.getSourceManager(), AST.getLangOpts());
+          const auto Abbrev =
+              (SourceText.size() > Cfg.InlayHints.TypeNameLimit ||
+               SourceText.contains("\n"))
+                  ? "..."
+                  : SourceText;
+          if (NameHint)
+            FormattedDefaultArgs.emplace_back(
+                llvm::formatv("{0}: {1}", Name, Abbrev));
+          else
+            FormattedDefaultArgs.emplace_back(llvm::formatv("{0}", Abbrev));
+        }
+      } else if (NameHint || ReferenceHint) {
         addInlayHint(Args[I]->getSourceRange(), HintSide::Left,
                      InlayHintKind::Parameter, ReferenceHint ? "&" : "",
                      NameHint ? Name : "", ": ");
       }
     }
+
+    if (!FormattedDefaultArgs.empty()) {
+      std::string Hint =
+          joinAndTruncate(FormattedDefaultArgs, Cfg.InlayHints.TypeNameLimit);
+      addInlayHint(SourceRange{RParenOrBraceLoc}, HintSide::Left,
+                   InlayHintKind::DefaultArgument,
+                   HasNonDefaultArgs ? ", " : "", Hint, "");
+    }
   }
 
   static bool isSetter(const FunctionDecl *Callee, const NameVec &ParamNames) {
@@ -968,6 +1029,7 @@ class InlayHintVisitor : public RecursiveASTVisitor<InlayHintVisitor> {
     CHECK_KIND(Type, DeducedTypes);
     CHECK_KIND(Designator, Designators);
     CHECK_KIND(BlockEnd, BlockEnd);
+    CHECK_KIND(DefaultArgument, DefaultArguments);
 #undef CHECK_KIND
   }
 
diff --git a/clang-tools-extra/clangd/Protocol.cpp b/clang-tools-extra/clangd/Protocol.cpp
index c08f80442eaa06..295ccd26a40454 100644
--- a/clang-tools-extra/clangd/Protocol.cpp
+++ b/clang-tools-extra/clangd/Protocol.cpp
@@ -1477,6 +1477,7 @@ llvm::json::Value toJSON(const InlayHintKind &Kind) {
     return 2;
   case InlayHintKind::Designator:
   case InlayHintKind::BlockEnd:
+  case InlayHintKind::DefaultArgument:
     // This is an extension, don't serialize.
     return nullptr;
   }
@@ -1517,6 +1518,8 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, InlayHintKind Kind) {
       return "designator";
     case InlayHintKind::BlockEnd:
       return "block-end";
+    case InlayHintKind::DefaultArgument:
+      return "default-argument";
     }
     llvm_unreachable("Unknown clang.clangd.InlayHintKind");
   };
diff --git a/clang-tools-extra/clangd/Protocol.h b/clang-tools-extra/clangd/Protocol.h
index a0f8b04bc4ffdb..5b28095758198d 100644
--- a/clang-tools-extra/clangd/Protocol.h
+++ b/clang-tools-extra/clangd/Protocol.h
@@ -1681,6 +1681,15 @@ enum class InlayHintKind {
   /// This is a clangd extension.
   BlockEnd = 4,
 
+  /// An inlay hint that is for a default argument.
+  ///
+  /// An example of a parameter hint for a default argument:
+  ///   void foo(bool A = true);
+  ///   foo(^);
+  /// Adds an inlay hint "A: true".
+  /// This is a clangd extension.
+  DefaultArgument = 6,
+
   /// Other ideas for hints that are not currently implemented:
   ///
   /// * Chaining hints, showing the types of intermediate expressions
diff --git a/clang-tools-extra/clangd/unittests/InlayHintTests.cpp b/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
index a5a349e93037ad..73dd273d6c39d4 100644
--- a/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
+++ b/clang-tools-extra/clangd/unittests/InlayHintTests.cpp
@@ -15,9 +15,12 @@
 #include "support/Context.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/raw_ostream.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
+#include
 #include
+#include
 #include
 
 namespace clang {
@@ -81,6 +84,7 @@ Config noHintsConfig() {
   C.InlayHints.DeducedTypes = false;
   C.InlayHints.Designators = false;
   C.InlayHints.BlockEnd = false;
+  C.InlayHints.DefaultArguments = false;
   return C;
 }
 
@@ -1465,6 +1469,75 @@ TEST(TypeHints, DefaultTemplateArgs) {
               ExpectedHint{": A", "binding"});
 }
 
+TEST(DefaultArguments, Smoke) {
+  Config Cfg;
+  Cfg.InlayHints.Parameters =
+      true; // To test interplay of parameters and default parameters
+  Cfg.InlayHints.DeducedTypes = false;
+  Cfg.InlayHints.Designators = false;
+  Cfg.InlayHints.BlockEnd = false;
+
+  Cfg.InlayHints.DefaultArguments = true;
+  WithContextValue WithCfg(Config::Key, std::move(Cfg));
+
+  const auto *Code = R"cpp(
+    int foo(int A = 4) { return A; }
+    int bar(int A, int B = 1, bool C = foo($default1[[)]]) { return A; }
+    int A = bar($explicit[[2]]$default2[[)]];
+
+    void baz(int = 5) { if (false) baz($unnamed[[)]]; };
+  )cpp";
+
+  assertHints(InlayHintKind::DefaultArgument, Code,
+              ExpectedHint{"A: 4", "default1", Left},
+              ExpectedHint{", B: 1, C: foo()", "default2", Left},
+              ExpectedHint{"5", "unnamed", Left});
+
+  assertHints(InlayHintKind::Parameter, Code,
+              ExpectedHint{"A: ", "explicit", Left});
+}
+
+TEST(DefaultArguments, WithoutParameterNames) {
+  Config Cfg;
+  Cfg.InlayHints.Parameters = false; // To test just default args this time
+  Cfg.InlayHints.DeducedTypes = false;
+  Cfg.InlayHints.Designators = false;
+  Cfg.InlayHints.BlockEnd = false;
+
+  Cfg.InlayHints.DefaultArguments = true;
+  WithContextValue WithCfg(Config::Key, std::move(Cfg));
+
+  const auto *Code = R"cpp(
+    struct Baz {
+      Baz(float a = 3 //
+                    + 2);
+    };
+    struct Foo {
+      Foo(int, Baz baz = //
+                   Baz{$abbreviated[[}]]
+
+          //
+          ) {}
+    };
+
+    int main() {
+      Foo foo1(1$paren[[)]];
+      Foo foo2{2$brace1[[}]];
+      Foo foo3 = {3$brace2[[}]];
+      auto foo4 = Foo{4$brace3[[}]];
+    }
+  )cpp";
+
+  assertHints(InlayHintKind::DefaultArgument, Code,
+              ExpectedHint{"...", "abbreviated", Left},
+              ExpectedHint{", Baz{}", "paren", Left},
+              ExpectedHint{", Baz{}", "brace1", Left},
+              ExpectedHint{", Baz{}", "brace2", Left},
+              ExpectedHint{", Baz{}", "brace3", Left});
+
+  assertHints(InlayHintKind::Parameter, Code);
+}
+
 TEST(TypeHints, Deduplication) {
   assertTypeHints(R"cpp(
     template
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index e8148e06b6af28..a9b1ab367f538a 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -56,6 +56,8 @@ Improvements to clangd
 Inlay hints
 ^^^^^^^^^^^
 
+- Added `DefaultArguments` Inlay Hints option.
+
 Diagnostics
 ^^^^^^^^^^^
 
diff --git a/clang/docs/analyzer/user-docs/CommandLineUsage.rst b/clang/docs/analyzer/user-docs/CommandLineUsage.rst
index d7f8253469df40..59f8187f374a95 100644
--- a/clang/docs/analyzer/user-docs/CommandLineUsage.rst
+++ b/clang/docs/analyzer/user-docs/CommandLineUsage.rst
@@ -2,7 +2,7 @@ Command Line Usage: scan-build and CodeChecker
 ==============================================
 
 This document provides guidelines for running the static analyzer from the command line on whole projects.
-CodeChecker and scan-build are two CLI tools for using CSA on multiple files (tranlation units).
+CodeChecker and scan-build are two CLI tools for using CSA on multiple files (translation units).
 Both provide a way of driving the analyzer, detecting compilation flags, and generating reports.
 CodeChecker is more actively maintained, provides heuristics for working with multiple versions of popular compilers and it also comes with a web-based GUI for viewing, filtering, categorizing and suppressing the results.
 Therefore CodeChecker is recommended in case you need any of the above features or just more customizability in general.
diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h
index 975bcdac5069b9..cfe3938f83847b 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -876,13 +876,13 @@ class CXXTypeidExpr : public Expr {
 
   /// Best-effort check if the expression operand refers to a most derived
   /// object. This is not a strong guarantee.
-  bool isMostDerived(ASTContext &Context) const;
+  bool isMostDerived(const ASTContext &Context) const;
 
   bool isTypeOperand() const { return Operand.is<TypeSourceInfo *>(); }
 
   /// Retrieves the type operand of this typeid() expression after
   /// various required adjustments (removing reference types, cv-qualifiers).
-  QualType getTypeOperand(ASTContext &Context) const;
+  QualType getTypeOperand(const ASTContext &Context) const;
 
   /// Retrieve source information for the type operand.
   TypeSourceInfo *getTypeOperandSourceInfo() const {
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index 83ce404add5f50..a2c0c60d43dd14 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -147,7 +147,7 @@ bool CXXTypeidExpr::isPotentiallyEvaluated() const {
   return false;
 }
 
-bool CXXTypeidExpr::isMostDerived(ASTContext &Context) const {
+bool CXXTypeidExpr::isMostDerived(const ASTContext &Context) const {
   assert(!isTypeOperand() && "Cannot call isMostDerived for typeid(type)");
   const Expr *E = getExprOperand()->IgnoreParenNoopCasts(Context);
   if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
@@ -159,7 +159,7 @@ bool CXXTypeidExpr::isMostDerived(ASTContext &Context) const {
   return false;
 }
 
-QualType CXXTypeidExpr::getTypeOperand(ASTContext &Context) const {
+QualType CXXTypeidExpr::getTypeOperand(const ASTContext &Context) const {
   assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
   Qualifiers Quals;
   return Context.getUnqualifiedArrayType(
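The CoverageMappingGen.cpp change just below stops collapsing constant-foldable conditions into a `0, 0` branch region and instead zeroes only the statically dead side. A small sketch of the intended effect, with the expected region taken from the branch-constfolded.cpp test updated later in this patch (counter notation as in those tests; everything else here is illustrative):

    bool fand_0(bool a) {
      return false && a;
      // Before: branch region for "false" = 0, 0          (both edges dropped)
      // After:  branch region for "false" = 0, (#0 - #1)  (only the true edge,
      //         which is statically dead, is pinned to zero; the false edge
      //         keeps its derived count)
    }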
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 577a0f571e16ea..0a63c50d44f4b7 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -1098,12 +1098,6 @@ struct CounterCoverageMappingBuilder
     return ExitCount;
   }
 
-  /// Determine whether the given condition can be constant folded.
-  bool ConditionFoldsToBool(const Expr *Cond) {
-    Expr::EvalResult Result;
-    return (Cond->EvaluateAsInt(Result, CVM.getCodeGenModule().getContext()));
-  }
-
   /// Create a Branch Region around an instrumentable condition for coverage
   /// and add it to the function's SourceRegions. A branch region tracks a
   /// "True" counter and a "False" counter for boolean expressions that
@@ -1133,13 +1127,15 @@ struct CounterCoverageMappingBuilder
       // Alternatively, we can prevent any optimization done via
       // constant-folding by ensuring that ConstantFoldsToSimpleInteger() in
       // CodeGenFunction.c always returns false, but that is very heavy-handed.
-      if (ConditionFoldsToBool(C))
-        popRegions(pushRegion(Counter::getZero(), getStart(C), getEnd(C),
-                              Counter::getZero(), BranchParams));
-      else
-        // Otherwise, create a region with the True counter and False counter.
-        popRegions(pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt,
-                              BranchParams));
+      Expr::EvalResult Result;
+      if (C->EvaluateAsInt(Result, CVM.getCodeGenModule().getContext())) {
+        if (Result.Val.getInt().getBoolValue())
+          FalseCnt = Counter::getZero();
+        else
+          TrueCnt = Counter::getZero();
+      }
+      popRegions(
+          pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt, BranchParams));
     }
   }
 
@@ -1153,12 +1149,12 @@ struct CounterCoverageMappingBuilder
 
   /// Create a Branch Region around a SwitchCase for code coverage
   /// and add it to the function's SourceRegions.
-  void createSwitchCaseRegion(const SwitchCase *SC, Counter TrueCnt,
-                              Counter FalseCnt) {
+  void createSwitchCaseRegion(const SwitchCase *SC, Counter TrueCnt) {
     // Push region onto RegionStack but immediately pop it (which adds it to
     // the function's SourceRegions) because it doesn't apply to any other
     // source other than the SwitchCase.
- popRegions(pushRegion(TrueCnt, getStart(SC), SC->getColonLoc(), FalseCnt)); + popRegions(pushRegion(TrueCnt, getStart(SC), SC->getColonLoc(), + Counter::getZero())); } /// Check whether a region with bounds \c StartLoc and \c EndLoc @@ -1870,24 +1866,16 @@ struct CounterCoverageMappingBuilder const SwitchCase *Case = S->getSwitchCaseList(); for (; Case; Case = Case->getNextSwitchCase()) { HasDefaultCase = HasDefaultCase || isa(Case); - CaseCountSum = - addCounters(CaseCountSum, getRegionCounter(Case), /*Simplify=*/false); - createSwitchCaseRegion( - Case, getRegionCounter(Case), - subtractCounters(ParentCount, getRegionCounter(Case))); + auto CaseCount = getRegionCounter(Case); + CaseCountSum = addCounters(CaseCountSum, CaseCount, /*Simplify=*/false); + createSwitchCaseRegion(Case, CaseCount); } - // Simplify is skipped while building the counters above: it can get really - // slow on top of switches with thousands of cases. Instead, trigger - // simplification by adding zero to the last counter. - CaseCountSum = addCounters(CaseCountSum, Counter::getZero()); - // If no explicit default case exists, create a branch region to represent // the hidden branch, which will be added later by the CodeGen. This region // will be associated with the switch statement's condition. if (!HasDefaultCase) { - Counter DefaultTrue = subtractCounters(ParentCount, CaseCountSum); - Counter DefaultFalse = subtractCounters(ParentCount, DefaultTrue); - createBranchRegion(S->getCond(), DefaultTrue, DefaultFalse); + Counter DefaultCount = subtractCounters(ParentCount, CaseCountSum); + createBranchRegion(S->getCond(), Counter::getZero(), DefaultCount); } } diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp index e9d2e3fe6d5ce0..412b379304b1e6 100644 --- a/clang/lib/Driver/ToolChains/Cuda.cpp +++ b/clang/lib/Driver/ToolChains/Cuda.cpp @@ -635,7 +635,7 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA, getNVPTXTargetFeatures(C.getDriver(), getToolChain().getTriple(), Args, Features); CmdArgs.push_back( - Args.MakeArgString("--plugin-opt=mattr=" + llvm::join(Features, ","))); + Args.MakeArgString("--plugin-opt=-mattr=" + llvm::join(Features, ","))); // Add paths for the default clang library path. SmallString<256> DefaultLibPath = diff --git a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp index 0cb96097415ea8..01b3be700b9fad 100644 --- a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp +++ b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp @@ -335,10 +335,9 @@ HeaderIncludes::HeaderIncludes(StringRef FileName, StringRef Code, // \p Offset: the start of the line following this include directive. void HeaderIncludes::addExistingInclude(Include IncludeToAdd, unsigned NextLineOffset) { - auto Iter = - ExistingIncludes.try_emplace(trimInclude(IncludeToAdd.Name)).first; - Iter->second.push_back(std::move(IncludeToAdd)); - auto &CurInclude = Iter->second.back(); + auto &Incs = ExistingIncludes[trimInclude(IncludeToAdd.Name)]; + Incs.push_back(std::move(IncludeToAdd)); + auto &CurInclude = Incs.back(); // The header name with quotes or angle brackets. // Only record the offset of current #include if we can insert after it. 
if (CurInclude.R.getOffset() <= MaxInsertOffset) { diff --git a/clang/test/CodeGenCUDA/bf16.cu b/clang/test/CodeGenCUDA/bf16.cu index 3c443420dbd36a..f794b83239f14a 100644 --- a/clang/test/CodeGenCUDA/bf16.cu +++ b/clang/test/CodeGenCUDA/bf16.cu @@ -25,7 +25,7 @@ __device__ void test_arg(__bf16 *out, __bf16 in) { __device__ __bf16 test_ret( __bf16 in) { // CHECK: ld.param.b16 %[[R:rs[0-9]+]], [_Z8test_retDF16b_param_0]; return in; -// CHECK: st.param.b16 [func_retval0+0], %[[R]] +// CHECK: st.param.b16 [func_retval0], %[[R]] // CHECK: ret; } @@ -35,15 +35,15 @@ __device__ __bf16 external_func( __bf16 in); // CHECK: .param .align 2 .b8 _Z9test_callDF16b_param_0[2] __device__ __bf16 test_call( __bf16 in) { // CHECK: ld.param.b16 %[[R:rs[0-9]+]], [_Z9test_callDF16b_param_0]; -// CHECK: st.param.b16 [param0+0], %[[R]]; +// CHECK: st.param.b16 [param0], %[[R]]; // CHECK: .param .align 2 .b8 retval0[2]; // CHECK: call.uni (retval0), // CHECK-NEXT: _Z13external_funcDF16b, // CHECK-NEXT: ( // CHECK-NEXT: param0 // CHECK-NEXT ); -// CHECK: ld.param.b16 %[[RET:rs[0-9]+]], [retval0+0]; +// CHECK: ld.param.b16 %[[RET:rs[0-9]+]], [retval0]; return external_func(in); -// CHECK: st.param.b16 [func_retval0+0], %[[RET]] +// CHECK: st.param.b16 [func_retval0], %[[RET]] // CHECK: ret; } diff --git a/clang/test/CoverageMapping/branch-constfolded.cpp b/clang/test/CoverageMapping/branch-constfolded.cpp index 1e7e32808e8382..a2ac1c1eacd28f 100644 --- a/clang/test/CoverageMapping/branch-constfolded.cpp +++ b/clang/test/CoverageMapping/branch-constfolded.cpp @@ -5,94 +5,94 @@ // CHECK-LABEL: _Z6fand_0b: bool fand_0(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:20 = M:3, C:2 - return false && a; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:15 = 0, 0 + return false && a; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:15 = 0, (#0 - #1) } // CHECK: Branch,File 0, [[@LINE-1]]:19 -> [[@LINE-1]]:20 = #2, (#1 - #2) // CHECK-LABEL: _Z6fand_1b: bool fand_1(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:19 = M:3, C:2 return a && true; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = #1, (#0 - #1) -} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:19 = 0, 0 +} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:19 = #2, 0 // CHECK-LABEL: _Z6fand_2bb: bool fand_2(bool a, bool b) {// MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:25 = M:4, C:3 - return false && a && b; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:15 = 0, 0 + return false && a && b; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:15 = 0, (#0 - #3) } // CHECK: Branch,File 0, [[@LINE-1]]:19 -> [[@LINE-1]]:20 = #4, (#3 - #4) // CHECK: Branch,File 0, [[@LINE-2]]:24 -> [[@LINE-2]]:25 = #2, (#1 - #2) // CHECK-LABEL: _Z6fand_3bb: bool fand_3(bool a, bool b) {// MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:24 = M:4, C:3 return a && true && b; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = #3, (#0 - #3) -} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:19 = 0, 0 +} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:19 = #4, 0 // CHECK: Branch,File 0, [[@LINE-2]]:23 -> [[@LINE-2]]:24 = #2, (#1 - #2) // CHECK-LABEL: _Z6fand_4bb: bool fand_4(bool a, bool b) {// MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:25 = M:4, C:3 return a && b && false; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = #3, (#0 - #3) } // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:16 = #4, (#3 - #4) - // CHECK: Branch,File 0, [[@LINE-2]]:20 -> [[@LINE-2]]:25 = 0, 0 + // 
CHECK: Branch,File 0, [[@LINE-2]]:20 -> [[@LINE-2]]:25 = 0, (#1 - #2) // CHECK-LABEL: _Z6fand_5b: bool fand_5(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:23 = M:3, C:2 - return false && true; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:15 = 0, 0 -} // CHECK: Branch,File 0, [[@LINE-1]]:19 -> [[@LINE-1]]:23 = 0, 0 + return false && true; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:15 = 0, (#0 - #1) +} // CHECK: Branch,File 0, [[@LINE-1]]:19 -> [[@LINE-1]]:23 = #2, 0 // CHECK-LABEL: _Z6fand_6b: bool fand_6(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:19 = M:3, C:2 - return true && a; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:14 = 0, 0 + return true && a; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:14 = #1, 0 } // CHECK: Branch,File 0, [[@LINE-1]]:18 -> [[@LINE-1]]:19 = #2, (#1 - #2) // CHECK-LABEL: _Z6fand_7b: bool fand_7(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:20 = M:3, C:2 return a && false; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = #1, (#0 - #1) -} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:20 = 0, 0 +} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:20 = 0, (#1 - #2) // CHECK-LABEL: _Z5for_0b: bool for_0(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:19 = M:3, C:2 - return true || a; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:14 = 0, 0 + return true || a; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:14 = (#0 - #1), 0 } // CHECK: Branch,File 0, [[@LINE-1]]:18 -> [[@LINE-1]]:19 = (#1 - #2), #2 // CHECK-LABEL: _Z5for_1b: bool for_1(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:20 = M:3, C:2 return a || false; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = (#0 - #1), #1 -} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:20 = 0, 0 +} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:20 = 0, #2 // CHECK-LABEL: _Z5for_2bb: bool for_2(bool a, bool b) {// MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:24 = M:4, C:3 - return true || a || b; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:14 = 0, 0 + return true || a || b; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:14 = (#0 - #3), 0 } // CHECK: Branch,File 0, [[@LINE-1]]:18 -> [[@LINE-1]]:19 = (#3 - #4), #4 // CHECK: Branch,File 0, [[@LINE-2]]:23 -> [[@LINE-2]]:24 = (#1 - #2), #2 // CHECK-LABEL: _Z5for_3bb: bool for_3(bool a, bool b) {// MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:25 = M:4, C:3 return a || false || b; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = (#0 - #3), #3 -} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:20 = 0, 0 +} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:20 = 0, #4 // CHECK: Branch,File 0, [[@LINE-2]]:24 -> [[@LINE-2]]:25 = (#1 - #2), #2 // CHECK-LABEL: _Z5for_4bb: bool for_4(bool a, bool b) {// MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:24 = M:4, C:3 return a || b || true; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = (#0 - #3), #3 } // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:16 = (#3 - #4), #4 - // CHECK: Branch,File 0, [[@LINE-2]]:20 -> [[@LINE-2]]:24 = 0, 0 + // CHECK: Branch,File 0, [[@LINE-2]]:20 -> [[@LINE-2]]:24 = (#1 - #2), 0 // CHECK-LABEL: _Z5for_5b: bool for_5(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:23 = M:3, C:2 - return true || false; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:14 = 0, 0 -} // CHECK: Branch,File 0, [[@LINE-1]]:18 -> [[@LINE-1]]:23 = 0, 0 + return true || false; // CHECK: 
Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:14 = (#0 - #1), 0 +} // CHECK: Branch,File 0, [[@LINE-1]]:18 -> [[@LINE-1]]:23 = 0, #2 // CHECK-LABEL: _Z5for_6b: bool for_6(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:20 = M:3, C:2 - return false || a; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:15 = 0, 0 + return false || a; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:15 = 0, #1 } // CHECK: Branch,File 0, [[@LINE-1]]:19 -> [[@LINE-1]]:20 = (#1 - #2), #2 // CHECK-LABEL: _Z5for_7b: bool for_7(bool a) { // MCDC: Decision,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:19 = M:3, C:2 return a || true; // CHECK: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = (#0 - #1), #1 -} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:19 = 0, 0 +} // CHECK: Branch,File 0, [[@LINE-1]]:15 -> [[@LINE-1]]:19 = (#1 - #2), 0 // CHECK-LABEL: _Z5for_8b: bool for_8(bool a) { // MCDC: Decision,File 0, [[@LINE+3]]:7 -> [[@LINE+3]]:20 = M:3, C:2 - // CHECK: Branch,File 0, [[@LINE+2]]:7 -> [[@LINE+2]]:11 = 0, 0 - // CHECK: Branch,File 0, [[@LINE+1]]:15 -> [[@LINE+1]]:20 = 0, 0 + // CHECK: Branch,File 0, [[@LINE+2]]:7 -> [[@LINE+2]]:11 = #2, 0 + // CHECK: Branch,File 0, [[@LINE+1]]:15 -> [[@LINE+1]]:20 = 0, (#2 - #3) if (true && false) return true; else diff --git a/clang/test/CoverageMapping/if.cpp b/clang/test/CoverageMapping/if.cpp index 445cdfc20e2aff..b6fd525e930f90 100644 --- a/clang/test/CoverageMapping/if.cpp +++ b/clang/test/CoverageMapping/if.cpp @@ -14,7 +14,7 @@ struct S { // CHECK-LABEL: _Z3foov: // CHECK-NEXT: [[@LINE+3]]:12 -> [[@LINE+8]]:2 = #0 // CHECK-NEXT: [[@LINE+3]]:15 -> [[@LINE+3]]:19 = #0 - // CHECK-NEXT: Branch,File 0, [[@LINE+2]]:15 -> [[@LINE+2]]:19 = 0, 0 + // CHECK-NEXT: Branch,File 0, [[@LINE+2]]:15 -> [[@LINE+2]]:19 = #2, 0 void foo() { // CHECK-NEXT: Gap,File 0, [[@LINE+1]]:21 -> [[@LINE+1]]:22 = #2 if (int j = true ? nop() // CHECK-NEXT: [[@LINE]]:22 -> [[@LINE]]:27 = #2 : nop(); // CHECK-NEXT: [[@LINE]]:22 -> [[@LINE]]:27 = (#0 - #2) @@ -168,7 +168,7 @@ int main() { // CHECK: File 0, [[@LINE]]:12 -> {{[0-9]+}}:2 = // GH-45481 S s; s.the_prop = 0? 
1 : 2; // CHECK-NEXT: File 0, [[@LINE]]:16 -> [[@LINE]]:17 = #0 - // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:16 -> [[@LINE-1]]:17 = 0, 0 + // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:16 -> [[@LINE-1]]:17 = 0, (#0 - #7) // CHECK-NEXT: Gap,File 0, [[@LINE-2]]:18 -> [[@LINE-2]]:19 = #7 // CHECK-NEXT: File 0, [[@LINE-3]]:19 -> [[@LINE-3]]:20 = #7 // CHECK-NEXT: File 0, [[@LINE-4]]:23 -> [[@LINE-4]]:24 = (#0 - #7) diff --git a/clang/test/CoverageMapping/macro-expansion.c b/clang/test/CoverageMapping/macro-expansion.c index ad71fb15eda423..4cd2c934371931 100644 --- a/clang/test/CoverageMapping/macro-expansion.c +++ b/clang/test/CoverageMapping/macro-expansion.c @@ -4,29 +4,29 @@ // CHECK: File 1, [[@LINE+7]]:12 -> [[@LINE+7]]:38 = #0 // CHECK-NEXT: File 1, [[@LINE+6]]:15 -> [[@LINE+6]]:28 = (#0 + #2) // CHECK-NEXT: File 1, [[@LINE+5]]:21 -> [[@LINE+5]]:22 = (#0 + #2) -// CHECK: Branch,File 1, [[@LINE+4]]:21 -> [[@LINE+4]]:22 = 0, 0 +// CHECK: Branch,File 1, [[@LINE+4]]:21 -> [[@LINE+4]]:22 = 0, ((#0 + #2) - #3) // CHECK-NEXT: File 1, [[@LINE+3]]:24 -> [[@LINE+3]]:26 = #3 // CHECK-NEXT: File 1, [[@LINE+2]]:36 -> [[@LINE+2]]:37 = (#0 + #2) -// CHECK-NEXT: Branch,File 1, [[@LINE+1]]:36 -> [[@LINE+1]]:37 = 0, 0 +// CHECK-NEXT: Branch,File 1, [[@LINE+1]]:36 -> [[@LINE+1]]:37 = 0, #0 #define M1 do { if (0) {} } while (0) // CHECK-NEXT: File 2, [[@LINE+12]]:15 -> [[@LINE+12]]:41 = #0 // CHECK-NEXT: File 2, [[@LINE+11]]:18 -> [[@LINE+11]]:31 = (#0 + #4) // CHECK-NEXT: File 2, [[@LINE+10]]:24 -> [[@LINE+10]]:25 = (#0 + #4) // CHECK: File 2, [[@LINE+9]]:27 -> [[@LINE+9]]:29 = #5 // CHECK-NEXT: File 2, [[@LINE+8]]:39 -> [[@LINE+8]]:40 = (#0 + #4) -// CHECK-NEXT: Branch,File 2, [[@LINE+7]]:39 -> [[@LINE+7]]:40 = 0, 0 +// CHECK-NEXT: Branch,File 2, [[@LINE+7]]:39 -> [[@LINE+7]]:40 = 0, #0 // CHECK-NEXT: File 3, [[@LINE+6]]:15 -> [[@LINE+6]]:41 = #0 // CHECK-NEXT: File 3, [[@LINE+5]]:18 -> [[@LINE+5]]:31 = (#0 + #6) // CHECK-NEXT: File 3, [[@LINE+4]]:24 -> [[@LINE+4]]:25 = (#0 + #6) // CHECK: File 3, [[@LINE+3]]:27 -> [[@LINE+3]]:29 = #7 // CHECK-NEXT: File 3, [[@LINE+2]]:39 -> [[@LINE+2]]:40 = (#0 + #6) -// CHECK-NEXT: Branch,File 3, [[@LINE+1]]:39 -> [[@LINE+1]]:40 = 0, 0 +// CHECK-NEXT: Branch,File 3, [[@LINE+1]]:39 -> [[@LINE+1]]:40 = 0, #0 #define M2(x) do { if (x) {} } while (0) // CHECK-NEXT: File 4, [[@LINE+5]]:15 -> [[@LINE+5]]:38 = #0 // CHECK-NEXT: File 4, [[@LINE+4]]:18 -> [[@LINE+4]]:28 = (#0 + #8) // CHECK-NEXT: Expansion,File 4, [[@LINE+3]]:20 -> [[@LINE+3]]:22 = (#0 + #8) // CHECK-NEXT: File 4, [[@LINE+2]]:36 -> [[@LINE+2]]:37 = (#0 + #8) -// CHECK-NEXT: Branch,File 4, [[@LINE+1]]:36 -> [[@LINE+1]]:37 = 0, 0 +// CHECK-NEXT: Branch,File 4, [[@LINE+1]]:36 -> [[@LINE+1]]:37 = 0, #0 #define M3(x) do { M2(x); } while (0) // CHECK-NEXT: File 5, [[@LINE+4]]:15 -> [[@LINE+4]]:27 = #0 // CHECK-NEXT: File 5, [[@LINE+3]]:16 -> [[@LINE+3]]:19 = #0 diff --git a/clang/test/CoverageMapping/mcdc-scratch-space.c b/clang/test/CoverageMapping/mcdc-scratch-space.c index a263e9b688faed..60e456948a5182 100644 --- a/clang/test/CoverageMapping/mcdc-scratch-space.c +++ b/clang/test/CoverageMapping/mcdc-scratch-space.c @@ -3,7 +3,7 @@ // CHECK: builtin_macro0: int builtin_macro0(int a) { // CHECK: Decision,File 0, [[@LINE+1]]:11 -> [[@LINE+2]]:15 = M:3, C:2 - return (__LINE__ // CHECK: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:11 = 0, 0 [1,2,0] + return (__LINE__ // CHECK: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:11 = #1, 0 [1,2,0] && a); // CHECK: Branch,File 0, [[@LINE]]:14 -> [[@LINE]]:15 = #2, (#1 - #2) [2,0,0] } @@ 
-11,7 +11,7 @@ int builtin_macro0(int a) { int builtin_macro1(int a) { // CHECK: Decision,File 0, [[@LINE+1]]:11 -> [[@LINE+2]]:22 = M:3, C:2 return (a // CHECK: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:12 = (#0 - #1), #1 [1,0,2] - || __LINE__); // CHECK: Branch,File 0, [[@LINE]]:14 -> [[@LINE]]:14 = 0, 0 [2,0,0] + || __LINE__); // CHECK: Branch,File 0, [[@LINE]]:14 -> [[@LINE]]:14 = (#1 - #2), 0 [2,0,0] } #define PRE(x) pre_##x diff --git a/clang/test/CoverageMapping/mcdc-system-headers.cpp b/clang/test/CoverageMapping/mcdc-system-headers.cpp index ae26ed5fe469f2..cb1c8743c36e82 100644 --- a/clang/test/CoverageMapping/mcdc-system-headers.cpp +++ b/clang/test/CoverageMapping/mcdc-system-headers.cpp @@ -17,10 +17,10 @@ int func0(int a) { // CHECK: Decision,File 0, [[@LINE+3]]:11 -> [[@LINE+3]]:21 = M:3, C:2 // W_SYS: Expansion,File 0, [[@LINE+2]]:11 -> [[@LINE+2]]:16 = #0 (Expanded file = 1) - // X_SYS: Branch,File 0, [[@LINE+1]]:11 -> [[@LINE+1]]:11 = 0, 0 [1,2,0] + // X_SYS: Branch,File 0, [[@LINE+1]]:11 -> [[@LINE+1]]:11 = #1, 0 [1,2,0] return (CONST && a); // CHECK: Branch,File 0, [[@LINE-1]]:20 -> [[@LINE-1]]:21 = #2, (#1 - #2) [2,0,0] - // W_SYS: Branch,File 1, [[@LINE-16]]:15 -> [[@LINE-16]]:17 = 0, 0 [1,2,0] + // W_SYS: Branch,File 1, [[@LINE-16]]:15 -> [[@LINE-16]]:17 = #1, 0 [1,2,0] } // CHECK: _Z5func1ii: diff --git a/clang/test/CoverageMapping/switch.cpp b/clang/test/CoverageMapping/switch.cpp index b47c0e80099527..a1fee644faaf0e 100644 --- a/clang/test/CoverageMapping/switch.cpp +++ b/clang/test/CoverageMapping/switch.cpp @@ -2,13 +2,13 @@ // CHECK: foo void foo(int i) { // CHECK-NEXT: File 0, [[@LINE]]:17 -> [[@LINE+11]]:2 = #0 - switch(i) { // CHECK-NEXT: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = ((#0 - #2) - #3), (#2 + #3) + switch(i) { // CHECK-NEXT: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = 0, ((#0 - #2) - #3) // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:13 -> [[@LINE+5]]:10 = 0 case 1: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:11 = #2 - return; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #2, (#0 - #2) + return; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #2, 0 // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:12 -> [[@LINE+1]]:3 = 0 case 2: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:10 = #3 - break; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #3, (#0 - #3) + break; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #3, 0 } // CHECK-NEXT: Gap,File 0, [[@LINE]]:4 -> [[@LINE+1]]:3 = #1 int x = 0; // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:2 = #1 @@ -18,24 +18,24 @@ int nop() { return 0; } // CHECK: bar void bar(int i) { // CHECK-NEXT: File 0, [[@LINE]]:17 -> [[@LINE+21]]:2 = #0 - switch (i) // CHECK-NEXT: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:12 = #0, 0 + switch (i) // CHECK-NEXT: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:12 = 0, #0 ; // CHECK-NEXT: File 0, [[@LINE]]:5 -> [[@LINE]]:6 = 0 switch (i) { // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+17]]:2 = #1 - } // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:11 -> [[@LINE-1]]:12 = #1, 0 + } // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:11 -> [[@LINE-1]]:12 = 0, #1 switch (i) // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+14]]:2 = #2 - nop(); // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:11 -> [[@LINE-1]]:12 = #2, 0 + nop(); // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:11 -> [[@LINE-1]]:12 = 0, #2 // CHECK-NEXT: File 0, [[@LINE-1]]:5 -> [[@LINE-1]]:10 = 0 switch (i) // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+11]]:2 = #3 - case 1: // CHECK-NEXT: Branch,File 
0, [[@LINE-1]]:11 -> [[@LINE-1]]:12 = (#3 - #5), #5 + case 1: // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:11 -> [[@LINE-1]]:12 = 0, (#3 - #5) // CHECK-NEXT: File 0, [[@LINE-1]]:3 -> [[@LINE+1]]:10 = #5 - nop(); // CHECK-NEXT: Branch,File 0, [[@LINE-2]]:3 -> [[@LINE-2]]:9 = #5, (#3 - #5) + nop(); // CHECK-NEXT: Branch,File 0, [[@LINE-2]]:3 -> [[@LINE-2]]:9 = #5, 0 // CHECK-NEXT: File 0, [[@LINE+1]]:3 -> [[@LINE+7]]:2 = #4 - switch (i) { // CHECK-NEXT: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:12 = (#4 - #7), #7 + switch (i) { // CHECK-NEXT: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:12 = 0, (#4 - #7) nop(); // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:14 -> [[@LINE+2]]:10 = 0 case 1: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:10 = #7 - nop(); // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #7, (#4 - #7) + nop(); // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #7, 0 } nop(); // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:2 = #6 } @@ -44,7 +44,7 @@ void bar(int i) { // CHECK-NEXT: File 0, [[@LINE]]:17 -> [[@LINE+21]]:2 = #0 void baz() { // CHECK-NEXT: File 0, [[@LINE]]:12 -> [[@LINE+5]]:2 = #0 switch (int i = true ? nop() // CHECK: [[@LINE]]:26 -> [[@LINE]]:31 = #2 : nop(); // CHECK-NEXT: [[@LINE]]:26 -> [[@LINE]]:31 = (#0 - #2) - i) {} // CHECK-NEXT: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:12 = #0, 0 + i) {} // CHECK-NEXT: Branch,File 0, [[@LINE]]:11 -> [[@LINE]]:12 = 0, #0 nop(); // CHECK-NEXT: [[@LINE]]:3 -> [[@LINE+1]]:2 = #1 } @@ -53,35 +53,35 @@ int main() { // CHECK-NEXT: File 0, [[@LINE]]:12 -> [[@LINE+39]]:2 = #0 int i = 0; switch(i) { // CHECK-NEXT: Gap,File 0, [[@LINE]]:13 -> [[@LINE+8]]:10 = 0 case 0: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+2]]:10 = #2 - i = 1; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #2, (#0 - #2) + i = 1; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #2, 0 break; // CHECK-NEXT: Gap,File 0, [[@LINE]]:11 -> [[@LINE+1]]:3 = 0 case 1: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+2]]:10 = #3 - i = 2; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #3, (#0 - #3) + i = 2; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #3, 0 break; // CHECK-NEXT: Gap,File 0, [[@LINE]]:11 -> [[@LINE+1]]:3 = 0 default: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:10 = #4 - break; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #4, (#0 - #4) + break; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #4, 0 } // CHECK-NEXT: Gap,File 0, [[@LINE]]:4 -> [[@LINE+1]]:3 = #1 switch(i) { // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+27]]:2 = #1 case 0: // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:13 -> [[@LINE+7]]:10 = 0 i = 1; // CHECK-NEXT: File 0, [[@LINE-1]]:3 -> [[@LINE+1]]:10 = #6 - break; // CHECK-NEXT: Branch,File 0, [[@LINE-2]]:3 -> [[@LINE-2]]:9 = #6, (#1 - #6) + break; // CHECK-NEXT: Branch,File 0, [[@LINE-2]]:3 -> [[@LINE-2]]:9 = #6, 0 // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:11 -> [[@LINE+1]]:3 = 0 case 1: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+3]]:10 = #7 - i = 2; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #7, (#1 - #7) + i = 2; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #7, 0 default: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:10 = (#7 + #8) - break; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #8, (#1 - #8) + break; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #8, 0 } // CHECK-NEXT: Gap,File 0, [[@LINE]]:4 -> [[@LINE+2]]:3 = #5 // CHECK-NEXT: File 0, 
[[@LINE+1]]:3 -> [[@LINE+17]]:2 = #5 - switch(i) { // CHECK-NEXT: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = ((((#5 - #10) - #11) - #12) - #13), (((#10 + #11) + #12) + #13) + switch(i) { // CHECK-NEXT: Branch,File 0, [[@LINE]]:10 -> [[@LINE]]:11 = 0, ((((#5 - #10) - #11) - #12) - #13) // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:13 -> [[@LINE+8]]:11 = 0 case 1: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+7]]:11 = #10 - // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #10, (#5 - #10) + // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #10, 0 case 2: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+5]]:11 = (#10 + #11) - i = 11; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #11, (#5 - #11) + i = 11; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #11, 0 case 3: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+3]]:11 = ((#10 + #11) + #12) - // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #12, (#5 - #12) + // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #12, 0 case 4: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:11 = (((#10 + #11) + #12) + #13) - i = 99; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #13, (#5 - #13) + i = 99; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #13, 0 } foo(1); // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+3]]:11 = #9 @@ -95,10 +95,10 @@ int pr44011(int i) { // CHECK-NEXT: File 0, [[@LINE]]:20 -> {{.*}}:2 = #0 switch (i) { // CHECK-NEXT: Gap,File 0, [[@LINE]]:14 -> [[@LINE+6]]:13 = 0 case 1: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:13 = #2 - return 0; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #2, (#0 - #2) + return 0; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #2, 0 // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:14 -> [[@LINE+1]]:3 = 0 default: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:13 = #3 - return 1; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #3, (#0 - #3) + return 1; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #3, 0 } } // A region for counter #1 is missing due to the missing return. @@ -106,17 +106,17 @@ int pr44011(int i) { // CHECK-NEXT: File 0, [[@LINE]]:20 -> {{.*}}:2 = #0 // FIXME: End location for "case 1" shouldn't point at the end of the switch. 
// CHECK: fallthrough int fallthrough(int i) { // CHECK-NEXT: File 0, [[@LINE]]:24 -> [[@LINE+14]]:2 = #0 - // CHECK-NEXT: Branch,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:11 = ((((#0 - #2) - #3) - #4) - #5), (((#2 + #3) + #4) + #5) + // CHECK-NEXT: Branch,File 0, [[@LINE+1]]:10 -> [[@LINE+1]]:11 = 0, ((((#0 - #2) - #3) - #4) - #5) switch(i) { // CHECK-NEXT: Gap,File 0, [[@LINE]]:13 -> [[@LINE+10]]:10 = 0 case 1: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+9]]:10 = #2 - i = 23; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #2, (#0 - #2) + i = 23; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #2, 0 case 2: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+2]]:10 = (#2 + #3) - i = 11; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #3, (#0 - #3) + i = 11; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #3, 0 break; // CHECK-NEXT: Gap,File 0, [[@LINE]]:11 -> [[@LINE+1]]:3 = 0 case 3: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+4]]:10 = #4 - // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #4, (#0 - #4) + // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #4, 0 case 4: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+2]]:10 = (#4 + #5) - i = 99; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #5, (#0 - #5) + i = 99; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #5, 0 break; } } @@ -126,12 +126,12 @@ void abort(void) __attribute((noreturn)); int noret(int x) { // CHECK-NEXT: File 0, [[@LINE]]:18 -> [[@LINE+11]]:2 switch (x) { // CHECK-NEXT: Gap,File 0, [[@LINE]]:14 -> [[@LINE+8]]:14 = 0 default: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:12 - abort(); // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #2, (#0 - #2) + abort(); // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #2, 0 // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:13 -> [[@LINE+1]]:3 = 0 case 1: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:13 - return 5; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #3, (#0 - #3) + return 5; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #3, 0 // CHECK-NEXT: Gap,File 0, [[@LINE-1]]:14 -> [[@LINE+1]]:3 = 0 case 2: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+1]]:14 - return 10; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #4, (#0 - #4) + return 10; // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #4, 0 } } diff --git a/clang/test/CoverageMapping/switchmacro.c b/clang/test/CoverageMapping/switchmacro.c index 4c98cc7d9403a4..0696e7490cdf99 100644 --- a/clang/test/CoverageMapping/switchmacro.c +++ b/clang/test/CoverageMapping/switchmacro.c @@ -6,7 +6,7 @@ int foo(int i) { // CHECK-NEXT: File 0, [[@LINE]]:16 -> {{[0-9]+}}:2 = #0 switch (i) { // CHECK-NEXT: Gap,File 0, [[@LINE]]:14 -> {{[0-9]+}}:11 = 0 default: // CHECK-NEXT: File 0, [[@LINE]]:3 -> {{[0-9]+}}:11 = #2 - // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #2, (#0 - #2) + // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:10 = #2, 0 if (i == 1) // CHECK-NEXT: File 0, [[@LINE]]:9 -> [[@LINE]]:15 = #2 // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:9 -> [[@LINE-1]]:15 = #3, (#2 - #3) return 0; // CHECK: File 0, [[@LINE]]:7 -> [[@LINE]]:15 = #3 @@ -15,7 +15,7 @@ int foo(int i) { // CHECK-NEXT: File 0, [[@LINE]]:16 -> {{[0-9]+}}:2 = #0 // CHECK-NEXT: File 0, [[@LINE+1]]:8 -> {{[0-9]+}}:11 = (#2 - #3) FOO(1); case 0: // CHECK-NEXT: File 0, [[@LINE]]:3 -> [[@LINE+2]]:13 = ((#2 + #4) - #3) - // CHECK-NEXT: Branch,File 0, 
[[@LINE-1]]:3 -> [[@LINE-1]]:9 = #4, (#0 - #4)
+                 // CHECK-NEXT: Branch,File 0, [[@LINE-1]]:3 -> [[@LINE-1]]:9 = #4, 0
     return 2; // CHECK-NEXT: Gap,File 0, [[@LINE]]:14 -> [[@LINE+4]]:3 = 0
               // CHECK-NEXT: Expansion,File 0, [[@LINE+2]]:3 -> [[@LINE+2]]:6 = 0
diff --git a/clang/test/Driver/cuda-cross-compiling.c b/clang/test/Driver/cuda-cross-compiling.c
index 54c291fac66ffd..126e9e9fc83d57 100644
--- a/clang/test/Driver/cuda-cross-compiling.c
+++ b/clang/test/Driver/cuda-cross-compiling.c
@@ -104,4 +104,4 @@
 // RUN: %clang -target nvptx64-nvidia-cuda --cuda-feature=+ptx63 -march=sm_52 -### %s 2>&1 \
 // RUN:   | FileCheck -check-prefix=FEATURE %s
 
-// FEATURE: clang-nvlink-wrapper{{.*}}"--plugin-opt=mattr=+ptx63"
+// FEATURE: clang-nvlink-wrapper{{.*}}"--plugin-opt=-mattr=+ptx63"
diff --git a/clang/test/Format/dry-run-warning.cpp b/clang/test/Format/dry-run-warning.cpp
new file mode 100644
index 00000000000000..4b85de40b8cd08
--- /dev/null
+++ b/clang/test/Format/dry-run-warning.cpp
@@ -0,0 +1,22 @@
+// RUN: echo '{' > %t.json
+// RUN: echo '  "married": true' >> %t.json
+// RUN: echo '}' >> %t.json
+
+// RUN: clang-format -n -style=LLVM %t.json 2>&1 | FileCheck %s -allow-empty
+
+// RUN: clang-format -n -style=LLVM < %t.json 2>&1 \
+// RUN:   | FileCheck %s -check-prefix=CHECK2 -strict-whitespace
+
+// RUN: echo '{' > %t.json
+// RUN: echo '  "married" : true' >> %t.json
+// RUN: echo '}' >> %t.json
+
+// RUN: clang-format -n -style=LLVM < %t.json 2>&1 | FileCheck %s -allow-empty
+
+// RUN: clang-format -n -style=LLVM %t.json 2>&1 \
+// RUN:   | FileCheck %s -check-prefix=CHECK2 -strict-whitespace
+
+// RUN: rm %t.json
+
+// CHECK-NOT: warning
+// CHECK2: warning: code should be clang-formatted
diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp
index 6aed46328f3469..108db7204aa68a 100644
--- a/clang/tools/clang-format/ClangFormat.cpp
+++ b/clang/tools/clang-format/ClangFormat.cpp
@@ -351,9 +351,6 @@ static void outputReplacementsXML(const Replacements &Replaces) {
 static bool emitReplacementWarnings(const Replacements &Replaces,
                                     StringRef AssumedFileName,
                                     const std::unique_ptr<llvm::MemoryBuffer> &Code) {
-  if (Replaces.empty())
-    return false;
-
   unsigned Errors = 0;
   if (WarnFormat && !NoWarnFormat) {
     SourceMgr Mgr;
@@ -490,9 +487,11 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) {
   Replacements Replaces = sortIncludes(*FormatStyle, Code->getBuffer(), Ranges,
                                        AssumedFileName, &CursorPosition);
 
+  const bool IsJson = FormatStyle->isJson();
+
   // To format JSON insert a variable to trick the code into thinking it's
   // JavaScript.
-  if (FormatStyle->isJson() && !FormatStyle->DisableFormat) {
+  if (IsJson && !FormatStyle->DisableFormat) {
     auto Err = Replaces.add(tooling::Replacement(
         tooling::Replacement(AssumedFileName, 0, 0, "x = ")));
     if (Err)
@@ -510,9 +509,11 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) {
   Replacements FormatChanges =
       reformat(*FormatStyle, *ChangedCode, Ranges, AssumedFileName, &Status);
   Replaces = Replaces.merge(FormatChanges);
-  if (OutputXML || DryRun) {
-    if (DryRun)
-      return emitReplacementWarnings(Replaces, AssumedFileName, Code);
+  if (DryRun) {
+    return Replaces.size() > (IsJson ? 1 : 0) &&
+           emitReplacementWarnings(Replaces, AssumedFileName, Code);
+  }
+  if (OutputXML) {
     outputXML(Replaces, FormatChanges, Status, Cursor, CursorPosition);
   } else {
     IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
diff --git a/clang/tools/clang-format/clang-format.el b/clang/tools/clang-format/clang-format.el
index f3da5415f8672b..fb943b7b722f8a 100644
--- a/clang/tools/clang-format/clang-format.el
+++ b/clang/tools/clang-format/clang-format.el
@@ -70,6 +70,20 @@ in such buffers."
   :safe #'stringp)
 (make-variable-buffer-local 'clang-format-fallback-style)
 
+(defcustom clang-format-on-save-p 'clang-format-on-save-check-config-exists
+  "Only reformat on save if this function returns non-nil.
+
+You may wish to choose one of the following options:
+- `always': To always format on save.
+- `clang-format-on-save-check-config-exists':
+  Only reformat when \".clang-format\" exists.
+
+Otherwise you can set this to a user defined function."
+  :group 'clang-format
+  :type 'function
+  :risky t)
+(make-variable-buffer-local 'clang-format-on-save-p)
+
 (defun clang-format--extract (xml-node)
   "Extract replacements and cursor information from XML-NODE."
   (unless (and (listp xml-node) (eq (xml-node-name xml-node) 'replacements))
@@ -217,5 +231,48 @@ the function `buffer-file-name'."
 ;;;###autoload
 (defalias 'clang-format 'clang-format-region)
 
+;; Format on save minor mode.
+
+(defun clang-format--on-save-buffer-hook ()
+  "The hook to run on buffer saving to format the buffer."
+  ;; Demote errors as this is user configurable, we can't be sure it won't error.
+  (when (with-demoted-errors "clang-format: Error %S"
+          (funcall clang-format-on-save-p))
+    (clang-format-buffer))
+  ;; Continue to save.
+  nil)
+
+(defun clang-format--on-save-enable ()
+  "Enable the minor mode."
+  (add-hook 'before-save-hook #'clang-format--on-save-buffer-hook nil t))
+
+(defun clang-format--on-save-disable ()
+  "Disable the minor mode."
+  (remove-hook 'before-save-hook #'clang-format--on-save-buffer-hook t))
+
+;; Default value for `clang-format-on-save-p'.
+(defun clang-format-on-save-check-config-exists ()
+  "Return non-nil when `.clang-format' is found in a parent directory."
+  ;; Unlikely but possible this is nil.
+  (let ((filepath buffer-file-name))
+    (cond
+     (filepath
+      (not (null (locate-dominating-file (file-name-directory filepath) ".clang-format"))))
+     (t
+      nil))))
+
+;;;###autoload
+(define-minor-mode clang-format-on-save-mode
+  "Clang-format on save minor mode."
+  :global nil
+  :lighter ""
+  :keymap nil
+
+  (cond
+   (clang-format-on-save-mode
+    (clang-format--on-save-enable))
+   (t
+    (clang-format--on-save-disable))))
+
 (provide 'clang-format)
 ;;; clang-format.el ends here
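A hedged sketch of how a user would wire up the new minor mode from their init file (the mode, predicate variable, and default predicate are the ones defined above; the `always` function assumes Emacs 28+):

    ;; Enable format-on-save for C++ buffers; with the default predicate it
    ;; only fires when a .clang-format file is found in a parent directory.
    (add-hook 'c++-mode-hook #'clang-format-on-save-mode)

    ;; Or format unconditionally on save:
    ;; (setq clang-format-on-save-p #'always)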
(probably fine for now) diff --git a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.h b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.h index 4132db672e394d..1edded090f8ce1 100644 --- a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.h +++ b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.h @@ -12,6 +12,7 @@ #include "flang/Optimizer/Dialect/CUF/Attributes/CUFAttr.h" #include "flang/Optimizer/Dialect/CUF/CUFDialect.h" #include "flang/Optimizer/Dialect/FIRType.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/IR/OpDefinition.h" #define GET_OP_CLASSES diff --git a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td index 98d1ef529738c7..d34a8af0394a44 100644 --- a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td +++ b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td @@ -18,6 +18,7 @@ include "flang/Optimizer/Dialect/CUF/CUFDialect.td" include "flang/Optimizer/Dialect/CUF/Attributes/CUFAttr.td" include "flang/Optimizer/Dialect/FIRTypes.td" include "flang/Optimizer/Dialect/FIRAttr.td" +include "mlir/Dialect/LLVMIR/LLVMOpBase.td" include "mlir/Interfaces/LoopLikeInterface.td" include "mlir/IR/BuiltinAttributes.td" @@ -288,15 +289,30 @@ def cuf_KernelOp : cuf_Op<"kernel", [AttrSizedOperandSegments, let hasVerifier = 1; } +def cuf_RegisterModuleOp : cuf_Op<"register_module", []> { + let summary = "Register a CUDA module"; + + let arguments = (ins + SymbolRefAttr:$name + ); + + let assemblyFormat = [{ + $name attr-dict `->` type($modulePtr) + }]; + + let results = (outs LLVM_AnyPointer:$modulePtr); +} + def cuf_RegisterKernelOp : cuf_Op<"register_kernel", []> { let summary = "Register a CUDA kernel"; let arguments = (ins - SymbolRefAttr:$name + SymbolRefAttr:$name, + LLVM_AnyPointer:$modulePtr ); let assemblyFormat = [{ - $name attr-dict + $name `(` $modulePtr `:` type($modulePtr) `)`attr-dict }]; let hasVerifier = 1; diff --git a/flang/include/flang/Optimizer/Dialect/CUF/CUFToLLVMIRTranslation.h b/flang/include/flang/Optimizer/Dialect/CUF/CUFToLLVMIRTranslation.h new file mode 100644 index 00000000000000..f3edb7fca649d0 --- /dev/null +++ b/flang/include/flang/Optimizer/Dialect/CUF/CUFToLLVMIRTranslation.h @@ -0,0 +1,29 @@ +//===- CUFToLLVMIRTranslation.h - CUF Dialect to LLVM IR --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This provides registration calls for GPU dialect to LLVM IR translation. +// +//===----------------------------------------------------------------------===// + +#ifndef FLANG_OPTIMIZER_DIALECT_CUF_GPUTOLLVMIRTRANSLATION_H_ +#define FLANG_OPTIMIZER_DIALECT_CUF_GPUTOLLVMIRTRANSLATION_H_ + +namespace mlir { +class DialectRegistry; +class MLIRContext; +} // namespace mlir + +namespace cuf { + +/// Register the CUF dialect and the translation from it to the LLVM IR in +/// the given registry. 
diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.td b/flang/include/flang/Optimizer/OpenMP/Passes.td
index 1c0ce08f5b4838..c070bc22ff20cc 100644
--- a/flang/include/flang/Optimizer/OpenMP/Passes.td
+++ b/flang/include/flang/Optimizer/OpenMP/Passes.td
@@ -22,6 +22,19 @@ def MapInfoFinalizationPass
   let dependentDialects = ["mlir::omp::OpenMPDialect"];
 }
 
+def MapsForPrivatizedSymbolsPass
+    : Pass<"omp-maps-for-privatized-symbols", "mlir::func::FuncOp"> {
+  let summary = "Creates MapInfoOp instances for privatized symbols when needed";
+  let description = [{
+    Adds omp.map.info operations for privatized symbols on omp.target ops.
+    In certain situations, such as when an allocatable is privatized, its
+    descriptor is needed in the alloc region of the privatizer. This results
+    in the use of the descriptor inside the target region. As such, the
+    descriptor then needs to be mapped. This pass adds such MapInfoOp
+    operations.
+  }];
+  let dependentDialects = ["mlir::omp::OpenMPDialect"];
+}
+
 def MarkDeclareTargetPass
     : Pass<"omp-mark-declare-target", "mlir::ModuleOp"> {
   let summary = "Marks all functions called by an OpenMP declare target function as declare target";
diff --git a/flang/include/flang/Optimizer/Support/InitFIR.h b/flang/include/flang/Optimizer/Support/InitFIR.h
index 04a5dd323e5508..1c61c367199923 100644
--- a/flang/include/flang/Optimizer/Support/InitFIR.h
+++ b/flang/include/flang/Optimizer/Support/InitFIR.h
@@ -14,6 +14,7 @@
 #define FORTRAN_OPTIMIZER_SUPPORT_INITFIR_H
 
 #include "flang/Optimizer/Dialect/CUF/CUFDialect.h"
+#include "flang/Optimizer/Dialect/CUF/CUFToLLVMIRTranslation.h"
 #include "flang/Optimizer/Dialect/FIRDialect.h"
 #include "flang/Optimizer/HLFIR/HLFIRDialect.h"
 #include "mlir/Conversion/Passes.h"
@@ -61,6 +62,7 @@ inline void addFIRExtensions(mlir::DialectRegistry &registry,
   if (addFIRInlinerInterface)
     addFIRInlinerExtension(registry);
   addFIRToLLVMIRExtension(registry);
+  cuf::registerCUFDialectTranslation(registry);
 }
 
 inline void loadNonCodegenDialects(mlir::MLIRContext &context) {
diff --git a/flang/include/flang/Optimizer/Transforms/CufOpConversion.h b/flang/include/flang/Optimizer/Transforms/CufOpConversion.h
index 79ce4ac5c6cbc0..0a71cdfddec1ab 100644
--- a/flang/include/flang/Optimizer/Transforms/CufOpConversion.h
+++ b/flang/include/flang/Optimizer/Transforms/CufOpConversion.h
@@ -18,12 +18,14 @@ class LLVMTypeConverter;
 
 namespace mlir {
 class DataLayout;
+class SymbolTable;
 }
 
 namespace cuf {
 
 void populateCUFToFIRConversionPatterns(const fir::LLVMTypeConverter &converter,
                                         mlir::DataLayout &dl,
                                         const mlir::SymbolTable &symtab,
                                         mlir::RewritePatternSet &patterns);
 
 } // namespace cuf
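
The new `const mlir::SymbolTable &` parameter lets the conversion patterns resolve fir.global symbols through one table built up front instead of walking the module on every match. A hedged caller-side sketch, with `typeConverter` and `dl` standing in for the pass's existing setup:

    // Sketch: building the pattern set with the new parameter.
    mlir::ModuleOp module = getOperation();
    mlir::SymbolTable symtab(module);  // built once, shared by all patterns
    mlir::RewritePatternSet patterns(module.getContext());
    cuf::populateCUFToFIRConversionPatterns(typeConverter, *dl, symtab,
                                            patterns);
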
diff --git a/flang/include/flang/Runtime/CUDA/registration.h b/flang/include/flang/Runtime/CUDA/registration.h
new file mode 100644
index 00000000000000..cbe202c4d23e0d
--- /dev/null
+++ b/flang/include/flang/Runtime/CUDA/registration.h
@@ -0,0 +1,28 @@
+//===-- include/flang/Runtime/CUDA/registration.h --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FORTRAN_RUNTIME_CUDA_REGISTRATION_H_
+#define FORTRAN_RUNTIME_CUDA_REGISTRATION_H_
+
+#include "flang/Runtime/entry-names.h"
+#include <cstddef>
+
+namespace Fortran::runtime::cuda {
+
+extern "C" {
+
+/// Register a CUDA module.
+void *RTDECL(CUFRegisterModule)(void *data);
+
+/// Register a device function.
+void RTDECL(CUFRegisterFunction)(void **module, const char *fct);
+
+} // extern "C"
+
+} // namespace Fortran::runtime::cuda
+#endif // FORTRAN_RUNTIME_CUDA_REGISTRATION_H_
diff --git a/flang/lib/Optimizer/Dialect/CUF/CMakeLists.txt b/flang/lib/Optimizer/Dialect/CUF/CMakeLists.txt
index b2221199995d58..5d4bd0785971f7 100644
--- a/flang/lib/Optimizer/Dialect/CUF/CMakeLists.txt
+++ b/flang/lib/Optimizer/Dialect/CUF/CMakeLists.txt
@@ -3,6 +3,7 @@ add_subdirectory(Attributes)
 add_flang_library(CUFDialect
   CUFDialect.cpp
   CUFOps.cpp
+  CUFToLLVMIRTranslation.cpp
 
   DEPENDS
   MLIRIR
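
Taken together, the two entry points give registration a simple C-level shape: register the module once, then register each kernel against the returned handle. A hedged sketch of a caller (the `_FortranA` prefix assumes the default RTDECL name expansion; the fatbin symbol and kernel name are purely illustrative):

    // Sketch: how a generated constructor would use the runtime API above.
    extern "C" void *_FortranACUFRegisterModule(void *);
    extern "C" void _FortranACUFRegisterFunction(void **, const char *);
    extern "C" char kFatbin[];  // hypothetical embedded fatbin blob

    static void registerDeviceCode() {
      void *mod = _FortranACUFRegisterModule(kFatbin);
      _FortranACUFRegisterFunction(reinterpret_cast<void **>(mod),
                                   "_QPkernel");
    }
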
+//
+//===----------------------------------------------------------------------===//
+
+#include "flang/Optimizer/Dialect/CUF/CUFToLLVMIRTranslation.h"
+#include "flang/Optimizer/Dialect/CUF/CUFOps.h"
+#include "flang/Runtime/entry-names.h"
+#include "mlir/Target/LLVMIR/LLVMTranslationInterface.h"
+#include "mlir/Target/LLVMIR/ModuleTranslation.h"
+#include "llvm/ADT/TypeSwitch.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/FormatVariadic.h"
+
+using namespace mlir;
+
+namespace {
+
+LogicalResult registerModule(cuf::RegisterModuleOp op,
+                             llvm::IRBuilderBase &builder,
+                             LLVM::ModuleTranslation &moduleTranslation) {
+  std::string binaryIdentifier =
+      op.getName().getLeafReference().str() + "_bin_cst";
+  llvm::Module *module = moduleTranslation.getLLVMModule();
+  llvm::Value *binary = module->getGlobalVariable(binaryIdentifier, true);
+  if (!binary)
+    return op.emitError() << "Couldn't find the binary: " << binaryIdentifier;
+
+  llvm::Type *ptrTy = builder.getPtrTy(0);
+  llvm::FunctionCallee fct = module->getOrInsertFunction(
+      RTNAME_STRING(CUFRegisterModule),
+      llvm::FunctionType::get(ptrTy, ArrayRef<llvm::Type *>({ptrTy}), false));
+  auto *handle = builder.CreateCall(fct, {binary});
+  moduleTranslation.mapValue(op->getResults().front()) = handle;
+  return mlir::success();
+}
+
+llvm::Value *getOrCreateFunctionName(llvm::Module *module,
+                                     llvm::IRBuilderBase &builder,
+                                     llvm::StringRef moduleName,
+                                     llvm::StringRef kernelName) {
+  std::string globalName = std::string(
+      llvm::formatv("{0}_{1}_kernel_name", moduleName, kernelName));
+
+  if (llvm::GlobalVariable *gv = module->getGlobalVariable(globalName))
+    return gv;
+
+  return builder.CreateGlobalString(kernelName, globalName);
+}
+
+LogicalResult registerKernel(cuf::RegisterKernelOp op,
+                             llvm::IRBuilderBase &builder,
+                             LLVM::ModuleTranslation &moduleTranslation) {
+  llvm::Module *module = moduleTranslation.getLLVMModule();
+  llvm::Type *ptrTy = builder.getPtrTy(0);
+  llvm::FunctionCallee fct = module->getOrInsertFunction(
+      RTNAME_STRING(CUFRegisterFunction),
+      llvm::FunctionType::get(ptrTy, ArrayRef<llvm::Type *>({ptrTy, ptrTy}),
+                              false));
+  llvm::Value *modulePtr = moduleTranslation.lookupValue(op.getModulePtr());
+  builder.CreateCall(
+      fct, {modulePtr, getOrCreateFunctionName(module, builder,
+                                               op.getKernelModuleName().str(),
+                                               op.getKernelName().str())});
+  return mlir::success();
+}
+
+class CUFDialectLLVMIRTranslationInterface
+    : public LLVMTranslationDialectInterface {
+public:
+  using LLVMTranslationDialectInterface::LLVMTranslationDialectInterface;
+
+  LogicalResult
+  convertOperation(Operation *operation, llvm::IRBuilderBase &builder,
+                   LLVM::ModuleTranslation &moduleTranslation) const override {
+    return llvm::TypeSwitch<Operation *, LogicalResult>(operation)
+        .Case([&](cuf::RegisterModuleOp op) {
+          return registerModule(op, builder, moduleTranslation);
+        })
+        .Case([&](cuf::RegisterKernelOp op) {
+          return registerKernel(op, builder, moduleTranslation);
+        })
+        .Default([&](Operation *op) {
+          return op->emitError("unsupported CUF operation: ") << op->getName();
+        });
+  }
+};
+
+} // namespace
+
+void cuf::registerCUFDialectTranslation(DialectRegistry &registry) {
+  registry.insert<cuf::CUFDialect>();
+  registry.addExtension(+[](MLIRContext *ctx, cuf::CUFDialect *dialect) {
+    dialect->addInterfaces<CUFDialectLLVMIRTranslationInterface>();
+  });
+}
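
registerModule above relies on a naming contract: an earlier step is expected to have embedded the device fat binary as a global named after the gpu.module plus the `_bin_cst` suffix. A hedged sketch of the kind of global that satisfies the lookup (contents and linkage are illustrative):

    // Sketch: a global that registerModule()'s lookup would find.
    llvm::LLVMContext llvmCtx;
    llvm::Module m("host", llvmCtx);
    auto *blob = llvm::ConstantDataArray::getString(llvmCtx, "<fatbin bytes>",
                                                    /*AddNull=*/false);
    new llvm::GlobalVariable(m, blob->getType(), /*isConstant=*/true,
                             llvm::GlobalValue::InternalLinkage, blob,
                             "cuda_device_mod_bin_cst");
    // m.getGlobalVariable("cuda_device_mod_bin_cst", /*AllowInternal=*/true)
    // now succeeds, and its address is passed to CUFRegisterModule.
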
diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
index 92051634f0378b..035d0d5ca46c76 100644
--- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt
+++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt
@@ -2,6 +2,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
 
 add_flang_library(FlangOpenMPTransforms
   FunctionFiltering.cpp
+  MapsForPrivatizedSymbols.cpp
   MapInfoFinalization.cpp
   MarkDeclareTarget.cpp
 
diff --git a/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp b/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp
new file mode 100644
index 00000000000000..2fa55844aec7c7
--- /dev/null
+++ b/flang/lib/Optimizer/OpenMP/MapsForPrivatizedSymbols.cpp
@@ -0,0 +1,156 @@
+//===- MapsForPrivatizedSymbols.cpp ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+/// \file
+/// An OpenMP dialect related pass for FIR/HLFIR which creates MapInfoOp
+/// instances for certain privatized symbols.
+/// For example, if an allocatable variable is used in a private clause
+/// attached to an omp.target op, then the allocatable variable's descriptor
+/// will be needed on the device (e.g. GPU). This descriptor needs to be
+/// separately mapped onto the device. This pass creates the necessary
+/// omp.map.info ops for this.
+//===----------------------------------------------------------------------===//
+// TODO:
+// 1. Before adding omp.map.info, check if we already have an omp.map.info for
+//    the variable in question.
+// 2. Generalize this for more than just omp.target ops.
+//===----------------------------------------------------------------------===//
+
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Dialect/FIRType.h"
+#include "flang/Optimizer/Dialect/Support/KindMapping.h"
+#include "flang/Optimizer/HLFIR/HLFIROps.h"
+#include "flang/Optimizer/OpenMP/Passes.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/SymbolTable.h"
+#include "mlir/Pass/Pass.h"
+#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include "llvm/Support/Debug.h"
+#include <type_traits>
+
+#define DEBUG_TYPE "omp-maps-for-privatized-symbols"
+
+namespace flangomp {
+#define GEN_PASS_DEF_MAPSFORPRIVATIZEDSYMBOLSPASS
+#include "flang/Optimizer/OpenMP/Passes.h.inc"
+} // namespace flangomp
+using namespace mlir;
+namespace {
+class MapsForPrivatizedSymbolsPass
+    : public flangomp::impl::MapsForPrivatizedSymbolsPassBase<
+          MapsForPrivatizedSymbolsPass> {
+
+  bool privatizerNeedsMap(omp::PrivateClauseOp &privatizer) {
+    Region &allocRegion = privatizer.getAllocRegion();
+    Value blockArg0 = allocRegion.getArgument(0);
+    if (blockArg0.use_empty())
+      return false;
+    return true;
+  }
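+  // Illustration (hypothetical FIR): for a privatized allocatable such as
+  //   %v:2 = hlfir.declare %alloc {fortran_attrs = #fir.var_attrs<allocatable>}
+  // createMapInfo below wraps %v#0, the descriptor reference, in roughly
+  //   omp.map.info var_ptr(%v#0 : ...) map_clauses(to) capture(ByRef)
+  // so that the descriptor is available inside the target region.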
+  omp::MapInfoOp createMapInfo(Location loc, Value var,
+                               fir::FirOpBuilder &builder) {
+    uint64_t mapTypeTo = static_cast<
+        std::underlying_type_t<llvm::omp::OpenMPOffloadMappingFlags>>(
+        llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_TO);
+    Operation *definingOp = var.getDefiningOp();
+    auto declOp = llvm::dyn_cast_or_null<hlfir::DeclareOp>(definingOp);
+    assert(declOp &&
+           "Expected defining Op of privatized var to be hlfir.declare");
+
+    // We want the first result of the hlfir.declare op because our goal
+    // is to map the descriptor (fir.box or fir.boxchar), and the first
+    // result of hlfir.declare is the descriptor if the symbol being
+    // declared needs a descriptor.
+    Value varPtr = declOp.getBase();
+
+    // If we have the descriptor itself rather than a reference to it, we
+    // need to store it on the stack so that we can map the address of the
+    // descriptor.
+    if (mlir::isa<fir::BaseBoxType>(varPtr.getType()) ||
+        mlir::isa<fir::BoxCharType>(varPtr.getType())) {
+      OpBuilder::InsertPoint savedInsPoint = builder.saveInsertionPoint();
+      mlir::Block *allocaBlock = builder.getAllocaBlock();
+      assert(allocaBlock && "No alloca block found for the enclosing function");
+      builder.setInsertionPointToStart(allocaBlock);
+      auto alloca = builder.create<fir::AllocaOp>(loc, varPtr.getType());
+      builder.restoreInsertionPoint(savedInsPoint);
+      builder.create<fir::StoreOp>(loc, varPtr, alloca);
+      varPtr = alloca;
+    }
+    return builder.create<omp::MapInfoOp>(
+        loc, varPtr.getType(), varPtr,
+        TypeAttr::get(llvm::cast<omp::PointerLikeType>(varPtr.getType())
+                          .getElementType()),
+        /*varPtrPtr=*/Value{},
+        /*members=*/SmallVector<Value>{},
+        /*member_index=*/DenseIntElementsAttr{},
+        /*bounds=*/ValueRange{},
+        builder.getIntegerAttr(builder.getIntegerType(64, /*isSigned=*/false),
+                               mapTypeTo),
+        builder.getAttr<omp::VariableCaptureKindAttr>(
+            omp::VariableCaptureKind::ByRef),
+        StringAttr(), builder.getBoolAttr(false));
+  }
+  void addMapInfoOp(omp::TargetOp targetOp, omp::MapInfoOp mapInfoOp) {
+    auto argIface = llvm::cast<omp::BlockArgOpenMPOpInterface>(*targetOp);
+    unsigned insertIndex =
+        argIface.getMapBlockArgsStart() + argIface.numMapBlockArgs();
+    targetOp.getMapVarsMutable().append(ValueRange{mapInfoOp});
+    targetOp.getRegion().insertArgument(insertIndex, mapInfoOp.getType(),
+                                        mapInfoOp.getLoc());
+  }
+  void addMapInfoOps(omp::TargetOp targetOp,
+                     llvm::SmallVectorImpl<omp::MapInfoOp> &mapInfoOps) {
+    for (auto mapInfoOp : mapInfoOps)
+      addMapInfoOp(targetOp, mapInfoOp);
+  }
+  void runOnOperation() override {
+    ModuleOp module = getOperation()->getParentOfType<ModuleOp>();
+    fir::KindMapping kindMap = fir::getKindMapping(module);
+    fir::FirOpBuilder builder{module, std::move(kindMap)};
+    llvm::DenseMap<Operation *, llvm::SmallVector<omp::MapInfoOp, 4>>
+        mapInfoOpsForTarget;
+
+    getOperation()->walk([&](omp::TargetOp targetOp) {
+      if (targetOp.getPrivateVars().empty())
+        return;
+      OperandRange privVars = targetOp.getPrivateVars();
+      std::optional<ArrayAttr> privSyms = targetOp.getPrivateSyms();
+      SmallVector<omp::MapInfoOp, 4> mapInfoOps;
+      for (auto [privVar, privSym] : llvm::zip_equal(privVars, *privSyms)) {
+        SymbolRefAttr privatizerName = llvm::cast<SymbolRefAttr>(privSym);
+        omp::PrivateClauseOp privatizer =
+            SymbolTable::lookupNearestSymbolFrom<omp::PrivateClauseOp>(
+                targetOp, privatizerName);
+        if (!privatizerNeedsMap(privatizer)) {
+          continue;
+        }
+        builder.setInsertionPoint(targetOp);
+        Location loc = targetOp.getLoc();
+        omp::MapInfoOp mapInfoOp = createMapInfo(loc, privVar, builder);
+        mapInfoOps.push_back(mapInfoOp);
+        LLVM_DEBUG(llvm::dbgs() << "MapsForPrivatizedSymbolsPass created ->\n");
+        LLVM_DEBUG(mapInfoOp.dump());
+      }
+      if (!mapInfoOps.empty()) {
+        mapInfoOpsForTarget.insert({targetOp.getOperation(), mapInfoOps});
+      }
+    });
+    if (!mapInfoOpsForTarget.empty()) {
+      for (auto &[targetOp, mapInfoOps] : mapInfoOpsForTarget) {
+        addMapInfoOps(static_cast<omp::TargetOp>(targetOp), mapInfoOps);
+      }
+    }
+  }
+};
+} // namespace
diff --git a/flang/lib/Optimizer/Passes/Pipelines.cpp b/flang/lib/Optimizer/Passes/Pipelines.cpp
index 3fa5c54403bd8c..3c139f7e93405c 100644
--- a/flang/lib/Optimizer/Passes/Pipelines.cpp
+++ b/flang/lib/Optimizer/Passes/Pipelines.cpp
@@ -243,6 +243,7 @@ void createHLFIRToFIRPassPipeline(mlir::PassManager &pm,
 /// rather than the host device.
void createOpenMPFIRPassPipeline(mlir::PassManager &pm, bool isTargetDevice) { pm.addPass(flangomp::createMapInfoFinalizationPass()); + pm.addPass(flangomp::createMapsForPrivatizedSymbolsPass()); pm.addPass(flangomp::createMarkDeclareTargetPass()); if (isTargetDevice) pm.addPass(flangomp::createFunctionFilteringPass()); diff --git a/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp b/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp index 3db24226e75042..f260437e710417 100644 --- a/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp +++ b/flang/lib/Optimizer/Transforms/CUFAddConstructor.cpp @@ -62,12 +62,15 @@ struct CUFAddConstructor // Register kernels auto gpuMod = symTab.lookup(cudaModName); if (gpuMod) { + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(ctx); + auto registeredMod = builder.create( + loc, llvmPtrTy, mlir::SymbolRefAttr::get(ctx, gpuMod.getName())); for (auto func : gpuMod.getOps()) { if (func.isKernel()) { auto kernelName = mlir::SymbolRefAttr::get( builder.getStringAttr(cudaModName), {mlir::SymbolRefAttr::get(builder.getContext(), func.getName())}); - builder.create(loc, kernelName); + builder.create(loc, kernelName, registeredMod); } } } diff --git a/flang/lib/Optimizer/Transforms/CufOpConversion.cpp b/flang/lib/Optimizer/Transforms/CufOpConversion.cpp index 91ef1259332de9..629f0c69f8cb5d 100644 --- a/flang/lib/Optimizer/Transforms/CufOpConversion.cpp +++ b/flang/lib/Optimizer/Transforms/CufOpConversion.cpp @@ -20,6 +20,7 @@ #include "flang/Runtime/CUDA/descriptor.h" #include "flang/Runtime/CUDA/memory.h" #include "flang/Runtime/allocatable.h" +#include "mlir/Dialect/GPU/IR/GPUDialect.h" #include "mlir/Pass/Pass.h" #include "mlir/Transforms/DialectConversion.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" @@ -77,6 +78,69 @@ static bool hasDoubleDescriptors(OpTy op) { return false; } +static mlir::Value createConvertOp(mlir::PatternRewriter &rewriter, + mlir::Location loc, mlir::Type toTy, + mlir::Value val) { + if (val.getType() != toTy) + return rewriter.create(loc, toTy, val); + return val; +} + +mlir::Value getDeviceAddress(mlir::PatternRewriter &rewriter, + mlir::OpOperand &operand, + const mlir::SymbolTable &symtab) { + mlir::Value v = operand.get(); + auto declareOp = v.getDefiningOp(); + if (!declareOp) + return v; + + auto addrOfOp = declareOp.getMemref().getDefiningOp(); + if (!addrOfOp) + return v; + + auto globalOp = symtab.lookup( + addrOfOp.getSymbol().getRootReference().getValue()); + + if (!globalOp) + return v; + + bool isDevGlobal{false}; + auto attr = globalOp.getDataAttrAttr(); + if (attr) { + switch (attr.getValue()) { + case cuf::DataAttribute::Device: + case cuf::DataAttribute::Managed: + case cuf::DataAttribute::Pinned: + isDevGlobal = true; + break; + default: + break; + } + } + if (!isDevGlobal) + return v; + mlir::OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(operand.getOwner()); + auto loc = declareOp.getLoc(); + auto mod = declareOp->getParentOfType(); + fir::FirOpBuilder builder(rewriter, mod); + + mlir::func::FuncOp callee = + fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = callee.getFunctionType(); + auto toTy = fTy.getInput(0); + mlir::Value inputArg = + createConvertOp(rewriter, loc, toTy, declareOp.getResult()); + mlir::Value sourceFile = fir::factory::locationToFilename(builder, loc); + mlir::Value sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(2)); + llvm::SmallVector args{fir::runtime::createArguments( + builder, loc, fTy, inputArg, sourceFile, 
sourceLine)}; + auto call = rewriter.create(loc, callee, args); + + return call->getResult(0); +} + template static mlir::LogicalResult convertOpToCall(OpTy op, mlir::PatternRewriter &rewriter, @@ -363,18 +427,14 @@ struct CufFreeOpConversion : public mlir::OpRewritePattern { } }; -static mlir::Value createConvertOp(mlir::PatternRewriter &rewriter, - mlir::Location loc, mlir::Type toTy, - mlir::Value val) { - if (val.getType() != toTy) - return rewriter.create(loc, toTy, val); - return val; -} - struct CufDataTransferOpConversion : public mlir::OpRewritePattern { using OpRewritePattern::OpRewritePattern; + CufDataTransferOpConversion(mlir::MLIRContext *context, + const mlir::SymbolTable &symtab) + : OpRewritePattern(context), symtab{symtab} {} + mlir::LogicalResult matchAndRewrite(cuf::DataTransferOp op, mlir::PatternRewriter &rewriter) const override { @@ -445,9 +505,11 @@ struct CufDataTransferOpConversion mlir::Value sourceLine = fir::factory::locationToLineNo(builder, loc, fTy.getInput(5)); - llvm::SmallVector args{fir::runtime::createArguments( - builder, loc, fTy, op.getDst(), op.getSrc(), bytes, modeValue, - sourceFile, sourceLine)}; + mlir::Value dst = getDeviceAddress(rewriter, op.getDstMutable(), symtab); + mlir::Value src = getDeviceAddress(rewriter, op.getSrcMutable(), symtab); + llvm::SmallVector args{ + fir::runtime::createArguments(builder, loc, fTy, dst, src, bytes, + modeValue, sourceFile, sourceLine)}; builder.create(loc, func, args); rewriter.eraseOp(op); return mlir::success(); @@ -552,6 +614,9 @@ struct CufDataTransferOpConversion } return mlir::success(); } + +private: + const mlir::SymbolTable &symtab; }; class CufOpConversion : public fir::impl::CufOpConversionBase { @@ -565,13 +630,15 @@ class CufOpConversion : public fir::impl::CufOpConversionBase { mlir::ModuleOp module = mlir::dyn_cast(op); if (!module) return signalPassFailure(); + mlir::SymbolTable symtab(module); std::optional dl = fir::support::getOrSetDataLayout(module, /*allowDefaultLayout=*/false); fir::LLVMTypeConverter typeConverter(module, /*applyTBAA=*/false, /*forceUnifiedTBAATree=*/false, *dl); target.addLegalDialect(); - cuf::populateCUFToFIRConversionPatterns(typeConverter, *dl, patterns); + cuf::populateCUFToFIRConversionPatterns(typeConverter, *dl, symtab, + patterns); if (mlir::failed(mlir::applyPartialConversion(getOperation(), target, std::move(patterns)))) { mlir::emitError(mlir::UnknownLoc::get(ctx), @@ -584,9 +651,9 @@ class CufOpConversion : public fir::impl::CufOpConversionBase { void cuf::populateCUFToFIRConversionPatterns( const fir::LLVMTypeConverter &converter, mlir::DataLayout &dl, - mlir::RewritePatternSet &patterns) { + const mlir::SymbolTable &symtab, mlir::RewritePatternSet &patterns) { patterns.insert(patterns.getContext(), &dl, &converter); patterns.insert( - patterns.getContext()); + CufFreeOpConversion>(patterns.getContext()); + patterns.insert(patterns.getContext(), symtab); } diff --git a/flang/runtime/CUDA/CMakeLists.txt b/flang/runtime/CUDA/CMakeLists.txt index 193dd77e934558..86523b419f8711 100644 --- a/flang/runtime/CUDA/CMakeLists.txt +++ b/flang/runtime/CUDA/CMakeLists.txt @@ -18,6 +18,7 @@ add_flang_library(${CUFRT_LIBNAME} allocatable.cpp descriptor.cpp memory.cpp + registration.cpp ) if (BUILD_SHARED_LIBS) diff --git a/flang/runtime/CUDA/registration.cpp b/flang/runtime/CUDA/registration.cpp new file mode 100644 index 00000000000000..aed275e964680e --- /dev/null +++ b/flang/runtime/CUDA/registration.cpp @@ -0,0 +1,31 @@ +//===-- runtime/CUDA/registration.cpp 
-------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "flang/Runtime/CUDA/registration.h" + +#include "cuda_runtime.h" + +namespace Fortran::runtime::cuda { + +extern "C" { + +extern void **__cudaRegisterFatBinary(void *data); +extern void __cudaRegisterFunction(void **fatCubinHandle, const char *hostFun, + char *deviceFun, const char *deviceName, int thread_limit, uint3 *tid, + uint3 *bid, dim3 *bDim, dim3 *gDim, int *wSize); + +void *RTDECL(CUFRegisterModule)(void *data) { + return __cudaRegisterFatBinary(data); +} + +void RTDEF(CUFRegisterFunction)(void **module, const char *fct) { + __cudaRegisterFunction(module, fct, const_cast(fct), fct, -1, + (uint3 *)0, (uint3 *)0, (dim3 *)0, (dim3 *)0, (int *)0); +} +} +} // namespace Fortran::runtime::cuda diff --git a/flang/test/Fir/CUDA/cuda-data-transfer.fir b/flang/test/Fir/CUDA/cuda-data-transfer.fir index ed894aed5534a0..c33c50115b9fc0 100644 --- a/flang/test/Fir/CUDA/cuda-data-transfer.fir +++ b/flang/test/Fir/CUDA/cuda-data-transfer.fir @@ -189,4 +189,47 @@ func.func @_QPsub7() { // CHECK: %[[SRC:.*]] = fir.convert %[[IHOST]]#0 : (!fir.ref>) -> !fir.llvm_ptr // CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %[[BYTES]], %c0{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none +fir.global @_QMmtestsEn(dense<[3, 4, 5, 6, 7]> : tensor<5xi32>) {data_attr = #cuf.cuda} : !fir.array<5xi32> +func.func @_QPsub8() attributes {fir.bindc_name = "t"} { + %c5 = arith.constant 5 : index + %0 = fir.alloca !fir.array<5xi32> {bindc_name = "m", uniq_name = "_QFEm"} + %1 = fir.shape %c5 : (index) -> !fir.shape<1> + %2 = fir.declare %0(%1) {uniq_name = "_QFEm"} : (!fir.ref>, !fir.shape<1>) -> !fir.ref> + %3 = fir.address_of(@_QMmtestsEn) : !fir.ref> + %4 = fir.declare %3(%1) {data_attr = #cuf.cuda, uniq_name = "_QMmtestsEn"} : (!fir.ref>, !fir.shape<1>) -> !fir.ref> + cuf.data_transfer %4 to %2 {transfer_kind = #cuf.cuda_transfer} : !fir.ref>, !fir.ref> + return +} + +// CHECK-LABEL: func.func @_QPsub8() +// CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<5xi32> +// CHECK: %[[LOCAL:.*]] = fir.declare %[[ALLOCA]] +// CHECK: %[[GBL:.*]] = fir.address_of(@_QMmtestsEn) : !fir.ref> +// CHECK: %[[DECL:.*]] = fir.declare %[[GBL]] +// CHECK: %[[HOST:.*]] = fir.convert %[[DECL]] : (!fir.ref>) -> !fir.llvm_ptr +// CHECK: %[[SRC:.*]] = fir.call @_FortranACUFGetDeviceAddress(%[[HOST]], %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.ref, i32) -> !fir.llvm_ptr +// CHECK: %[[DST:.*]] = fir.convert %[[LOCAL]] : (!fir.ref>) -> !fir.llvm_ptr +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none + + +func.func @_QPsub9() { + %c5 = arith.constant 5 : index + %0 = fir.alloca !fir.array<5xi32> {bindc_name = "m", uniq_name = "_QFtest9Em"} + %1 = fir.shape %c5 : (index) -> !fir.shape<1> + %2 = fir.declare %0(%1) {uniq_name = "_QFtest9Em"} : (!fir.ref>, !fir.shape<1>) -> !fir.ref> + %3 = fir.address_of(@_QMmtestsEn) : !fir.ref> + %4 = fir.declare %3(%1) {data_attr = #cuf.cuda, uniq_name = "_QMmtestsEn"} : (!fir.ref>, !fir.shape<1>) -> !fir.ref> + cuf.data_transfer %2 to %4 {transfer_kind = #cuf.cuda_transfer} : !fir.ref>, 
!fir.ref> + return +} + +// CHECK-LABEL: func.func @_QPsub9() +// CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<5xi32> +// CHECK: %[[LOCAL:.*]] = fir.declare %[[ALLOCA]] +// CHECK: %[[GBL:.*]] = fir.address_of(@_QMmtestsEn) : !fir.ref> +// CHECK: %[[DECL:.*]] = fir.declare %[[GBL]] +// CHECK: %[[HOST:.*]] = fir.convert %[[DECL]] : (!fir.ref>) -> !fir.llvm_ptr +// CHECK: %[[DST:.*]] = fir.call @_FortranACUFGetDeviceAddress(%[[HOST]], %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.ref, i32) -> !fir.llvm_ptr +// CHECK: %[[SRC:.*]] = fir.convert %[[LOCAL]] : (!fir.ref>) -> !fir.llvm_ptr +// CHECK: fir.call @_FortranACUFDataTransferPtrPtr(%[[DST]], %[[SRC]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) : (!fir.llvm_ptr, !fir.llvm_ptr, i64, i32, !fir.ref, i32) -> none } // end of module diff --git a/flang/test/Fir/CUDA/cuda-register-func.fir b/flang/test/Fir/CUDA/cuda-register-func.fir index 277475f0883dcc..6b0cbfd3aca63d 100644 --- a/flang/test/Fir/CUDA/cuda-register-func.fir +++ b/flang/test/Fir/CUDA/cuda-register-func.fir @@ -12,5 +12,6 @@ module attributes {gpu.container_module} { } // CHECK-LABEL: llvm.func internal @__cudaFortranConstructor() -// CHECK: cuf.register_kernel @cuda_device_mod::@_QPsub_device1 -// CHECK: cuf.register_kernel @cuda_device_mod::@_QPsub_device2 +// CHECK: %[[MOD_HANDLE:.*]] = cuf.register_module @cuda_device_mod -> !llvm.ptr +// CHECK: cuf.register_kernel @cuda_device_mod::@_QPsub_device1(%[[MOD_HANDLE]] : !llvm.ptr) +// CHECK: cuf.register_kernel @cuda_device_mod::@_QPsub_device2(%[[MOD_HANDLE]] : !llvm.ptr) diff --git a/flang/test/Fir/cuf-invalid.fir b/flang/test/Fir/cuf-invalid.fir index 8a1eb48576832c..a3b9be3ee8223b 100644 --- a/flang/test/Fir/cuf-invalid.fir +++ b/flang/test/Fir/cuf-invalid.fir @@ -135,8 +135,9 @@ module attributes {gpu.container_module} { } } llvm.func internal @__cudaFortranConstructor() { + %0 = cuf.register_module @cuda_device_mod -> !llvm.ptr // expected-error@+1{{'cuf.register_kernel' op only kernel gpu.func can be registered}} - cuf.register_kernel @cuda_device_mod::@_QPsub_device1 + cuf.register_kernel @cuda_device_mod::@_QPsub_device1(%0 : !llvm.ptr) llvm.return } } @@ -150,8 +151,9 @@ module attributes {gpu.container_module} { } } llvm.func internal @__cudaFortranConstructor() { + %0 = cuf.register_module @cuda_device_mod -> !llvm.ptr // expected-error@+1{{'cuf.register_kernel' op device function not found}} - cuf.register_kernel @cuda_device_mod::@_QPsub_device2 + cuf.register_kernel @cuda_device_mod::@_QPsub_device2(%0 : !llvm.ptr) llvm.return } } @@ -160,8 +162,9 @@ module attributes {gpu.container_module} { module attributes {gpu.container_module} { llvm.func internal @__cudaFortranConstructor() { + %0 = cuf.register_module @cuda_device_mod -> !llvm.ptr // expected-error@+1{{'cuf.register_kernel' op gpu module not found}} - cuf.register_kernel @cuda_device_mod::@_QPsub_device1 + cuf.register_kernel @cuda_device_mod::@_QPsub_device1(%0 : !llvm.ptr) llvm.return } } @@ -170,8 +173,9 @@ module attributes {gpu.container_module} { module attributes {gpu.container_module} { llvm.func internal @__cudaFortranConstructor() { + %0 = cuf.register_module @cuda_device_mod -> !llvm.ptr // expected-error@+1{{'cuf.register_kernel' op expect a module and a kernel name}} - cuf.register_kernel @_QPsub_device1 + cuf.register_kernel @_QPsub_device1(%0 : !llvm.ptr) llvm.return } } @@ -185,8 +189,9 @@ module attributes {gpu.container_module} { } } llvm.func internal @__cudaFortranConstructor() { + %0 = cuf.register_module @cuda_device_mod -> !llvm.ptr // 
expected-error@+1{{'cuf.register_kernel' op only gpu.kernel llvm.func can be registered}} - cuf.register_kernel @cuda_device_mod::@_QPsub_device1 + cuf.register_kernel @cuda_device_mod::@_QPsub_device1(%0 : !llvm.ptr) llvm.return } } diff --git a/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-allocatable.f90 b/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-allocatable.f90 index a27de1152ce17a..e11525c569ffb8 100644 --- a/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-allocatable.f90 +++ b/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-allocatable.f90 @@ -18,22 +18,22 @@ end subroutine target_allocatable ! CHECK-SAME: @[[VAR_PRIVATIZER_SYM:.*]] : ! CHECK-SAME: [[TYPE:!fir.ref>>]] alloc { ! CHECK: ^bb0(%[[PRIV_ARG:.*]]: [[TYPE]]): -! CHECK: %[[PRIV_ALLOC:.*]] = fir.alloca !fir.box> {bindc_name = "alloc_var", {{.*}}} +! CHECK: %[[PRIV_ALLOC:.*]] = fir.alloca [[DESC_TYPE:!fir.box>]] {bindc_name = "alloc_var", {{.*}}} -! CHECK-NEXT: %[[PRIV_ARG_VAL:.*]] = fir.load %[[PRIV_ARG]] : !fir.ref>> -! CHECK-NEXT: %[[PRIV_ARG_BOX:.*]] = fir.box_addr %[[PRIV_ARG_VAL]] : (!fir.box>) -> !fir.heap +! CHECK-NEXT: %[[PRIV_ARG_VAL:.*]] = fir.load %[[PRIV_ARG]] : [[TYPE]] +! CHECK-NEXT: %[[PRIV_ARG_BOX:.*]] = fir.box_addr %[[PRIV_ARG_VAL]] : ([[DESC_TYPE]]) -> !fir.heap ! CHECK-NEXT: %[[PRIV_ARG_ADDR:.*]] = fir.convert %[[PRIV_ARG_BOX]] : (!fir.heap) -> i64 ! CHECK-NEXT: %[[C0:.*]] = arith.constant 0 : i64 ! CHECK-NEXT: %[[ALLOC_COND:.*]] = arith.cmpi ne, %[[PRIV_ARG_ADDR]], %[[C0]] : i64 ! CHECK-NEXT: fir.if %[[ALLOC_COND]] { ! CHECK: %[[PRIV_ALLOCMEM:.*]] = fir.allocmem i32 {fir.must_be_heap = true, {{.*}}} -! CHECK-NEXT: %[[PRIV_ALLOCMEM_BOX:.*]] = fir.embox %[[PRIV_ALLOCMEM]] : (!fir.heap) -> !fir.box> -! CHECK-NEXT: fir.store %[[PRIV_ALLOCMEM_BOX]] to %[[PRIV_ALLOC]] : !fir.ref>> +! CHECK-NEXT: %[[PRIV_ALLOCMEM_BOX:.*]] = fir.embox %[[PRIV_ALLOCMEM]] : (!fir.heap) -> [[DESC_TYPE]] +! CHECK-NEXT: fir.store %[[PRIV_ALLOCMEM_BOX]] to %[[PRIV_ALLOC]] : [[TYPE]] ! CHECK-NEXT: } else { ! CHECK-NEXT: %[[ZERO_BITS:.*]] = fir.zero_bits !fir.heap -! CHECK-NEXT: %[[ZERO_BOX:.*]] = fir.embox %[[ZERO_BITS]] : (!fir.heap) -> !fir.box> -! CHECK-NEXT: fir.store %[[ZERO_BOX]] to %[[PRIV_ALLOC]] : !fir.ref>> +! CHECK-NEXT: %[[ZERO_BOX:.*]] = fir.embox %[[ZERO_BITS]] : (!fir.heap) -> [[DESC_TYPE]] +! CHECK-NEXT: fir.store %[[ZERO_BOX]] to %[[PRIV_ALLOC]] : [[TYPE]] ! CHECK-NEXT: } ! CHECK-NEXT: %[[PRIV_DECL:.*]]:2 = hlfir.declare %[[PRIV_ALLOC]] @@ -63,9 +63,11 @@ end subroutine target_allocatable ! CHECK-LABEL: func.func @_QPtarget_allocatable() { -! CHECK: %[[VAR_ALLOC:.*]] = fir.alloca !fir.box> +! CHECK: %[[VAR_ALLOC:.*]] = fir.alloca [[DESC_TYPE]] ! CHECK-SAME: {bindc_name = "alloc_var", {{.*}}} ! CHECK: %[[VAR_DECL:.*]]:2 = hlfir.declare %[[VAR_ALLOC]] -! CHECK: omp.target private( +! CHECK: %[[MAP_VAR:.*]] = omp.map.info var_ptr(%[[VAR_DECL]]#0 : [[TYPE]], [[DESC_TYPE]]) +! CHECK-SAME: map_clauses(to) capture(ByRef) -> [[TYPE]] +! CHECK: omp.target map_entries(%[[MAP_VAR]] -> %arg0 : [[TYPE]]) private( ! 
CHECK-SAME: @[[VAR_PRIVATIZER_SYM]] %[[VAR_DECL]]#0 -> %{{.*}} : [[TYPE]]) {
diff --git a/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-multiple-variables.f90 b/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-multiple-variables.f90
index ce98f518581a45..b0c76ff3845f83 100644
--- a/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-multiple-variables.f90
+++ b/flang/test/Lower/OpenMP/DelayedPrivatization/target-private-multiple-variables.f90
@@ -147,12 +147,29 @@ end subroutine target_allocatable
 ! CHECK-NEXT: }
 
 ! CHECK: func.func @_QPtarget_allocatable
+! CHECK: %[[CHAR_VAR_DESC_ALLOCA:.*]] = fir.alloca !fir.boxchar<1>
+! CHECK: %[[REAL_ARR_DESC_ALLOCA:.*]] = fir.alloca !fir.box>
+! CHECK: %[[ALLOC_VAR_ALLOCA:.*]] = fir.alloca !fir.box> {bindc_name = "alloc_var", {{.*}}}
+! CHECK: %[[ALLOC_VAR_DECL:.*]]:2 = hlfir.declare %[[ALLOC_VAR_ALLOCA]]
 ! CHECK: %[[MAPPED_ALLOC:.*]] = fir.alloca i32 {bindc_name = "mapped_var", {{.*}}}
 ! CHECK-NEXT: %[[MAPPED_DECL:.*]]:2 = hlfir.declare %[[MAPPED_ALLOC]]
-! CHECK: %[[MAPPED_MI:.*]] = omp.map.info var_ptr(%[[MAPPED_DECL]]#1 : !fir.ref, i32)
-
+! CHECK: %[[CHAR_VAR_ALLOC:.*]] = fir.alloca !fir.char<1,?>{{.*}} {bindc_name = "char_var", {{.*}}}
+! CHECK: %[[CHAR_VAR_DECL:.*]]:2 = hlfir.declare %[[CHAR_VAR_ALLOC]] typeparams
+! CHECK: %[[REAL_ARR_ALLOC:.*]] = fir.alloca !fir.array, {{.*}} {bindc_name = "real_arr", {{.*}}}
+! CHECK: %[[REAL_ARR_DECL:.*]]:2 = hlfir.declare %[[REAL_ARR_ALLOC]]({{.*}})
+! CHECK: %[[MAPPED_MI0:.*]] = omp.map.info var_ptr(%[[MAPPED_DECL]]#1 : !fir.ref, i32) {{.*}}
+! CHECK: %[[ALLOC_VAR_MAP:.*]] = omp.map.info var_ptr(%[[ALLOC_VAR_DECL]]#0 : !fir.ref>>, !fir.box>)
+! CHECK: fir.store %[[REAL_ARR_DECL]]#0 to %[[REAL_ARR_DESC_ALLOCA]] : !fir.ref>>
+! CHECK: %[[REAL_ARR_DESC_MAP:.*]] = omp.map.info var_ptr(%[[REAL_ARR_DESC_ALLOCA]] : !fir.ref>>, !fir.box>)
+! CHECK: fir.store %[[CHAR_VAR_DECL]]#0 to %[[CHAR_VAR_DESC_ALLOCA]] : !fir.ref>
+! CHECK: %[[CHAR_VAR_DESC_MAP:.*]] = omp.map.info var_ptr(%[[CHAR_VAR_DESC_ALLOCA]] : !fir.ref>, !fir.boxchar<1>)
 ! CHECK: omp.target
-! CHECK-SAME: map_entries(%[[MAPPED_MI]] -> %[[MAPPED_ARG:.*]] : !fir.ref)
+! CHECK-SAME: map_entries(
+! CHECK-SAME: %[[MAPPED_MI0]] -> %[[MAPPED_ARG0:[^,]+]],
+! CHECK-SAME: %[[ALLOC_VAR_MAP]] -> %[[MAPPED_ARG1:[^,]+]]
+! CHECK-SAME: %[[REAL_ARR_DESC_MAP]] -> %[[MAPPED_ARG2:[^,]+]]
+! CHECK-SAME: %[[CHAR_VAR_DESC_MAP]] -> %[[MAPPED_ARG3:[^,]+]] :
+! CHECK-SAME: !fir.ref, !fir.ref>>, !fir.ref>>, !fir.ref>)
 ! CHECK-SAME: private(
 ! CHECK-SAME: @[[ALLOC_PRIVATIZER_SYM]] %{{[^[:space:]]+}}#0 -> %[[ALLOC_ARG:[^,]+]],
 ! CHECK-SAME: @[[REAL_PRIVATIZER_SYM]] %{{[^[:space:]]+}}#0 -> %[[REAL_ARG:[^,]+]],
@@ -162,7 +179,6 @@ end subroutine target_allocatable
 ! CHECK-SAME: @[[CHAR_PRIVATIZER_SYM]] %{{[^[:space:]]+}}#0 -> %[[CHAR_ARG:[^,]+]] :
 ! CHECK-SAME: !fir.ref>>, !fir.ref, !fir.ref, !fir.box>, !fir.ref>, !fir.boxchar<1>) {
 ! CHECK-NOT: fir.alloca
-! CHECK: hlfir.declare %[[MAPPED_ARG]]
 ! CHECK: hlfir.declare %[[ALLOC_ARG]]
 ! CHECK: hlfir.declare %[[REAL_ARG]]
 !
CHECK: hlfir.declare %[[LB_ARG]] diff --git a/flang/test/Transforms/omp-maps-for-privatized-symbols.fir b/flang/test/Transforms/omp-maps-for-privatized-symbols.fir new file mode 100644 index 00000000000000..d32444aaabf237 --- /dev/null +++ b/flang/test/Transforms/omp-maps-for-privatized-symbols.fir @@ -0,0 +1,48 @@ +// RUN: fir-opt --split-input-file --omp-maps-for-privatized-symbols %s | FileCheck %s +module attributes {omp.is_target_device = false} { + omp.private {type = private} @_QFtarget_simpleEsimple_var_private_ref_box_heap_i32 : !fir.ref>> alloc { + ^bb0(%arg0: !fir.ref>>): + %0 = fir.alloca !fir.box> {bindc_name = "simple_var", pinned, uniq_name = "_QFtarget_simpleEsimple_var"} + %1 = fir.load %arg0 : !fir.ref>> + %5:2 = hlfir.declare %0 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtarget_simpleEsimple_var"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) + omp.yield(%5#0 : !fir.ref>>) + } + func.func @_QPtarget_simple() { + %0 = fir.alloca i32 {bindc_name = "a", uniq_name = "_QFtarget_simpleEa"} + %1:2 = hlfir.declare %0 {uniq_name = "_QFtarget_simpleEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) + %2 = fir.alloca !fir.box> {bindc_name = "simple_var", uniq_name = "_QFtarget_simpleEsimple_var"} + %3 = fir.zero_bits !fir.heap + %4 = fir.embox %3 : (!fir.heap) -> !fir.box> + fir.store %4 to %2 : !fir.ref>> + %5:2 = hlfir.declare %2 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtarget_simpleEsimple_var"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) + %c2_i32 = arith.constant 2 : i32 + hlfir.assign %c2_i32 to %1#0 : i32, !fir.ref + %6 = omp.map.info var_ptr(%1#1 : !fir.ref, i32) map_clauses(to) capture(ByRef) -> !fir.ref {name = "a"} + omp.target map_entries(%6 -> %arg0 : !fir.ref) private(@_QFtarget_simpleEsimple_var_private_ref_box_heap_i32 %5#0 -> %arg1 : !fir.ref>>) { + %11:2 = hlfir.declare %arg0 {uniq_name = "_QFtarget_simpleEa"} : (!fir.ref) -> (!fir.ref, !fir.ref) + %12:2 = hlfir.declare %arg1 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtarget_simpleEsimple_var"} : (!fir.ref>>) -> (!fir.ref>>, !fir.ref>>) + %c10_i32 = arith.constant 10 : i32 + %13 = fir.load %11#0 : !fir.ref + %14 = arith.addi %c10_i32, %13 : i32 + hlfir.assign %14 to %12#0 realloc : i32, !fir.ref>> + omp.terminator + } + %7 = fir.load %5#1 : !fir.ref>> + %8 = fir.box_addr %7 : (!fir.box>) -> !fir.heap + %9 = fir.convert %8 : (!fir.heap) -> i64 + %c0_i64 = arith.constant 0 : i64 + %10 = arith.cmpi ne, %9, %c0_i64 : i64 + fir.if %10 { + %11 = fir.load %5#1 : !fir.ref>> + %12 = fir.box_addr %11 : (!fir.box>) -> !fir.heap + fir.freemem %12 : !fir.heap + %13 = fir.zero_bits !fir.heap + %14 = fir.embox %13 : (!fir.heap) -> !fir.box> + fir.store %14 to %5#1 : !fir.ref>> + } + return + } +} +// CHECK: %[[MAP0:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref, i32) map_clauses(to) capture(ByRef) -> !fir.ref {name = "a"} +// CHECK: %[[MAP1:.*]] = omp.map.info var_ptr({{.*}} : !fir.ref>>, !fir.box>) map_clauses(to) capture(ByRef) -> !fir.ref>> +// CHECK: omp.target map_entries(%[[MAP0]] -> %arg0, %[[MAP1]] -> %arg1 : !fir.ref, !fir.ref>>) diff --git a/libc/config/gpu/entrypoints.txt b/libc/config/gpu/entrypoints.txt index 2cc54e8a4b970c..38e9f2e685caed 100644 --- a/libc/config/gpu/entrypoints.txt +++ b/libc/config/gpu/entrypoints.txt @@ -567,6 +567,7 @@ if(LIBC_TYPES_HAS_FLOAT16) libc.src.math.llogbf16 libc.src.math.llrintf16 libc.src.math.llroundf16 + libc.src.math.log10f16 libc.src.math.log2f16 libc.src.math.logbf16 libc.src.math.logf16 @@ -589,6 +590,7 @@ if(LIBC_TYPES_HAS_FLOAT16) libc.src.math.setpayloadf16 
libc.src.math.setpayloadsigf16 libc.src.math.sinhf16 + libc.src.math.sqrtf16 libc.src.math.tanhf16 libc.src.math.totalorderf16 libc.src.math.totalordermagf16 diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt index 85bb5df358ec3a..b3f94a581c8ad9 100644 --- a/libc/config/linux/aarch64/entrypoints.txt +++ b/libc/config/linux/aarch64/entrypoints.txt @@ -681,6 +681,7 @@ if(LIBC_TYPES_HAS_FLOAT16) libc.src.math.setpayloadf16 libc.src.math.setpayloadsigf16 libc.src.math.sinpif16 + libc.src.math.sqrtf16 libc.src.math.totalorderf16 libc.src.math.totalordermagf16 libc.src.math.truncf16 diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt index f40d752840b85a..a2fb97d04584d5 100644 --- a/libc/config/linux/x86_64/entrypoints.txt +++ b/libc/config/linux/x86_64/entrypoints.txt @@ -661,6 +661,7 @@ if(LIBC_TYPES_HAS_FLOAT16) libc.src.math.llogbf16 libc.src.math.llrintf16 libc.src.math.llroundf16 + libc.src.math.log10f16 libc.src.math.log2f16 libc.src.math.logbf16 libc.src.math.logf16 @@ -684,6 +685,7 @@ if(LIBC_TYPES_HAS_FLOAT16) libc.src.math.setpayloadsigf16 libc.src.math.sinhf16 libc.src.math.sinpif16 + libc.src.math.sqrtf16 libc.src.math.tanhf16 libc.src.math.totalorderf16 libc.src.math.totalordermagf16 diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst index 84ed0e4135eba6..a50e054622e1a4 100644 --- a/libc/docs/math/index.rst +++ b/libc/docs/math/index.rst @@ -312,7 +312,7 @@ Higher Math Functions +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ | log | |check| | |check| | | |check| | | 7.12.6.11 | F.10.3.11 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ -| log10 | |check| | |check| | | | | 7.12.6.12 | F.10.3.12 | +| log10 | |check| | |check| | | |check| | | 7.12.6.12 | F.10.3.12 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ | log10p1 | | | | | | 7.12.6.13 | F.10.3.13 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ @@ -344,7 +344,7 @@ Higher Math Functions +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ | sinpi | |check| | | | |check| | | 7.12.4.13 | F.10.1.13 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ -| sqrt | |check| | |check| | |check| | | |check| | 7.12.7.10 | F.10.4.10 | +| sqrt | |check| | |check| | |check| | |check| | |check| | 7.12.7.10 | F.10.4.10 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ | tan | |check| | |check| | | | | 7.12.4.7 | F.10.1.7 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+ diff --git a/libc/hdr/stdio_overlay.h 
b/libc/hdr/stdio_overlay.h index cec55abfde7bf6..aef8c448fe49d4 100644 --- a/libc/hdr/stdio_overlay.h +++ b/libc/hdr/stdio_overlay.h @@ -27,6 +27,17 @@ #undef _FORTIFY_SOURCE #endif +#ifdef __USE_EXTERN_INLINES +#define LIBC_OLD_USE_EXTERN_INLINES +#undef __USE_EXTERN_INLINES +#endif + +#ifdef __USE_FORTIFY_LEVEL +#define LIBC_OLD_USE_FORTIFY_LEVEL __USE_FORTIFY_LEVEL +#undef __USE_FORTIFY_LEVEL +#define __USE_FORTIFY_LEVEL 0 +#endif + #ifndef __NO_INLINE__ #define __NO_INLINE__ 1 #define LIBC_SET_NO_INLINE @@ -44,4 +55,15 @@ #undef LIBC_SET_NO_INLINE #endif +#ifdef LIBC_OLD_USE_FORTIFY_LEVEL +#undef __USE_FORTIFY_LEVEL +#define __USE_FORTIFY_LEVEL LIBC_OLD_USE_FORTIFY_LEVEL +#undef LIBC_OLD_USE_FORTIFY_LEVEL +#endif + +#ifdef LIBC_OLD_USE_EXTERN_INLINES +#define __USE_EXTERN_INLINES +#undef LIBC_OLD_USE_EXTERN_INLINES +#endif + #endif // LLVM_LIBC_HDR_STDIO_OVERLAY_H diff --git a/libc/hdr/wchar_overlay.h b/libc/hdr/wchar_overlay.h index a1de9d5085d47b..99a70899779e7c 100644 --- a/libc/hdr/wchar_overlay.h +++ b/libc/hdr/wchar_overlay.h @@ -32,6 +32,17 @@ #define LIBC_SET_NO_INLINE #endif +#ifdef __USE_EXTERN_INLINES +#define LIBC_OLD_USE_EXTERN_INLINES +#undef __USE_EXTERN_INLINES +#endif + +#ifdef __USE_FORTIFY_LEVEL +#define LIBC_OLD_USE_FORTIFY_LEVEL __USE_FORTIFY_LEVEL +#undef __USE_FORTIFY_LEVEL +#define __USE_FORTIFY_LEVEL 0 +#endif + #include #ifdef LIBC_OLD_FORTIFY_SOURCE @@ -44,4 +55,15 @@ #undef LIBC_SET_NO_INLINE #endif +#ifdef LIBC_OLD_USE_FORTIFY_LEVEL +#undef __USE_FORTIFY_LEVEL +#define __USE_FORTIFY_LEVEL LIBC_OLD_USE_FORTIFY_LEVEL +#undef LIBC_OLD_USE_FORTIFY_LEVEL +#endif + +#ifdef LIBC_OLD_USE_EXTERN_INLINES +#define __USE_EXTERN_INLINES +#undef LIBC_OLD_USE_EXTERN_INLINES +#endif + #endif // LLVM_LIBC_HDR_WCHAR_OVERLAY_H diff --git a/libc/newhdrgen/yaml/math.yaml b/libc/newhdrgen/yaml/math.yaml index e0986f00a3b464..3cc4b599c777bf 100644 --- a/libc/newhdrgen/yaml/math.yaml +++ b/libc/newhdrgen/yaml/math.yaml @@ -213,6 +213,13 @@ functions: arguments: - type: _Float16 guard: LIBC_TYPES_HAS_FLOAT16 + - name: coshf16 + standards: + - stdc + return_type: _Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: ddivl standards: - stdc @@ -273,6 +280,13 @@ functions: return_type: float arguments: - type: float + - name: exp10m1f16 + standards: + - stdc + return_type: _Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: exp2 standards: - stdc @@ -1564,6 +1578,13 @@ functions: return_type: float arguments: - type: float + - name: log10f16 + standards: + - stdc + return_type: _Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: log1p standards: - stdc @@ -1588,6 +1609,13 @@ functions: return_type: float arguments: - type: float + - name: log2f16 + standards: + - stdc + return_type: _Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: logb standards: - stdc @@ -1626,6 +1654,13 @@ functions: return_type: float arguments: - type: float + - name: logf16 + standards: + - stdc + return_type: _Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: lrint standards: - stdc @@ -2304,6 +2339,13 @@ functions: return_type: float arguments: - type: float + - name: sinhf16 + standards: + - stdc + return_type: _Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: sinpif16 standards: - stdc @@ -2330,6 +2372,13 @@ functions: arguments: - type: float128 guard: LIBC_TYPES_HAS_FLOAT128 + - name: sqrtf16 + standards: + - stdc + return_type: 
_Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: sqrtl standards: - stdc @@ -2354,6 +2403,13 @@ functions: return_type: float arguments: - type: float + - name: tanhf16 + standards: + - stdc + return_type: _Float16 + arguments: + - type: _Float16 + guard: LIBC_TYPES_HAS_FLOAT16 - name: totalorder standards: - stdc diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td index d2a073847503ef..d1ebc6ffb5821e 100644 --- a/libc/spec/stdc.td +++ b/libc/spec/stdc.td @@ -642,6 +642,7 @@ def StdC : StandardSpec<"stdc"> { FunctionSpec<"log10", RetValSpec, [ArgSpec]>, FunctionSpec<"log10f", RetValSpec, [ArgSpec]>, + GuardedFunctionSpec<"log10f16", RetValSpec, [ArgSpec], "LIBC_TYPES_HAS_FLOAT16">, FunctionSpec<"log1p", RetValSpec, [ArgSpec]>, FunctionSpec<"log1pf", RetValSpec, [ArgSpec]>, @@ -753,6 +754,7 @@ def StdC : StandardSpec<"stdc"> { FunctionSpec<"sqrt", RetValSpec, [ArgSpec]>, FunctionSpec<"sqrtf", RetValSpec, [ArgSpec]>, FunctionSpec<"sqrtl", RetValSpec, [ArgSpec]>, + GuardedFunctionSpec<"sqrtf16", RetValSpec, [ArgSpec], "LIBC_TYPES_HAS_FLOAT16">, GuardedFunctionSpec<"sqrtf128", RetValSpec, [ArgSpec], "LIBC_TYPES_HAS_FLOAT128">, FunctionSpec<"trunc", RetValSpec, [ArgSpec]>, diff --git a/libc/src/__support/FPUtil/generic/sqrt.h b/libc/src/__support/FPUtil/generic/sqrt.h index 01af4bb7c90092..497ebd145c6b42 100644 --- a/libc/src/__support/FPUtil/generic/sqrt.h +++ b/libc/src/__support/FPUtil/generic/sqrt.h @@ -139,7 +139,8 @@ sqrt(InType x) { for (InStorageType current_bit = ONE >> 1; current_bit; current_bit >>= 1) { r <<= 1; - InStorageType tmp = (y << 1) + current_bit; // 2*y(n - 1) + 2^(-n-1) + // 2*y(n - 1) + 2^(-n-1) + InStorageType tmp = static_cast((y << 1) + current_bit); if (r >= tmp) { r -= tmp; y += current_bit; diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt index 3836d6562a0748..80c1867d2116f6 100644 --- a/libc/src/math/CMakeLists.txt +++ b/libc/src/math/CMakeLists.txt @@ -335,6 +335,7 @@ add_math_entrypoint_object(ldexpf128) add_math_entrypoint_object(log10) add_math_entrypoint_object(log10f) +add_math_entrypoint_object(log10f16) add_math_entrypoint_object(log1p) add_math_entrypoint_object(log1pf) @@ -492,6 +493,7 @@ add_math_entrypoint_object(sinhf16) add_math_entrypoint_object(sqrt) add_math_entrypoint_object(sqrtf) add_math_entrypoint_object(sqrtl) +add_math_entrypoint_object(sqrtf16) add_math_entrypoint_object(sqrtf128) add_math_entrypoint_object(tan) diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt index 145c0708796600..9491bd178f13dc 100644 --- a/libc/src/math/generic/CMakeLists.txt +++ b/libc/src/math/generic/CMakeLists.txt @@ -2195,6 +2195,28 @@ add_entrypoint_object( -O3 ) +add_entrypoint_object( + log10f16 + SRCS + log10f16.cpp + HDRS + ../log10f16.h + DEPENDS + .expxf16 + libc.hdr.errno_macros + libc.hdr.fenv_macros + libc.src.__support.FPUtil.cast + libc.src.__support.FPUtil.except_value_utils + libc.src.__support.FPUtil.fenv_impl + libc.src.__support.FPUtil.fp_bits + libc.src.__support.FPUtil.multiply_add + libc.src.__support.FPUtil.polyeval + libc.src.__support.macros.optimization + libc.src.__support.macros.properties.cpu_features + COMPILE_OPTIONS + -O3 +) + add_entrypoint_object( log1p SRCS @@ -3249,6 +3271,18 @@ add_entrypoint_object( -O3 ) +add_entrypoint_object( + sqrtf16 + SRCS + sqrtf16.cpp + HDRS + ../sqrtf16.h + DEPENDS + libc.src.__support.FPUtil.sqrt + COMPILE_OPTIONS + -O3 +) + add_entrypoint_object( sqrtf128 SRCS diff --git a/libc/src/math/generic/expxf16.h 
b/libc/src/math/generic/expxf16.h index 56ed6ee7cc0056..67bb248307519f 100644 --- a/libc/src/math/generic/expxf16.h +++ b/libc/src/math/generic/expxf16.h @@ -316,6 +316,20 @@ constexpr cpp::array LOG2F_F = { 0x1.d053f6p-1f, 0x1.dc899ap-1f, 0x1.e88c6cp-1f, 0x1.f45e08p-1f, }; +// Generated by Sollya with the following commands: +// > display = hexadecimal; +// > for i from 0 to 31 do print(round(log10(1 + i * 2^-5), SG, RN)); +constexpr cpp::array LOG10F_F = { + 0x0p+0f, 0x1.b5e908p-7f, 0x1.af5f92p-6f, 0x1.3ed11ap-5f, + 0x1.a30a9ep-5f, 0x1.02428cp-4f, 0x1.31b306p-4f, 0x1.5fe804p-4f, + 0x1.8cf184p-4f, 0x1.b8de4ep-4f, 0x1.e3bc1ap-4f, 0x1.06cbd6p-3f, + 0x1.1b3e72p-3f, 0x1.2f3b6ap-3f, 0x1.42c7e8p-3f, 0x1.55e8c6p-3f, + 0x1.68a288p-3f, 0x1.7af974p-3f, 0x1.8cf184p-3f, 0x1.9e8e7cp-3f, + 0x1.afd3e4p-3f, 0x1.c0c514p-3f, 0x1.d1653p-3f, 0x1.e1b734p-3f, + 0x1.f1bdeep-3f, 0x1.00be06p-2f, 0x1.087a08p-2f, 0x1.101432p-2f, + 0x1.178da6p-2f, 0x1.1ee778p-2f, 0x1.2622bp-2f, 0x1.2d404cp-2f, +}; + // Generated by Sollya with the following commands: // > display = hexadecimal; // > for i from 0 to 31 do print(round(1 / (1 + i * 2^-5), SG, RN)); diff --git a/libc/src/math/generic/log10f16.cpp b/libc/src/math/generic/log10f16.cpp new file mode 100644 index 00000000000000..990bcabaf68718 --- /dev/null +++ b/libc/src/math/generic/log10f16.cpp @@ -0,0 +1,164 @@ +//===-- Half-precision log10(x) function ----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/math/log10f16.h" +#include "expxf16.h" +#include "hdr/errno_macros.h" +#include "hdr/fenv_macros.h" +#include "src/__support/FPUtil/FEnvImpl.h" +#include "src/__support/FPUtil/FPBits.h" +#include "src/__support/FPUtil/PolyEval.h" +#include "src/__support/FPUtil/cast.h" +#include "src/__support/FPUtil/except_value_utils.h" +#include "src/__support/FPUtil/multiply_add.h" +#include "src/__support/common.h" +#include "src/__support/macros/config.h" +#include "src/__support/macros/optimization.h" +#include "src/__support/macros/properties/cpu_features.h" + +namespace LIBC_NAMESPACE_DECL { + +#ifdef LIBC_TARGET_CPU_HAS_FMA +static constexpr size_t N_LOG10F16_EXCEPTS = 11; +#else +static constexpr size_t N_LOG10F16_EXCEPTS = 17; +#endif + +static constexpr fputil::ExceptValues + LOG10F16_EXCEPTS = {{ + // (input, RZ output, RU offset, RD offset, RN offset) + // x = 0x1.e3cp-3, log10f16(x) = -0x1.40cp-1 (RZ) + {0x338fU, 0xb903U, 0U, 1U, 0U}, + // x = 0x1.fep-3, log10f16(x) = -0x1.35p-1 (RZ) + {0x33f8U, 0xb8d4U, 0U, 1U, 1U}, +#ifndef LIBC_TARGET_CPU_HAS_FMA + // x = 0x1.394p-1, log10f16(x) = -0x1.b4cp-3 (RZ) + {0x38e5U, 0xb2d3U, 0U, 1U, 1U}, +#endif + // x = 0x1.ea8p-1, log10f16(x) = -0x1.31p-6 (RZ) + {0x3baaU, 0xa4c4U, 0U, 1U, 1U}, + // x = 0x1.ebp-1, log10f16(x) = -0x1.29cp-6 (RZ) + {0x3bacU, 0xa4a7U, 0U, 1U, 1U}, + // x = 0x1.f3p-1, log10f16(x) = -0x1.6dcp-7 (RZ) + {0x3bccU, 0xa1b7U, 0U, 1U, 1U}, +// x = 0x1.f38p-1, log10f16(x) = -0x1.5f8p-7 (RZ) +#ifndef LIBC_TARGET_CPU_HAS_FMA + {0x3bceU, 0xa17eU, 0U, 1U, 1U}, + // x = 0x1.fd8p-1, log10f16(x) = -0x1.168p-9 (RZ) + {0x3bf6U, 0x985aU, 0U, 1U, 1U}, + // x = 0x1.ff8p-1, log10f16(x) = -0x1.bccp-12 (RZ) + {0x3bfeU, 0x8ef3U, 0U, 1U, 1U}, + // x = 0x1.374p+0, log10f16(x) = 0x1.5b8p-4 (RZ) + {0x3cddU, 0x2d6eU, 1U, 0U, 1U}, + // x = 
0x1.3ecp+1, log10f16(x) = 0x1.958p-2 (RZ) + {0x40fbU, 0x3656U, 1U, 0U, 1U}, +#endif + // x = 0x1.4p+3, log10f16(x) = 0x1p+0 (RZ) + {0x4900U, 0x3c00U, 0U, 0U, 0U}, + // x = 0x1.9p+6, log10f16(x) = 0x1p+1 (RZ) + {0x5640U, 0x4000U, 0U, 0U, 0U}, + // x = 0x1.f84p+6, log10f16(x) = 0x1.0ccp+1 (RZ) + {0x57e1U, 0x4033U, 1U, 0U, 0U}, + // x = 0x1.f4p+9, log10f16(x) = 0x1.8p+1 (RZ) + {0x63d0U, 0x4200U, 0U, 0U, 0U}, + // x = 0x1.388p+13, log10f16(x) = 0x1p+2 (RZ) + {0x70e2U, 0x4400U, 0U, 0U, 0U}, + // x = 0x1.674p+13, log10f16(x) = 0x1.03cp+2 (RZ) + {0x719dU, 0x440fU, 1U, 0U, 0U}, + }}; + +LLVM_LIBC_FUNCTION(float16, log10f16, (float16 x)) { + using FPBits = fputil::FPBits; + FPBits x_bits(x); + + uint16_t x_u = x_bits.uintval(); + + // If x <= 0, or x is 1, or x is +inf, or x is NaN. + if (LIBC_UNLIKELY(x_u == 0U || x_u == 0x3c00U || x_u >= 0x7c00U)) { + // log10(NaN) = NaN + if (x_bits.is_nan()) { + if (x_bits.is_signaling_nan()) { + fputil::raise_except_if_required(FE_INVALID); + return FPBits::quiet_nan().get_val(); + } + + return x; + } + + // log10(+/-0) = −inf + if ((x_u & 0x7fffU) == 0U) { + fputil::raise_except_if_required(FE_DIVBYZERO); + return FPBits::inf(Sign::NEG).get_val(); + } + + if (x_u == 0x3c00U) + return FPBits::zero().get_val(); + + // When x < 0. + if (x_u > 0x8000U) { + fputil::set_errno_if_required(EDOM); + fputil::raise_except_if_required(FE_INVALID); + return FPBits::quiet_nan().get_val(); + } + + // log10(+inf) = +inf + return FPBits::inf().get_val(); + } + + if (auto r = LOG10F16_EXCEPTS.lookup(x_u); LIBC_UNLIKELY(r.has_value())) + return r.value(); + + // To compute log10(x), we perform the following range reduction: + // x = 2^m * 1.mant, + // log10(x) = m * log10(2) + log10(1.mant). + // To compute log10(1.mant), let f be the highest 6 bits including the hidden + // bit, and d be the difference (1.mant - f), i.e., the remaining 5 bits of + // the mantissa, then: + // log10(1.mant) = log10(f) + log10(1.mant / f) + // = log10(f) + log10(1 + d/f) + // since d/f is sufficiently small. + // We store log10(f) and 1/f in the lookup tables LOG10F_F and ONE_OVER_F_F + // respectively. + + int m = -FPBits::EXP_BIAS; + + // When x is subnormal, normalize it. + if ((x_u & FPBits::EXP_MASK) == 0U) { + // Can't pass an integer to fputil::cast directly. + constexpr float NORMALIZE_EXP = 1U << FPBits::FRACTION_LEN; + x_bits = FPBits(x_bits.get_val() * fputil::cast(NORMALIZE_EXP)); + x_u = x_bits.uintval(); + m -= FPBits::FRACTION_LEN; + } + + uint16_t mant = x_bits.get_mantissa(); + // Leading 10 - 5 = 5 bits of the mantissa. + int f = mant >> 5; + // Unbiased exponent. + m += x_u >> FPBits::FRACTION_LEN; + + // Set bits to 1.mant instead of 2^m * 1.mant. 
+  x_bits.set_biased_exponent(FPBits::EXP_BIAS);
+  float mant_f = x_bits.get_val();
+  // v = 1.mant * 1/f - 1 = d/f
+  float v = fputil::multiply_add(mant_f, ONE_OVER_F_F[f], -1.0f);
+
+  // Degree-3 minimax polynomial generated by Sollya with the following
+  // commands:
+  //   > display = hexadecimal;
+  //   > P = fpminimax(log10(1 + x)/x, 2, [|SG...|], [-2^-5, 2^-5]);
+  //   > x * P;
+  float log10p1_d_over_f =
+      v * fputil::polyeval(v, 0x1.bcb7bp-2f, -0x1.bce168p-3f, 0x1.28acb8p-3f);
+  // log10(1.mant) = log10(f) + log10(1 + d/f)
+  float log10_1_mant = LOG10F_F[f] + log10p1_d_over_f;
+  return fputil::cast<float16>(
+      fputil::multiply_add(static_cast<float>(m), LOG10F_2, log10_1_mant));
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/sqrtf16.cpp b/libc/src/math/generic/sqrtf16.cpp
new file mode 100644
index 00000000000000..0aa4a201b3e68c
--- /dev/null
+++ b/libc/src/math/generic/sqrtf16.cpp
@@ -0,0 +1,20 @@
+//===-- Implementation of sqrtf16 function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/sqrtf16.h"
+#include "src/__support/FPUtil/sqrt.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+LLVM_LIBC_FUNCTION(float16, sqrtf16, (float16 x)) {
+  return fputil::sqrt<float16>(x);
+}
+
+} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/log10f16.h b/libc/src/math/log10f16.h
new file mode 100644
index 00000000000000..298deb370e0b0f
--- /dev/null
+++ b/libc/src/math/log10f16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for log10f16 ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_LOG10F16_H
+#define LLVM_LIBC_SRC_MATH_LOG10F16_H
+
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+float16 log10f16(float16 x);
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC_MATH_LOG10F16_H
diff --git a/libc/src/math/sqrtf16.h b/libc/src/math/sqrtf16.h
new file mode 100644
index 00000000000000..bb09c4fdaf8d00
--- /dev/null
+++ b/libc/src/math/sqrtf16.h
@@ -0,0 +1,21 @@
+//===-- Implementation header for sqrtf16 -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_MATH_SQRTF16_H +#define LLVM_LIBC_SRC_MATH_SQRTF16_H + +#include "src/__support/macros/config.h" +#include "src/__support/macros/properties/types.h" + +namespace LIBC_NAMESPACE_DECL { + +float16 sqrtf16(float16 x); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_MATH_SQRTF16_H diff --git a/libc/src/string/CMakeLists.txt b/libc/src/string/CMakeLists.txt index 787188ab3beb91..b33cbc5358d60d 100644 --- a/libc/src/string/CMakeLists.txt +++ b/libc/src/string/CMakeLists.txt @@ -138,6 +138,7 @@ add_entrypoint_object( DEPENDS .strcpy .string_utils + libc.include.llvm-libc-types.size_t ) add_entrypoint_object( @@ -240,6 +241,7 @@ add_entrypoint_object( .string_utils libc.include.stdlib libc.src.errno.errno + libc.include.llvm-libc-types.size_t ) add_entrypoint_object( @@ -270,7 +272,7 @@ add_entrypoint_object( strlcat.h DEPENDS .string_utils - libc.include.string + libc.include.llvm-libc-types.size_t ) add_entrypoint_object( @@ -281,7 +283,7 @@ add_entrypoint_object( strlcpy.h DEPENDS .string_utils - libc.include.string + libc.include.llvm-libc-types.size_t ) add_entrypoint_object( @@ -292,7 +294,7 @@ add_entrypoint_object( strlen.h DEPENDS .string_utils - libc.include.string + libc.include.llvm-libc-types.size_t ) add_entrypoint_object( @@ -304,6 +306,7 @@ add_entrypoint_object( DEPENDS .strncpy .string_utils + libc.include.llvm-libc-types.size_t ) add_entrypoint_object( @@ -346,6 +349,7 @@ add_entrypoint_object( .string_utils libc.include.stdlib libc.src.__support.CPP.new + libc.include.llvm-libc-types.size_t ) add_entrypoint_object( diff --git a/libc/src/string/strcat.h b/libc/src/string/strcat.h index 90a7fd2e41337e..82860196ce29a2 100644 --- a/libc/src/string/strcat.h +++ b/libc/src/string/strcat.h @@ -9,8 +9,8 @@ #ifndef LLVM_LIBC_SRC_STRING_STRCAT_H #define LLVM_LIBC_SRC_STRING_STRCAT_H +#include "include/llvm-libc-types/size_t.h" #include "src/__support/macros/config.h" -#include namespace LIBC_NAMESPACE_DECL { diff --git a/libc/src/string/strcpy.h b/libc/src/string/strcpy.h index d4f3e81fdc733a..9e0c3dbc39ef90 100644 --- a/libc/src/string/strcpy.h +++ b/libc/src/string/strcpy.h @@ -9,8 +9,8 @@ #ifndef LLVM_LIBC_SRC_STRING_STRCPY_H #define LLVM_LIBC_SRC_STRING_STRCPY_H +#include "include/llvm-libc-types/size_t.h" #include "src/__support/macros/config.h" -#include namespace LIBC_NAMESPACE_DECL { diff --git a/libc/src/string/strdup.h b/libc/src/string/strdup.h index 45303a3efeb493..2744e53d45d475 100644 --- a/libc/src/string/strdup.h +++ b/libc/src/string/strdup.h @@ -9,8 +9,8 @@ #ifndef LLVM_LIBC_SRC_STRING_STRDUP_H #define LLVM_LIBC_SRC_STRING_STRDUP_H +#include "include/llvm-libc-types/size_t.h" #include "src/__support/macros/config.h" -#include namespace LIBC_NAMESPACE_DECL { diff --git a/libc/src/string/strlcat.h b/libc/src/string/strlcat.h index ffe97af62a543c..9dc8f3a3bc0d18 100644 --- a/libc/src/string/strlcat.h +++ b/libc/src/string/strlcat.h @@ -9,8 +9,8 @@ #ifndef LLVM_LIBC_SRC_STRING_STRLCAT_H #define LLVM_LIBC_SRC_STRING_STRLCAT_H +#include "include/llvm-libc-types/size_t.h" #include "src/__support/macros/config.h" -#include namespace LIBC_NAMESPACE_DECL { diff --git a/libc/src/string/strlcpy.h b/libc/src/string/strlcpy.h index 058e7653b1b91f..45b2c2a2ec26b4 100644 --- a/libc/src/string/strlcpy.h +++ b/libc/src/string/strlcpy.h @@ -9,8 +9,8 @@ #ifndef LLVM_LIBC_SRC_STRING_STRLCPY_H #define 
LLVM_LIBC_SRC_STRING_STRLCPY_H +#include "include/llvm-libc-types/size_t.h" #include "src/__support/macros/config.h" -#include namespace LIBC_NAMESPACE_DECL { diff --git a/libc/src/string/strlen.h b/libc/src/string/strlen.h index f07bf73ace3de6..093edcf479bcf2 100644 --- a/libc/src/string/strlen.h +++ b/libc/src/string/strlen.h @@ -9,8 +9,8 @@ #ifndef LLVM_LIBC_SRC_STRING_STRLEN_H #define LLVM_LIBC_SRC_STRING_STRLEN_H +#include "include/llvm-libc-types/size_t.h" #include "src/__support/macros/config.h" -#include namespace LIBC_NAMESPACE_DECL { diff --git a/libc/src/string/strncat.h b/libc/src/string/strncat.h index 1a130799f39658..f37d9a7bc1544a 100644 --- a/libc/src/string/strncat.h +++ b/libc/src/string/strncat.h @@ -9,8 +9,8 @@ #ifndef LLVM_LIBC_SRC_STRING_STRNCAT_H #define LLVM_LIBC_SRC_STRING_STRNCAT_H +#include "include/llvm-libc-types/size_t.h" #include "src/__support/macros/config.h" -#include namespace LIBC_NAMESPACE_DECL { diff --git a/libc/src/string/strndup.h b/libc/src/string/strndup.h index 03370cc8d7dce1..78cde7b33e13b1 100644 --- a/libc/src/string/strndup.h +++ b/libc/src/string/strndup.h @@ -9,8 +9,8 @@ #ifndef LLVM_LIBC_SRC_STRING_STRNDUP_H #define LLVM_LIBC_SRC_STRING_STRNDUP_H +#include "include/llvm-libc-types/size_t.h" #include "src/__support/macros/config.h" -#include namespace LIBC_NAMESPACE_DECL { diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt index c0209a13287028..b46ef4028915ba 100644 --- a/libc/test/src/math/CMakeLists.txt +++ b/libc/test/src/math/CMakeLists.txt @@ -1501,6 +1501,17 @@ add_fp_unittest( libc.src.math.sqrtl ) +add_fp_unittest( + sqrtf16_test + NEED_MPFR + SUITE + libc-math-unittests + SRCS + sqrtf16_test.cpp + DEPENDS + libc.src.math.sqrtf16 +) + add_fp_unittest( generic_sqrtf_test NEED_MPFR @@ -1857,6 +1868,17 @@ add_fp_unittest( libc.src.__support.FPUtil.fp_bits ) +add_fp_unittest( + log10f16_test + NEED_MPFR + SUITE + libc-math-unittests + SRCS + log10f16_test.cpp + DEPENDS + libc.src.math.log10f16 +) + add_fp_unittest( log1p_test NEED_MPFR diff --git a/libc/test/src/math/log10f16_test.cpp b/libc/test/src/math/log10f16_test.cpp new file mode 100644 index 00000000000000..a71e3309ac5f08 --- /dev/null +++ b/libc/test/src/math/log10f16_test.cpp @@ -0,0 +1,40 @@ +//===-- Exhaustive test for log10f16 --------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/math/log10f16.h" +#include "test/UnitTest/FPMatcher.h" +#include "test/UnitTest/Test.h" +#include "utils/MPFRWrapper/MPFRUtils.h" + +using LlvmLibcLog10f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; + +namespace mpfr = LIBC_NAMESPACE::testing::mpfr; + +// Range: [0, Inf]; +static constexpr uint16_t POS_START = 0x0000U; +static constexpr uint16_t POS_STOP = 0x7c00U; + +// Range: [-Inf, 0]; +static constexpr uint16_t NEG_START = 0x8000U; +static constexpr uint16_t NEG_STOP = 0xfc00U; + +TEST_F(LlvmLibcLog10f16Test, PositiveRange) { + for (uint16_t v = POS_START; v <= POS_STOP; ++v) { + float16 x = FPBits(v).get_val(); + EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log10, x, + LIBC_NAMESPACE::log10f16(x), 0.5); + } +} + +TEST_F(LlvmLibcLog10f16Test, NegativeRange) { + for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { + float16 x = FPBits(v).get_val(); + EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log10, x, + LIBC_NAMESPACE::log10f16(x), 0.5); + } +} diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt index 8bba7a083da4d7..269e92c5900628 100644 --- a/libc/test/src/math/smoke/CMakeLists.txt +++ b/libc/test/src/math/smoke/CMakeLists.txt @@ -2900,6 +2900,18 @@ add_fp_unittest( libc.src.math.sqrtl ) +add_fp_unittest( + sqrtf16_test + SUITE + libc-math-smoke-tests + SRCS + sqrtf16_test.cpp + HDRS + SqrtTest.h + DEPENDS + libc.src.math.sqrtf16 +) + add_fp_unittest( sqrtf128_test SUITE @@ -3643,6 +3655,19 @@ add_fp_unittest( libc.src.__support.FPUtil.fp_bits ) +add_fp_unittest( + log10f16_test + SUITE + libc-math-smoke-tests + SRCS + log10f16_test.cpp + DEPENDS + libc.hdr.fenv_macros + libc.src.errno.errno + libc.src.math.log10f16 + libc.src.__support.FPUtil.cast +) + add_fp_unittest( log1p_test SUITE diff --git a/libc/test/src/math/smoke/log10f16_test.cpp b/libc/test/src/math/smoke/log10f16_test.cpp new file mode 100644 index 00000000000000..471e1989333268 --- /dev/null +++ b/libc/test/src/math/smoke/log10f16_test.cpp @@ -0,0 +1,50 @@ +//===-- Unittests for log10f16 --------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "hdr/fenv_macros.h" +#include "src/__support/FPUtil/cast.h" +#include "src/errno/libc_errno.h" +#include "src/math/log10f16.h" +#include "test/UnitTest/FPMatcher.h" +#include "test/UnitTest/Test.h" + +using LlvmLibcLog10f16Test = LIBC_NAMESPACE::testing::FPTest<float16>; + +TEST_F(LlvmLibcLog10f16Test, SpecialNumbers) { + LIBC_NAMESPACE::libc_errno = 0; + + EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::log10f16(aNaN)); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, LIBC_NAMESPACE::log10f16(sNaN), FE_INVALID); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_ALL_ROUNDING(inf, LIBC_NAMESPACE::log10f16(inf)); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::log10f16(neg_inf)); + EXPECT_MATH_ERRNO(EDOM); + + EXPECT_FP_EQ_WITH_EXCEPTION_ALL_ROUNDING( + neg_inf, LIBC_NAMESPACE::log10f16(zero), FE_DIVBYZERO); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_WITH_EXCEPTION_ALL_ROUNDING( + neg_inf, LIBC_NAMESPACE::log10f16(neg_zero), FE_DIVBYZERO); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_ALL_ROUNDING( + zero, + LIBC_NAMESPACE::log10f16(LIBC_NAMESPACE::fputil::cast<float16>(1.0))); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_ALL_ROUNDING( + aNaN, + LIBC_NAMESPACE::log10f16(LIBC_NAMESPACE::fputil::cast<float16>(-1.0))); + EXPECT_MATH_ERRNO(EDOM); +} diff --git a/libc/test/src/math/smoke/sqrtf16_test.cpp b/libc/test/src/math/smoke/sqrtf16_test.cpp new file mode 100644 index 00000000000000..d62049661eecbf --- /dev/null +++ b/libc/test/src/math/smoke/sqrtf16_test.cpp @@ -0,0 +1,13 @@ +//===-- Unittests for sqrtf16 ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "SqrtTest.h" + +#include "src/math/sqrtf16.h" + +LIST_SQRT_TESTS(float16, LIBC_NAMESPACE::sqrtf16) diff --git a/libc/test/src/math/sqrtf16_test.cpp b/libc/test/src/math/sqrtf16_test.cpp new file mode 100644 index 00000000000000..f6e8996761245d --- /dev/null +++ b/libc/test/src/math/sqrtf16_test.cpp @@ -0,0 +1,28 @@ +//===-- Exhaustive test for sqrtf16 ---------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/math/sqrtf16.h" +#include "test/UnitTest/FPMatcher.h" +#include "test/UnitTest/Test.h" +#include "utils/MPFRWrapper/MPFRUtils.h" + +using LlvmLibcSqrtf16Test = LIBC_NAMESPACE::testing::FPTest<float16>; + +namespace mpfr = LIBC_NAMESPACE::testing::mpfr; + +// Range: [0, Inf]; +static constexpr uint16_t POS_START = 0x0000U; +static constexpr uint16_t POS_STOP = 0x7c00U; + +TEST_F(LlvmLibcSqrtf16Test, PositiveRange) { + for (uint16_t v = POS_START; v <= POS_STOP; ++v) { + float16 x = FPBits(v).get_val(); + EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Sqrt, x, + LIBC_NAMESPACE::sqrtf16(x), 0.5); + } +} diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt index 3431ea7dab386b..a107314518b1fa 100644 --- a/libcxx/include/CMakeLists.txt +++ b/libcxx/include/CMakeLists.txt @@ -967,7 +967,6 @@ set(files limits list locale - locale.h map math.h mdspan @@ -998,7 +997,6 @@ set(files stdbool.h stddef.h stdexcept - stdint.h stdio.h stdlib.h stop_token diff --git a/libcxx/include/__iterator/reverse_iterator.h b/libcxx/include/__iterator/reverse_iterator.h index 50c0f21eaa286b..5e88d86ad5e9b2 100644 --- a/libcxx/include/__iterator/reverse_iterator.h +++ b/libcxx/include/__iterator/reverse_iterator.h @@ -136,10 +136,12 @@ class _LIBCPP_TEMPLATE_VIS reverse_iterator _LIBCPP_HIDE_FROM_ABI constexpr pointer operator->() const requires is_pointer_v<_Iter> || requires(const _Iter __i) { __i.operator->(); } { + _Iter __tmp = current; + --__tmp; if constexpr (is_pointer_v<_Iter>) { - return std::prev(current); + return __tmp; } else { - return std::prev(current).operator->(); + return __tmp.operator->(); } } #else diff --git a/libcxx/include/clocale b/libcxx/include/clocale index c689a64be288a3..4d53aa7eb29b29 100644 --- a/libcxx/include/clocale +++ b/libcxx/include/clocale @@ -38,14 +38,6 @@ lconv* localeconv(); #include <locale.h> -#ifndef _LIBCPP_LOCALE_H -# error <clocale> tried including <locale.h> but didn't find libc++'s <locale.h> header. \ - This usually means that your header search paths are not configured properly. \ - The header search paths should contain the C++ Standard Library headers before \ - any C Standard Library, and you are probably using compiler flags that make that \ - not be the case. -#endif - #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif diff --git a/libcxx/include/cstdint b/libcxx/include/cstdint index 8c4782859426dd..9c9b2323d06ea9 100644 --- a/libcxx/include/cstdint +++ b/libcxx/include/cstdint @@ -144,14 +144,6 @@ Types: #include <stdint.h> -#ifndef _LIBCPP_STDINT_H -# error <cstdint> tried including <stdint.h> but didn't find libc++'s <stdint.h> header. \ - This usually means that your header search paths are not configured properly. \ - The header search paths should contain the C++ Standard Library headers before \ - any C Standard Library, and you are probably using compiler flags that make that \ - not be the case. -#endif - #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif diff --git a/libcxx/include/locale.h b/libcxx/include/locale.h deleted file mode 100644 index 425bf47d437ac8..00000000000000 --- a/libcxx/include/locale.h +++ /dev/null @@ -1,46 +0,0 @@ -// -*- C++ -*- -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP_LOCALE_H -#define _LIBCPP_LOCALE_H - -/* - locale.h synopsis - -Macros: - - LC_ALL - LC_COLLATE - LC_CTYPE - LC_MONETARY - LC_NUMERIC - LC_TIME - -Types: - - lconv - -Functions: - - setlocale - localeconv - -*/ - -#include <__config> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if __has_include_next(<locale.h>) -# include_next <locale.h> -#endif - -#endif // _LIBCPP_LOCALE_H diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap index 5a0e199394d018..06e93d2452904d 100644 --- a/libcxx/include/module.modulemap +++ b/libcxx/include/module.modulemap @@ -2184,10 +2184,6 @@ module std_inttypes_h [system] { header "inttypes.h" export * } -module std_locale_h [system] { - header "locale.h" - export * -} module std_math_h [system] { header "math.h" export * @@ -2204,10 +2200,6 @@ module std_stddef_h [system] { // <stddef.h>'s __need_* macros require textual inclusion. textual header "stddef.h" } -module std_stdint_h [system] { - header "stdint.h" - export * -} module std_stdio_h [system] { // <stdio.h>'s __need_* macros require textual inclusion. textual header "stdio.h" diff --git a/libcxx/include/stdint.h b/libcxx/include/stdint.h deleted file mode 100644 index 35e5b8cbdad264..00000000000000 --- a/libcxx/include/stdint.h +++ /dev/null @@ -1,127 +0,0 @@ -// -*- C++ -*- -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP_STDINT_H -// AIX system headers need stdint.h to be re-enterable while _STD_TYPES_T -// is defined until an inclusion of it without _STD_TYPES_T occurs, in which -// case the header guard macro is defined. -#if !defined(_AIX) || !defined(_STD_TYPES_T) -# define _LIBCPP_STDINT_H -#endif // _STD_TYPES_T - -/* - stdint.h synopsis - -Macros: - - INT8_MIN - INT16_MIN - INT32_MIN - INT64_MIN - - INT8_MAX - INT16_MAX - INT32_MAX - INT64_MAX - - UINT8_MAX - UINT16_MAX - UINT32_MAX - UINT64_MAX - - INT_LEAST8_MIN - INT_LEAST16_MIN - INT_LEAST32_MIN - INT_LEAST64_MIN - - INT_LEAST8_MAX - INT_LEAST16_MAX - INT_LEAST32_MAX - INT_LEAST64_MAX - - UINT_LEAST8_MAX - UINT_LEAST16_MAX - UINT_LEAST32_MAX - UINT_LEAST64_MAX - - INT_FAST8_MIN - INT_FAST16_MIN - INT_FAST32_MIN - INT_FAST64_MIN - - INT_FAST8_MAX - INT_FAST16_MAX - INT_FAST32_MAX - INT_FAST64_MAX - - UINT_FAST8_MAX - UINT_FAST16_MAX - UINT_FAST32_MAX - UINT_FAST64_MAX - - INTPTR_MIN - INTPTR_MAX - UINTPTR_MAX - - INTMAX_MIN - INTMAX_MAX - - UINTMAX_MAX - - PTRDIFF_MIN - PTRDIFF_MAX - - SIG_ATOMIC_MIN - SIG_ATOMIC_MAX - - SIZE_MAX - - WCHAR_MIN - WCHAR_MAX - - WINT_MIN - WINT_MAX - - INT8_C(value) - INT16_C(value) - INT32_C(value) - INT64_C(value) - - UINT8_C(value) - UINT16_C(value) - UINT32_C(value) - UINT64_C(value) - - INTMAX_C(value) - UINTMAX_C(value) - -*/ - -#include <__config> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -/* C99 stdlib (e.g.
glibc < 2.18) does not provide macros needed - for C++11 unless __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS - are defined -*/ -#if defined(__cplusplus) && !defined(__STDC_LIMIT_MACROS) -# define __STDC_LIMIT_MACROS -#endif -#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) -# define __STDC_CONSTANT_MACROS -#endif - -#if __has_include_next(<stdint.h>) -# include_next <stdint.h> -#endif - -#endif // _LIBCPP_STDINT_H diff --git a/libcxx/test/libcxx/depr/depr.c.headers/extern_c.pass.cpp b/libcxx/test/libcxx/depr/depr.c.headers/extern_c.pass.cpp index 9fa4021e5c1ead..63ca6643797132 100644 --- a/libcxx/test/libcxx/depr/depr.c.headers/extern_c.pass.cpp +++ b/libcxx/test/libcxx/depr/depr.c.headers/extern_c.pass.cpp @@ -26,9 +26,6 @@ extern "C" { #include <inttypes.h> #include <iso646.h> #include <limits.h> -#ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include <locale.h> -#endif #include <math.h> #include <setjmp.h> #include <signal.h> diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/equal.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/equal.pass.cpp index fcf8d88fcf62be..6fe575ebdd9a0d 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/equal.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/equal.pass.cpp @@ -33,6 +33,10 @@ TEST_CONSTEXPR_CXX17 bool tests() { test(bidirectional_iterator<const char*>(s), bidirectional_iterator<const char*>(s+1), false); test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s), true); test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s+1), false); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s), true); + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s + 1), false); +#endif test(s, s, true); test(s, s+1, false); return true; diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/greater-equal.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/greater-equal.pass.cpp index fdcd02abb0d8ed..b2bfdb56d646ed 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/greater-equal.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/greater-equal.pass.cpp @@ -32,6 +32,11 @@ TEST_CONSTEXPR_CXX17 bool tests() { test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s), true); test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s+1), true); test(random_access_iterator<const char*>(s+1), random_access_iterator<const char*>(s), false); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s), true); + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s + 1), true); + test(cpp20_random_access_iterator<const char*>(s + 1), cpp20_random_access_iterator<const char*>(s), false); +#endif test(s, s, true); test(s, s+1, true); test(s+1, s, false); diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/greater.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/greater.pass.cpp index dce331e519646f..38f9258de31f5e 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/greater.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/greater.pass.cpp @@ -32,6 +32,11 @@ TEST_CONSTEXPR_CXX17 bool tests() { test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s), false); test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s+1), true); test(random_access_iterator<const char*>(s+1),
random_access_iterator<const char*>(s), false); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s), false); + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s + 1), true); + test(cpp20_random_access_iterator<const char*>(s + 1), cpp20_random_access_iterator<const char*>(s), false); +#endif test(s, s, false); test(s, s+1, true); test(s+1, s, false); diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/less-equal.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/less-equal.pass.cpp index e9cea6250a7645..a57930b111314d 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/less-equal.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/less-equal.pass.cpp @@ -32,6 +32,11 @@ TEST_CONSTEXPR_CXX17 bool tests() { test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s), true); test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s+1), false); test(random_access_iterator<const char*>(s+1), random_access_iterator<const char*>(s), true); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s), true); + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s + 1), false); + test(cpp20_random_access_iterator<const char*>(s + 1), cpp20_random_access_iterator<const char*>(s), true); +#endif test(s, s, true); test(s, s+1, false); test(s+1, s, true); diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/less.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/less.pass.cpp index b66147cf3a03c3..4cd3f249d033e1 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/less.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/less.pass.cpp @@ -32,6 +32,11 @@ TEST_CONSTEXPR_CXX17 bool tests() { test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s), false); test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s+1), false); test(random_access_iterator<const char*>(s+1), random_access_iterator<const char*>(s), true); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s), false); + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s + 1), false); + test(cpp20_random_access_iterator<const char*>(s + 1), cpp20_random_access_iterator<const char*>(s), true); +#endif test(s, s, false); test(s, s+1, false); test(s+1, s, true); diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/not-equal.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/not-equal.pass.cpp index 37a6ff1302ce77..509ac297c3cba6 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/not-equal.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cmp/not-equal.pass.cpp @@ -33,6 +33,10 @@ TEST_CONSTEXPR_CXX17 bool tests() { test(bidirectional_iterator<const char*>(s), bidirectional_iterator<const char*>(s+1), true); test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s), false); test(random_access_iterator<const char*>(s), random_access_iterator<const char*>(s+1), true); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s), false); + test(cpp20_random_access_iterator<const char*>(s), cpp20_random_access_iterator<const char*>(s + 1), true); +#endif test(s, s, false); test(s, s+1, true); return true; diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/assign.pass.cpp
b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/assign.pass.cpp index 0e5123a49e2b56..f9d2efa7c2a8cc 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/assign.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/assign.pass.cpp @@ -59,6 +59,9 @@ TEST_CONSTEXPR_CXX17 bool tests() { Derived d; test<bidirectional_iterator<Base*> >(bidirectional_iterator<Derived*>(&d)); test<random_access_iterator<Base*> >(random_access_iterator<Derived*>(&d)); +#if TEST_STD_VER >= 20 + test<cpp20_random_access_iterator<Base*> >(cpp20_random_access_iterator<Derived*>(&d)); +#endif test<Base*>(&d); char c = '\0'; diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.default.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.default.pass.cpp index fcb96de91d1a02..90047b19f5a63a 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.default.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.default.pass.cpp @@ -26,6 +26,9 @@ TEST_CONSTEXPR_CXX17 void test() { TEST_CONSTEXPR_CXX17 bool tests() { test<bidirectional_iterator<const char*> >(); test<random_access_iterator<const char*> >(); +#if TEST_STD_VER >= 20 + test<cpp20_random_access_iterator<const char*> >(); +#endif test<char*>(); test<const char*>(); return true; diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.iter.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.iter.pass.cpp index 801b2cf879ce5b..72e77b08564219 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.iter.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.iter.pass.cpp @@ -28,6 +28,9 @@ TEST_CONSTEXPR_CXX17 bool tests() { const char s[] = "123"; test(bidirectional_iterator<const char*>(s)); test(random_access_iterator<const char*>(s)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator<const char*>(s)); +#endif test(s); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.reverse_iterator.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.reverse_iterator.pass.cpp index 8f315e83f6d7b4..fa967b45b1d9f8 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.reverse_iterator.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.cons/ctor.reverse_iterator.pass.cpp @@ -33,6 +33,9 @@ TEST_CONSTEXPR_CXX17 bool tests() { Derived d; test<bidirectional_iterator<Base*> >(bidirectional_iterator<Derived*>(&d)); test<random_access_iterator<Base*> >(random_access_iterator<Derived*>(&d)); +#if TEST_STD_VER >= 20 + test<cpp20_random_access_iterator<Base*> >(cpp20_random_access_iterator<Derived*>(&d)); +#endif test<Base*>(&d); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.conv/base.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.conv/base.pass.cpp index 4fb33f54260457..35ed17583c8555 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.conv/base.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.conv/base.pass.cpp @@ -18,20 +18,28 @@ #include "test_macros.h" #include "test_iterators.h" -TEST_CONSTEXPR_CXX17 bool test() { - typedef bidirectional_iterator<int*> Iter; - int i = 0; - Iter iter(&i); - std::reverse_iterator<Iter> const reverse(iter); - std::reverse_iterator<Iter>::iterator_type base = reverse.base(); - assert(base == Iter(&i)); - return true; +template <class Iter> +TEST_CONSTEXPR_CXX17 void test() { + int i = 0; + Iter iter(&i); +
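// Annotation added in this rewrite, not part of the upstream patch: + // reverse_iterator stores a copy of `iter`, and base() must hand that copy + // back unchanged; the templated rewrite below repeats the check for each + // iterator category. +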
std::reverse_iterator<Iter> const reverse(iter); + typename std::reverse_iterator<Iter>::iterator_type base = reverse.base(); + assert(base == Iter(&i)); +} + +TEST_CONSTEXPR_CXX17 bool tests() { + test<bidirectional_iterator<int*> >(); + test<random_access_iterator<int*> >(); +#if TEST_STD_VER >= 20 + test<cpp20_random_access_iterator<int*>>(); +#endif + return true; } int main(int, char**) { - test(); + tests(); #if TEST_STD_VER > 14 - static_assert(tests(), ""); + static_assert(tests(), ""); #endif - return 0; + return 0; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/arrow.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/arrow.pass.cpp index 15d18d9145ef0c..665a1a89223bc4 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/arrow.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/arrow.pass.cpp @@ -24,6 +24,55 @@ #include "test_macros.h" +#if TEST_STD_VER >= 20 +// C++20 bidirectional_iterator that does not satisfy the Cpp17BidirectionalIterator named requirement. +template <class It> +class cpp20_bidirectional_iterator_with_arrow { + It it_; + +public: + using iterator_category = std::input_iterator_tag; + using iterator_concept = std::bidirectional_iterator_tag; + using value_type = std::iterator_traits<It>::value_type; + using difference_type = std::iterator_traits<It>::difference_type; + + cpp20_bidirectional_iterator_with_arrow() : it_() {} + explicit cpp20_bidirectional_iterator_with_arrow(It it) : it_(it) {} + + decltype(auto) operator*() const { return *it_; } + + auto operator->() const { + if constexpr (std::is_pointer_v<It>) { + return it_; + } else { + return it_.operator->(); + } + } + + cpp20_bidirectional_iterator_with_arrow& operator++() { + ++it_; + return *this; + } + cpp20_bidirectional_iterator_with_arrow& operator--() { + --it_; + return *this; + } + cpp20_bidirectional_iterator_with_arrow operator++(int) { return cpp20_bidirectional_iterator_with_arrow(it_++); } + cpp20_bidirectional_iterator_with_arrow operator--(int) { return cpp20_bidirectional_iterator_with_arrow(it_--); } + + friend bool + operator==(const cpp20_bidirectional_iterator_with_arrow& x, const cpp20_bidirectional_iterator_with_arrow& y) { + return x.it_ == y.it_; + } + friend bool + operator!=(const cpp20_bidirectional_iterator_with_arrow& x, const cpp20_bidirectional_iterator_with_arrow& y) { + return x.it_ != y.it_; + } + + friend It base(const cpp20_bidirectional_iterator_with_arrow& i) { return i.it_; } +}; +#endif + class A { int data_; @@ -113,6 +162,16 @@ int main(int, char**) static_assert(it1->get() == gC.get(), ""); } +#endif +#if TEST_STD_VER >= 20 + { + // The underlying iterator models c++20 bidirectional_iterator, + // but does not satisfy c++17 BidirectionalIterator named requirement + B data[] = {1, 2, 3}; + cpp20_bidirectional_iterator_with_arrow<B*> iter(data + 3); + auto ri = std::make_reverse_iterator(iter); + assert(ri->get() == 3); + } #endif { ((void)gC); diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/bracket.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/bracket.pass.cpp index 37a857ceefa83d..8b45bfa09b4fe7 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/bracket.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/bracket.pass.cpp @@ -33,6 +33,10 @@ TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "1234567890"; test(random_access_iterator(s+5), 4, '1');
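// (Annotation added in this rewrite, not part of the patch: for // std::reverse_iterator r(it), r[n] == *(it - n - 1); with it == s + 5, // n == 4 reads s[0] == '1' and n == 0 reads s[4] == '5'.)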
test(random_access_iterator(s+5), 0, '5'); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 5), 4, '1'); + test(cpp20_random_access_iterator(s + 5), 0, '5'); +#endif test(s+5, 4, '1'); test(s+5, 0, '5'); return true; diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/dereference.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/dereference.pass.cpp index 292c6da9a7733e..c3a489085c68b0 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/dereference.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.elem/dereference.pass.cpp @@ -21,6 +21,7 @@ #include #include "test_macros.h" +#include "test_iterators.h" class A { @@ -47,6 +48,10 @@ int main(int, char**) { A a; test(&a+1, A()); + test(random_access_iterator(&a + 1), A()); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(&a + 1), A()); +#endif #if TEST_STD_VER > 14 { diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/decrement-assign.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/decrement-assign.pass.cpp index 8c83ec1e9389f9..91c2d9363619bf 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/decrement-assign.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/decrement-assign.pass.cpp @@ -30,6 +30,9 @@ TEST_CONSTEXPR_CXX17 void test(It i, typename std::iterator_traits::differen TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "1234567890"; test(random_access_iterator(s+5), 5, random_access_iterator(s+10)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 5), 5, cpp20_random_access_iterator(s + 10)); +#endif test(s+5, 5, s+10); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/increment-assign.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/increment-assign.pass.cpp index e32fac9fc24fe1..2a2746f2cc52bd 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/increment-assign.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/increment-assign.pass.cpp @@ -29,7 +29,10 @@ TEST_CONSTEXPR_CXX17 void test(It i, typename std::iterator_traits::differen TEST_CONSTEXPR_CXX17 bool tests() { char const* s = "1234567890"; - test(random_access_iterator(s+5), 5, random_access_iterator(s)); + test(random_access_iterator(s + 5), 5, random_access_iterator(s)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 5), 5, cpp20_random_access_iterator(s)); +#endif test(s+5, 5, s); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/minus.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/minus.pass.cpp index f2474dd7669f2c..759cacad94e24c 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/minus.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/minus.pass.cpp @@ -28,7 +28,10 @@ TEST_CONSTEXPR_CXX17 void test(It i, typename std::iterator_traits::differen TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "1234567890"; - test(random_access_iterator(s+5), 5, random_access_iterator(s+10)); + test(random_access_iterator(s + 5), 5, random_access_iterator(s + 
10)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 5), 5, cpp20_random_access_iterator(s + 10)); +#endif test(s+5, 5, s+10); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/plus.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/plus.pass.cpp index 5673425e796757..24fa84e4f37c8b 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/plus.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/plus.pass.cpp @@ -28,7 +28,10 @@ TEST_CONSTEXPR_CXX17 void test(It i, typename std::iterator_traits::differen TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "1234567890"; - test(random_access_iterator(s+5), 5, random_access_iterator(s)); + test(random_access_iterator(s + 5), 5, random_access_iterator(s)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 5), 5, cpp20_random_access_iterator(s)); +#endif test(s+5, 5, s); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/postdecrement.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/postdecrement.pass.cpp index 24bedad314b7e8..f0551b5efece09 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/postdecrement.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/postdecrement.pass.cpp @@ -30,6 +30,9 @@ TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "123"; test(bidirectional_iterator(s+1), bidirectional_iterator(s+2)); test(random_access_iterator(s+1), random_access_iterator(s+2)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 1), cpp20_random_access_iterator(s + 2)); +#endif test(s+1, s+2); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/postincrement.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/postincrement.pass.cpp index e15bfb2fd15096..f1d3ea21a5b860 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/postincrement.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/postincrement.pass.cpp @@ -30,6 +30,9 @@ TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "123"; test(bidirectional_iterator(s+1), bidirectional_iterator(s)); test(random_access_iterator(s+1), random_access_iterator(s)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 1), cpp20_random_access_iterator(s)); +#endif test(s+1, s); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/predecrement.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/predecrement.pass.cpp index 2fbd530a085dcc..5a2ac785703672 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/predecrement.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/predecrement.pass.cpp @@ -30,6 +30,9 @@ TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "123"; test(bidirectional_iterator(s+1), bidirectional_iterator(s+2)); test(random_access_iterator(s+1), random_access_iterator(s+2)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 1), cpp20_random_access_iterator(s + 2)); +#endif test(s+1, s+2); return true; } diff --git 
a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/preincrement.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/preincrement.pass.cpp index 5efc8a39e22aa8..6087eedd2449f2 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/preincrement.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nav/preincrement.pass.cpp @@ -30,6 +30,9 @@ TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "123"; test(bidirectional_iterator(s+1), bidirectional_iterator(s)); test(random_access_iterator(s+1), random_access_iterator(s)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 1), cpp20_random_access_iterator(s)); +#endif test(s+1, s); return true; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/make_reverse_iterator.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/make_reverse_iterator.pass.cpp index 401eecb2a3b838..4a4e474a550835 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/make_reverse_iterator.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/make_reverse_iterator.pass.cpp @@ -22,24 +22,34 @@ #include "test_iterators.h" template -TEST_CONSTEXPR_CXX17 void test(It i) { - const std::reverse_iterator r = std::make_reverse_iterator(i); - assert(r.base() == i); +TEST_CONSTEXPR_CXX17 void test_one(It i) { + const std::reverse_iterator r = std::make_reverse_iterator(i); + assert(r.base() == i); +} + +template +TEST_CONSTEXPR_CXX17 void test() { + const char* s = "1234567890"; + It b(s); + It e(s + 10); + while (b != e) + test_one(b++); } TEST_CONSTEXPR_CXX17 bool tests() { - const char* s = "1234567890"; - random_access_iterator b(s); - random_access_iterator e(s+10); - while (b != e) - test (b++); - return true; + test(); + test>(); + test>(); +#if TEST_STD_VER >= 20 + test>(); +#endif + return true; } int main(int, char**) { - tests(); + tests(); #if TEST_STD_VER > 14 - static_assert(tests(), ""); + static_assert(tests(), ""); #endif - return 0; + return 0; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/minus.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/minus.pass.cpp index f7f74d145d73c6..676f6e1b491695 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/minus.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/minus.pass.cpp @@ -23,45 +23,63 @@ #include "test_macros.h" #include "test_iterators.h" -template struct HasMinus : std::false_type {}; -template struct HasMinus : std::true_type {}; +template +struct HasMinus : std::false_type {}; +template +struct HasMinus : std::true_type {}; + +// Test non-subtractable base iterator types +static_assert(HasMinus, std::reverse_iterator >::value, ""); +static_assert(HasMinus, std::reverse_iterator >::value, ""); + +#if TEST_STD_VER >= 11 +static_assert(!HasMinus, std::reverse_iterator >::value, ""); +static_assert(!HasMinus >, + std::reverse_iterator > >::value, + ""); +#endif template -TEST_CONSTEXPR_CXX17 void test(It1 l, It2 r, std::ptrdiff_t x) { - const std::reverse_iterator r1(l); - const std::reverse_iterator r2(r); - assert((r1 - r2) == x); +TEST_CONSTEXPR_CXX17 void test_one(It1 l, It2 r, std::ptrdiff_t x) { + const 
std::reverse_iterator r1(l); + const std::reverse_iterator r2(r); + assert((r1 - r2) == x); } -TEST_CONSTEXPR_CXX17 bool tests() { - using PC = const char*; - char s[3] = {0}; - - // Test same base iterator type - test(s, s, 0); - test(s, s+1, 1); - test(s+1, s, -1); +template +TEST_CONSTEXPR_CXX17 void test() { + // Test same base iterator type + char s[3] = {0}; - // Test different (but subtractable) base iterator types - test(PC(s), s, 0); - test(PC(s), s+1, 1); - test(PC(s+1), s, -1); + test_one(Iter(s), Iter(s), 0); + test_one(Iter(s), Iter(s + 1), 1); + test_one(Iter(s + 1), Iter(s), -1); +} - // Test non-subtractable base iterator types - static_assert( HasMinus, std::reverse_iterator >::value, ""); - static_assert( HasMinus, std::reverse_iterator >::value, ""); -#if TEST_STD_VER >= 11 - static_assert(!HasMinus, std::reverse_iterator >::value, ""); - static_assert(!HasMinus >, std::reverse_iterator > >::value, ""); +TEST_CONSTEXPR_CXX17 bool tests() { + { + test(); + test >(); +#if TEST_STD_VER >= 20 + test>(); #endif + } + { + // Test different (but subtractable) base iterator types + using PC = const char*; + char s[3] = {0}; + test_one(PC(s), s, 0); + test_one(PC(s), s + 1, 1); + test_one(PC(s + 1), s, -1); + } - return true; + return true; } int main(int, char**) { - tests(); + tests(); #if TEST_STD_VER > 14 - static_assert(tests(), ""); + static_assert(tests(), ""); #endif - return 0; + return 0; } diff --git a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/plus.pass.cpp b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/plus.pass.cpp index aeb9f89dd48725..9ead123781bc86 100644 --- a/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/plus.pass.cpp +++ b/libcxx/test/std/iterators/predef.iterators/reverse.iterators/reverse.iter.nonmember/plus.pass.cpp @@ -29,6 +29,9 @@ TEST_CONSTEXPR_CXX17 void test(It i, typename std::iterator_traits::differen TEST_CONSTEXPR_CXX17 bool tests() { const char* s = "1234567890"; test(random_access_iterator(s+5), 5, random_access_iterator(s)); +#if TEST_STD_VER >= 20 + test(cpp20_random_access_iterator(s + 5), 5, cpp20_random_access_iterator(s)); +#endif test(s+5, 5, s); return true; } diff --git a/libcxx/utils/libcxx/header_information.py b/libcxx/utils/libcxx/header_information.py index 6bebf3302ffae9..2ed52e8c1dbf22 100644 --- a/libcxx/utils/libcxx/header_information.py +++ b/libcxx/utils/libcxx/header_information.py @@ -15,7 +15,6 @@ # headers with #error directives "ios": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)", - "locale.h": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)", # transitive includers of the above headers "clocale": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)", "codecvt": "!defined(_LIBCPP_HAS_NO_LOCALIZATION)", @@ -53,7 +52,6 @@ "istream": "// UNSUPPORTED: no-localization", "latch": "// UNSUPPORTED: no-threads, c++03, c++11, c++14, c++17", "locale": "// UNSUPPORTED: no-localization", - "locale.h": "// UNSUPPORTED: no-localization", "mutex": "// UNSUPPORTED: no-threads, c++03", "ostream": "// UNSUPPORTED: no-localization", "print": "// UNSUPPORTED: no-filesystem, c++03, c++11, c++14, c++17, c++20, availability-fp_to_chars-missing", # TODO PRINT investigate diff --git a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp index 2a3aba28fb6ca5..32e6fb43d988ff 100644 --- a/libunwind/src/UnwindCursor.hpp +++ b/libunwind/src/UnwindCursor.hpp @@ -2150,9 +2150,9 @@ bool UnwindCursor::getInfoFromTBTable(pint_t pc, R ®isters) { 
dlsym(libHandle, "__xlcxx_personality_v0")); if (xlcPersonalityV0 == NULL) { _LIBUNWIND_TRACE_UNWINDING("dlsym() failed with errno=%d\n", errno); + dlclose(libHandle); assert(0 && "dlsym() failed"); } - dlclose(libHandle); errno = saveErrno; } xlcPersonalityV0InitLock.unlock(); diff --git a/lld/ELF/AArch64ErrataFix.cpp b/lld/ELF/AArch64ErrataFix.cpp index cd8fbf16f5b839..f9e03ce5bbe4db 100644 --- a/lld/ELF/AArch64ErrataFix.cpp +++ b/lld/ELF/AArch64ErrataFix.cpp @@ -417,7 +417,7 @@ void Patch843419Section::writeTo(uint8_t *buf) { // Return address is the next instruction after the one we have just copied. uint64_t s = getLDSTAddr() + 4; - uint64_t p = patchSym->getVA() + 4; + uint64_t p = patchSym->getVA(ctx) + 4; ctx.target->relocateNoSym(buf + 4, R_AARCH64_JUMP26, s - p); } diff --git a/lld/ELF/ARMErrataFix.cpp b/lld/ELF/ARMErrataFix.cpp index 630084afd509ce..6d759d7dec1d8a 100644 --- a/lld/ELF/ARMErrataFix.cpp +++ b/lld/ELF/ARMErrataFix.cpp @@ -218,7 +218,7 @@ static bool branchDestInFirstRegion(Ctx &ctx, const InputSection *isec, // or the PLT. if (r) { uint64_t dst = - (r->expr == R_PLT_PC) ? r->sym->getPltVA(ctx) : r->sym->getVA(); + r->expr == R_PLT_PC ? r->sym->getPltVA(ctx) : r->sym->getVA(ctx); // Account for Thumb PC bias, usually cancelled to 0 by addend of -4. destAddr = dst + r->addend + 4; } else { @@ -449,7 +449,7 @@ static void implementPatch(ScanResult sr, InputSection *isec, // Thunk from the patch to the target. uint64_t dstSymAddr = (sr.rel->expr == R_PLT_PC) ? sr.rel->sym->getPltVA(ctx) - : sr.rel->sym->getVA(); + : sr.rel->sym->getVA(ctx); destIsARM = (dstSymAddr & 1) == 0; } psec = make(ctx, isec, sr.off, sr.instr, destIsARM); diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp index 260307ac4c3dcb..f4f867d019136e 100644 --- a/lld/ELF/Arch/AArch64.cpp +++ b/lld/ELF/Arch/AArch64.cpp @@ -360,7 +360,7 @@ void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const { void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const { if (ctx.arg.writeAddends) - write64(ctx, buf, s.getVA()); + write64(ctx, buf, s.getVA(ctx)); } void AArch64::writePltHeader(uint8_t *buf) const { @@ -416,7 +416,7 @@ bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file, if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 && type != R_AARCH64_PLT32) return false; - uint64_t dst = expr == R_PLT_PC ? s.getPltVA(ctx) : s.getVA(a); + uint64_t dst = expr == R_PLT_PC ? s.getPltVA(ctx) : s.getVA(ctx, a); return !inBranchRange(type, branchAddr, dst); } @@ -808,7 +808,7 @@ bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel, Symbol &sym = *adrpRel.sym; // Check if the address difference is within 1MiB range. - int64_t val = sym.getVA() - (secAddr + addRel.offset); + int64_t val = sym.getVA(ctx) - (secAddr + addRel.offset); if (val < -1024 * 1024 || val >= 1024 * 1024) return false; @@ -874,7 +874,7 @@ bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel, return false; // Check if the address difference is within 4GB range. 
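// (Annotation added in this rewrite, not part of the patch: adrp materializes // a 4 KiB page base from a 21-bit immediate scaled by 4096, i.e. a signed // 33-bit byte offset, so the relaxation only applies when the page delta // survives SignExtend64(val, 33) unchanged.)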
int64_t val = - getAArch64Page(sym.getVA()) - getAArch64Page(secAddr + adrpRel.offset); + getAArch64Page(sym.getVA(ctx)) - getAArch64Page(secAddr + adrpRel.offset); if (val != llvm::SignExtend64(val, 33)) return false; @@ -890,11 +890,11 @@ bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel, ctx.target->relocate( buf + adrpSymRel.offset, adrpSymRel, - SignExtend64(getAArch64Page(sym.getVA()) - + SignExtend64(getAArch64Page(sym.getVA(ctx)) - getAArch64Page(secAddr + adrpSymRel.offset), 64)); ctx.target->relocate(buf + addRel.offset, addRel, - SignExtend64(sym.getVA(), 64)); + SignExtend64(sym.getVA(ctx), 64)); tryRelaxAdrpAdd(adrpSymRel, addRel, secAddr, buf); return true; } diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp index 9bb3604ce61cc5..be3f80337aae71 100644 --- a/lld/ELF/Arch/ARM.cpp +++ b/lld/ELF/Arch/ARM.cpp @@ -213,7 +213,7 @@ void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const { void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const { // An ARM entry is the address of the ifunc resolver function. - write32(ctx, buf, s.getVA()); + write32(ctx, buf, s.getVA(ctx)); } // Long form PLT Header that does not have any restrictions on the displacement @@ -404,26 +404,26 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file, // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 set (Thumb). assert(!useThumbPLTs(ctx) && "If the source is ARM, we should not need Thumb PLTs"); - if (s.isFunc() && expr == R_PC && (s.getVA() & 1)) + if (s.isFunc() && expr == R_PC && (s.getVA(ctx) & 1)) return true; [[fallthrough]]; case R_ARM_CALL: { - uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA(); + uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA(ctx); return !inBranchRange(type, branchAddr, dst + a) || - (!ctx.arg.armHasBlx && (s.getVA() & 1)); + (!ctx.arg.armHasBlx && (s.getVA(ctx) & 1)); } case R_ARM_THM_JUMP19: case R_ARM_THM_JUMP24: // Source is Thumb, when all PLT entries are ARM interworking is required. // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 clear (ARM). if ((expr == R_PLT_PC && !useThumbPLTs(ctx)) || - (s.isFunc() && (s.getVA() & 1) == 0)) + (s.isFunc() && (s.getVA(ctx) & 1) == 0)) return true; [[fallthrough]]; case R_ARM_THM_CALL: { - uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA(); + uint64_t dst = (expr == R_PLT_PC) ? 
s.getPltVA(ctx) : s.getVA(ctx); return !inBranchRange(type, branchAddr, dst + a) || - (!ctx.arg.armHasBlx && (s.getVA() & 1) == 0);; + (!ctx.arg.armHasBlx && (s.getVA(ctx) & 1) == 0); } } return false; @@ -1399,7 +1399,7 @@ void ArmCmseSGSection::writeTo(uint8_t *buf) { write16(ctx, p + 4, 0xf000); // B.W S write16(ctx, p + 6, 0xb000); ctx.target->relocateNoSym(p + 4, R_ARM_THM_JUMP24, - s->acleSeSym->getVA() - + s->acleSeSym->getVA(ctx) - (getVA() + s->offset + s->size)); } } @@ -1439,8 +1439,8 @@ void ArmCmseSGSection::finalizeContents() { for (size_t i = 0; i < sgVeneers.size(); ++i) { ArmCmseSGVeneer *s = sgVeneers[i]; s->offset = i * s->size; - Defined(file, StringRef(), s->sym->binding, s->sym->stOther, s->sym->type, - s->offset | 1, s->size, this) + Defined(ctx, file, StringRef(), s->sym->binding, s->sym->stOther, + s->sym->type, s->offset | 1, s->size, this) .overwrite(*s->sym); } } @@ -1466,16 +1466,15 @@ template void elf::writeARMCmseImportLib(Ctx &ctx) { osIsPairs.emplace_back(make(ctx, shstrtab->name, 0, 0), shstrtab); - std::sort(ctx.symtab->cmseSymMap.begin(), ctx.symtab->cmseSymMap.end(), - [](const auto &a, const auto &b) -> bool { - return a.second.sym->getVA() < b.second.sym->getVA(); - }); + llvm::sort(ctx.symtab->cmseSymMap, [&](const auto &a, const auto &b) { + return a.second.sym->getVA(ctx) < b.second.sym->getVA(ctx); + }); // Copy the secure gateway entry symbols to the import library symbol table. for (auto &p : ctx.symtab->cmseSymMap) { Defined *d = cast(p.second.sym); impSymTab->addSymbol(makeDefined( - ctx.internalFile, d->getName(), d->computeBinding(ctx), - /*stOther=*/0, STT_FUNC, d->getVA(), d->getSize(), nullptr)); + ctx, ctx.internalFile, d->getName(), d->computeBinding(ctx), + /*stOther=*/0, STT_FUNC, d->getVA(ctx), d->getSize(), nullptr)); } size_t idx = 0; diff --git a/lld/ELF/Arch/AVR.cpp b/lld/ELF/Arch/AVR.cpp index 4dc605c47059c1..64790f1ce83ab3 100644 --- a/lld/ELF/Arch/AVR.cpp +++ b/lld/ELF/Arch/AVR.cpp @@ -110,7 +110,7 @@ bool AVR::needsThunk(RelExpr expr, RelType type, const InputFile *file, case R_AVR_HI8_LDI_GS: // A thunk is needed if the symbol's virtual address is out of range // [0, 0x1ffff]. - return s.getVA() >= 0x20000; + return s.getVA(ctx) >= 0x20000; default: return false; } diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp index 5923cda2298b4e..876aadcb91511b 100644 --- a/lld/ELF/Arch/LoongArch.cpp +++ b/lld/ELF/Arch/LoongArch.cpp @@ -316,9 +316,9 @@ void LoongArch::writeGotPlt(uint8_t *buf, const Symbol &s) const { void LoongArch::writeIgotPlt(uint8_t *buf, const Symbol &s) const { if (ctx.arg.writeAddends) { if (ctx.arg.is64) - write64le(buf, s.getVA()); + write64le(buf, s.getVA(ctx)); else - write32le(buf, s.getVA()); + write32le(buf, s.getVA(ctx)); } } diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp index 1d3000793ca268..d84e85239d2ec2 100644 --- a/lld/ELF/Arch/Mips.cpp +++ b/lld/ELF/Arch/Mips.cpp @@ -96,7 +96,7 @@ RelExpr MIPS::getRelExpr(RelType type, const Symbol &s, // If the target symbol is not preemptible and is not microMIPS, // it might be possible to replace jalr/jr instruction by bal/b. // It depends on the target symbol's offset. 
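// (Annotation added in this rewrite, not part of the patch: on MIPS the low // bit of a symbol's VA marks microMIPS code, so the `& 0x1` check below keeps // the jalr/jr-to-bal/b rewrite away from microMIPS targets.)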
- if (!s.isPreemptible && !(s.getVA() & 0x1)) + if (!s.isPreemptible && !(s.getVA(ctx) & 0x1)) return R_PC; return R_NONE; case R_MICROMIPS_JALR: diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp index 3af4101fff606f..2cd526020f7d35 100644 --- a/lld/ELF/Arch/PPC.cpp +++ b/lld/ELF/Arch/PPC.cpp @@ -209,7 +209,7 @@ bool PPC::needsThunk(RelExpr expr, RelType type, const InputFile *file, return true; if (s.isUndefWeak()) return false; - return !PPC::inBranchRange(type, branchAddr, s.getVA(a)); + return !PPC::inBranchRange(type, branchAddr, s.getVA(ctx, a)); } uint32_t PPC::getThunkSectionSpacing() const { return 0x2000000; } diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp index d937492fe440d7..d0f59681ccbd3c 100644 --- a/lld/ELF/Arch/PPC64.cpp +++ b/lld/ELF/Arch/PPC64.cpp @@ -254,7 +254,7 @@ static bool addOptional(Ctx &ctx, StringRef name, uint64_t value, Symbol *sym = ctx.symtab->find(name); if (!sym || sym->isDefined()) return false; - sym->resolve(ctx, Defined{ctx.internalFile, StringRef(), STB_GLOBAL, + sym->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL, STV_HIDDEN, STT_FUNC, value, /*size=*/0, /*section=*/nullptr}); defined.push_back(cast(sym)); @@ -404,7 +404,7 @@ static bool tryRelaxPPC64TocIndirection(Ctx &ctx, const Relocation &rel, assert(!d->isGnuIFunc()); // Two instructions can materialize a 32-bit signed offset from the toc base. - uint64_t tocRelative = d->getVA(addend) - getPPC64TocBase(ctx); + uint64_t tocRelative = d->getVA(ctx, addend) - getPPC64TocBase(ctx); if (!isInt<32>(tocRelative)) return false; @@ -1452,7 +1452,7 @@ bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file, // a range-extending thunk. // See the comment in getRelocTargetVA() about R_PPC64_CALL. return !inBranchRange(type, branchAddr, - s.getVA(a) + + s.getVA(ctx, a) + getPPC64GlobalEntryToLocalEntryOffset(s.stOther)); } diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp index 1ae016e4de01ee..e80dfbd4351b1e 100644 --- a/lld/ELF/Arch/RISCV.cpp +++ b/lld/ELF/Arch/RISCV.cpp @@ -214,9 +214,9 @@ void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const { void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const { if (ctx.arg.writeAddends) { if (ctx.arg.is64) - write64le(buf, s.getVA()); + write64le(buf, s.getVA(ctx)); else - write32le(buf, s.getVA()); + write32le(buf, s.getVA(ctx)); } } @@ -466,7 +466,7 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const { case INTERNAL_R_RISCV_GPREL_I: case INTERNAL_R_RISCV_GPREL_S: { Defined *gp = ctx.sym.riscvGlobalPointer; - int64_t displace = SignExtend64(val - gp->getVA(), bits); + int64_t displace = SignExtend64(val - gp->getVA(ctx), bits); checkInt(ctx, loc, displace, 12, rel); uint32_t insn = (read32le(loc) & ~(31 << 15)) | (X_GP << 15); if (rel.type == INTERNAL_R_RISCV_GPREL_I) @@ -657,7 +657,8 @@ void RISCV::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { const Relocation &rel1 = relocs[i + 1]; if (rel.type == R_RISCV_SET_ULEB128 && rel1.type == R_RISCV_SUB_ULEB128 && rel.offset == rel1.offset) { - auto val = rel.sym->getVA(rel.addend) - rel1.sym->getVA(rel1.addend); + auto val = rel.sym->getVA(ctx, rel.addend) - + rel1.sym->getVA(ctx, rel1.addend); if (overwriteULEB128(loc, val) >= 0x80) errorOrWarn(sec.getLocation(rel.offset) + ": ULEB128 value " + Twine(val) + " exceeds available space; references '" + @@ -737,7 +738,7 @@ static void relaxCall(Ctx &ctx, const InputSection &sec, size_t i, uint64_t loc, const uint64_t insnPair = 
read64le(sec.content().data() + r.offset); const uint32_t rd = extractBits(insnPair, 32 + 11, 32 + 7); const uint64_t dest = - (r.expr == R_PLT_PC ? sym.getPltVA(ctx) : sym.getVA()) + r.addend; + (r.expr == R_PLT_PC ? sym.getPltVA(ctx) : sym.getVA(ctx)) + r.addend; const int64_t displace = dest - loc; if (rvc && isInt<12>(displace) && rd == 0) { @@ -757,9 +758,9 @@ static void relaxCall(Ctx &ctx, const InputSection &sec, size_t i, uint64_t loc, } // Relax local-exec TLS when hi20 is zero. -static void relaxTlsLe(const InputSection &sec, size_t i, uint64_t loc, - Relocation &r, uint32_t &remove) { - uint64_t val = r.sym->getVA(r.addend); +static void relaxTlsLe(Ctx &ctx, const InputSection &sec, size_t i, + uint64_t loc, Relocation &r, uint32_t &remove) { + uint64_t val = r.sym->getVA(ctx, r.addend); if (hi20(val) != 0) return; uint32_t insn = read32le(sec.content().data() + r.offset); @@ -791,7 +792,7 @@ static void relaxHi20Lo12(Ctx &ctx, const InputSection &sec, size_t i, if (!gp) return; - if (!isInt<12>(r.sym->getVA(r.addend) - gp->getVA())) + if (!isInt<12>(r.sym->getVA(ctx, r.addend) - gp->getVA(ctx))) return; switch (r.type) { @@ -851,7 +852,7 @@ static bool relax(Ctx &ctx, InputSection &sec) { case R_RISCV_TPREL_LO12_I: case R_RISCV_TPREL_LO12_S: if (relaxable(relocs, i)) - relaxTlsLe(sec, i, loc, r, remove); + relaxTlsLe(ctx, sec, i, loc, r, remove); break; case R_RISCV_HI20: case R_RISCV_LO12_I: @@ -863,7 +864,7 @@ static bool relax(Ctx &ctx, InputSection &sec) { // For TLSDESC=>LE, we can use the short form if hi20 is zero. tlsdescRelax = relaxable(relocs, i); toLeShortForm = tlsdescRelax && r.expr == R_RELAX_TLS_GD_TO_LE && - !hi20(r.sym->getVA(r.addend)); + !hi20(r.sym->getVA(ctx, r.addend)); [[fallthrough]]; case R_RISCV_TLSDESC_LOAD_LO12: // For TLSDESC=>LE/IE, AUIPC and L[DW] are removed if relaxable. diff --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp index 584379638ad981..106b530c31b28b 100644 --- a/lld/ELF/Arch/SystemZ.cpp +++ b/lld/ELF/Arch/SystemZ.cpp @@ -188,7 +188,7 @@ void SystemZ::writeGotPlt(uint8_t *buf, const Symbol &s) const { void SystemZ::writeIgotPlt(uint8_t *buf, const Symbol &s) const { if (ctx.arg.writeAddends) - write64be(buf, s.getVA()); + write64be(buf, s.getVA(ctx)); } void SystemZ::writePltHeader(uint8_t *buf) const { diff --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp index 58199cdb99a284..a36212a5b1690a 100644 --- a/lld/ELF/Arch/X86.cpp +++ b/lld/ELF/Arch/X86.cpp @@ -181,7 +181,7 @@ void X86::writeGotPlt(uint8_t *buf, const Symbol &s) const { void X86::writeIgotPlt(uint8_t *buf, const Symbol &s) const { // An x86 entry is the address of the ifunc resolver function. - write32le(buf, s.getVA()); + write32le(buf, s.getVA(ctx)); } RelType X86::getDynRel(RelType type) const { diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp index df2983f2022818..d32ba638b740c5 100644 --- a/lld/ELF/Arch/X86_64.cpp +++ b/lld/ELF/Arch/X86_64.cpp @@ -429,7 +429,7 @@ void X86_64::writeGotPlt(uint8_t *buf, const Symbol &s) const { void X86_64::writeIgotPlt(uint8_t *buf, const Symbol &s) const { // An x86 entry is the address of the ifunc resolver function (for -z rel). 
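Aside: the igot slot written by writeIgotPlt feeds an IRELATIVE relocation. Conceptually, the loader (or startup code in static binaries) calls the resolver whose address sits in the slot and stores the returned address back; a sketch, not lld code, and the resolver signature varies by target ABI:

// Conceptual IRELATIVE fixup: the slot initially holds the resolver's
// address; it is replaced with whatever the resolver returns.
using IfuncResolver = void *(*)();
inline void applyIrelative(void **slot) {
  *slot = reinterpret_cast<IfuncResolver>(*slot)();
}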
if (ctx.arg.writeAddends) - write64le(buf, s.getVA()); + write64le(buf, s.getVA(ctx)); } void X86_64::writePltHeader(uint8_t *buf) const { diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp index fb77e67e9fc5ca..0d7712f904dab8 100644 --- a/lld/ELF/Driver.cpp +++ b/lld/ELF/Driver.cpp @@ -2375,7 +2375,7 @@ static void replaceCommonSymbols(Ctx &ctx) { auto *bss = make<BssSection>(ctx, "COMMON", s->size, s->alignment); bss->file = s->file; ctx.inputSections.push_back(bss); - Defined(s->file, StringRef(), s->binding, s->stOther, s->type, + Defined(ctx, s->file, StringRef(), s->binding, s->stOther, s->type, /*value=*/0, s->size, bss) .overwrite(*s); } diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp index 3d02ef8b77abaa..0d3db373138874 100644 --- a/lld/ELF/InputFiles.cpp +++ b/lld/ELF/InputFiles.cpp @@ -1156,14 +1156,14 @@ void ObjFile<ELFT>::initializeSymbols(const object::ELFFile<ELFT> &obj) { fatal(toString(this) + ": common symbol '" + sym->getName() + "' has invalid alignment: " + Twine(value)); hasCommonSyms = true; - sym->resolve(ctx, CommonSymbol{this, StringRef(), binding, stOther, type, - value, size}); + sym->resolve(ctx, CommonSymbol{ctx, this, StringRef(), binding, stOther, + type, value, size}); continue; } // Handle global defined symbols. Defined::section will be set in postParse. - sym->resolve(ctx, Defined{this, StringRef(), binding, stOther, type, value, - size, nullptr}); + sym->resolve(ctx, Defined{ctx, this, StringRef(), binding, stOther, type, + value, size, nullptr}); } // Undefined symbols (excluding those defined relative to non-prevailing @@ -1219,7 +1219,7 @@ void ObjFile<ELFT>::initSectionsAndLocalSyms(bool ignoreComdats) { new (symbols[i]) Undefined(this, name, STB_LOCAL, eSym.st_other, type, /*discardedSecIdx=*/secIdx); else - new (symbols[i]) Defined(this, name, STB_LOCAL, eSym.st_other, type, + new (symbols[i]) Defined(ctx, this, name, STB_LOCAL, eSym.st_other, type, eSym.st_value, eSym.st_size, sec); symbols[i]->partition = 1; symbols[i]->isUsedInRegularObj = true; @@ -1765,11 +1765,12 @@ static void createBitcodeSymbol(Ctx &ctx, Symbol *&sym, } if (objSym.isCommon()) { - sym->resolve(ctx, CommonSymbol{&f, StringRef(), binding, visibility, + sym->resolve(ctx, CommonSymbol{ctx, &f, StringRef(), binding, visibility, STT_OBJECT, objSym.getCommonAlignment(), objSym.getCommonSize()}); } else { - Defined newSym(&f, StringRef(), binding, visibility, type, 0, 0, nullptr); + Defined newSym(ctx, &f, StringRef(), binding, visibility, type, 0, 0, + nullptr); if (objSym.canBeOmittedFromSymbolTable()) newSym.exportDynamic = false; sym->resolve(ctx, newSym); @@ -1849,14 +1850,14 @@ void BinaryFile::parse() { llvm::StringSaver &saver = lld::saver(); - ctx.symtab->addAndCheckDuplicate(ctx, Defined{this, saver.save(s + "_start"), - STB_GLOBAL, STV_DEFAULT, - STT_OBJECT, 0, 0, section}); ctx.symtab->addAndCheckDuplicate( - ctx, Defined{this, saver.save(s + "_end"), STB_GLOBAL, STV_DEFAULT, + ctx, Defined{ctx, this, saver.save(s + "_start"), STB_GLOBAL, STV_DEFAULT, + STT_OBJECT, 0, 0, section}); + ctx.symtab->addAndCheckDuplicate( + ctx, Defined{ctx, this, saver.save(s + "_end"), STB_GLOBAL, STV_DEFAULT, STT_OBJECT, data.size(), 0, section}); ctx.symtab->addAndCheckDuplicate( - ctx, Defined{this, saver.save(s + "_size"), STB_GLOBAL, STV_DEFAULT, + ctx, Defined{ctx, this, saver.save(s + "_size"), STB_GLOBAL, STV_DEFAULT, STT_OBJECT, data.size(), 0, nullptr}); } diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp index 2e9e8a7007bbf8..3b48fbe07bb082 ---
a/lld/ELF/InputSection.cpp +++ b/lld/ELF/InputSection.cpp @@ -70,8 +70,10 @@ InputSectionBase::InputSectionBase(InputFile *file, uint64_t flags, // If SHF_COMPRESSED is set, parse the header. The legacy .zdebug format is no // longer supported. - if (flags & SHF_COMPRESSED) + if (flags & SHF_COMPRESSED) { + Ctx &ctx = file->ctx; invokeELFT(parseCompressedHeader,); + } } // SHF_INFO_LINK and SHF_GROUP are normally resolved and not copied to the @@ -508,7 +510,8 @@ void InputSection::copyRelocations(Ctx &ctx, uint8_t *buf, } if (RelTy::HasAddend) - p->r_addend = sym.getVA(addend) - section->getOutputSection()->addr; + p->r_addend = + sym.getVA(ctx, addend) - section->getOutputSection()->addr; // For SHF_ALLOC sections relocated by REL, append a relocation to // sec->relocations so that relocateAlloc transitively called by // writeSections will update the implicit addend. Non-SHF_ALLOC sections @@ -701,7 +704,7 @@ static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) { // Variant 1. case EM_ARM: case EM_AARCH64: - return s.getVA(0) + ctx.arg.wordsize * 2 + + return s.getVA(ctx, 0) + ctx.arg.wordsize * 2 + ((tls->p_vaddr - ctx.arg.wordsize * 2) & (tls->p_align - 1)); case EM_MIPS: case EM_PPC: @@ -709,7 +712,7 @@ static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) { // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library // data and 0xf000 of the program's TLS segment. - return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000; + return s.getVA(ctx, 0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000; case EM_LOONGARCH: case EM_RISCV: // See the comment in handleTlsRelocation. For TLSDESC=>IE, @@ -717,7 +720,7 @@ static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) { // `tls` may be null, the return value is ignored. if (s.type != STT_TLS) return 0; - return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1)); + return s.getVA(ctx, 0) + (tls->p_vaddr & (tls->p_align - 1)); // Variant 2. 
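Aside: a simplified picture of the two TLS layouts this switch distinguishes, ignoring the per-target alignment and bias adjustments visible above; helper names are illustrative only:

#include <cstdint>

// Variant 1 (ARM, AArch64, MIPS, PowerPC): the TLS block sits above the
// thread pointer after a fixed-size TCB, so offsets come out positive.
inline uint64_t tpOffsetVariant1(uint64_t symVA, uint64_t tcbSize) {
  return symVA + tcbSize;
}
// Variant 2 (x86, x86-64, SPARC, Hexagon): the TLS block ends at the
// thread pointer, so offsets come out negative.
inline int64_t tpOffsetVariant2(uint64_t symVA, uint64_t tlsSize) {
  return int64_t(symVA) - int64_t(tlsSize);
}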
case EM_HEXAGON: @@ -725,7 +728,7 @@ static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) { case EM_SPARCV9: case EM_386: case EM_X86_64: - return s.getVA(0) - tls->p_memsz - + return s.getVA(ctx, 0) - tls->p_memsz - ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1)); default: llvm_unreachable("unhandled ctx.arg.emachine"); @@ -743,13 +746,13 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r, case R_AARCH64_AUTH: case R_RISCV_ADD: case R_RISCV_LEB128: - return r.sym->getVA(a); + return r.sym->getVA(ctx, a); case R_ADDEND: return a; case R_RELAX_HINT: return 0; case R_ARM_SBREL: - return r.sym->getVA(a) - getARMStaticBase(*r.sym); + return r.sym->getVA(ctx, a) - getARMStaticBase(*r.sym); case R_GOT: case R_RELAX_TLS_GD_TO_IE_ABS: return r.sym->getGotVA(ctx) + a; @@ -767,9 +770,9 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r, return ctx.in.gotPlt->getVA() + a - p; case R_GOTREL: case R_PPC64_RELAX_TOC: - return r.sym->getVA(a) - ctx.in.got->getVA(); + return r.sym->getVA(ctx, a) - ctx.in.got->getVA(); case R_GOTPLTREL: - return r.sym->getVA(a) - ctx.in.gotPlt->getVA(); + return r.sym->getVA(ctx, a) - ctx.in.gotPlt->getVA(); case R_GOTPLT: case R_RELAX_TLS_GD_TO_IE_GOTPLT: return r.sym->getGotVA(ctx) + a - ctx.in.gotPlt->getVA(); @@ -795,7 +798,7 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r, r.type); return getLoongArchPageDelta(r.sym->getGotVA(ctx) + a, p, r.type); case R_MIPS_GOTREL: - return r.sym->getVA(a) - ctx.in.mipsGot->getGp(file); + return r.sym->getVA(ctx, a) - ctx.in.mipsGot->getGp(file); case R_MIPS_GOT_GP: return ctx.in.mipsGot->getGp(file) + a; case R_MIPS_GOT_GP_PC: { @@ -836,16 +839,16 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r, return ctx.in.mipsGot->getVA() + ctx.in.mipsGot->getTlsIndexOffset(file) - ctx.in.mipsGot->getGp(file); case R_AARCH64_PAGE_PC: { - uint64_t val = r.sym->isUndefWeak() ? p + a : r.sym->getVA(a); + uint64_t val = r.sym->isUndefWeak() ? p + a : r.sym->getVA(ctx, a); return getAArch64Page(val) - getAArch64Page(p); } case R_RISCV_PC_INDIRECT: { if (const Relocation *hiRel = getRISCVPCRelHi20(this, r)) - return getRelocTargetVA(ctx, *hiRel, r.sym->getVA()); + return getRelocTargetVA(ctx, *hiRel, r.sym->getVA(ctx)); return 0; } case R_LOONGARCH_PAGE_PC: - return getLoongArchPageDelta(r.sym->getVA(a), p, r.type); + return getLoongArchPageDelta(r.sym->getVA(ctx, a), p, r.type); case R_PC: case R_ARM_PCA: { uint64_t dest; @@ -868,9 +871,9 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r, else if (ctx.arg.emachine == EM_RISCV) dest = getRISCVUndefinedRelativeWeakVA(r.type, p) + a; else - dest = r.sym->getVA(a); + dest = r.sym->getVA(ctx, a); } else { - dest = r.sym->getVA(a); + dest = r.sym->getVA(ctx, a); } return dest - p; } @@ -891,7 +894,7 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r, // target VA computation. return r.sym->getPltVA(ctx) - p; case R_PPC64_CALL: { - uint64_t symVA = r.sym->getVA(a); + uint64_t symVA = r.sym->getVA(ctx, a); // If we have an undefined weak symbol, we might get here with a symbol // address of zero. That could overflow, but the code must be unreachable, // so don't bother doing anything at all. 
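Aside: the R_PC family handled above all reduces to one computation; a tiny reference version for orientation:

#include <cstdint>

// A PC-relative relocation stores the distance from the patched location p
// to the target (symbol VA plus addend).
inline int64_t pcRelative(uint64_t symVA, int64_t addend, uint64_t p) {
  return static_cast<int64_t>(symVA + addend - p);
}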
@@ -910,7 +913,7 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r, return getPPC64TocBase(ctx) + a; case R_RELAX_GOT_PC: case R_PPC64_RELAX_GOT_PC: - return r.sym->getVA(a) - p; + return r.sym->getVA(ctx, a) - p; case R_RELAX_TLS_GD_TO_LE: case R_RELAX_TLS_IE_TO_LE: case R_RELAX_TLS_LD_TO_LE: @@ -1016,8 +1019,8 @@ void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf, if (!ds && tombstone) { val = *tombstone; } else { - val = sym.getVA(addend) - - (f->getRelocTargetSym(*it).getVA(0) + getAddend<ELFT>(*it)); + val = sym.getVA(ctx, addend) - + (f->getRelocTargetSym(*it).getVA(ctx) + getAddend<ELFT>(*it)); } if (overwriteULEB128(bufLoc, val) >= 0x80) errorOrWarn(getLocation(offset) + ": ULEB128 value " + Twine(val) + @@ -1083,7 +1086,8 @@ void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf, // sections. if (LLVM_LIKELY(expr == R_ABS) || expr == R_DTPREL || expr == R_GOTPLTREL || expr == R_RISCV_ADD) { - target.relocateNoSym(bufLoc, type, SignExtend64<bits>(sym.getVA(addend))); + target.relocateNoSym(bufLoc, type, + SignExtend64<bits>(sym.getVA(ctx, addend))); continue; } @@ -1116,7 +1120,7 @@ void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf, warn(msg); target.relocateNoSym( bufLoc, type, - SignExtend64<bits>(sym.getVA(addend - offset - outSecOff))); + SignExtend64<bits>(sym.getVA(ctx, addend - offset - outSecOff))); } } diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp index 0560065ffa4780..d2088b4c648180 100644 --- a/lld/ELF/LinkerScript.cpp +++ b/lld/ELF/LinkerScript.cpp @@ -227,8 +227,8 @@ void LinkerScript::addSymbol(SymbolAssignment *cmd) { // write expressions like this: `alignment = 16; . = ALIGN(., alignment)`. uint64_t symValue = value.sec ? 0 : value.getValue(); - Defined newSym(createInternalFile(ctx, cmd->location), cmd->name, STB_GLOBAL, - visibility, value.type, symValue, 0, sec); + Defined newSym(ctx, createInternalFile(ctx, cmd->location), cmd->name, + STB_GLOBAL, visibility, value.type, symValue, 0, sec); Symbol *sym = ctx.symtab->insert(cmd->name); sym->mergeProperties(newSym); @@ -244,7 +244,7 @@ void LinkerScript::declareSymbol(SymbolAssignment *cmd) { return; uint8_t visibility = cmd->hidden ? STV_HIDDEN : STV_DEFAULT; - Defined newSym(ctx.internalFile, cmd->name, STB_GLOBAL, visibility, + Defined newSym(ctx, ctx.internalFile, cmd->name, STB_GLOBAL, visibility, STT_NOTYPE, 0, 0, nullptr); // If the symbol is already defined, its order is 0 (with absence indicating diff --git a/lld/ELF/MapFile.cpp b/lld/ELF/MapFile.cpp index 6bbc1ecc646fdd..afaf04dc72fe6c 100644 --- a/lld/ELF/MapFile.cpp +++ b/lld/ELF/MapFile.cpp @@ -65,10 +65,10 @@ static std::vector<Defined *> getSymbols(Ctx &ctx) { } // Returns a map from sections to their symbols. -static SymbolMapTy getSectionSyms(ArrayRef<Defined *> syms) { +static SymbolMapTy getSectionSyms(Ctx &ctx, ArrayRef<Defined *> syms) { SymbolMapTy ret; for (Defined *dr : syms) - ret[dr->section].emplace_back(dr, dr->getVA()); + ret[dr->section].emplace_back(dr, dr->getVA(ctx)); // Sort symbols by address. We want to print out symbols in the // order in the output file rather than the order they appeared @@ -95,7 +95,7 @@ getSymbolStrings(Ctx &ctx, ArrayRef<Defined *> syms) { parallelFor(0, syms.size(), [&](size_t i) { raw_string_ostream os(strs[i]); OutputSection *osec = syms[i]->getOutputSection(); - uint64_t vma = syms[i]->getVA(); + uint64_t vma = syms[i]->getVA(ctx); uint64_t lma = osec ?
osec->getLMA() + vma - osec->getVA(0) : 0; writeHeader(ctx, os, vma, lma, syms[i]->getSize(), 1); os << indent16 << toString(*syms[i]); @@ -149,7 +149,7 @@ static void printEhFrame(Ctx &ctx, raw_ostream &os, const EhFrameSection *sec) { static void writeMapFile(Ctx &ctx, raw_fd_ostream &os) { // Collect symbol info that we want to print out. std::vector<Defined *> syms = getSymbols(ctx); - SymbolMapTy sectionSyms = getSectionSyms(syms); + SymbolMapTy sectionSyms = getSectionSyms(ctx, syms); DenseMap<Symbol *, std::string> symStr = getSymbolStrings(ctx, syms); // Print out the header line. diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp index 309039fe7e204a..6cae7cf8f8599d 100644 --- a/lld/ELF/OutputSections.cpp +++ b/lld/ELF/OutputSections.cpp @@ -624,7 +624,7 @@ encodeOneCrel(Ctx &ctx, raw_svector_ostream &os, if (d) { SectionBase *section = d->section; assert(section->isLive()); - addend = sym.getVA(addend) - section->getOutputSection()->addr; + addend = sym.getVA(ctx, addend) - section->getOutputSection()->addr; } else { // Encode R_*_NONE(symidx=0). symidx = type = addend = 0; } @@ -882,7 +882,11 @@ void OutputSection::checkDynRelAddends(Ctx &ctx) { // for input .rel[a].<sec> sections which we simply pass through to the // output. We skip over those and only look at the synthetic relocation // sections created during linking. - const auto *sec = dyn_cast<RelocationBaseSection>(sections[i]); + if (!SyntheticSection::classof(sections[i]) || + !is_contained({ELF::SHT_REL, ELF::SHT_RELA, ELF::SHT_RELR}, + sections[i]->type)) + return; + const auto *sec = cast<RelocationBaseSection>(sections[i]); if (!sec) return; for (const DynamicReloc &rel : sec->relocs) { diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp index 0188d658f9210e..d40348a7b30d8f 100644 --- a/lld/ELF/Relocations.cpp +++ b/lld/ELF/Relocations.cpp @@ -315,10 +315,10 @@ static SmallSet<SharedSymbol *, 4> getSymbolsAt(Ctx &ctx, SharedSymbol &ss) { // in .bss and in the case of a canonical plt entry it is in .plt. This function // replaces the existing symbol with a Defined pointing to the appropriate // location. -static void replaceWithDefined(Symbol &sym, SectionBase &sec, uint64_t value, - uint64_t size) { +static void replaceWithDefined(Ctx &ctx, Symbol &sym, SectionBase &sec, + uint64_t value, uint64_t size) { Symbol old = sym; - Defined(sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value, + Defined(ctx, sym.file, StringRef(), sym.binding, sym.stOther, sym.type, value, size, &sec) .overwrite(sym); @@ -398,7 +398,7 @@ template <class ELFT> static void addCopyRelSymbol(Ctx &ctx, SharedSymbol &ss) { // dynamic symbol for each one. This causes the copy relocation to correctly // interpose any aliases.
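Aside: what the copy relocation being set up here means at run time, as a sketch (applyCopyReloc is a hypothetical stand-in for the dynamic loader's work):

#include <cstring>

// R_*_COPY: the loader copies the shared object's initial value into the
// executable's .bss slot; all aliases then resolve to that single copy.
inline void applyCopyReloc(void *bssSlot, const void *dsoDef, size_t size) {
  std::memcpy(bssSlot, dsoDef, size);
}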
for (SharedSymbol *sym : getSymbolsAt(ctx, ss)) - replaceWithDefined(*sym, *sec, 0, sym->size); + replaceWithDefined(ctx, *sym, *sec, 0, sym->size); ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->copyRel, *sec, 0, ss); } @@ -1807,7 +1807,7 @@ void elf::postScanRelocations(Ctx &ctx) { } else { assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT)); if (!sym.isDefined()) { - replaceWithDefined(sym, *ctx.in.plt, + replaceWithDefined(ctx, sym, *ctx.in.plt, ctx.target->pltHeaderSize + ctx.target->pltEntrySize * sym.getPltIdx(ctx), 0); @@ -2257,7 +2257,7 @@ std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec, if (isThunkSectionCompatible(isec, t->getThunkTargetSym()->section) && t->isCompatibleWith(*isec, rel) && ctx.target->inBranchRange(rel.type, src, - t->getThunkTargetSym()->getVA(-pcBias))) + t->getThunkTargetSym()->getVA(ctx, -pcBias))) return std::make_pair(t, false); // No existing compatible Thunk in range, create a new one @@ -2281,7 +2281,8 @@ std::pair<Thunk *, bool> ThunkCreator::getSyntheticLandingPad(Defined &d, // relocation back to its original non-Thunk target. bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) { if (Thunk *t = thunks.lookup(rel.sym)) { - if (ctx.target->inBranchRange(rel.type, src, rel.sym->getVA(rel.addend))) + if (ctx.target->inBranchRange(rel.type, src, + rel.sym->getVA(ctx, rel.addend))) return true; rel.sym = &t->destination; rel.addend = t->addend; diff --git a/lld/ELF/Symbols.cpp b/lld/ELF/Symbols.cpp index 3caa609338e068..da35bf858cb371 100644 --- a/lld/ELF/Symbols.cpp +++ b/lld/ELF/Symbols.cpp @@ -44,13 +44,13 @@ LLVM_ATTRIBUTE_UNUSED static inline void assertSymbols() { } // Returns a symbol for an error message. -static std::string maybeDemangleSymbol(StringRef symName) { - return elf::ctx.arg.demangle ? demangle(symName.str()) : symName.str(); +static std::string maybeDemangleSymbol(Ctx &ctx, StringRef symName) { + return ctx.arg.demangle ? demangle(symName.str()) : symName.str(); } std::string lld::toString(const elf::Symbol &sym) { StringRef name = sym.getName(); - std::string ret = maybeDemangleSymbol(name); + std::string ret = maybeDemangleSymbol(ctx, name); const char *suffix = sym.getVersionSuffix(); if (*suffix == '@') @@ -58,7 +58,7 @@ std::string lld::toString(const elf::Symbol &sym) { return ret; } -static uint64_t getSymVA(const Symbol &sym, int64_t addend) { +static uint64_t getSymVA(Ctx &ctx, const Symbol &sym, int64_t addend) { switch (sym.kind()) { case Symbol::DefinedKind: { auto &d = cast<Defined>(sym); @@ -141,8 +141,8 @@ static uint64_t getSymVA(const Symbol &sym, int64_t addend) { llvm_unreachable("invalid symbol kind"); } -uint64_t Symbol::getVA(int64_t addend) const { - return getSymVA(*this, addend) + addend; +uint64_t Symbol::getVA(Ctx &ctx, int64_t addend) const { + return getSymVA(ctx, *this, addend) + addend; } uint64_t Symbol::getGotVA(Ctx &ctx) const { @@ -617,7 +617,7 @@ void Symbol::resolve(Ctx &ctx, const LazySymbol &other) { // For common objects, we want to look for global or weak definitions that // should be extracted as the canonical definition instead.
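Aside: background for the fortran-common path above. ELF COMMON symbols are tentative definitions that merge, while a real global definition found in an archive member should win and be extracted. A toy model of the merge rule only:

#include <algorithm>
#include <cstdint>

// Two tentative (COMMON) definitions of one symbol merge to the larger
// size and the stricter alignment; a strong definition replaces the result.
struct TentativeDef { uint64_t size, alignment; };
inline TentativeDef mergeCommon(TentativeDef a, TentativeDef b) {
  return {std::max(a.size, b.size), std::max(a.alignment, b.alignment)};
}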
- if (LLVM_UNLIKELY(isCommon()) && elf::ctx.arg.fortranCommon && + if (LLVM_UNLIKELY(isCommon()) && ctx.arg.fortranCommon && other.file->shouldExtractForCommon(getName())) { ctx.backwardReferences.erase(this); other.overwrite(*this); diff --git a/lld/ELF/Symbols.h b/lld/ELF/Symbols.h index 010ae9742378b9..339f32e05f1625 100644 --- a/lld/ELF/Symbols.h +++ b/lld/ELF/Symbols.h @@ -210,7 +210,7 @@ class Symbol { bool isInGot(Ctx &ctx) const { return getGotIdx(ctx) != uint32_t(-1); } bool isInPlt(Ctx &ctx) const { return getPltIdx(ctx) != uint32_t(-1); } - uint64_t getVA(int64_t addend = 0) const; + uint64_t getVA(Ctx &, int64_t addend = 0) const; uint64_t getGotOffset(Ctx &) const; uint64_t getGotVA(Ctx &) const; @@ -363,8 +363,9 @@ class Symbol { // Represents a symbol that is defined in the current output file. class Defined : public Symbol { public: - Defined(InputFile *file, StringRef name, uint8_t binding, uint8_t stOther, - uint8_t type, uint64_t value, uint64_t size, SectionBase *section) + Defined(Ctx &ctx, InputFile *file, StringRef name, uint8_t binding, + uint8_t stOther, uint8_t type, uint64_t value, uint64_t size, + SectionBase *section) : Symbol(DefinedKind, file, name, binding, stOther, type), value(value), size(size), section(section) { exportDynamic = ctx.arg.exportDynamic; @@ -401,7 +402,7 @@ class Defined : public Symbol { // section. (Therefore, the later passes don't see any CommonSymbols.) class CommonSymbol : public Symbol { public: - CommonSymbol(InputFile *file, StringRef name, uint8_t binding, + CommonSymbol(Ctx &ctx, InputFile *file, StringRef name, uint8_t binding, uint8_t stOther, uint8_t type, uint64_t alignment, uint64_t size) : Symbol(CommonKind, file, name, binding, stOther, type), alignment(alignment), size(size) { diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp index e18e7a32df86c7..7a344635a1cb53 100644 --- a/lld/ELF/SyntheticSections.cpp +++ b/lld/ELF/SyntheticSections.cpp @@ -276,8 +276,8 @@ InputSection *elf::createInterpSection(Ctx &ctx) { Defined *elf::addSyntheticLocal(Ctx &ctx, StringRef name, uint8_t type, uint64_t value, uint64_t size, InputSectionBase §ion) { - Defined *s = makeDefined(section.file, name, STB_LOCAL, STV_DEFAULT, type, - value, size, §ion); + Defined *s = makeDefined(ctx, section.file, name, STB_LOCAL, STV_DEFAULT, + type, value, size, §ion); if (ctx.in.symTab) ctx.in.symTab->addSymbol(s); @@ -749,7 +749,7 @@ void MipsGotSection::addEntry(InputFile &file, Symbol &sym, int64_t addend, if (const OutputSection *os = sym.getOutputSection()) g.pagesMap.insert({os, {}}); else - g.local16.insert({{nullptr, getMipsPageAddr(sym.getVA(addend))}, 0}); + g.local16.insert({{nullptr, getMipsPageAddr(sym.getVA(ctx, addend))}, 0}); } else if (sym.isTls()) g.tls.insert({&sym, 0}); else if (sym.isPreemptible && expr == R_ABS) @@ -808,10 +808,11 @@ uint64_t MipsGotSection::getPageEntryOffset(const InputFile *f, uint64_t index = 0; if (const OutputSection *outSec = sym.getOutputSection()) { uint64_t secAddr = getMipsPageAddr(outSec->addr); - uint64_t symAddr = getMipsPageAddr(sym.getVA(addend)); + uint64_t symAddr = getMipsPageAddr(sym.getVA(ctx, addend)); index = g.pagesMap.lookup(outSec).firstIndex + (symAddr - secAddr) / 0xffff; } else { - index = g.local16.lookup({nullptr, getMipsPageAddr(sym.getVA(addend))}); + index = + g.local16.lookup({nullptr, getMipsPageAddr(sym.getVA(ctx, addend))}); } return index * ctx.arg.wordsize; } @@ -1099,7 +1100,7 @@ uint64_t MipsGotSection::getGp(const InputFile *f) const { // returns 
"common" _gp value. For secondary GOTs calculate // individual _gp values. if (!f || f->mipsGotIndex == uint32_t(-1) || f->mipsGotIndex == 0) - return ctx.sym.mipsGp->getVA(0); + return ctx.sym.mipsGp->getVA(ctx, 0); return getVA() + gots[f->mipsGotIndex].startIndex * ctx.arg.wordsize + 0x7ff0; } @@ -1124,7 +1125,7 @@ void MipsGotSection::writeTo(uint8_t *buf) { auto write = [&](size_t i, const Symbol *s, int64_t a) { uint64_t va = a; if (s) - va = s->getVA(a); + va = s->getVA(ctx, a); writeUint(ctx, buf + i * ctx.arg.wordsize, va); }; // Write 'page address' entries to the local part of the GOT. @@ -1522,10 +1523,10 @@ DynamicSection::computeContents() { if (Symbol *b = ctx.symtab->find(ctx.arg.init)) if (b->isDefined()) - addInt(DT_INIT, b->getVA()); + addInt(DT_INIT, b->getVA(ctx)); if (Symbol *b = ctx.symtab->find(ctx.arg.fini)) if (b->isDefined()) - addInt(DT_FINI, b->getVA()); + addInt(DT_FINI, b->getVA(ctx)); } if (part.verSym && part.verSym->isNeeded()) @@ -2288,7 +2289,7 @@ template void SymbolTableSection::writeTo(uint8_t *buf) { const uint32_t shndx = getSymSectionIndex(sym); if (isDefinedHere) { eSym->st_shndx = shndx; - eSym->st_value = sym->getVA(); + eSym->st_value = sym->getVA(ctx); // Copy symbol size if it is a defined symbol. st_size is not // significant for undefined symbols, so whether copying it or not is up // to us if that's the case. We'll leave it as zero because by not @@ -3241,7 +3242,7 @@ void DebugNamesSection::getNameRelocs( Relocs rels) { for (const RelTy &rel : rels) { Symbol &sym = file.getRelocTargetSym(rel); - relocs[rel.r_offset] = sym.getVA(getAddend(rel)); + relocs[rel.r_offset] = sym.getVA(ctx, getAddend(rel)); } } @@ -4356,11 +4357,11 @@ void PPC64LongBranchTargetSection::writeTo(uint8_t *buf) { for (auto entry : entries) { const Symbol *sym = entry.first; int64_t addend = entry.second; - assert(sym->getVA()); + assert(sym->getVA(ctx)); // Need calls to branch to the local entry-point since a long-branch // must be a local-call. 
write64(ctx, buf, - sym->getVA(addend) + + sym->getVA(ctx, addend) + getPPC64GlobalEntryToLocalEntryOffset(sym->stOther)); buf += 8; } } @@ -4616,7 +4617,7 @@ createMemtagGlobalDescriptors(Ctx &ctx, for (const Symbol *sym : symbols) { if (!includeInSymtab(ctx, *sym)) continue; - const uint64_t addr = sym->getVA(); + const uint64_t addr = sym->getVA(ctx); const uint64_t size = sym->getSize(); if (addr <= kMemtagGranuleSize && buf != nullptr) @@ -4653,8 +4654,8 @@ createMemtagGlobalDescriptors(Ctx &ctx, bool MemtagGlobalDescriptors::updateAllocSize(Ctx &ctx) { size_t oldSize = getSize(); std::stable_sort(symbols.begin(), symbols.end(), - [](const Symbol *s1, const Symbol *s2) { - return s1->getVA() < s2->getVA(); + [&ctx = ctx](const Symbol *s1, const Symbol *s2) { + return s1->getVA(ctx) < s2->getVA(ctx); }); return oldSize != getSize(); } @@ -4681,8 +4682,8 @@ static Defined *addOptionalRegular(Ctx &ctx, StringRef name, SectionBase *sec, if (!s || s->isDefined() || s->isCommon()) return nullptr; - s->resolve(ctx, Defined{ctx.internalFile, StringRef(), STB_GLOBAL, stOther, - STT_NOTYPE, val, + s->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL, + stOther, STT_NOTYPE, val, /*size=*/0, sec}); s->isUsedInRegularObj = true; return cast<Defined>(s); diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h index d64c4aad8c552b..3573767671feb1 100644 --- a/lld/ELF/SyntheticSections.h +++ b/lld/ELF/SyntheticSections.h @@ -547,13 +547,7 @@ class RelocationBaseSection : public SyntheticSection { void mergeRels(); void partitionRels(); void finalizeContents() override; - static bool classof(const SectionBase *d) { - return SyntheticSection::classof(d) && - (d->type == llvm::ELF::SHT_RELA || d->type == llvm::ELF::SHT_REL || - d->type == llvm::ELF::SHT_RELR || - (d->type == llvm::ELF::SHT_AARCH64_AUTH_RELR && - elf::ctx.arg.emachine == llvm::ELF::EM_AARCH64)); - } + int32_t dynamicTag, sizeDynamicTag; SmallVector<DynamicReloc, 0> relocs; diff --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp index 971b2724b3e26f..94c0b2409c6c7c 100644 --- a/lld/ELF/Thunks.cpp +++ b/lld/ELF/Thunks.cpp @@ -464,7 +464,7 @@ class PPC64R2SaveStub final : public Thunk { // This is similar to the handling for ARMThunk. bool mayUseShortThunk = true; int64_t computeOffset() const { - return destination.getVA() - (getThunkTargetSym()->getVA() + 4); + return destination.getVA(ctx) - (getThunkTargetSym()->getVA(ctx) + 4); } }; @@ -550,7 +550,7 @@ void Thunk::setOffset(uint64_t newOffset) { // AArch64 Thunk base class. static uint64_t getAArch64ThunkDestVA(Ctx &ctx, const Symbol &s, int64_t a) { - uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA(a); + uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA(ctx, a); return v; } @@ -558,7 +558,7 @@ bool AArch64Thunk::getMayUseShortThunk() { if (!mayUseShortThunk) return false; uint64_t s = getAArch64ThunkDestVA(ctx, destination, addend); - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t p = getThunkTargetSym()->getVA(ctx); mayUseShortThunk = llvm::isInt<28>(s - p); return mayUseShortThunk; } @@ -569,7 +569,7 @@ void AArch64Thunk::writeTo(uint8_t *buf) { return; } uint64_t s = getAArch64ThunkDestVA(ctx, destination, addend); - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t p = getThunkTargetSym()->getVA(ctx); write32(ctx, buf, 0x14000000); // b S ctx.target->relocateNoSym(buf, R_AARCH64_CALL26, s - p); } @@ -592,7 +592,7 @@ void AArch64ABSLongThunk::writeLong(uint8_t *buf) { // AArch64BTILandingPadThunk that defines landingPad.
assert(!mayNeedLandingPad || landingPad != nullptr); uint64_t s = mayNeedLandingPad - ? landingPad->getVA(0) + ? landingPad->getVA(ctx, 0) : getAArch64ThunkDestVA(ctx, destination, addend); memcpy(buf, data, sizeof(data)); ctx.target->relocateNoSym(buf + 8, R_AARCH64_ABS64, s); @@ -621,9 +621,9 @@ void AArch64ADRPThunk::writeLong(uint8_t *buf) { // AArch64BTILandingPadThunk that defines landingPad. assert(!mayNeedLandingPad || landingPad != nullptr); uint64_t s = mayNeedLandingPad - ? landingPad->getVA(0) + ? landingPad->getVA(ctx, 0) : getAArch64ThunkDestVA(ctx, destination, addend); - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t p = getThunkTargetSym()->getVA(ctx); memcpy(buf, data, sizeof(data)); ctx.target->relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21, getAArch64Page(s) - getAArch64Page(p)); @@ -656,8 +656,8 @@ bool AArch64BTILandingPadThunk::getMayUseShortThunk() { return false; // If the target is the following instruction then we can fall // through without the indirect branch. - uint64_t s = destination.getVA(addend); - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t s = destination.getVA(ctx, addend); + uint64_t p = getThunkTargetSym()->getVA(ctx); // This function is called before addresses are stable. We need to // work out the range from the thunk to the next section but the // address of the start of the next section depends on the size of @@ -670,8 +670,8 @@ bool AArch64BTILandingPadThunk::getMayUseShortThunk() { } void AArch64BTILandingPadThunk::writeLong(uint8_t *buf) { - uint64_t s = destination.getVA(addend); - uint64_t p = getThunkTargetSym()->getVA() + 4; + uint64_t s = destination.getVA(ctx, addend); + uint64_t p = getThunkTargetSym()->getVA(ctx) + 4; write32(ctx, buf, 0xd503245f); // BTI c write32(ctx, buf + 4, 0x14000000); // B S ctx.target->relocateNoSym(buf + 4, R_AARCH64_CALL26, s - p); @@ -679,7 +679,7 @@ void AArch64BTILandingPadThunk::writeLong(uint8_t *buf) { // ARM Target Thunks static uint64_t getARMThunkDestVA(Ctx &ctx, const Symbol &s) { - uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA(); + uint64_t v = s.isInPlt(ctx) ? 
s.getPltVA(ctx) : s.getVA(ctx); return SignExtend64<32>(v); } @@ -693,7 +693,7 @@ bool ARMThunk::getMayUseShortThunk() { mayUseShortThunk = false; return false; } - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t p = getThunkTargetSym()->getVA(ctx); int64_t offset = s - p - 8; mayUseShortThunk = llvm::isInt<26>(offset); return mayUseShortThunk; @@ -706,7 +706,7 @@ void ARMThunk::writeTo(uint8_t *buf) { } uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t p = getThunkTargetSym()->getVA(ctx); int64_t offset = s - p - 8; write32(ctx, buf, 0xea000000); // b S ctx.target->relocateNoSym(buf, R_ARM_JUMP24, offset); @@ -736,7 +736,7 @@ bool ThumbThunk::getMayUseShortThunk() { mayUseShortThunk = false; return false; } - uint64_t p = getThunkTargetSym()->getVA() & ~1; + uint64_t p = getThunkTargetSym()->getVA(ctx) & ~1; int64_t offset = s - p - 4; mayUseShortThunk = llvm::isInt<25>(offset); return mayUseShortThunk; @@ -749,7 +749,7 @@ void ThumbThunk::writeTo(uint8_t *buf) { } uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t p = getThunkTargetSym()->getVA(ctx); int64_t offset = s - p - 4; write16(ctx, buf + 0, 0xf000); // b.w S write16(ctx, buf + 2, 0xb000); @@ -806,7 +806,7 @@ void ARMV7PILongThunk::writeLong(uint8_t *buf) { write32(ctx, buf + 8, 0xe08cc00f); // L1: add ip, ip, pc write32(ctx, buf + 12, 0xe12fff1c); // bx ip uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t p = getThunkTargetSym()->getVA(ctx); int64_t offset = s - p - 16; ctx.target->relocateNoSym(buf, R_ARM_MOVW_PREL_NC, offset); ctx.target->relocateNoSym(buf + 4, R_ARM_MOVT_PREL, offset); @@ -826,7 +826,7 @@ void ThumbV7PILongThunk::writeLong(uint8_t *buf) { write16(ctx, buf + 8, 0x44fc); // L1: add ip, pc write16(ctx, buf + 10, 0x4760); // bx ip uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA() & ~0x1; + uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1; int64_t offset = s - p - 12; ctx.target->relocateNoSym(buf, R_ARM_THM_MOVW_PREL_NC, offset); ctx.target->relocateNoSym(buf + 4, R_ARM_THM_MOVT_PREL, offset); @@ -904,7 +904,7 @@ void ThumbV6MPILongThunk::writeLong(uint8_t *buf) { 0x46c0); // nop ; pad to 4-byte boundary write32(ctx, buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 4) uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA() & ~0x1; + uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1; ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12); } @@ -992,7 +992,7 @@ void ARMV4PILongBXThunk::writeLong(uint8_t *buf) { write32(ctx, buf + 8, 0xe12fff1c); // bx ip write32(ctx, buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 8) uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA() & ~0x1; + uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1; ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12); } @@ -1009,7 +1009,7 @@ void ARMV4PILongThunk::writeLong(uint8_t *buf) { write32(ctx, buf + 4, 0xe08ff00c); // L1: add pc, pc, r12 write32(ctx, buf + 8, 0x00000000); // L2: .word S - (P + (L1 - P) + 8) uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA() & ~0x1; + uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1; ctx.target->relocateNoSym(buf + 8, R_ARM_REL32, s - p - 12); } @@ -1029,7 +1029,7 @@ void ThumbV4PILongBXThunk::writeLong(uint8_t *buf) { write32(ctx, buf + 
8, 0xe08cf00f); // L1: add pc, r12, pc write32(ctx, buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 8) uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA() & ~0x1; + uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1; ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 16); } @@ -1051,7 +1051,7 @@ void ThumbV4PILongThunk::writeLong(uint8_t *buf) { write32(ctx, buf + 12, 0xe12fff1c); // bx ip write32(ctx, buf + 16, 0x00000000); // L2: .word S - (P + (L1 - P) + 8) uint64_t s = getARMThunkDestVA(ctx, destination); - uint64_t p = getThunkTargetSym()->getVA() & ~0x1; + uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1; ctx.target->relocateNoSym(buf + 16, R_ARM_REL32, s - p - 16); } @@ -1067,7 +1067,7 @@ void ThumbV4PILongThunk::addSymbols(ThunkSection &isec) { // Use the long jump which covers a range up to 8MiB. void AVRThunk::writeTo(uint8_t *buf) { write32(ctx, buf, 0x940c); // jmp func - ctx.target->relocateNoSym(buf, R_AVR_CALL, destination.getVA()); + ctx.target->relocateNoSym(buf, R_AVR_CALL, destination.getVA(ctx)); } void AVRThunk::addSymbols(ThunkSection &isec) { @@ -1077,7 +1077,7 @@ void AVRThunk::addSymbols(ThunkSection &isec) { // Write MIPS LA25 thunk code to call PIC function from the non-PIC one. void MipsThunk::writeTo(uint8_t *buf) { - uint64_t s = destination.getVA(); + uint64_t s = destination.getVA(ctx); write32(ctx, buf, 0x3c190000); // lui $25, %hi(func) write32(ctx, buf + 4, 0x08000000 | (s >> 2)); // j func write32(ctx, buf + 8, 0x27390000); // addiu $25, $25, %lo(func) @@ -1099,7 +1099,7 @@ InputSection *MipsThunk::getTargetInputSection() const { // Write microMIPS R2-R5 LA25 thunk code // to call PIC function from the non-PIC one. void MicroMipsThunk::writeTo(uint8_t *buf) { - uint64_t s = destination.getVA(); + uint64_t s = destination.getVA(ctx); write16(ctx, buf, 0x41b9); // lui $25, %hi(func) write16(ctx, buf + 4, 0xd400); // j func write16(ctx, buf + 8, 0x3339); // addiu $25, $25, %lo(func) @@ -1124,8 +1124,8 @@ InputSection *MicroMipsThunk::getTargetInputSection() const { // Write microMIPS R6 LA25 thunk code // to call PIC function from the non-PIC one. 
void MicroMipsR6Thunk::writeTo(uint8_t *buf) { - uint64_t s = destination.getVA(); - uint64_t p = getThunkTargetSym()->getVA(); + uint64_t s = destination.getVA(ctx); + uint64_t p = getThunkTargetSym()->getVA(ctx); write16(ctx, buf, 0x1320); // lui $25, %hi(func) write16(ctx, buf + 4, 0x3339); // addiu $25, $25, %lo(func) write16(ctx, buf + 8, 0x9400); // bc func @@ -1213,9 +1213,9 @@ void PPC32LongThunk::addSymbols(ThunkSection &isec) { void PPC32LongThunk::writeTo(uint8_t *buf) { auto ha = [](uint32_t v) -> uint16_t { return (v + 0x8000) >> 16; }; auto lo = [](uint32_t v) -> uint16_t { return v; }; - uint32_t d = destination.getVA(addend); + uint32_t d = destination.getVA(ctx, addend); if (ctx.arg.isPic) { - uint32_t off = d - (getThunkTargetSym()->getVA() + 8); + uint32_t off = d - (getThunkTargetSym()->getVA(ctx) + 8); write32(ctx, buf + 0, 0x7c0802a6); // mflr r12,0 write32(ctx, buf + 4, 0x429f0005); // bcl r20,r31,.+4 write32(ctx, buf + 8, 0x7d8802a6); // mtctr r12 @@ -1269,7 +1269,7 @@ void PPC64R2SaveStub::writeTo(uint8_t *buf) { write32(ctx, buf + 4, 0x48000000 | (offset & 0x03fffffc)); // b } else if (isInt<34>(offset)) { int nextInstOffset; - uint64_t tocOffset = destination.getVA() - getPPC64TocBase(ctx); + uint64_t tocOffset = destination.getVA(ctx) - getPPC64TocBase(ctx); if (tocOffset >> 16 > 0) { const uint64_t addi = ADDI_R12_TO_R12_NO_DISP | (tocOffset & 0xffff); const uint64_t addis = @@ -1306,8 +1306,8 @@ bool PPC64R2SaveStub::isCompatibleWith(const InputSection &isec, void PPC64R12SetupStub::writeTo(uint8_t *buf) { int64_t offset = - (gotPlt ? destination.getGotPltVA(ctx) : destination.getVA()) - - getThunkTargetSym()->getVA(); + (gotPlt ? destination.getGotPltVA(ctx) : destination.getVA(ctx)) - + getThunkTargetSym()->getVA(ctx); if (!isInt<34>(offset)) reportRangeError(ctx, buf, offset, 34, destination, "R12 setup stub offset"); @@ -1393,7 +1393,7 @@ static Thunk *addThunkAArch64(Ctx &ctx, RelType type, Symbol &s, int64_t a) { // TODO: use B for short Thumb->Arm thunks instead of LDR (this doesn't work for // Arm->Thumb, as in Arm state no BX PC trick; it doesn't switch state). 
static Thunk *addThunkArmv4(Ctx &ctx, RelType reloc, Symbol &s, int64_t a) { - bool thumb_target = s.getVA(a) & 1; + bool thumb_target = s.getVA(ctx, a) & 1; switch (reloc) { case R_ARM_PC24: diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp index 2cd4478d00cf5d..ecd4f5e470833c 100644 --- a/lld/ELF/Writer.cpp +++ b/lld/ELF/Writer.cpp @@ -145,8 +145,8 @@ static Defined *addOptionalRegular(Ctx &ctx, StringRef name, SectionBase *sec, if (!s || s->isDefined() || s->isCommon()) return nullptr; - s->resolve(ctx, Defined{ctx.internalFile, StringRef(), STB_GLOBAL, stOther, - STT_NOTYPE, val, + s->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL, + stOther, STT_NOTYPE, val, /*size=*/0, sec}); s->isUsedInRegularObj = true; return cast<Defined>(s); } @@ -158,7 +158,7 @@ void elf::addReservedSymbols(Ctx &ctx) { if (ctx.arg.emachine == EM_MIPS) { auto addAbsolute = [&](StringRef name) { Symbol *sym = - ctx.symtab->addSymbol(Defined{ctx.internalFile, name, STB_GLOBAL, + ctx.symtab->addSymbol(Defined{ctx, ctx.internalFile, name, STB_GLOBAL, STV_HIDDEN, STT_NOTYPE, 0, 0, nullptr}); sym->isUsedInRegularObj = true; return cast<Defined>(sym); }; @@ -211,9 +211,9 @@ void elf::addReservedSymbols(Ctx &ctx) { if (ctx.arg.emachine == EM_PPC64) gotOff = 0x8000; - s->resolve(ctx, - Defined{ctx.internalFile, StringRef(), STB_GLOBAL, STV_HIDDEN, - STT_NOTYPE, gotOff, /*size=*/0, ctx.out.elfHeader}); + s->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL, + STV_HIDDEN, STT_NOTYPE, gotOff, /*size=*/0, + ctx.out.elfHeader}); ctx.sym.globalOffsetTable = cast<Defined>(s); } @@ -534,7 +534,7 @@ template <class ELFT> void Writer<ELFT>::addSectionSymbols() { // Set the symbol to be relative to the output section so that its st_value // equals the output section address. Note, there may be a gap between the // start of the output section and isec. - ctx.in.symTab->addSymbol(makeDefined(isec->file, "", STB_LOCAL, + ctx.in.symTab->addSymbol(makeDefined(ctx, isec->file, "", STB_LOCAL, /*stOther=*/0, STT_SECTION, /*value=*/0, /*size=*/0, &osec)); } @@ -1504,9 +1504,9 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() { // .rela.dyn. See also AArch64::relocate. if (part.relrAuthDyn) { auto it = llvm::remove_if( - part.relrAuthDyn->relocs, [&part](const RelativeReloc &elem) { + part.relrAuthDyn->relocs, [this, &part](const RelativeReloc &elem) { const Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx]; - if (isInt<32>(reloc.sym->getVA(reloc.addend))) + if (isInt<32>(reloc.sym->getVA(ctx, reloc.addend))) return false; part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, elem.inputSec, reloc.offset, @@ -1690,10 +1690,9 @@ static void removeUnusedSyntheticSections(Ctx &ctx) { // finalizeAddressDependentContent, making .rela.dyn no longer empty. // Conservatively keep .rela.dyn. .relr.auth.dyn can be made empty, but // we would fail to remove it here.
- if (ctx.arg.emachine == EM_AARCH64 && ctx.arg.relrPackDynRelocs) - if (auto *relSec = dyn_cast<RelocationBaseSection>(sec)) - if (relSec == ctx.mainPart->relaDyn.get()) - return false; + if (ctx.arg.emachine == EM_AARCH64 && ctx.arg.relrPackDynRelocs && + sec == ctx.mainPart->relaDyn.get()) + return false; unused.insert(sec); return true; }); @@ -1734,7 +1733,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { // https://sourceware.org/ml/binutils/2002-03/msg00360.html if (ctx.mainPart->dynamic->parent) { Symbol *s = ctx.symtab->addSymbol(Defined{ - ctx.internalFile, "_DYNAMIC", STB_WEAK, STV_HIDDEN, STT_NOTYPE, + ctx, ctx.internalFile, "_DYNAMIC", STB_WEAK, STV_HIDDEN, STT_NOTYPE, /*value=*/0, /*size=*/0, ctx.mainPart->dynamic.get()}); s->isUsedInRegularObj = true; } @@ -1775,7 +1774,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() { // define _TLS_MODULE_BASE_ relative to the first TLS section. Symbol *s = ctx.symtab->find("_TLS_MODULE_BASE_"); if (s && s->isUndefined()) { - s->resolve(ctx, Defined{ctx.internalFile, StringRef(), STB_GLOBAL, + s->resolve(ctx, Defined{ctx, ctx.internalFile, StringRef(), STB_GLOBAL, STV_HIDDEN, STT_TLS, /*value=*/0, 0, /*section=*/nullptr}); ctx.sym.tlsModuleBase = cast<Defined>(s); } @@ -2713,7 +2712,7 @@ template <class ELFT> void Writer<ELFT>::checkSections() { static uint64_t getEntryAddr(Ctx &ctx) { // Case 1, 2 or 3 if (Symbol *b = ctx.symtab->find(ctx.arg.entry)) - return b->getVA(); + return b->getVA(ctx); // Case 4 uint64_t addr; diff --git a/lldb/include/lldb/Core/Module.h b/lldb/include/lldb/Core/Module.h index 5589c1c9a350dc..23257e429ad0d6 100644 --- a/lldb/include/lldb/Core/Module.h +++ b/lldb/include/lldb/Core/Module.h @@ -30,6 +30,7 @@ #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLFunctionalExtras.h" +#include "llvm/ADT/StableHashing.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Chrono.h" @@ -1057,8 +1058,11 @@ class Module : public std::enable_shared_from_this<Module>, /// time for the symbol tables can be aggregated here. StatsDuration m_symtab_index_time; - std::once_flag m_optimization_warning; - std::once_flag m_language_warning; + /// A set of hashes of all warnings and errors, to avoid reporting them + /// multiple times to the same Debugger.
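Aside: the deduplication mechanism introduced by this hunk, reduced to its essence with standard-library containers standing in for llvm::DenseMap and stable_hash_name; a sketch, not the LLDB implementation:

#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>

// Key a once_flag by the hash of the message text so each distinct
// diagnostic is emitted once per process while different messages pass.
inline void reportOnce(const std::string &msg,
                       void (*emit)(const std::string &)) {
  static std::map<size_t, std::unique_ptr<std::once_flag>> shown;
  static std::mutex shownMutex;
  std::once_flag *flag;
  {
    std::lock_guard<std::mutex> lock(shownMutex);
    auto &slot = shown[std::hash<std::string>{}(msg)];
    if (!slot)
      slot = std::make_unique<std::once_flag>();
    flag = slot.get();
  }
  std::call_once(*flag, [&] { emit(msg); });
}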
+ llvm::DenseMap<llvm::stable_hash, std::unique_ptr<std::once_flag>> + m_shown_diagnostics; + std::recursive_mutex m_diagnostic_mutex; void SymbolIndicesToSymbolContextList(Symtab *symtab, std::vector<uint32_t> &symbol_indexes, @@ -1086,6 +1090,7 @@ class Module : public std::enable_shared_from_this<Module>, void ReportWarning(const llvm::formatv_object_base &payload); void ReportError(const llvm::formatv_object_base &payload); void ReportErrorIfModifyDetected(const llvm::formatv_object_base &payload); + std::once_flag *GetDiagnosticOnceFlag(llvm::StringRef msg); }; } // namespace lldb_private diff --git a/lldb/include/lldb/Symbol/CompilerType.h b/lldb/include/lldb/Symbol/CompilerType.h index 70dacdcb7986fc..096a8f1ab68e8b 100644 --- a/lldb/include/lldb/Symbol/CompilerType.h +++ b/lldb/include/lldb/Symbol/CompilerType.h @@ -279,6 +279,8 @@ class CompilerType { ConstString GetDisplayTypeName() const; + ConstString GetMangledTypeName() const; + uint32_t GetTypeInfo(CompilerType *pointee_or_element_compiler_type = nullptr) const; diff --git a/lldb/include/lldb/Symbol/TypeSystem.h b/lldb/include/lldb/Symbol/TypeSystem.h index 7d48f9b316138c..416445a60bd017 100644 --- a/lldb/include/lldb/Symbol/TypeSystem.h +++ b/lldb/include/lldb/Symbol/TypeSystem.h @@ -237,6 +237,10 @@ class TypeSystem : public PluginInterface, virtual ConstString GetDisplayTypeName(lldb::opaque_compiler_type_t type) = 0; + // Defaults to GetTypeName(type). Override if your language desires + // specialized behavior. + virtual ConstString GetMangledTypeName(lldb::opaque_compiler_type_t type); + virtual uint32_t GetTypeInfo(lldb::opaque_compiler_type_t type, CompilerType *pointee_or_element_compiler_type) = 0; diff --git a/lldb/include/lldb/Target/Language.h b/lldb/include/lldb/Target/Language.h index 41d8eeef469eab..c9cddee6baa2da 100644 --- a/lldb/include/lldb/Target/Language.h +++ b/lldb/include/lldb/Target/Language.h @@ -363,6 +363,15 @@ class Language : public PluginInterface { return false; } + /// Returns a boolean indicating whether two symbol contexts are equal for the + /// purposes of frame comparison. If the plugin has no opinion, it should + /// return nullopt. + virtual std::optional<bool> + AreEqualForFrameComparison(const SymbolContext &sc1, + const SymbolContext &sc2) const { + return {}; + } + /// Returns true if this Language supports exception breakpoints on throw via /// a corresponding LanguageRuntime plugin. virtual bool SupportsExceptionBreakpointsOnThrow() const { return false; } diff --git a/lldb/packages/Python/lldbsuite/test/make/Makefile.rules b/lldb/packages/Python/lldbsuite/test/make/Makefile.rules index f81db9bc06d8a8..d0045ac9f91a77 100644 --- a/lldb/packages/Python/lldbsuite/test/make/Makefile.rules +++ b/lldb/packages/Python/lldbsuite/test/make/Makefile.rules @@ -13,6 +13,13 @@ # the building of the a.out executable program. For example, # DYLIB_ONLY := YES # +# When specifying one of the DYLIB_*_SOURCES variables, DYLIB_NAME +# controls the (platform-dependent) name of the produced dylib. E.g., +# on Darwin, if "DYLIB_NAME := foo", the generated dylib will be called +# "libfoo.dylib". +# +# DYLIB_NAME := foo +# # Specifying FRAMEWORK and its variants has the effect of building a NeXT-style # framework.
# FRAMEWORK := "Foo" diff --git a/lldb/source/Core/DataFileCache.cpp b/lldb/source/Core/DataFileCache.cpp index a8127efc1df064..ef0e07a8b03420 100644 --- a/lldb/source/Core/DataFileCache.cpp +++ b/lldb/source/Core/DataFileCache.cpp @@ -264,14 +264,12 @@ bool CacheSignature::Decode(const lldb_private::DataExtractor &data, } uint32_t ConstStringTable::Add(ConstString s) { - auto pos = m_string_to_offset.find(s); - if (pos != m_string_to_offset.end()) - return pos->second; - const uint32_t offset = m_next_offset; - m_strings.push_back(s); - m_string_to_offset[s] = offset; - m_next_offset += s.GetLength() + 1; - return offset; + auto [pos, inserted] = m_string_to_offset.try_emplace(s, m_next_offset); + if (inserted) { + m_strings.push_back(s); + m_next_offset += s.GetLength() + 1; + } + return pos->second; } static const llvm::StringRef kStringTableIdentifier("STAB"); diff --git a/lldb/source/Core/Module.cpp b/lldb/source/Core/Module.cpp index 88cc957e91fac4..03eb81459b29bc 100644 --- a/lldb/source/Core/Module.cpp +++ b/lldb/source/Core/Module.cpp @@ -1093,8 +1093,8 @@ void Module::ReportWarningOptimization( ss << file_name << " was compiled with optimization - stepping may behave " "oddly; variables may not be available."; - Debugger::ReportWarning(std::string(ss.GetString()), debugger_id, - &m_optimization_warning); + llvm::StringRef msg = ss.GetString(); + Debugger::ReportWarning(msg.str(), debugger_id, GetDiagnosticOnceFlag(msg)); } void Module::ReportWarningUnsupportedLanguage( @@ -1104,8 +1104,8 @@ void Module::ReportWarningUnsupportedLanguage( << Language::GetNameForLanguageType(language) << "\". " "Inspection of frame variables will be limited."; - Debugger::ReportWarning(std::string(ss.GetString()), debugger_id, - &m_language_warning); + llvm::StringRef msg = ss.GetString(); + Debugger::ReportWarning(msg.str(), debugger_id, GetDiagnosticOnceFlag(msg)); } void Module::ReportErrorIfModifyDetected( @@ -1125,20 +1125,29 @@ void Module::ReportErrorIfModifyDetected( } } +std::once_flag *Module::GetDiagnosticOnceFlag(llvm::StringRef msg) { + std::lock_guard<std::recursive_mutex> guard(m_diagnostic_mutex); + auto &once_ptr = m_shown_diagnostics[llvm::stable_hash_name(msg)]; + if (!once_ptr) + once_ptr = std::make_unique<std::once_flag>(); + return once_ptr.get(); +} + void Module::ReportError(const llvm::formatv_object_base &payload) { StreamString strm; GetDescription(strm.AsRawOstream(), lldb::eDescriptionLevelBrief); - strm.PutChar(' '); - strm.PutCString(payload.str()); - Debugger::ReportError(strm.GetString().str()); + std::string msg = payload.str(); + strm << ' ' << msg; + Debugger::ReportError(strm.GetString().str(), {}, GetDiagnosticOnceFlag(msg)); } void Module::ReportWarning(const llvm::formatv_object_base &payload) { StreamString strm; GetDescription(strm.AsRawOstream(), lldb::eDescriptionLevelFull); - strm.PutChar(' '); - strm.PutCString(payload.str()); - Debugger::ReportWarning(std::string(strm.GetString())); + std::string msg = payload.str(); + strm << ' ' << msg; + Debugger::ReportWarning(strm.GetString().str(), {}, + GetDiagnosticOnceFlag(msg)); } void Module::LogMessage(Log *log, const llvm::formatv_object_base &payload) { diff --git a/lldb/source/Interpreter/CommandInterpreter.cpp b/lldb/source/Interpreter/CommandInterpreter.cpp index bfac3f4fea8d40..c990972ca64bcf 100644 --- a/lldb/source/Interpreter/CommandInterpreter.cpp +++ b/lldb/source/Interpreter/CommandInterpreter.cpp @@ -839,7 +839,7 @@ void CommandInterpreter::LoadCommandDictionary() { "argument displays at most that many frames.
The argument 'all' " "displays all threads. Use 'settings set frame-format' to customize " "the printing of individual frames and 'settings set thread-format' " - "to customize the thread header. Frame recognizers may filter the" + "to customize the thread header. Frame recognizers may filter the " "list. Use 'thread backtrace -u (--unfiltered)' to see them all.", "bt [<digit> | all]", 0, false)); if (bt_regex_cmd_up) { diff --git a/lldb/source/Interpreter/Options.cpp b/lldb/source/Interpreter/Options.cpp index 6a90b2cc9b9896..893a3b71604ba8 100644 --- a/lldb/source/Interpreter/Options.cpp +++ b/lldb/source/Interpreter/Options.cpp @@ -251,12 +251,9 @@ Option *Options::GetLongOptions() { m_getopt_table[i].flag = nullptr; m_getopt_table[i].val = short_opt; - if (option_seen.find(short_opt) == option_seen.end()) { - option_seen[short_opt] = i; - } else if (short_opt) { + auto [pos, inserted] = option_seen.try_emplace(short_opt, i); + if (!inserted && short_opt) { m_getopt_table[i].val = 0; - std::map<int, uint32_t>::const_iterator pos = - option_seen.find(short_opt); StreamString strm; if (defs[i].HasShortOption()) Debugger::ReportError( diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp index 630ad7e20ab7e0..db9a6dd197b3a6 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp @@ -1136,6 +1136,29 @@ ClangASTImporter::ASTImporterDelegate::ImportImpl(Decl *From) { void ClangASTImporter::ASTImporterDelegate::ImportDefinitionTo( clang::Decl *to, clang::Decl *from) { + Log *log = GetLog(LLDBLog::Expressions); + + auto getDeclName = [](Decl const *decl) { + std::string name_string; + if (auto const *from_named_decl = dyn_cast<NamedDecl>(decl)) { + llvm::raw_string_ostream name_stream(name_string); + from_named_decl->printName(name_stream); + } + + return name_string; + }; + + if (log) { + if (auto *D = GetAlreadyImportedOrNull(from); D && D != to) { + LLDB_LOG( + log, + "[ClangASTImporter] ERROR: overwriting an already imported decl " + "'{0:x}' ('{1}') from '{2:x}' with '{3:x}'. Likely due to a name " + "conflict when importing '{1}'.", + D, getDeclName(from), from, to); + } + } + // We might have a forward declaration from a shared library that we // gave external lexical storage so that Clang asks us about the full // definition when it needs it. In this case the ASTImporter isn't aware @@ -1145,8 +1168,6 @@ void ClangASTImporter::ASTImporterDelegate::ImportDefinitionTo( // tell the ASTImporter that 'to' was imported from 'from'.
MapImported(from, to); - Log *log = GetLog(LLDBLog::Expressions); - if (llvm::Error err = ImportDefinition(from)) { LLDB_LOG_ERROR(log, std::move(err), "[ClangASTImporter] Error during importing definition: {0}"); @@ -1158,18 +1179,13 @@ void ClangASTImporter::ASTImporterDelegate::ImportDefinitionTo( to_tag->setCompleteDefinition(from_tag->isCompleteDefinition()); if (Log *log_ast = GetLog(LLDBLog::AST)) { - std::string name_string; - if (NamedDecl *from_named_decl = dyn_cast<NamedDecl>(from)) { - llvm::raw_string_ostream name_stream(name_string); - from_named_decl->printName(name_stream); - } LLDB_LOG(log_ast, "==== [ClangASTImporter][TUDecl: {0:x}] Imported " "({1}Decl*){2:x}, named {3} (from " "(Decl*){4:x})", static_cast<void *>(to->getTranslationUnitDecl()), - from->getDeclKindName(), static_cast<void *>(to), name_string, - static_cast<void *>(from)); + from->getDeclKindName(), static_cast<void *>(to), + getDeclName(from), static_cast<void *>(from)); // Log the AST of the TU. std::string ast_string; diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp index 9287d4baf19e9c..e5b8eee8d08c24 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp @@ -2069,13 +2069,15 @@ void SymbolFileDWARF::UpdateExternalModuleListIfNeeded() { Status error = ModuleList::GetSharedModule(dwo_module_spec, module_sp, nullptr, nullptr, nullptr); if (!module_sp) { + // ReportWarning also rate-limits based on the warning string, + // but in a -gmodules build, each object file has a similar DAG + // of module dependencies that would all be listed here. GetObjectFile()->GetModule()->ReportWarning( - "{0:x16}: unable to locate module needed for external types: " - "{1}\nerror: {2}\nDebugging will be degraded due to missing " - "types. Rebuilding the project will regenerate the needed " - "module files.", - die.GetOffset(), dwo_module_spec.GetFileSpec().GetPath().c_str(), - error.AsCString("unknown error")); + "{0}", error.AsCString("unknown error")); + GetObjectFile()->GetModule()->ReportWarning( + "Unable to locate module needed for external types.\n" + "Debugging will be degraded due to missing types. Rebuilding the " + "project will regenerate the needed module files."); continue; } @@ -2095,12 +2097,11 @@ void SymbolFileDWARF::UpdateExternalModuleListIfNeeded() { if (dwo_id != dwo_dwo_id) { GetObjectFile()->GetModule()->ReportWarning( - "{0:x16}: Module {1} is out-of-date (hash mismatch). Type " - "information " - "from this module may be incomplete or inconsistent with the rest of " - "the program. Rebuilding the project will regenerate the needed " - "module files.", - die.GetOffset(), dwo_module_spec.GetFileSpec().GetPath().c_str()); + "Module {0} is out-of-date (hash mismatch).\n" + "Type information from this module may be incomplete or inconsistent " + "with the rest of the program.
Rebuilding the project will " + "regenerate the needed module files.", + dwo_module_spec.GetFileSpec().GetPath()); } } } diff --git a/lldb/source/Symbol/CompilerType.cpp b/lldb/source/Symbol/CompilerType.cpp index f8da9ef7b7640d..e9e6e3bf2600ce 100644 --- a/lldb/source/Symbol/CompilerType.cpp +++ b/lldb/source/Symbol/CompilerType.cpp @@ -540,6 +540,14 @@ ConstString CompilerType::GetDisplayTypeName() const { return ConstString(""); } +ConstString CompilerType::GetMangledTypeName() const { + if (IsValid()) { + if (auto type_system_sp = GetTypeSystem()) + return type_system_sp->GetMangledTypeName(m_type); + } + return ConstString(""); +} + uint32_t CompilerType::GetTypeInfo( CompilerType *pointee_or_element_compiler_type) const { if (IsValid()) diff --git a/lldb/source/Symbol/TypeSystem.cpp b/lldb/source/Symbol/TypeSystem.cpp index 931ce1b0203a93..f7d634ffa2dec5 100644 --- a/lldb/source/Symbol/TypeSystem.cpp +++ b/lldb/source/Symbol/TypeSystem.cpp @@ -157,6 +157,10 @@ bool TypeSystem::IsMeaninglessWithoutDynamicResolution(void *type) { return false; } +ConstString TypeSystem::GetMangledTypeName(void *type) { + return GetTypeName(type, false); +} + ConstString TypeSystem::DeclGetMangledName(void *opaque_decl) { return ConstString(); } diff --git a/lldb/source/Target/ThreadPlanStepOverRange.cpp b/lldb/source/Target/ThreadPlanStepOverRange.cpp index 934f23b3b21a28..ef5b4b5c434d16 100644 --- a/lldb/source/Target/ThreadPlanStepOverRange.cpp +++ b/lldb/source/Target/ThreadPlanStepOverRange.cpp @@ -11,6 +11,7 @@ #include "lldb/Symbol/CompileUnit.h" #include "lldb/Symbol/Function.h" #include "lldb/Symbol/LineTable.h" +#include "lldb/Target/Language.h" #include "lldb/Target/Process.h" #include "lldb/Target/RegisterContext.h" #include "lldb/Target/Target.h" @@ -103,6 +104,10 @@ void ThreadPlanStepOverRange::SetupAvoidNoDebug( bool ThreadPlanStepOverRange::IsEquivalentContext( const SymbolContext &context) { + if (Language *language = Language::FindPlugin(context.GetLanguage())) + if (std::optional maybe_equivalent = + language->AreEqualForFrameComparison(context, m_addr_context)) + return *maybe_equivalent; // Match as much as is specified in the m_addr_context: This is a fairly // loose sanity check. Note, sometimes the target doesn't get filled in so I // left out the target check. And sometimes the module comes in as the .o diff --git a/lldb/test/Shell/SymbolFile/DWARF/TestDedupWarnings.test b/lldb/test/Shell/SymbolFile/DWARF/TestDedupWarnings.test new file mode 100644 index 00000000000000..d4fcf78d01b81c --- /dev/null +++ b/lldb/test/Shell/SymbolFile/DWARF/TestDedupWarnings.test @@ -0,0 +1,22 @@ +# REQUIRES: system-darwin +# Test the rate-limiting of module not found warnings. 
+# RUN: rm -rf %t +# RUN: mkdir -p %t + +# RUN: echo 'module "C" { header "c.h" }' >%t/module.modulemap +# RUN: echo 'struct c {};' >>%t/c.h +# RUN: echo '@import C;' >%t/a.m +# RUN: echo 'struct a { struct c c; } a;' >>%t/a.m +# RUN: echo '@import C;' >%t/b.m +# RUN: echo 'struct b { struct c c; } b;' >>%t/b.m +# RUN: echo 'int main() {}' >>%t/b.m + +# RUN: %clang_host -fmodules -Xclang -fmodules-cache-path=%t/cache -I%t -g -gmodules %t/a.m -o %t/a.o -c +# RUN: %clang_host -fmodules -Xclang -fmodules-cache-path=%t/cache -I%t -g -gmodules %t/b.m -o %t/b.o -c +# RUN: %clang_host %t/a.o %t/b.o -o %t/a.out +# RUN: rm -rf %t/cache +# RUN: %lldb %t/a.out -o "b main" -o run -o "p a" -o "p b" -o q 2>&1 | FileCheck %s +# CHECK: {{[ab]}}.o{{.*}}/cache/{{.*}}/C-{{.*}}.pcm' does not exist +# CHECK-NOT: /cache/{{.*}}/C-{.*}.pcm' does not exist +# CHECK: {{[ab]}}.o{{.*}}/cache/{{.*}}/C-{{.*}}.pcm' does not exist +# CHECK-NOT: /cache/{{.*}}/C-{.*}.pcm' does not exist diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h index 76d51ab819f441..9240a3c3127eb4 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h @@ -918,6 +918,10 @@ class CombinerHelper { bool matchCanonicalizeICmp(const MachineInstr &MI, BuildFnTy &MatchInfo); bool matchCanonicalizeFCmp(const MachineInstr &MI, BuildFnTy &MatchInfo); + // unmerge_values(anyext(build vector)) -> build vector(anyext) + bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI, + BuildFnTy &MatchInfo); + private: /// Checks for legality of an indexed variant of \p LdSt. bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const; diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h index d9f3f4ab3935d3..92d37753791c6e 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h @@ -868,6 +868,14 @@ class GZext : public GCastOp { }; }; +/// Represents an any ext. +class GAnyExt : public GCastOp { +public: + static bool classof(const MachineInstr *MI) { + return MI->getOpcode() == TargetOpcode::G_ANYEXT; + }; +}; + /// Represents a trunc. class GTrunc : public GCastOp { public: diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index 61615cb0f7b301..8e0cdc6f1a5e77 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -3223,6 +3223,9 @@ class TargetLoweringBase { /// not legal, but should return true if those types will eventually legalize /// to types that support FMAs. After legalization, it will only be called on /// types that support FMAs (via Legal or Custom actions) + /// + /// Targets that care about soft float support should return false when soft + /// float code is being generated (i.e. use-soft-float). 
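The ARM, SystemZ, and X86 overrides later in this patch implement this documented contract by bailing out under use-soft-float. A standalone distillation of the rule (TL here is a stand-in type, not LLVM's TargetLowering):

#include <cstdio>

// TL only models the decision; it is not LLVM API.
struct TL {
  bool soft_float;
  bool has_fma(int bits) const { return bits == 32 || bits == 64; }
  bool isFMAFasterThanFMulAndFAdd(int bits) const {
    if (soft_float)
      return false; // FP lowers to libcalls; a fused form cannot be faster
    return has_fma(bits); // assume native f32/f64 FMA for the example
  }
};

int main() {
  TL hard{false}, soft{true};
  std::printf("%d %d\n", hard.isFMAFasterThanFMulAndFAdd(64),
              soft.isFMAFasterThanFMulAndFAdd(64)); // prints: 1 0
}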
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const { return false; diff --git a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h index fa07b3a9e8b14f..e631e3899fd4de 100644 --- a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h +++ b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h @@ -358,20 +358,21 @@ struct CounterMappingRegion { struct CountedRegion : public CounterMappingRegion { uint64_t ExecutionCount; uint64_t FalseExecutionCount; - bool Folded; + bool TrueFolded; + bool FalseFolded; bool HasSingleByteCoverage; CountedRegion(const CounterMappingRegion &R, uint64_t ExecutionCount, bool HasSingleByteCoverage) : CounterMappingRegion(R), ExecutionCount(ExecutionCount), - FalseExecutionCount(0), Folded(false), + FalseExecutionCount(0), TrueFolded(false), FalseFolded(true), HasSingleByteCoverage(HasSingleByteCoverage) {} CountedRegion(const CounterMappingRegion &R, uint64_t ExecutionCount, uint64_t FalseExecutionCount, bool HasSingleByteCoverage) : CounterMappingRegion(R), ExecutionCount(ExecutionCount), - FalseExecutionCount(FalseExecutionCount), Folded(false), - HasSingleByteCoverage(HasSingleByteCoverage) {} + FalseExecutionCount(FalseExecutionCount), TrueFolded(false), + FalseFolded(false), HasSingleByteCoverage(HasSingleByteCoverage) {} }; /// MCDC Record grouping all information together. @@ -719,10 +720,10 @@ struct FunctionRecord { Region.Kind == CounterMappingRegion::MCDCBranchRegion) { CountedBranchRegions.emplace_back(Region, Count, FalseCount, HasSingleByteCoverage); - // If both counters are hard-coded to zero, then this region represents a + // If either counter is hard-coded to zero, then this region represents a // constant-folded branch. - if (Region.Count.isZero() && Region.FalseCount.isZero()) - CountedBranchRegions.back().Folded = true; + CountedBranchRegions.back().TrueFolded = Region.Count.isZero(); + CountedBranchRegions.back().FalseFolded = Region.FalseCount.isZero(); return; } if (CountedRegions.empty()) diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td index d0373a7dadfcf9..ead4149fc11068 100644 --- a/llvm/include/llvm/Target/GlobalISel/Combine.td +++ b/llvm/include/llvm/Target/GlobalISel/Combine.td @@ -420,7 +420,7 @@ def unary_undef_to_zero: GICombineRule< // replaced with undef. def propagate_undef_any_op: GICombineRule< (defs root:$root), - (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC, G_BITCAST):$root, + (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC, G_BITCAST, G_ANYEXT):$root, [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]), (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; @@ -428,7 +428,7 @@ def propagate_undef_any_op: GICombineRule< // replaced with undef. 
def propagate_undef_all_ops: GICombineRule< (defs root:$root), - (match (wip_match_opcode G_SHUFFLE_VECTOR):$root, + (match (wip_match_opcode G_SHUFFLE_VECTOR, G_BUILD_VECTOR):$root, [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]), (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; @@ -832,6 +832,14 @@ def unmerge_dead_to_trunc : GICombineRule< (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }]) >; +// Transform unmerge any build vector -> build vector anyext +def unmerge_anyext_build_vector : GICombineRule< + (defs root:$root, build_fn_matchinfo:$matchinfo), + (match (wip_match_opcode G_UNMERGE_VALUES): $root, + [{ return Helper.matchUnmergeValuesAnyExtBuildVector(*${root}, ${matchinfo}); }]), + (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }]) +>; + // Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0. def unmerge_zext_to_zext : GICombineRule< (defs root:$d), @@ -840,6 +848,16 @@ def unmerge_zext_to_zext : GICombineRule< (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }]) >; +def merge_combines: GICombineGroup<[ + unmerge_anyext_build_vector, + unmerge_merge, + merge_unmerge, + unmerge_cst, + unmerge_undef, + unmerge_dead_to_trunc, + unmerge_zext_to_zext +]>; + // Under certain conditions, transform: // trunc (shl x, K) -> shl (trunc x), K// // trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K)) @@ -1851,7 +1869,6 @@ def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero, propagate_undef_all_ops, propagate_undef_shuffle_mask, erase_undef_store, - unmerge_undef, insert_extract_vec_elt_out_of_bounds]>; def identity_combines : GICombineGroup<[select_same_val, right_identity_zero, @@ -1909,10 +1926,8 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines, reassocs, ptr_add_immed_chain, cmp_combines, shl_ashr_to_sext_inreg, sext_inreg_of_load, width_reduction_combines, select_combines, - known_bits_simplifications, + known_bits_simplifications, trunc_shift, not_cmp_fold, opt_brcond_by_inverting_cond, - unmerge_merge, unmerge_cst, unmerge_dead_to_trunc, - unmerge_zext_to_zext, merge_unmerge, trunc_shift, const_combines, xor_of_and_with_same_reg, ptr_add_with_zero, shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine, div_rem_to_divrem, funnel_shift_combines, bitreverse_shift, commute_shift, @@ -1920,11 +1935,11 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines, constant_fold_cast_op, fabs_fneg_fold, intdiv_combines, mulh_combines, redundant_neg_operands, and_or_disjoint_mask, fma_combines, fold_binop_into_select, - sub_add_reg, select_to_minmax, + sub_add_reg, select_to_minmax, fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors, combine_concat_vector, match_addos, sext_trunc, zext_trunc, prefer_sign_combines, combine_shuffle_concat, - combine_use_vector_truncate]>; + combine_use_vector_truncate, merge_combines]>; // A combine group used to for prelegalizer combiners at -O0. 
The combines in // this group have been selected based on experiments to balance code size and diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td index d9121cf166e5aa..2d19e36cc8428c 100644 --- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td +++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td @@ -147,6 +147,7 @@ def : GINodeEquiv; def : GINodeEquiv; def : GINodeEquiv; def : GINodeEquiv; +def : GINodeEquiv; def : GINodeEquiv; def : GINodeEquiv; def : GINodeEquiv; diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp index 74df67a4ff9b43..c0104d2bc26112 100644 --- a/llvm/lib/Analysis/ConstantFolding.cpp +++ b/llvm/lib/Analysis/ConstantFolding.cpp @@ -1677,6 +1677,8 @@ bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) { return Name == "fabs" || Name == "fabsf" || Name == "floor" || Name == "floorf" || Name == "fmod" || Name == "fmodf"; + case 'i': + return Name == "ilogb" || Name == "ilogbf"; case 'l': return Name == "log" || Name == "logf" || Name == "logl" || Name == "log2" || Name == "log2f" || Name == "log10" || @@ -2131,7 +2133,8 @@ static Constant *ConstantFoldScalarCall1(StringRef Name, } #endif - if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) + if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy() && + !Ty->isIntegerTy()) return nullptr; // Use internal versions of these intrinsics. @@ -2391,6 +2394,11 @@ static Constant *ConstantFoldScalarCall1(StringRef Name, // TODO: What about hosts that lack a C99 library? return ConstantFoldFP(log10, APF, Ty); break; + case LibFunc_ilogb: + case LibFunc_ilogbf: + if (!APF.isZero() && TLI->has(Func)) + return ConstantInt::get(Ty, ilogb(APF), true); + break; case LibFunc_logb: case LibFunc_logbf: if (!APF.isZero() && TLI->has(Func)) diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp index f9b1621955c217..b7ddf9f479ef8e 100644 --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -7611,3 +7611,85 @@ bool CombinerHelper::matchFoldAMinusC1PlusC2(const MachineInstr &MI, return true; } + +bool CombinerHelper::matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI, + BuildFnTy &MatchInfo) { + const GUnmerge *Unmerge = cast<GUnmerge>(&MI); + + if (!MRI.hasOneNonDBGUse(Unmerge->getSourceReg())) + return false; + + const MachineInstr *Source = MRI.getVRegDef(Unmerge->getSourceReg()); + + LLT DstTy = MRI.getType(Unmerge->getReg(0)); + + // $bv:_(<8 x s8>) = G_BUILD_VECTOR .... + // $any:_(<8 x s16>) = G_ANYEXT $bv + // $uv:_(<4 x s16>), $uv1:_(<4 x s16>) = G_UNMERGE_VALUES $any + // + // -> + // + // $any:_(s16) = G_ANYEXT $bv[0] + // $any1:_(s16) = G_ANYEXT $bv[1] + // $any2:_(s16) = G_ANYEXT $bv[2] + // $any3:_(s16) = G_ANYEXT $bv[3] + // $any4:_(s16) = G_ANYEXT $bv[4] + // $any5:_(s16) = G_ANYEXT $bv[5] + // $any6:_(s16) = G_ANYEXT $bv[6] + // $any7:_(s16) = G_ANYEXT $bv[7] + // $uv:_(<4 x s16>) = G_BUILD_VECTOR $any, $any1, $any2, $any3 + // $uv1:_(<4 x s16>) = G_BUILD_VECTOR $any4, $any5, $any6, $any7 + + // We want to unmerge into vectors.
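Stepping back to the ConstantFolding.cpp hunk above: the new LibFunc_ilogb case folds calls with constant arguments via the host ilogb, which is also why integer result types are now admitted alongside the floating-point ones. A quick illustration of the folded values (standard <cmath> only):

#include <cmath>
#include <cstdio>

int main() {
  // ilogb returns the unbiased binary exponent as an int (unlike logb,
  // which returns it as a double): 8.0 = 1.0*2^3, 0.75 = 1.5*2^-1.
  std::printf("%d %d %d\n", std::ilogb(8.0), std::ilogb(1.0),
              std::ilogb(0.75)); // prints: 3 0 -1
  // The !APF.isZero() guard in the patch matters: ilogb(0) is FP_ILOGB0,
  // a domain error, so zero inputs are left unfolded.
}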
+ if (!DstTy.isFixedVector()) + return false; + + const GAnyExt *Any = dyn_cast<GAnyExt>(Source); + if (!Any) + return false; + + const MachineInstr *NextSource = MRI.getVRegDef(Any->getSrcReg()); + + if (const GBuildVector *BV = dyn_cast<GBuildVector>(NextSource)) { + // G_UNMERGE_VALUES G_ANYEXT G_BUILD_VECTOR + + if (!MRI.hasOneNonDBGUse(BV->getReg(0))) + return false; + + // FIXME: check element types? + if (BV->getNumSources() % Unmerge->getNumDefs() != 0) + return false; + + LLT BigBvTy = MRI.getType(BV->getReg(0)); + LLT SmallBvTy = DstTy; + LLT SmallBvElemenTy = SmallBvTy.getElementType(); + + if (!isLegalOrBeforeLegalizer( + {TargetOpcode::G_BUILD_VECTOR, {SmallBvTy, SmallBvElemenTy}})) + return false; + + // We check the legality of scalar anyext. + if (!isLegalOrBeforeLegalizer( + {TargetOpcode::G_ANYEXT, + {SmallBvElemenTy, BigBvTy.getElementType()}})) + return false; + + MatchInfo = [=](MachineIRBuilder &B) { + // Build into each G_UNMERGE_VALUES def + // a small build vector with anyext from the source build vector. + for (unsigned I = 0; I < Unmerge->getNumDefs(); ++I) { + SmallVector<Register> Ops; + for (unsigned J = 0; J < SmallBvTy.getNumElements(); ++J) { + Register SourceArray = + BV->getSourceReg(I * SmallBvTy.getNumElements() + J); + auto AnyExt = B.buildAnyExt(SmallBvElemenTy, SourceArray); + Ops.push_back(AnyExt.getReg(0)); + } + B.buildBuildVector(Unmerge->getOperand(I).getReg(), Ops); + }; + }; + return true; + }; + + return false; +} diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 98eed6b7503d10..50a75bc5932c42 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -7159,15 +7159,16 @@ SDValue DAGCombiner::visitAND(SDNode *N) { SDValue N0Op0 = N0.getOperand(0); if (N0Op0.getOpcode() == ISD::AND && (ExtOpc != ISD::ZERO_EXTEND || !TLI.isZExtFree(N0Op0, VT)) && - DAG.isConstantIntBuildVectorOrConstantInt(N1) && - DAG.isConstantIntBuildVectorOrConstantInt(N0Op0.getOperand(1)) && N0->hasOneUse() && N0Op0->hasOneUse()) { - SDValue NewMask = - DAG.getNode(ISD::AND, DL, VT, N1, - DAG.getNode(ExtOpc, DL, VT, N0Op0.getOperand(1))); - return DAG.getNode(ISD::AND, DL, VT, - DAG.getNode(ExtOpc, DL, VT, N0Op0.getOperand(0)), - NewMask); + if (SDValue NewExt = DAG.FoldConstantArithmetic(ExtOpc, DL, VT, + {N0Op0.getOperand(1)})) { + if (SDValue NewMask = + DAG.FoldConstantArithmetic(ISD::AND, DL, VT, {N1, NewExt})) { + return DAG.getNode(ISD::AND, DL, VT, + DAG.getNode(ExtOpc, DL, VT, N0Op0.getOperand(0)), + NewMask); + } + } } } @@ -14819,8 +14820,9 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { return DAG.getConstant(0, DL, VT); // fold (sext_in_reg c1) -> c1 - if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) - return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N0, N1); + if (SDValue C = + DAG.FoldConstantArithmetic(ISD::SIGN_EXTEND_INREG, DL, VT, {N0, N1})) + return C; // If the input is already sign extended, just drop the extension.
if (ExtVTBits >= DAG.ComputeMaxSignificantBits(N0)) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 43d49674297f6f..55cebc28e49275 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -6659,6 +6659,44 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, if (TLI->isCommutativeBinOp(Opcode)) if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[1])) return FoldSymbolOffset(Opcode, VT, GA, Ops[0].getNode()); + + // fold (sext_in_reg c1) -> c2 + if (Opcode == ISD::SIGN_EXTEND_INREG) { + EVT EVT = cast<VTSDNode>(Ops[1])->getVT(); + + auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { + unsigned FromBits = EVT.getScalarSizeInBits(); + Val <<= Val.getBitWidth() - FromBits; + Val.ashrInPlace(Val.getBitWidth() - FromBits); + return getConstant(Val, DL, ConstantVT); + }; + + if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) { + const APInt &Val = C1->getAPIntValue(); + return SignExtendInReg(Val, VT); + } + + if (ISD::isBuildVectorOfConstantSDNodes(Ops[0].getNode())) { + SmallVector<SDValue> ScalarOps; + llvm::EVT OpVT = Ops[0].getOperand(0).getValueType(); + for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I) { + SDValue Op = Ops[0].getOperand(I); + if (Op.isUndef()) { + ScalarOps.push_back(getUNDEF(OpVT)); + continue; + } + APInt Val = cast<ConstantSDNode>(Op)->getAPIntValue(); + ScalarOps.push_back(SignExtendInReg(Val, OpVT)); + } + return getBuildVector(VT, DL, ScalarOps); + } + + if (Ops[0].getOpcode() == ISD::SPLAT_VECTOR && + isa<ConstantSDNode>(Ops[0].getOperand(0))) + return getNode(ISD::SPLAT_VECTOR, DL, VT, + SignExtendInReg(Ops[0].getConstantOperandAPInt(0), + Ops[0].getOperand(0).getValueType())); + } } // This is for vector folding only from here on.
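The relocated SignExtendInReg lambda is the whole trick behind the new ISD::SIGN_EXTEND_INREG folding path: shift the FromBits-wide field to the top of the word, then arithmetic-shift it back down so its sign bit replicates. The same arithmetic on a fixed-width integer (well-defined since C++20, and in practice on all common hosts):

#include <cstdint>
#include <cstdio>

// Same arithmetic as the APInt lambda, on int32_t: move the low from_bits
// to the top of the word, then arithmetic-shift back so the field's sign
// bit fills the upper bits.
int32_t signExtendInReg(int32_t val, unsigned from_bits) {
  const unsigned shift = 32 - from_bits;
  return static_cast<int32_t>(static_cast<uint32_t>(val) << shift) >> shift;
}

int main() {
  std::printf("%d %d\n", signExtendInReg(0xFF, 8),   // -1
              signExtendInReg(0x7F, 8));             // 127
}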
@@ -7205,41 +7243,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, "Vector element counts must match in SIGN_EXTEND_INREG"); assert(EVT.bitsLE(VT) && "Not extending!"); if (EVT == VT) return N1; // Not actually extending - - auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { - unsigned FromBits = EVT.getScalarSizeInBits(); - Val <<= Val.getBitWidth() - FromBits; - Val.ashrInPlace(Val.getBitWidth() - FromBits); - return getConstant(Val, DL, ConstantVT); - }; - - if (N1C) { - const APInt &Val = N1C->getAPIntValue(); - return SignExtendInReg(Val, VT); - } - - if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { - SmallVector Ops; - llvm::EVT OpVT = N1.getOperand(0).getValueType(); - for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { - SDValue Op = N1.getOperand(i); - if (Op.isUndef()) { - Ops.push_back(getUNDEF(OpVT)); - continue; - } - ConstantSDNode *C = cast(Op); - APInt Val = C->getAPIntValue(); - Ops.push_back(SignExtendInReg(Val, OpVT)); - } - return getBuildVector(VT, DL, Ops); - } - - if (N1.getOpcode() == ISD::SPLAT_VECTOR && - isa(N1.getOperand(0))) - return getNode( - ISD::SPLAT_VECTOR, DL, VT, - SignExtendInReg(N1.getConstantOperandAPInt(0), - N1.getOperand(0).getValueType())); break; } case ISD::FP_TO_SINT_SAT: diff --git a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp index c713371da81e40..119e09187b9080 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp @@ -503,7 +503,7 @@ class MCDCRecordProcessor : NextIDsBuilder, mcdc::TVIdxBuilder { const auto &BranchParams = B->getBranchParams(); PosToID[I] = BranchParams.ID; CondLoc[I] = B->startLoc(); - Folded[I++] = (B->Count.isZero() && B->FalseCount.isZero()); + Folded[I++] = (B->Count.isZero() || B->FalseCount.isZero()); } // Using Profile Bitmap from runtime, mark the executed test vectors. diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td index 321190c83b79f3..8af8cdfeba6ac4 100644 --- a/llvm/lib/Target/AArch64/AArch64Combine.td +++ b/llvm/lib/Target/AArch64/AArch64Combine.td @@ -322,13 +322,13 @@ def AArch64PostLegalizerCombiner extractvecelt_pairwise_add, redundant_or, mul_const, redundant_sext_inreg, form_bitfield_extract, rotate_out_of_range, - icmp_to_true_false_known_bits, merge_unmerge, + icmp_to_true_false_known_bits, select_combines, fold_merge_to_zext, constant_fold_binops, identity_combines, ptr_add_immed_chain, overlapping_and, split_store_zero_128, undef_combines, select_to_minmax, or_to_bsp, combine_concat_vector, - commute_constant_to_rhs, + commute_constant_to_rhs, merge_combines, push_freeze_to_prevent_poison_from_propagating, combine_mul_cmlt, combine_use_vector_truncate]> { } diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp index e9d01602c298af..fb6c23a9645650 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -1282,6 +1282,11 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) getActionDefinitionsBuilder({G_SCMP, G_UCMP}).lower(); + getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR) + .legalFor({{v8s8, v16s8}, {v4s16, v8s16}, {v2s32, v4s32}}) + .widenScalarOrEltToNextPow2(0) + .immIdx(0); // Inform verifier imm idx 0 is handled. 
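On the CoverageMapping change in this same hunk: splitting Folded into TrueFolded and FalseFolded lets a branch count as folded on one side only, which the old Count.isZero() && FalseCount.isZero() test could never express. A toy model of the new rule (using runtime counts in place of the counter-expression test, purely for illustration):

#include <cstdio>

struct CountedBranch {
  unsigned long count, false_count;
  bool true_folded() const { return count == 0; }
  bool false_folded() const { return false_count == 0; }
};

int main() {
  // A branch whose false edge was constant-folded away while the true edge
  // ran five times: folded on exactly one side, which the old single flag
  // (requiring both counters zero) could not represent.
  CountedBranch b{5, 0};
  std::printf("true_folded=%d false_folded=%d\n", b.true_folded(),
              b.false_folded()); // prints: true_folded=0 false_folded=1
}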
+ getLegacyLegalizerInfo().computeTables(); verify(*ST.getInstrInfo()); } diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index 1e2c77b08b9a63..c912a580854c1c 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -194,6 +194,23 @@ bool SIFoldOperandsImpl::frameIndexMayFold( return false; const unsigned Opc = UseMI.getOpcode(); + switch (Opc) { + case AMDGPU::S_ADD_I32: + case AMDGPU::V_ADD_U32_e32: + case AMDGPU::V_ADD_CO_U32_e32: + // TODO: Possibly relax hasOneUse. It matters more for mubuf, since we have + // to insert the wave size shift at every point we use the index. + // TODO: Fix depending on visit order to fold immediates into the operand + return UseMI.getOperand(OpNo == 1 ? 2 : 1).isImm() && + MRI->hasOneNonDBGUse(UseMI.getOperand(OpNo).getReg()); + case AMDGPU::V_ADD_U32_e64: + case AMDGPU::V_ADD_CO_U32_e64: + return UseMI.getOperand(OpNo == 2 ? 3 : 2).isImm() && + MRI->hasOneNonDBGUse(UseMI.getOperand(OpNo).getReg()); + default: + break; + } + if (TII->isMUBUF(UseMI)) return OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr); if (!TII->isFLATScratch(UseMI)) diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 5d679a1a916dc4..a4f01e55f53c1c 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -19354,6 +19354,9 @@ bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { /// patterns (and we don't have the non-fused floating point instruction). bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const { + if (Subtarget->useSoftFloat()) + return false; + if (!VT.isSimple()) return false; diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp index 7d6442a611125f..9b589284463294 100644 --- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp +++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp @@ -363,6 +363,16 @@ void NVPTXInstPrinter::printMemOperand(const MCInst *MI, int OpNum, } } +void NVPTXInstPrinter::printOffseti32imm(const MCInst *MI, int OpNum, + raw_ostream &O, const char *Modifier) { + auto &Op = MI->getOperand(OpNum); + assert(Op.isImm() && "Invalid operand"); + if (Op.getImm() != 0) { + O << "+"; + printOperand(MI, OpNum, O); + } +} + void NVPTXInstPrinter::printProtoIdent(const MCInst *MI, int OpNum, raw_ostream &O, const char *Modifier) { const MCOperand &Op = MI->getOperand(OpNum); diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h index e6954f861cd10e..e8a4a6dbdd5324 100644 --- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h +++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h @@ -45,6 +45,8 @@ class NVPTXInstPrinter : public MCInstPrinter { const char *Modifier = nullptr); void printMemOperand(const MCInst *MI, int OpNum, raw_ostream &O, const char *Modifier = nullptr); + void printOffseti32imm(const MCInst *MI, int OpNum, raw_ostream &O, + const char *Modifier = nullptr); void printProtoIdent(const MCInst *MI, int OpNum, raw_ostream &O, const char *Modifier = nullptr); void printPrmtMode(const MCInst *MI, int OpNum, raw_ostream &O, diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index 8b34ce4f1001c1..b5478b8f09ceb4 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ 
b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -1934,6 +1934,10 @@ def MmaCode : Operand { let PrintMethod = "printMmaCode"; } +def Offseti32imm : Operand { + let PrintMethod = "printOffseti32imm"; +} + def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>; def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>; @@ -2482,21 +2486,21 @@ def ProxyReg : let mayLoad = true in { class LoadParamMemInst : - NVPTXInst<(outs regclass:$dst), (ins i32imm:$b), - !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"), + NVPTXInst<(outs regclass:$dst), (ins Offseti32imm:$b), + !strconcat("ld.param", opstr, " \t$dst, [retval0$b];"), []>; class LoadParamV2MemInst : - NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b), + NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins Offseti32imm:$b), !strconcat("ld.param.v2", opstr, - " \t{{$dst, $dst2}}, [retval0+$b];"), []>; + " \t{{$dst, $dst2}}, [retval0$b];"), []>; class LoadParamV4MemInst : NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3, regclass:$dst4), - (ins i32imm:$b), + (ins Offseti32imm:$b), !strconcat("ld.param.v4", opstr, - " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"), + " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0$b];"), []>; } @@ -2512,8 +2516,8 @@ let mayStore = true in { if !or(support_imm, !isa(op)) then def _ # !if(!isa(op), "r", "i") : NVPTXInst<(outs), - (ins op:$val, i32imm:$a, i32imm:$b), - "st.param" # opstr # " \t[param$a+$b], $val;", + (ins op:$val, i32imm:$a, Offseti32imm:$b), + "st.param" # opstr # " \t[param$a$b], $val;", []>; } @@ -2524,8 +2528,8 @@ let mayStore = true in { # !if(!isa(op2), "r", "i") : NVPTXInst<(outs), (ins op1:$val1, op2:$val2, - i32imm:$a, i32imm:$b), - "st.param.v2" # opstr # " \t[param$a+$b], {{$val1, $val2}};", + i32imm:$a, Offseti32imm:$b), + "st.param.v2" # opstr # " \t[param$a$b], {{$val1, $val2}};", []>; } @@ -2541,29 +2545,29 @@ let mayStore = true in { : NVPTXInst<(outs), (ins op1:$val1, op2:$val2, op3:$val3, op4:$val4, - i32imm:$a, i32imm:$b), + i32imm:$a, Offseti32imm:$b), "st.param.v4" # opstr # - " \t[param$a+$b], {{$val1, $val2, $val3, $val4}};", + " \t[param$a$b], {{$val1, $val2, $val3, $val4}};", []>; } class StoreRetvalInst : - NVPTXInst<(outs), (ins regclass:$val, i32imm:$a), - !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"), + NVPTXInst<(outs), (ins regclass:$val, Offseti32imm:$a), + !strconcat("st.param", opstr, " \t[func_retval0$a], $val;"), []>; class StoreRetvalV2Inst : - NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a), + NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, Offseti32imm:$a), !strconcat("st.param.v2", opstr, - " \t[func_retval0+$a], {{$val, $val2}};"), + " \t[func_retval0$a], {{$val, $val2}};"), []>; class StoreRetvalV4Inst : NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3, - regclass:$val4, i32imm:$a), + regclass:$val4, Offseti32imm:$a), !strconcat("st.param.v4", opstr, - " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"), + " \t[func_retval0$a], {{$val, $val2, $val3, $val4}};"), []>; } @@ -2827,21 +2831,21 @@ multiclass LD { def _ari : NVPTXInst< (outs regclass:$dst), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, - i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset), + i32imm:$fromWidth, Int32Regs:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t$dst, [$addr+$offset];", []>; + "\t$dst, [$addr$offset];", []>; def _ari_64 : NVPTXInst< (outs regclass:$dst), (ins 
LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t$dst, [$addr+$offset];", []>; + "\t$dst, [$addr$offset];", []>; def _asi : NVPTXInst< (outs regclass:$dst), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t$dst, [$addr+$offset];", []>; + "\t$dst, [$addr$offset];", []>; } let mayLoad=1, hasSideEffects=0 in { @@ -2876,23 +2880,23 @@ multiclass ST { (outs), (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, - i32imm:$offset), + Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth" - " \t[$addr+$offset], $src;", []>; + " \t[$addr$offset], $src;", []>; def _ari_64 : NVPTXInst< (outs), (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, - i32imm:$offset), + Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth" - " \t[$addr+$offset], $src;", []>; + " \t[$addr$offset], $src;", []>; def _asi : NVPTXInst< (outs), (ins regclass:$src, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, imem:$addr, - i32imm:$offset), + Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth" - " \t[$addr+$offset], $src;", []>; + " \t[$addr$offset], $src;", []>; } let mayStore=1, hasSideEffects=0 in { @@ -2929,21 +2933,21 @@ multiclass LD_VEC { def _v2_ari : NVPTXInst< (outs regclass:$dst1, regclass:$dst2), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2}}, [$addr+$offset];", []>; + "\t{{$dst1, $dst2}}, [$addr$offset];", []>; def _v2_ari_64 : NVPTXInst< (outs regclass:$dst1, regclass:$dst2), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2}}, [$addr+$offset];", []>; + "\t{{$dst1, $dst2}}, [$addr$offset];", []>; def _v2_asi : NVPTXInst< (outs regclass:$dst1, regclass:$dst2), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2}}, [$addr+$offset];", []>; + "\t{{$dst1, $dst2}}, [$addr$offset];", []>; def _v4_avar : NVPTXInst< (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, @@ -2965,21 +2969,21 @@ multiclass LD_VEC { def _v4_ari : NVPTXInst< (outs 
regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>; + "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr$offset];", []>; def _v4_ari_64 : NVPTXInst< (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>; + "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr$offset];", []>; def _v4_asi : NVPTXInst< (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), (ins LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, Offseti32imm:$offset), "ld${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>; + "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr$offset];", []>; } let mayLoad=1, hasSideEffects=0 in { defm LDV_i8 : LD_VEC; @@ -3016,23 +3020,23 @@ multiclass ST_VEC { (outs), (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, - Int32Regs:$addr, i32imm:$offset), + Int32Regs:$addr, Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr+$offset], {{$src1, $src2}};", []>; + "\t[$addr$offset], {{$src1, $src2}};", []>; def _v2_ari_64 : NVPTXInst< (outs), (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, - Int64Regs:$addr, i32imm:$offset), + Int64Regs:$addr, Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr+$offset], {{$src1, $src2}};", []>; + "\t[$addr$offset], {{$src1, $src2}};", []>; def _v2_asi : NVPTXInst< (outs), (ins regclass:$src1, regclass:$src2, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, - imem:$addr, i32imm:$offset), + imem:$addr, Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr+$offset], {{$src1, $src2}};", []>; + "\t[$addr$offset], {{$src1, $src2}};", []>; def _v4_avar : NVPTXInst< (outs), (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4, @@ -3058,23 +3062,23 @@ multiclass ST_VEC { (outs), (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr, Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>; + "\t[$addr$offset], {{$src1, $src2, $src3, $src4}};", []>; def _v4_ari_64 : NVPTXInst< (outs), (ins regclass:$src1, regclass:$src2, regclass:$src3, 
regclass:$src4, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth " - "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>; + "\t[$addr$offset], {{$src1, $src2, $src3, $src4}};", []>; def _v4_asi : NVPTXInst< (outs), (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4, LdStCode:$sem, LdStCode:$scope, LdStCode:$addsp, LdStCode:$Vec, - LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset), + LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, Offseti32imm:$offset), "st${sem:sem}${scope:scope}${addsp:addsp}${Vec:vec}.${Sign:sign}" - "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>; + "$fromWidth \t[$addr$offset], {{$src1, $src2, $src3, $src4}};", []>; } let mayStore=1, hasSideEffects=0 in { @@ -3903,4 +3907,4 @@ def atomic_thread_fence_seq_cst_cta : Requires<[hasPTX<60>, hasSM<70>]>; def atomic_thread_fence_acq_rel_cta : NVPTXInst<(outs), (ins), "fence.acq_rel.cta;", []>, - Requires<[hasPTX<60>, hasSM<70>]>; \ No newline at end of file + Requires<[hasPTX<60>, hasSM<70>]>; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index fbd2f47d276903..3588ef46cadce1 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -20392,12 +20392,24 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass); return std::make_pair(0U, &RISCV::GPRNoX0RegClass); case 'f': - if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16) - return std::make_pair(0U, &RISCV::FPR16RegClass); - if (Subtarget.hasStdExtF() && VT == MVT::f32) - return std::make_pair(0U, &RISCV::FPR32RegClass); - if (Subtarget.hasStdExtD() && VT == MVT::f64) - return std::make_pair(0U, &RISCV::FPR64RegClass); + if (VT == MVT::f16) { + if (Subtarget.hasStdExtZfhmin()) + return std::make_pair(0U, &RISCV::FPR16RegClass); + if (Subtarget.hasStdExtZhinxmin()) + return std::make_pair(0U, &RISCV::GPRF16NoX0RegClass); + } else if (VT == MVT::f32) { + if (Subtarget.hasStdExtF()) + return std::make_pair(0U, &RISCV::FPR32RegClass); + if (Subtarget.hasStdExtZfinx()) + return std::make_pair(0U, &RISCV::GPRF32NoX0RegClass); + } else if (VT == MVT::f64) { + if (Subtarget.hasStdExtD()) + return std::make_pair(0U, &RISCV::FPR64RegClass); + if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit()) + return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass); + if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) + return std::make_pair(0U, &RISCV::GPRNoX0RegClass); + } break; default: break; @@ -20440,12 +20452,24 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, if (!VT.isVector()) return std::make_pair(0U, &RISCV::GPRCRegClass); } else if (Constraint == "cf") { - if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16) - return std::make_pair(0U, &RISCV::FPR16CRegClass); - if (Subtarget.hasStdExtF() && VT == MVT::f32) - return std::make_pair(0U, &RISCV::FPR32CRegClass); - if (Subtarget.hasStdExtD() && VT == MVT::f64) - return std::make_pair(0U, &RISCV::FPR64CRegClass); + if (VT == MVT::f16) { + if (Subtarget.hasStdExtZfhmin()) + return std::make_pair(0U, &RISCV::FPR16CRegClass); + if (Subtarget.hasStdExtZhinxmin()) + return std::make_pair(0U, &RISCV::GPRF16CRegClass); + } else if (VT 
== MVT::f32) { + if (Subtarget.hasStdExtF()) + return std::make_pair(0U, &RISCV::FPR32CRegClass); + if (Subtarget.hasStdExtZfinx()) + return std::make_pair(0U, &RISCV::GPRF32CRegClass); + } else if (VT == MVT::f64) { + if (Subtarget.hasStdExtD()) + return std::make_pair(0U, &RISCV::FPR64CRegClass); + if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit()) + return std::make_pair(0U, &RISCV::GPRPairCRegClass); + if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit()) + return std::make_pair(0U, &RISCV::GPRCRegClass); + } } // Clang will correctly decode the usage of register name aliases into their diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index 83417e570dabf7..3e05f3b0180a78 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -793,6 +793,9 @@ EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL, bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd( const MachineFunction &MF, EVT VT) const { + if (useSoftFloat()) + return false; + VT = VT.getScalarType(); if (!VT.isSimple()) diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td index 6cf37836f921d4..d57450d91ea2dd 100644 --- a/llvm/lib/Target/X86/X86.td +++ b/llvm/lib/Target/X86/X86.td @@ -1528,7 +1528,6 @@ def ProcessorFeatures { list ZN3Features = !listconcat(ZN2Features, ZN3AdditionalFeatures); - list ZN4AdditionalTuning = [TuningFastDPWSSD]; list ZN4Tuning = !listconcat(ZN3Tuning, ZN4AdditionalTuning); @@ -1550,7 +1549,6 @@ def ProcessorFeatures { list ZN4Features = !listconcat(ZN3Features, ZN4AdditionalFeatures); - list ZN5Tuning = ZN4Tuning; list ZN5AdditionalFeatures = [FeatureVNNI, FeatureMOVDIRI, @@ -1561,7 +1559,6 @@ def ProcessorFeatures { ]; list ZN5Features = !listconcat(ZN4Features, ZN5AdditionalFeatures); - } //===----------------------------------------------------------------------===// @@ -1910,7 +1907,7 @@ def : ProcModel<"znver2", Znver2Model, ProcessorFeatures.ZN2Features, def : ProcModel<"znver3", Znver3Model, ProcessorFeatures.ZN3Features, ProcessorFeatures.ZN3Tuning>; def : ProcModel<"znver4", Znver4Model, ProcessorFeatures.ZN4Features, - ProcessorFeatures.ZN4Tuning>; + ProcessorFeatures.ZN4Tuning>; def : ProcModel<"znver5", Znver4Model, ProcessorFeatures.ZN5Features, ProcessorFeatures.ZN5Tuning>; diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 9d143256de1e03..bcb84add65d83e 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -34838,6 +34838,9 @@ bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const { + if (Subtarget.useSoftFloat()) + return false; + if (!Subtarget.hasAnyFMA()) return false; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp index 72ebd9fbb6d9e5..d9d41e052a32a6 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -870,8 +870,7 @@ bool InstCombinerImpl::foldAllocaCmp(AllocaInst *Alloca) { if (ICmp && ICmp->isEquality() && getUnderlyingObject(*U) == Alloca) { // Collect equality icmps of the alloca, and don't treat them as // captures. 
- auto Res = ICmps.insert({ICmp, 0}); - Res.first->second |= 1u << U->getOperandNo(); + ICmps[ICmp] |= 1u << U->getOperandNo(); return false; } diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index 06813bac7c781f..65c1669f92b4d3 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -3840,11 +3840,11 @@ static const std::optional<BitPart> & collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals, std::map<Value *, std::optional<BitPart>> &BPS, int Depth, bool &FoundRoot) { - auto I = BPS.find(V); - if (I != BPS.end()) + auto [I, Inserted] = BPS.try_emplace(V); + if (!Inserted) return I->second; - auto &Result = BPS[V] = std::nullopt; + auto &Result = I->second; auto BitWidth = V->getType()->getScalarSizeInBits(); // Can't do integer/elements > 128 bits. diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 857efbdf687cb8..0d35bfb921dc79 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -539,10 +539,10 @@ class InnerLoopVectorizer { friend class LoopVectorizationPlanner; /// Set up the values of the IVs correctly when exiting the vector loop. - void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, - Value *VectorTripCount, Value *EndValue, - BasicBlock *MiddleBlock, VPlan &Plan, - VPTransformState &State); + virtual void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, + Value *VectorTripCount, Value *EndValue, + BasicBlock *MiddleBlock, VPlan &Plan, + VPTransformState &State); /// Iteratively sink the scalarized operands of a predicated instruction into /// the block that was created for it. @@ -770,6 +770,11 @@ class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer { BasicBlock *emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue); void printDebugTracesAtStart() override; void printDebugTracesAtEnd() override; + + void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II, + Value *VectorTripCount, Value *EndValue, + BasicBlock *MiddleBlock, VPlan &Plan, + VPTransformState &State) override {}; }; // A specialized derived class of inner loop vectorizer that performs @@ -5738,14 +5743,15 @@ LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, InstructionCost LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, ElementCount VF) { - Type *ValTy = getLoadStoreType(I); - auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); - unsigned AS = getLoadStoreAddressSpace(I); - enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; - const auto *Group = getInterleavedAccessGroup(I); assert(Group && "Fail to get an interleaved access group."); + Instruction *InsertPos = Group->getInsertPos(); + Type *ValTy = getLoadStoreType(InsertPos); + auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); + unsigned AS = getLoadStoreAddressSpace(InsertPos); + enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; + unsigned InterleaveFactor = Group->getFactor(); auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); @@ -5760,8 +5766,9 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) || (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor())); InstructionCost Cost = TTI.getInterleavedMemoryOpCost( - I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), - AS, CostKind, Legal->isMaskRequired(I), UseMaskForGaps); + InsertPos->getOpcode(), 
WideVecTy, Group->getFactor(), Indices, + Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I), + UseMaskForGaps); if (Group->isReverse()) { // TODO: Add support for reversed masked interleaved access. @@ -8859,11 +8866,8 @@ static void addLiveOutsForFirstOrderRecurrences( ScalarPHVPBB = cast<VPBasicBlock>(MiddleVPBB->getSuccessors()[1]); } else if (ExitUsersToFix.empty()) { ScalarPHVPBB = cast<VPBasicBlock>(MiddleVPBB->getSingleSuccessor()); - } - if (!ScalarPHVPBB) { - assert(ExitUsersToFix.empty() && - "missed inserting extracts for exiting values"); - return; + } else { + llvm_unreachable("unsupported CFG in VPlan"); } VPBuilder ScalarPHBuilder(ScalarPHVPBB); diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index f4a1f58debbaef..41f13cc2d9a978 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -2958,11 +2958,20 @@ void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, InstructionCost VPInterleaveRecipe::computeCost(ElementCount VF, VPCostContext &Ctx) const { - Instruction *I = getInsertPos(); + Instruction *InsertPos = getInsertPos(); + // Find the VPValue index of the interleave group. We need to skip gaps. + unsigned InsertPosIdx = 0; + for (unsigned Idx = 0; IG->getFactor(); ++Idx) + if (auto *Member = IG->getMember(Idx)) { + if (Member == InsertPos) + break; + InsertPosIdx++; + } Type *ValTy = Ctx.Types.inferScalarType( - getNumDefinedValues() > 0 ? getVPValue(0) : getStoredValues()[0]); + getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx) + : getStoredValues()[InsertPosIdx]); auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); - unsigned AS = getLoadStoreAddressSpace(I); + unsigned AS = getLoadStoreAddressSpace(InsertPos); enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; unsigned InterleaveFactor = IG->getFactor(); @@ -2976,8 +2985,8 @@ InstructionCost VPInterleaveRecipe::computeCost(ElementCount VF, // Calculate the cost of the whole interleaved group.
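The InsertPosIdx loop above has to count only non-gap members, because gap slots of an interleave group define no VPValue. A standalone sketch of that indexing rule, with placeholder types rather than VPlan classes:

#include <array>
#include <cstdio>

struct Member { int id; };

// Index the insert position among the group's defined values: gap slots
// (null members) are skipped because they produce nothing to index.
int insertPosIndex(const std::array<const Member *, 4> &slots,
                   const Member *insert_pos) {
  int idx = 0;
  for (const Member *m : slots) {
    if (!m)
      continue;
    if (m == insert_pos)
      return idx;
    ++idx;
  }
  return -1; // insert_pos is always a member in the real code
}

int main() {
  Member a{0}, b{1}, c{2};
  std::array<const Member *, 4> group{&a, &b, nullptr, &c}; // gap at slot 2
  std::printf("%d\n", insertPosIndex(group, &c)); // prints: 2, not 3
}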
InstructionCost Cost = Ctx.TTI.getInterleavedMemoryOpCost( - I->getOpcode(), WideVecTy, IG->getFactor(), Indices, IG->getAlign(), AS, - CostKind, getMask(), NeedsMaskForGaps); + InsertPos->getOpcode(), WideVecTy, IG->getFactor(), Indices, + IG->getAlign(), AS, CostKind, getMask(), NeedsMaskForGaps); if (!IG->isReverse()) return Cost; diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir index 96a6f18b1d4108..16a8f808978468 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shift-immed-mismatch-crash.mir @@ -9,24 +9,27 @@ liveins: body: | ; CHECK-LABEL: name: shift_immed_chain_mismatch_size_crash ; CHECK: bb.0: - ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000) - ; CHECK: liveins: $x0 - ; CHECK: [[DEF:%[0-9]+]]:_(s1) = G_IMPLICIT_DEF - ; CHECK: [[DEF1:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9 - ; CHECK: G_BRCOND [[DEF]](s1), %bb.2 - ; CHECK: G_BR %bb.1 - ; CHECK: bb.1: - ; CHECK: successors: - ; CHECK: bb.2: - ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `ptr undef`, align 8) - ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 - ; CHECK: [[SHL:%[0-9]+]]:_(s32) = nsw G_SHL [[LOAD]], [[C1]](s32) - ; CHECK: [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[SHL]], [[C]] - ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 - ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[MUL]], [[C2]](s64) - ; CHECK: $w0 = COPY [[SHL1]](s32) - ; CHECK: RET_ReallyLR implicit $w0 + ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s1) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9 + ; CHECK-NEXT: G_BRCOND [[DEF]](s1), %bb.2 + ; CHECK-NEXT: G_BR %bb.1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `ptr undef`, align 8) + ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 + ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = nsw G_SHL [[LOAD]], [[C1]](s32) + ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[SHL]], [[C]] + ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 + ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[MUL]], [[C2]](s64) + ; CHECK-NEXT: $w0 = COPY [[SHL1]](s32) + ; CHECK-NEXT: RET_ReallyLR implicit $w0 bb.1: liveins: $x0 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shifts-undef.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shifts-undef.mir index d4dc24741527b6..236d49fc99c629 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-shifts-undef.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-shifts-undef.mir @@ -13,9 +13,8 @@ body: | ; CHECK-LABEL: name: shl_by_ge_bw ; CHECK: liveins: $w0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF - ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[DEF]](s16) - ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32) + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK-NEXT: $w0 = COPY [[DEF]](s32) ; CHECK-NEXT: RET_ReallyLR implicit $w0 %1:_(s32) = COPY $w0 %0:_(s16) = G_TRUNC %1(s32) @@ -39,9 +38,8 @@ body: | ; CHECK-LABEL: name: lshr_by_ge_bw ; CHECK: liveins: $w0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: 
[[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF - ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[DEF]](s16) - ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32) + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK-NEXT: $w0 = COPY [[DEF]](s32) ; CHECK-NEXT: RET_ReallyLR implicit $w0 %1:_(s32) = COPY $w0 %0:_(s16) = G_TRUNC %1(s32) @@ -65,9 +63,8 @@ body: | ; CHECK-LABEL: name: ashr_by_ge_bw ; CHECK: liveins: $w0 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF - ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[DEF]](s16) - ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32) + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK-NEXT: $w0 = COPY [[DEF]](s32) ; CHECK-NEXT: RET_ReallyLR implicit $w0 %1:_(s32) = COPY $w0 %0:_(s16) = G_TRUNC %1(s32) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-unmerge.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-unmerge.mir index c2c6e04d2d0ce5..7566d38e6c6cfa 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-unmerge.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-unmerge.mir @@ -54,9 +54,8 @@ body: | bb.1: ; CHECK-LABEL: name: test_combine_unmerge_build_vector ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF - ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK-NEXT: $w0 = COPY [[DEF]](s32) - ; CHECK-NEXT: $w1 = COPY [[DEF1]](s32) + ; CHECK-NEXT: $w1 = COPY [[DEF]](s32) %0:_(s32) = G_IMPLICIT_DEF %1:_(s32) = G_IMPLICIT_DEF %2:_(<2 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32) @@ -74,11 +73,9 @@ body: | bb.1: ; CHECK-LABEL: name: test_combine_unmerge_buildvector_3ops ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF - ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF - ; CHECK-NEXT: [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK-NEXT: $w0 = COPY [[DEF]](s32) - ; CHECK-NEXT: $w1 = COPY [[DEF1]](s32) - ; CHECK-NEXT: $w2 = COPY [[DEF2]](s32) + ; CHECK-NEXT: $w1 = COPY [[DEF]](s32) + ; CHECK-NEXT: $w2 = COPY [[DEF]](s32) %0:_(s32) = G_IMPLICIT_DEF %1:_(s32) = G_IMPLICIT_DEF %5:_(s32) = G_IMPLICIT_DEF @@ -434,3 +431,111 @@ body: | $w0 = COPY %1(s32) $w1 = COPY %2(s32) ... + +# Check that we unmerge the build vector on the anyext +--- +name: test_anyext_buildvector +body: | + bb.1: + ; CHECK-LABEL: name: test_anyext_buildvector + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s32) + ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY1]](s32) + ; CHECK-NEXT: %un1:_(<2 x s64>) = G_BUILD_VECTOR [[ANYEXT]](s64), [[ANYEXT1]](s64) + ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY2]](s32) + ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY3]](s32) + ; CHECK-NEXT: %un2:_(<2 x s64>) = G_BUILD_VECTOR [[ANYEXT2]](s64), [[ANYEXT3]](s64) + ; CHECK-NEXT: $q0 = COPY %un1(<2 x s64>) + ; CHECK-NEXT: $q1 = COPY %un2(<2 x s64>) + %0:_(s32) = COPY $w0 + %1:_(s32) = COPY $w0 + %2:_(s32) = COPY $w0 + %3:_(s32) = COPY $w0 + %bv:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32), %2(s32), %3(s32) + %any:_(<4 x s64>) = G_ANYEXT %bv(<4 x s32>) + %un1:_(<2 x s64>), %un2:_(<2 x s64>) = G_UNMERGE_VALUES %any(<4 x s64>) + $q0 = COPY %un1(<2 x s64>) + $q1 = COPY %un2(<2 x s64>) +... 
+ +# Check that we unmerge the build vector on the anyext and undef +--- +name: test_anyext_buildvector_undef +body: | + bb.1: + ; CHECK-LABEL: name: test_anyext_buildvector_undef + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s32) + ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY1]](s32) + ; CHECK-NEXT: %un1:_(<2 x s64>) = G_BUILD_VECTOR [[ANYEXT]](s64), [[ANYEXT1]](s64) + ; CHECK-NEXT: %un2:_(<2 x s64>) = G_IMPLICIT_DEF + ; CHECK-NEXT: $q0 = COPY %un1(<2 x s64>) + ; CHECK-NEXT: $q1 = COPY %un2(<2 x s64>) + %0:_(s32) = COPY $w0 + %1:_(s32) = COPY $w0 + %2:_(s32) = G_IMPLICIT_DEF + %3:_(s32) = G_IMPLICIT_DEF + %bv:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32), %2(s32), %3(s32) + %any:_(<4 x s64>) = G_ANYEXT %bv(<4 x s32>) + %un1:_(<2 x s64>), %un2:_(<2 x s64>) = G_UNMERGE_VALUES %any(<4 x s64>) + $q0 = COPY %un1(<2 x s64>) + $q1 = COPY %un2(<2 x s64>) +... + +# Check that we don't unmerge the build vector on the anyext, multi-use +--- +name: test_anyext_buildvector_multi +body: | + bb.1: + ; CHECK-LABEL: name: test_anyext_buildvector_multi + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK-NEXT: %bv:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[DEF]](s32), [[DEF1]](s32) + ; CHECK-NEXT: %any:_(<4 x s64>) = G_ANYEXT %bv(<4 x s32>) + ; CHECK-NEXT: %un1:_(<2 x s64>), %un2:_(<2 x s64>) = G_UNMERGE_VALUES %any(<4 x s64>) + ; CHECK-NEXT: $q0 = COPY %un1(<2 x s64>) + ; CHECK-NEXT: $q1 = COPY %un2(<2 x s64>) + ; CHECK-NEXT: $q2 = COPY %bv(<4 x s32>) + %0:_(s32) = COPY $w0 + %1:_(s32) = COPY $w0 + %2:_(s32) = G_IMPLICIT_DEF + %3:_(s32) = G_IMPLICIT_DEF + %bv:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32), %2(s32), %3(s32) + %any:_(<4 x s64>) = G_ANYEXT %bv(<4 x s32>) + %un1:_(<2 x s64>), %un2:_(<2 x s64>) = G_UNMERGE_VALUES %any(<4 x s64>) + $q0 = COPY %un1(<2 x s64>) + $q1 = COPY %un2(<2 x s64>) + $q2 = COPY %bv(<4 x s32>) +... + +# Check that we don't unmerge the build vector on the anyext into scalar +--- +name: test_anyext_buildvector_scalar +body: | + bb.1: + ; CHECK-LABEL: name: test_anyext_buildvector_scalar + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK-NEXT: %bv:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32) + ; CHECK-NEXT: %any:_(<4 x s64>) = G_ANYEXT %bv(<4 x s32>) + ; CHECK-NEXT: %un1:_(s128), %un2:_(s128) = G_UNMERGE_VALUES %any(<4 x s64>) + ; CHECK-NEXT: $q0 = COPY %un1(s128) + ; CHECK-NEXT: $q1 = COPY %un2(s128) + %0:_(s32) = COPY $w0 + %1:_(s32) = COPY $w0 + %2:_(s32) = COPY $w0 + %3:_(s32) = COPY $w0 + %bv:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32), %2(s32), %3(s32) + %any:_(<4 x s64>) = G_ANYEXT %bv(<4 x s32>) + %un1:_(s128), %un2:_(s128) = G_UNMERGE_VALUES %any(<4 x s64>) + $q0 = COPY %un1(s128) + $q1 = COPY %un2(s128) +... 
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll index 87c1307ad29556..be80886ed3efee 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shuffle-vector-widen-crash.ll @@ -9,18 +9,16 @@ define i32 @bar() { ; CHECK-LABEL: bar: ; CHECK: ; %bb.0: ; %bb ; CHECK-NEXT: movi.2d v0, #0000000000000000 -; CHECK-NEXT: mov b1, v0[1] -; CHECK-NEXT: mov b2, v0[3] -; CHECK-NEXT: mov b3, v0[2] -; CHECK-NEXT: fmov w8, s1 -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: mov.h v0[1], w8 -; CHECK-NEXT: mov.h v3[1], w9 -; CHECK-NEXT: ushll.4s v0, v0, #0 -; CHECK-NEXT: ushll.4s v1, v3, #0 -; CHECK-NEXT: mov.d v0[1], v1[0] -; CHECK-NEXT: movi.4s v1, #1 -; CHECK-NEXT: and.16b v0, v0, v1 +; CHECK-NEXT: umov.b w8, v0[0] +; CHECK-NEXT: umov.b w9, v0[1] +; CHECK-NEXT: mov.s v1[0], w8 +; CHECK-NEXT: umov.b w8, v0[2] +; CHECK-NEXT: mov.s v1[1], w9 +; CHECK-NEXT: umov.b w9, v0[3] +; CHECK-NEXT: movi.4s v0, #1 +; CHECK-NEXT: mov.s v1[2], w8 +; CHECK-NEXT: mov.s v1[3], w9 +; CHECK-NEXT: and.16b v0, v1, v0 ; CHECK-NEXT: addv.4s s0, v0 ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir index 146d1177f469a4..4d096b7231c7c4 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir @@ -652,8 +652,8 @@ # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined # DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined # DEBUG-NEXT: G_EXTRACT_SUBVECTOR (opcode {{[0-9]+}}): 2 type indices, 1 imm index -# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined -# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. the first uncovered type index: 2, OK +# DEBUG-NEXT: .. the first uncovered imm index: 1, OK # DEBUG-NEXT: G_INSERT_VECTOR_ELT (opcode {{[0-9]+}}): 3 type indices, 0 imm indices # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected # DEBUG-NEXT: .. 
imm index coverage check SKIPPED: user-defined predicate detected diff --git a/llvm/test/CodeGen/AArch64/add.ll b/llvm/test/CodeGen/AArch64/add.ll index ce7e3101a7a541..e3072dc41d933c 100644 --- a/llvm/test/CodeGen/AArch64/add.ll +++ b/llvm/test/CodeGen/AArch64/add.ll @@ -155,21 +155,23 @@ define void @v4i8(ptr %p1, ptr %p2) { ; CHECK-GI-NEXT: fmov s0, w8 ; CHECK-GI-NEXT: fmov s1, w9 ; CHECK-GI-NEXT: mov b2, v0.b[1] -; CHECK-GI-NEXT: mov v3.b[0], v0.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[1] -; CHECK-GI-NEXT: mov v5.b[0], v1.b[0] -; CHECK-GI-NEXT: mov v3.b[1], v2.b[0] -; CHECK-GI-NEXT: mov b2, v0.b[2] -; CHECK-GI-NEXT: mov b0, v0.b[3] -; CHECK-GI-NEXT: mov v5.b[1], v4.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[2] -; CHECK-GI-NEXT: mov b1, v1.b[3] -; CHECK-GI-NEXT: mov v3.b[2], v2.b[0] -; CHECK-GI-NEXT: mov v5.b[2], v4.b[0] -; CHECK-GI-NEXT: mov v3.b[3], v0.b[0] -; CHECK-GI-NEXT: mov v5.b[3], v1.b[0] -; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0 +; CHECK-GI-NEXT: mov b3, v1.b[1] +; CHECK-GI-NEXT: mov b4, v0.b[2] +; CHECK-GI-NEXT: mov b5, v0.b[3] +; CHECK-GI-NEXT: fmov w8, s2 +; CHECK-GI-NEXT: mov b2, v1.b[2] +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov b3, v1.b[3] +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: mov v1.h[1], w9 +; CHECK-GI-NEXT: fmov w8, s4 +; CHECK-GI-NEXT: fmov w9, s2 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: mov v1.h[2], w9 +; CHECK-GI-NEXT: fmov w8, s5 +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov v0.h[3], w8 +; CHECK-GI-NEXT: mov v1.h[3], w9 ; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h ; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b ; CHECK-GI-NEXT: fmov w8, s0 @@ -238,14 +240,12 @@ define void @v2i16(ptr %p1, ptr %p2) { ; ; CHECK-GI-LABEL: v2i16: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr h0, [x0] -; CHECK-GI-NEXT: ldr h1, [x1] -; CHECK-GI-NEXT: add x8, x0, #2 -; CHECK-GI-NEXT: add x9, x1, #2 -; CHECK-GI-NEXT: ld1 { v0.h }[1], [x8] -; CHECK-GI-NEXT: ld1 { v1.h }[1], [x9] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: ld1 { v0.h }[0], [x0] +; CHECK-GI-NEXT: ld1 { v1.h }[0], [x1] +; CHECK-GI-NEXT: ldr h2, [x0, #2] +; CHECK-GI-NEXT: ldr h3, [x1, #2] +; CHECK-GI-NEXT: mov v0.s[1], v2.s[0] +; CHECK-GI-NEXT: mov v1.s[1], v3.s[0] ; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s ; CHECK-GI-NEXT: mov s1, v0.s[1] ; CHECK-GI-NEXT: str h0, [x0] diff --git a/llvm/test/CodeGen/AArch64/andorxor.ll b/llvm/test/CodeGen/AArch64/andorxor.ll index 459daece90deed..5c7429aebb31e9 100644 --- a/llvm/test/CodeGen/AArch64/andorxor.ll +++ b/llvm/test/CodeGen/AArch64/andorxor.ll @@ -447,21 +447,23 @@ define void @and_v4i8(ptr %p1, ptr %p2) { ; CHECK-GI-NEXT: fmov s0, w8 ; CHECK-GI-NEXT: fmov s1, w9 ; CHECK-GI-NEXT: mov b2, v0.b[1] -; CHECK-GI-NEXT: mov v3.b[0], v0.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[1] -; CHECK-GI-NEXT: mov v5.b[0], v1.b[0] -; CHECK-GI-NEXT: mov v3.b[1], v2.b[0] -; CHECK-GI-NEXT: mov b2, v0.b[2] -; CHECK-GI-NEXT: mov b0, v0.b[3] -; CHECK-GI-NEXT: mov v5.b[1], v4.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[2] -; CHECK-GI-NEXT: mov b1, v1.b[3] -; CHECK-GI-NEXT: mov v3.b[2], v2.b[0] -; CHECK-GI-NEXT: mov v5.b[2], v4.b[0] -; CHECK-GI-NEXT: mov v3.b[3], v0.b[0] -; CHECK-GI-NEXT: mov v5.b[3], v1.b[0] -; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0 +; CHECK-GI-NEXT: mov b3, v1.b[1] +; CHECK-GI-NEXT: mov b4, v0.b[2] +; CHECK-GI-NEXT: mov b5, v0.b[3] +; CHECK-GI-NEXT: fmov w8, s2 +; CHECK-GI-NEXT: mov b2, v1.b[2] +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov 
b3, v1.b[3] +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: mov v1.h[1], w9 +; CHECK-GI-NEXT: fmov w8, s4 +; CHECK-GI-NEXT: fmov w9, s2 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: mov v1.h[2], w9 +; CHECK-GI-NEXT: fmov w8, s5 +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov v0.h[3], w8 +; CHECK-GI-NEXT: mov v1.h[3], w9 ; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b ; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b ; CHECK-GI-NEXT: fmov w8, s0 @@ -494,21 +496,23 @@ define void @or_v4i8(ptr %p1, ptr %p2) { ; CHECK-GI-NEXT: fmov s0, w8 ; CHECK-GI-NEXT: fmov s1, w9 ; CHECK-GI-NEXT: mov b2, v0.b[1] -; CHECK-GI-NEXT: mov v3.b[0], v0.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[1] -; CHECK-GI-NEXT: mov v5.b[0], v1.b[0] -; CHECK-GI-NEXT: mov v3.b[1], v2.b[0] -; CHECK-GI-NEXT: mov b2, v0.b[2] -; CHECK-GI-NEXT: mov b0, v0.b[3] -; CHECK-GI-NEXT: mov v5.b[1], v4.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[2] -; CHECK-GI-NEXT: mov b1, v1.b[3] -; CHECK-GI-NEXT: mov v3.b[2], v2.b[0] -; CHECK-GI-NEXT: mov v5.b[2], v4.b[0] -; CHECK-GI-NEXT: mov v3.b[3], v0.b[0] -; CHECK-GI-NEXT: mov v5.b[3], v1.b[0] -; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0 +; CHECK-GI-NEXT: mov b3, v1.b[1] +; CHECK-GI-NEXT: mov b4, v0.b[2] +; CHECK-GI-NEXT: mov b5, v0.b[3] +; CHECK-GI-NEXT: fmov w8, s2 +; CHECK-GI-NEXT: mov b2, v1.b[2] +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov b3, v1.b[3] +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: mov v1.h[1], w9 +; CHECK-GI-NEXT: fmov w8, s4 +; CHECK-GI-NEXT: fmov w9, s2 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: mov v1.h[2], w9 +; CHECK-GI-NEXT: fmov w8, s5 +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov v0.h[3], w8 +; CHECK-GI-NEXT: mov v1.h[3], w9 ; CHECK-GI-NEXT: orr v0.8b, v0.8b, v1.8b ; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b ; CHECK-GI-NEXT: fmov w8, s0 @@ -541,21 +545,23 @@ define void @xor_v4i8(ptr %p1, ptr %p2) { ; CHECK-GI-NEXT: fmov s0, w8 ; CHECK-GI-NEXT: fmov s1, w9 ; CHECK-GI-NEXT: mov b2, v0.b[1] -; CHECK-GI-NEXT: mov v3.b[0], v0.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[1] -; CHECK-GI-NEXT: mov v5.b[0], v1.b[0] -; CHECK-GI-NEXT: mov v3.b[1], v2.b[0] -; CHECK-GI-NEXT: mov b2, v0.b[2] -; CHECK-GI-NEXT: mov b0, v0.b[3] -; CHECK-GI-NEXT: mov v5.b[1], v4.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[2] -; CHECK-GI-NEXT: mov b1, v1.b[3] -; CHECK-GI-NEXT: mov v3.b[2], v2.b[0] -; CHECK-GI-NEXT: mov v5.b[2], v4.b[0] -; CHECK-GI-NEXT: mov v3.b[3], v0.b[0] -; CHECK-GI-NEXT: mov v5.b[3], v1.b[0] -; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0 +; CHECK-GI-NEXT: mov b3, v1.b[1] +; CHECK-GI-NEXT: mov b4, v0.b[2] +; CHECK-GI-NEXT: mov b5, v0.b[3] +; CHECK-GI-NEXT: fmov w8, s2 +; CHECK-GI-NEXT: mov b2, v1.b[2] +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov b3, v1.b[3] +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: mov v1.h[1], w9 +; CHECK-GI-NEXT: fmov w8, s4 +; CHECK-GI-NEXT: fmov w9, s2 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: mov v1.h[2], w9 +; CHECK-GI-NEXT: fmov w8, s5 +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov v0.h[3], w8 +; CHECK-GI-NEXT: mov v1.h[3], w9 ; CHECK-GI-NEXT: eor v0.8b, v0.8b, v1.8b ; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b ; CHECK-GI-NEXT: fmov w8, s0 @@ -698,14 +704,12 @@ define void @and_v2i16(ptr %p1, ptr %p2) { ; ; CHECK-GI-LABEL: and_v2i16: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr h0, [x0] -; CHECK-GI-NEXT: ldr h1, [x1] -; CHECK-GI-NEXT: add x8, x0, #2 -; CHECK-GI-NEXT: add x9, x1, #2 -; CHECK-GI-NEXT: ld1 { v0.h }[1], [x8] -; CHECK-GI-NEXT: ld1 { v1.h }[1], [x9] 
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: ld1 { v0.h }[0], [x0] +; CHECK-GI-NEXT: ld1 { v1.h }[0], [x1] +; CHECK-GI-NEXT: ldr h2, [x0, #2] +; CHECK-GI-NEXT: ldr h3, [x1, #2] +; CHECK-GI-NEXT: mov v0.s[1], v2.s[0] +; CHECK-GI-NEXT: mov v1.s[1], v3.s[0] ; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b ; CHECK-GI-NEXT: mov s1, v0.s[1] ; CHECK-GI-NEXT: str h0, [x0] @@ -737,14 +741,12 @@ define void @or_v2i16(ptr %p1, ptr %p2) { ; ; CHECK-GI-LABEL: or_v2i16: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr h0, [x0] -; CHECK-GI-NEXT: ldr h1, [x1] -; CHECK-GI-NEXT: add x8, x0, #2 -; CHECK-GI-NEXT: add x9, x1, #2 -; CHECK-GI-NEXT: ld1 { v0.h }[1], [x8] -; CHECK-GI-NEXT: ld1 { v1.h }[1], [x9] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: ld1 { v0.h }[0], [x0] +; CHECK-GI-NEXT: ld1 { v1.h }[0], [x1] +; CHECK-GI-NEXT: ldr h2, [x0, #2] +; CHECK-GI-NEXT: ldr h3, [x1, #2] +; CHECK-GI-NEXT: mov v0.s[1], v2.s[0] +; CHECK-GI-NEXT: mov v1.s[1], v3.s[0] ; CHECK-GI-NEXT: orr v0.8b, v0.8b, v1.8b ; CHECK-GI-NEXT: mov s1, v0.s[1] ; CHECK-GI-NEXT: str h0, [x0] @@ -776,14 +778,12 @@ define void @xor_v2i16(ptr %p1, ptr %p2) { ; ; CHECK-GI-LABEL: xor_v2i16: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr h0, [x0] -; CHECK-GI-NEXT: ldr h1, [x1] -; CHECK-GI-NEXT: add x8, x0, #2 -; CHECK-GI-NEXT: add x9, x1, #2 -; CHECK-GI-NEXT: ld1 { v0.h }[1], [x8] -; CHECK-GI-NEXT: ld1 { v1.h }[1], [x9] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: ld1 { v0.h }[0], [x0] +; CHECK-GI-NEXT: ld1 { v1.h }[0], [x1] +; CHECK-GI-NEXT: ldr h2, [x0, #2] +; CHECK-GI-NEXT: ldr h3, [x1, #2] +; CHECK-GI-NEXT: mov v0.s[1], v2.s[0] +; CHECK-GI-NEXT: mov v1.s[1], v3.s[0] ; CHECK-GI-NEXT: eor v0.8b, v0.8b, v1.8b ; CHECK-GI-NEXT: mov s1, v0.s[1] ; CHECK-GI-NEXT: str h0, [x0] diff --git a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll index 8611532d6ea924..7a4cdd52db904a 100644 --- a/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll +++ b/llvm/test/CodeGen/AArch64/arm64-extract-insert-varidx.ll @@ -29,21 +29,23 @@ define <4 x i8> @test_varidx_extract_v8s8(<8 x i8> %x, i32 %idx) { ; CHECK-GISEL-NEXT: .cfi_def_cfa_offset 16 ; CHECK-GISEL-NEXT: mov w9, w0 ; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GISEL-NEXT: mov b1, v0.b[1] ; CHECK-GISEL-NEXT: add x8, sp, #8 -; CHECK-GISEL-NEXT: str d0, [sp, #8] ; CHECK-GISEL-NEXT: and x9, x9, #0x7 -; CHECK-GISEL-NEXT: mov b2, v0.b[1] -; CHECK-GISEL-NEXT: mov b3, v0.b[2] +; CHECK-GISEL-NEXT: str d0, [sp, #8] +; CHECK-GISEL-NEXT: mov b2, v0.b[2] ; CHECK-GISEL-NEXT: lsl x10, x9, #1 ; CHECK-GISEL-NEXT: mov b0, v0.b[3] ; CHECK-GISEL-NEXT: sub x9, x10, x9 -; CHECK-GISEL-NEXT: ldr b1, [x8, x9] -; CHECK-GISEL-NEXT: mov v1.b[0], v1.b[0] -; CHECK-GISEL-NEXT: mov v1.b[1], v2.b[0] -; CHECK-GISEL-NEXT: mov v1.b[2], v3.b[0] -; CHECK-GISEL-NEXT: mov v1.b[3], v0.b[0] -; CHECK-GISEL-NEXT: ushll v0.8h, v1.8b, #0 -; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GISEL-NEXT: ldrb w8, [x8, x9] +; CHECK-GISEL-NEXT: fmov w9, s1 +; CHECK-GISEL-NEXT: fmov s1, w8 +; CHECK-GISEL-NEXT: fmov w8, s2 +; CHECK-GISEL-NEXT: mov v1.h[1], w9 +; CHECK-GISEL-NEXT: mov v1.h[2], w8 +; CHECK-GISEL-NEXT: fmov w8, s0 +; CHECK-GISEL-NEXT: mov v1.h[3], w8 +; CHECK-GISEL-NEXT: fmov d0, d1 ; CHECK-GISEL-NEXT: add sp, sp, #16 ; CHECK-GISEL-NEXT: ret %tmp = extractelement <8 x i8> %x, 
i32 %idx @@ -179,13 +181,15 @@ define <2 x i16> @test_varidx_extract_v4s16(<4 x i16> %x, i32 %idx) { ; CHECK-GISEL-NEXT: sub sp, sp, #16 ; CHECK-GISEL-NEXT: .cfi_def_cfa_offset 16 ; CHECK-GISEL-NEXT: mov w9, w0 +; CHECK-GISEL-NEXT: mov w8, #2 // =0x2 +; CHECK-GISEL-NEXT: add x10, sp, #8 +; CHECK-GISEL-NEXT: and x9, x9, #0x3 ; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-GISEL-NEXT: add x8, sp, #8 ; CHECK-GISEL-NEXT: str d0, [sp, #8] -; CHECK-GISEL-NEXT: and x9, x9, #0x3 -; CHECK-GISEL-NEXT: ldr h1, [x8, x9, lsl #1] -; CHECK-GISEL-NEXT: mov v1.h[1], v0.h[1] -; CHECK-GISEL-NEXT: ushll v0.4s, v1.4h, #0 +; CHECK-GISEL-NEXT: madd x8, x9, x8, x10 +; CHECK-GISEL-NEXT: umov w9, v0.h[1] +; CHECK-GISEL-NEXT: ld1 { v0.h }[0], [x8] +; CHECK-GISEL-NEXT: mov v0.s[1], w9 ; CHECK-GISEL-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GISEL-NEXT: add sp, sp, #16 ; CHECK-GISEL-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/bitcast.ll b/llvm/test/CodeGen/AArch64/bitcast.ll index bbdf8b0a13d358..39f2572d9fd354 100644 --- a/llvm/test/CodeGen/AArch64/bitcast.ll +++ b/llvm/test/CodeGen/AArch64/bitcast.ll @@ -81,13 +81,14 @@ define <4 x i8> @bitcast_i32_v4i8(i32 %a, i32 %b){ ; CHECK-GI-NEXT: add w8, w0, w1 ; CHECK-GI-NEXT: fmov s0, w8 ; CHECK-GI-NEXT: mov b1, v0.b[1] -; CHECK-GI-NEXT: mov v2.b[0], v0.b[0] -; CHECK-GI-NEXT: mov b3, v0.b[2] -; CHECK-GI-NEXT: mov b0, v0.b[3] -; CHECK-GI-NEXT: mov v2.b[1], v1.b[0] -; CHECK-GI-NEXT: mov v2.b[2], v3.b[0] -; CHECK-GI-NEXT: mov v2.b[3], v0.b[0] -; CHECK-GI-NEXT: ushll v0.8h, v2.8b, #0 +; CHECK-GI-NEXT: mov b2, v0.b[2] +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: mov b1, v0.b[3] +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: fmov w8, s2 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: mov v0.h[3], w8 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: ret %c = add i32 %a, %b @@ -134,8 +135,9 @@ define <2 x i16> @bitcast_i32_v2i16(i32 %a, i32 %b){ ; CHECK-GI-NEXT: add w8, w0, w1 ; CHECK-GI-NEXT: fmov s0, w8 ; CHECK-GI-NEXT: mov h1, v0.h[1] -; CHECK-GI-NEXT: mov v0.h[1], v1.h[0] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: mov v0.s[0], w8 +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: mov v0.s[1], w8 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: ret %c = add i32 %a, %b @@ -414,13 +416,14 @@ define <4 x i8> @bitcast_v2i16_v4i8(<2 x i16> %a, <2 x i16> %b){ ; CHECK-GI-NEXT: add v0.2s, v0.2s, v1.2s ; CHECK-GI-NEXT: uzp1 v0.4h, v0.4h, v0.4h ; CHECK-GI-NEXT: mov b1, v0.b[1] -; CHECK-GI-NEXT: mov v2.b[0], v0.b[0] -; CHECK-GI-NEXT: mov b3, v0.b[2] -; CHECK-GI-NEXT: mov b0, v0.b[3] -; CHECK-GI-NEXT: mov v2.b[1], v1.b[0] -; CHECK-GI-NEXT: mov v2.b[2], v3.b[0] -; CHECK-GI-NEXT: mov v2.b[3], v0.b[0] -; CHECK-GI-NEXT: ushll v0.8h, v2.8b, #0 +; CHECK-GI-NEXT: mov b2, v0.b[2] +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: mov b1, v0.b[3] +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: fmov w8, s2 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: fmov w8, s1 +; CHECK-GI-NEXT: mov v0.h[3], w8 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: ret %c = add <2 x i16> %a, %b @@ -449,8 +452,10 @@ define <2 x i16> @bitcast_v4i8_v2i16(<4 x i8> %a, <4 x i8> %b){ ; CHECK-GI-NEXT: add v0.4h, v0.4h, v1.4h ; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b ; CHECK-GI-NEXT: mov h1, v0.h[1] -; CHECK-GI-NEXT: mov v0.h[1], v1.h[0] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: fmov w8, s0 +; CHECK-GI-NEXT: mov v0.s[0], w8 +; CHECK-GI-NEXT: fmov w8, 
s1 +; CHECK-GI-NEXT: mov v0.s[1], w8 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: ret %c = add <4 x i8> %a, %b diff --git a/llvm/test/CodeGen/AArch64/concat-vector.ll b/llvm/test/CodeGen/AArch64/concat-vector.ll index 0033999b9bd51d..41b336bc3e8c0e 100644 --- a/llvm/test/CodeGen/AArch64/concat-vector.ll +++ b/llvm/test/CodeGen/AArch64/concat-vector.ll @@ -14,11 +14,10 @@ define <4 x i8> @concat1(<2 x i8> %A, <2 x i8> %B) { ; CHECK-GI-NEXT: mov w8, v0.s[1] ; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1 ; CHECK-GI-NEXT: mov w9, v1.s[1] -; CHECK-GI-NEXT: mov v0.b[1], w8 +; CHECK-GI-NEXT: mov v0.h[1], w8 ; CHECK-GI-NEXT: fmov w8, s1 -; CHECK-GI-NEXT: mov v0.b[2], w8 -; CHECK-GI-NEXT: mov v0.b[3], w9 -; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: mov v0.h[3], w9 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: ret %v4i8 = shufflevector <2 x i8> %A, <2 x i8> %B, <4 x i32> diff --git a/llvm/test/CodeGen/AArch64/extract-subvec-combine.ll b/llvm/test/CodeGen/AArch64/extract-subvec-combine.ll index 307974c012a9e4..43c6e01911462a 100644 --- a/llvm/test/CodeGen/AArch64/extract-subvec-combine.ll +++ b/llvm/test/CodeGen/AArch64/extract-subvec-combine.ll @@ -1,12 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK +; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI define <2 x i32> @and_extract_zext_idx0(<4 x i16> %vec) nounwind { -; CHECK-LABEL: and_extract_zext_idx0: -; CHECK: // %bb.0: -; CHECK-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: and_extract_zext_idx0: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: and_extract_zext_idx0: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi d1, #0x00ffff0000ffff +; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-GI-NEXT: ret %zext = zext <4 x i16> %vec to <4 x i32> %extract = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %zext, i64 0) %and = and <2 x i32> %extract, @@ -14,11 +22,18 @@ define <2 x i32> @and_extract_zext_idx0(<4 x i16> %vec) nounwind { } define <4 x i16> @and_extract_sext_idx0(<8 x i8> %vec) nounwind { -; CHECK-LABEL: and_extract_sext_idx0: -; CHECK: // %bb.0: -; CHECK-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: and_extract_sext_idx0: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: and_extract_sext_idx0: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: movi d1, #0xff00ff00ff00ff +; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-GI-NEXT: ret %sext = sext <8 x i8> %vec to <8 x i16> %extract = call <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16> %sext, i64 0) %and = and <4 x i16> %extract, @@ -26,12 +41,20 @@ define <4 x i16> @and_extract_sext_idx0(<8 x i8> %vec) nounwind { } define <2 x i32> @and_extract_zext_idx2(<4 x i16> %vec) nounwind { -; CHECK-LABEL: and_extract_zext_idx2: -; 
CHECK: // %bb.0: -; CHECK-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: and_extract_zext_idx2: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: and_extract_zext_idx2: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: movi d1, #0x00ffff0000ffff +; CHECK-GI-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-GI-NEXT: ret %zext = zext <4 x i16> %vec to <4 x i32> %extract = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %zext, i64 2) %and = and <2 x i32> %extract, @@ -39,12 +62,20 @@ define <2 x i32> @and_extract_zext_idx2(<4 x i16> %vec) nounwind { } define <4 x i16> @and_extract_sext_idx4(<8 x i8> %vec) nounwind { -; CHECK-LABEL: and_extract_sext_idx4: -; CHECK: // %bb.0: -; CHECK-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: and_extract_sext_idx4: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: and_extract_sext_idx4: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-GI-NEXT: movi d1, #0xff00ff00ff00ff +; CHECK-GI-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-GI-NEXT: ret %sext = sext <8 x i8> %vec to <8 x i16> %extract = call <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16> %sext, i64 4) %and = and <4 x i16> %extract, @@ -52,11 +83,18 @@ define <4 x i16> @and_extract_sext_idx4(<8 x i8> %vec) nounwind { } define <2 x i32> @sext_extract_zext_idx0(<4 x i16> %vec) nounwind { -; CHECK-LABEL: sext_extract_zext_idx0: -; CHECK: // %bb.0: -; CHECK-NEXT: sshll v0.4s, v0.4h, #0 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sext_extract_zext_idx0: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sext_extract_zext_idx0: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: shl v0.2s, v0.2s, #16 +; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #16 +; CHECK-GI-NEXT: ret %zext = zext <4 x i16> %vec to <4 x i32> %extract = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %zext, i64 0) %sext_inreg_step0 = shl <2 x i32> %extract, @@ -80,11 +118,18 @@ define <2 x i32> @sext_extract_zext_idx0_negtest(<4 x i16> %vec) nounwind { } define <4 x i16> @sext_extract_sext_idx0(<8 x i8> %vec) nounwind { -; CHECK-LABEL: sext_extract_sext_idx0: -; CHECK: // %bb.0: -; CHECK-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sext_extract_sext_idx0: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sext_extract_sext_idx0: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8 +; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8 +; CHECK-GI-NEXT: ret %sext = sext <8 x i8> %vec to <8 x i16> %extract = call <4 x i16> 
@llvm.vector.extract.v4i16.v8i16(<8 x i16> %sext, i64 0) %sext_inreg_step0 = shl <4 x i16> %extract, @@ -93,12 +138,20 @@ define <4 x i16> @sext_extract_sext_idx0(<8 x i8> %vec) nounwind { } define <2 x i32> @sext_extract_zext_idx2(<4 x i16> %vec) nounwind { -; CHECK-LABEL: sext_extract_zext_idx2: -; CHECK: // %bb.0: -; CHECK-NEXT: sshll v0.4s, v0.4h, #0 -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sext_extract_zext_idx2: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sext_extract_zext_idx2: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-GI-NEXT: shl v0.2s, v0.2s, #16 +; CHECK-GI-NEXT: sshr v0.2s, v0.2s, #16 +; CHECK-GI-NEXT: ret %zext = zext <4 x i16> %vec to <4 x i32> %extract = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %zext, i64 2) %sext_inreg_step0 = shl <2 x i32> %extract, @@ -107,12 +160,20 @@ define <2 x i32> @sext_extract_zext_idx2(<4 x i16> %vec) nounwind { } define <4 x i16> @sext_extract_sext_idx4(<8 x i8> %vec) nounwind { -; CHECK-LABEL: sext_extract_sext_idx4: -; CHECK: // %bb.0: -; CHECK-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: sext_extract_sext_idx4: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: sext_extract_sext_idx4: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-GI-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8 +; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8 +; CHECK-GI-NEXT: ret %sext = sext <8 x i8> %vec to <8 x i16> %extract = call <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16> %sext, i64 4) %sext_inreg_step0 = shl <4 x i16> %extract, @@ -120,5 +181,15 @@ define <4 x i16> @sext_extract_sext_idx4(<8 x i8> %vec) nounwind { ret <4 x i16> %sext_inreg } +define <8 x i8> @sext_extract_idx(<16 x i8> %vec) nounwind { +; CHECK-LABEL: sext_extract_idx: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: ret + %extract = call <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8> %vec, i64 0) + ret <8 x i8> %extract +} + declare <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32>, i64) declare <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16>, i64) +declare <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8>, i64) diff --git a/llvm/test/CodeGen/AArch64/fptoi.ll b/llvm/test/CodeGen/AArch64/fptoi.ll index c436c410a4e397..9c4f0207b84ce8 100644 --- a/llvm/test/CodeGen/AArch64/fptoi.ll +++ b/llvm/test/CodeGen/AArch64/fptoi.ll @@ -7616,10 +7616,9 @@ define <2 x i16> @fptos_v2f128_v2i16(<2 x fp128> %a) { ; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: bl __fixtfsi -; CHECK-GI-NEXT: fmov s0, w19 +; CHECK-GI-NEXT: mov v0.s[0], w19 ; CHECK-GI-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v0.h[1], w0 -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: mov v0.s[1], w0 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: add sp, sp, #32 ; CHECK-GI-NEXT: ret @@ 
-7660,10 +7659,9 @@ define <2 x i16> @fptou_v2f128_v2i16(<2 x fp128> %a) { ; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: bl __fixunstfsi -; CHECK-GI-NEXT: fmov s0, w19 +; CHECK-GI-NEXT: mov v0.s[0], w19 ; CHECK-GI-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v0.h[1], w0 -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: mov v0.s[1], w0 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: add sp, sp, #32 ; CHECK-GI-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll index 9c52b024d3e259..17c87a5dae4199 100644 --- a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll +++ b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll @@ -977,51 +977,46 @@ define i32 @test_signed_f128_i32(fp128 %f) { ; ; CHECK-GI-LABEL: test_signed_f128_i32: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #64 -; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: sub sp, sp, #48 +; CHECK-GI-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 48 ; CHECK-GI-NEXT: .cfi_offset w19, -8 -; CHECK-GI-NEXT: .cfi_offset w30, -16 -; CHECK-GI-NEXT: .cfi_offset b8, -24 -; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w30, -32 ; CHECK-GI-NEXT: adrp x8, .LCPI30_1 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI30_1] -; CHECK-GI-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q3, q2, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: mov d1, v3.d[1] -; CHECK-GI-NEXT: fcsel d8, d2, d3, lt -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: fcsel d9, d0, d1, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d9 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: mov x9, #-4603241769126068224 // =0xc01e000000000000 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, x9, lt ; CHECK-GI-NEXT: adrp x8, .LCPI30_0 +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI30_0] -; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v1.d[1] -; CHECK-GI-NEXT: fcsel d1, d8, d1, gt -; CHECK-GI-NEXT: fmov x8, d1 -; CHECK-GI-NEXT: fcsel d2, d9, d0, gt +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d2 +; CHECK-GI-NEXT: mov x8, #281474976448512 // =0xfffffffc0000 +; CHECK-GI-NEXT: movk x8, #16413, lsl #48 +; CHECK-GI-NEXT: csel x8, x20, x8, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi -; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #16] // 8-byte 
Folded Reload ; CHECK-GI-NEXT: csel w0, wzr, w19, ne -; CHECK-GI-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: add sp, sp, #48 ; CHECK-GI-NEXT: ret %x = call i32 @llvm.fptosi.sat.i32.f128(fp128 %f) ret i32 %x diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll index 29a9082173ea51..9ef6d61c350ecf 100644 --- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll +++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll @@ -525,53 +525,48 @@ define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) { ; ; CHECK-GI-LABEL: test_signed_v1f128_v1i32: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #64 -; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: sub sp, sp, #48 +; CHECK-GI-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 48 ; CHECK-GI-NEXT: .cfi_offset w19, -8 -; CHECK-GI-NEXT: .cfi_offset w30, -16 -; CHECK-GI-NEXT: .cfi_offset b8, -24 -; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w30, -32 ; CHECK-GI-NEXT: adrp x8, .LCPI14_1 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_1] -; CHECK-GI-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q3, q2, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: mov d1, v3.d[1] -; CHECK-GI-NEXT: fcsel d8, d2, d3, lt -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: fcsel d9, d0, d1, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d9 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: mov x9, #-4603241769126068224 // =0xc01e000000000000 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, x9, lt ; CHECK-GI-NEXT: adrp x8, .LCPI14_0 +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] -; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v1.d[1] -; CHECK-GI-NEXT: fcsel d1, d8, d1, gt -; CHECK-GI-NEXT: fmov x8, d1 -; CHECK-GI-NEXT: fcsel d2, d9, d0, gt +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d2 +; CHECK-GI-NEXT: mov x8, #281474976448512 // =0xfffffffc0000 +; CHECK-GI-NEXT: movk x8, #16413, lsl #48 +; CHECK-GI-NEXT: csel x8, x20, x8, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi -; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-GI-NEXT: csel w8, wzr, w19, ne -; CHECK-GI-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload ; 
CHECK-GI-NEXT: mov v0.s[0], w8 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: add sp, sp, #48 ; CHECK-GI-NEXT: ret %x = call <1 x i32> @llvm.fptosi.sat.v1f128.v1i32(<1 x fp128> %f) ret <1 x i32> %x @@ -645,92 +640,82 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) { ; ; CHECK-GI-LABEL: test_signed_v2f128_v2i32: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #128 -; CHECK-GI-NEXT: stp d11, d10, [sp, #64] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill -; CHECK-GI-NEXT: str x30, [sp, #96] // 8-byte Folded Spill -; CHECK-GI-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 128 +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: str x30, [sp, #64] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 ; CHECK-GI-NEXT: .cfi_offset w19, -8 ; CHECK-GI-NEXT: .cfi_offset w20, -16 -; CHECK-GI-NEXT: .cfi_offset w30, -32 -; CHECK-GI-NEXT: .cfi_offset b8, -40 -; CHECK-GI-NEXT: .cfi_offset b9, -48 -; CHECK-GI-NEXT: .cfi_offset b10, -56 -; CHECK-GI-NEXT: .cfi_offset b11, -64 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w30, -48 ; CHECK-GI-NEXT: adrp x8, .LCPI15_1 -; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill ; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI15_1] -; CHECK-GI-NEXT: stp q2, q1, [sp, #32] // 32-byte Folded Spill +; CHECK-GI-NEXT: str q0, [sp, #32] // 16-byte Folded Spill ; CHECK-GI-NEXT: mov v1.16b, v2.16b +; CHECK-GI-NEXT: str q2, [sp, #16] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: mov d8, v1.d[1] -; CHECK-GI-NEXT: fcsel d9, d2, d1, lt -; CHECK-GI-NEXT: fmov x8, d9 -; CHECK-GI-NEXT: fcsel d10, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: mov x20, #-4603241769126068224 // =0xc01e000000000000 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x21, x8, x20, lt ; CHECK-GI-NEXT: adrp x8, .LCPI15_0 +; CHECK-GI-NEXT: mov v0.d[1], x21 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] -; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d11, v0.d[1] -; CHECK-GI-NEXT: fcsel d0, d9, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 -; CHECK-GI-NEXT: fcsel d1, d10, d11, gt +; CHECK-GI-NEXT: mov x22, #281474976448512 // =0xfffffffc0000 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt +; CHECK-GI-NEXT: movk x22, #16413, lsl #48 ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x21, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi -; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov 
v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 -; CHECK-GI-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: csel w20, wzr, w19, ne +; CHECK-GI-NEXT: csel w21, wzr, w19, ne ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q2, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v1.d[1] -; CHECK-GI-NEXT: fcsel d9, d1, d2, lt -; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: fmov x8, d9 -; CHECK-GI-NEXT: fcsel d8, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, x20, lt +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d8, d11, gt -; CHECK-GI-NEXT: fcsel d0, d9, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x20, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi ; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 -; CHECK-GI-NEXT: mov v0.s[0], w20 +; CHECK-GI-NEXT: mov v0.s[0], w21 ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload ; CHECK-GI-NEXT: csel w8, wzr, w19, ne -; CHECK-GI-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d11, d10, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[1], w8 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-GI-NEXT: add sp, sp, #128 +; CHECK-GI-NEXT: add sp, sp, #112 ; CHECK-GI-NEXT: ret %x = call <2 x i32> @llvm.fptosi.sat.v2f128.v2i32(<2 x fp128> %f) ret <2 x i32> %x @@ -825,124 +810,107 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) { ; ; CHECK-GI-LABEL: test_signed_v3f128_v3i32: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #144 -; CHECK-GI-NEXT: stp d11, d10, [sp, #80] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d9, d8, [sp, #96] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x30, x21, [sp, #112] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 144 +; CHECK-GI-NEXT: sub sp, sp, #128 +; CHECK-GI-NEXT: stp x30, x23, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 128 ; CHECK-GI-NEXT: .cfi_offset w19, -8 ; CHECK-GI-NEXT: .cfi_offset w20, -16 ; CHECK-GI-NEXT: .cfi_offset w21, -24 -; CHECK-GI-NEXT: .cfi_offset w30, -32 -; CHECK-GI-NEXT: .cfi_offset b8, -40 -; CHECK-GI-NEXT: .cfi_offset b9, -48 -; 
CHECK-GI-NEXT: .cfi_offset b10, -56 -; CHECK-GI-NEXT: .cfi_offset b11, -64 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w30, -48 ; CHECK-GI-NEXT: adrp x8, .LCPI16_1 -; CHECK-GI-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_1] -; CHECK-GI-NEXT: str q2, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: str q1, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q2, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #32] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: mov d8, v1.d[1] -; CHECK-GI-NEXT: fcsel d10, d2, d1, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: mov x20, #-4603241769126068224 // =0xc01e000000000000 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x21, x8, x20, lt ; CHECK-GI-NEXT: adrp x8, .LCPI16_0 +; CHECK-GI-NEXT: mov v0.d[1], x21 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] -; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d9, v0.d[1] -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: mov x22, #281474976448512 // =0xfffffffc0000 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt +; CHECK-GI-NEXT: movk x22, #16413, lsl #48 ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x21, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi ; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 -; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: csel w20, wzr, w19, ne +; CHECK-GI-NEXT: csel w21, wzr, w19, ne ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q2, [sp, #64] // 16-byte Folded Reload -; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v1.d[1] -; CHECK-GI-NEXT: fcsel d10, d1, d2, lt -; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 -; CHECK-GI-NEXT: bl __gttf2 ; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; 
CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x23, x8, x20, lt +; CHECK-GI-NEXT: mov v0.d[1], x23 +; CHECK-GI-NEXT: bl __gttf2 +; CHECK-GI-NEXT: cmp w0, #0 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x23, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi -; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: csel w21, wzr, w19, ne +; CHECK-GI-NEXT: csel w23, wzr, w19, ne ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q3, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp, #64] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v3.d[1] -; CHECK-GI-NEXT: fcsel d10, d3, d2, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d8, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, x20, lt +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d8, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x20, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 -; CHECK-GI-NEXT: mov v0.s[0], w20 +; CHECK-GI-NEXT: mov v0.s[0], w21 ; CHECK-GI-NEXT: cmp w0, #0 ; CHECK-GI-NEXT: csel w8, wzr, w19, ne -; CHECK-GI-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d9, d8, [sp, #96] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d11, d10, [sp, #80] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v0.s[1], w21 -; CHECK-GI-NEXT: ldp x30, x21, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[1], w23 +; CHECK-GI-NEXT: ldp x30, x23, [sp, #80] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[2], w8 -; CHECK-GI-NEXT: add sp, sp, #144 +; CHECK-GI-NEXT: add sp, sp, #128 ; CHECK-GI-NEXT: ret %x = call <3 x i32> @llvm.fptosi.sat.v3f128.v3i32(<3 x fp128> %f) ret <3 x i32> %x @@ -1057,52 +1025,44 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) { ; ; CHECK-GI-LABEL: test_signed_v4f128_v4i32: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #176 -; CHECK-GI-NEXT: stp d11, d10, [sp, #96] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d9, d8, [sp, #112] // 16-byte Folded 
Spill -; CHECK-GI-NEXT: str x30, [sp, #128] // 8-byte Folded Spill -; CHECK-GI-NEXT: stp x22, x21, [sp, #144] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x20, x19, [sp, #160] // 16-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 176 +; CHECK-GI-NEXT: sub sp, sp, #160 +; CHECK-GI-NEXT: str x30, [sp, #96] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #128] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #144] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 160 ; CHECK-GI-NEXT: .cfi_offset w19, -8 ; CHECK-GI-NEXT: .cfi_offset w20, -16 ; CHECK-GI-NEXT: .cfi_offset w21, -24 ; CHECK-GI-NEXT: .cfi_offset w22, -32 -; CHECK-GI-NEXT: .cfi_offset w30, -48 -; CHECK-GI-NEXT: .cfi_offset b8, -56 -; CHECK-GI-NEXT: .cfi_offset b9, -64 -; CHECK-GI-NEXT: .cfi_offset b10, -72 -; CHECK-GI-NEXT: .cfi_offset b11, -80 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w24, -48 +; CHECK-GI-NEXT: .cfi_offset w30, -64 ; CHECK-GI-NEXT: adrp x8, .LCPI17_1 ; CHECK-GI-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI17_1] ; CHECK-GI-NEXT: str q2, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: str q3, [sp, #48] // 16-byte Folded Spill -; CHECK-GI-NEXT: str q1, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp q1, q3, [sp, #64] // 32-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: mov d8, v1.d[1] -; CHECK-GI-NEXT: fcsel d10, d2, d1, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: mov x20, #-4603241769126068224 // =0xc01e000000000000 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x21, x8, x20, lt ; CHECK-GI-NEXT: adrp x8, .LCPI17_0 +; CHECK-GI-NEXT: mov v0.d[1], x21 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] -; CHECK-GI-NEXT: str q1, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d9, v0.d[1] -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: mov x22, #281474976448512 // =0xfffffffc0000 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt +; CHECK-GI-NEXT: movk x22, #16413, lsl #48 ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x21, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi ; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload @@ -1110,28 +1070,24 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) { ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 ; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: csel w20, wzr, w19, ne +; CHECK-GI-NEXT: csel w21, wzr, w19, ne ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q1, q4, 
[sp, #64] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: fcsel d10, d2, d4, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x23, x8, x20, lt +; CHECK-GI-NEXT: mov v0.d[1], x23 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x23, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi ; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload @@ -1139,76 +1095,64 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) { ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 ; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: csel w21, wzr, w19, ne +; CHECK-GI-NEXT: csel w23, wzr, w19, ne ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q2, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp q0, q1, [sp, #32] // 32-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v1.d[1] -; CHECK-GI-NEXT: fcsel d10, d1, d2, lt -; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x24, x8, x20, lt +; CHECK-GI-NEXT: mov v0.d[1], x24 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x24, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi ; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 -; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp q1, q0, [sp, #64] // 32-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: csel w22, wzr, w19, ne +; CHECK-GI-NEXT: csel w24, wzr, w19, ne ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q5, q1, [sp, #48] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp, #80] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov 
d0, v5.d[1] -; CHECK-GI-NEXT: fcsel d10, d5, d2, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d8, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, x20, lt +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d8, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x20, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixtfsi -; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #80] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: mov v1.16b, v0.16b ; CHECK-GI-NEXT: bl __unordtf2 -; CHECK-GI-NEXT: mov v0.s[0], w20 +; CHECK-GI-NEXT: mov v0.s[0], w21 ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr x30, [sp, #128] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload ; CHECK-GI-NEXT: csel w8, wzr, w19, ne -; CHECK-GI-NEXT: ldp x20, x19, [sp, #160] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d9, d8, [sp, #112] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d11, d10, [sp, #96] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov v0.s[1], w21 -; CHECK-GI-NEXT: mov v0.s[2], w22 -; CHECK-GI-NEXT: ldp x22, x21, [sp, #144] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #144] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #128] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.s[1], w23 +; CHECK-GI-NEXT: mov v0.s[2], w24 +; CHECK-GI-NEXT: ldp x24, x23, [sp, #112] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[3], w8 -; CHECK-GI-NEXT: add sp, sp, #176 +; CHECK-GI-NEXT: add sp, sp, #160 ; CHECK-GI-NEXT: ret %x = call <4 x i32> @llvm.fptosi.sat.v4f128.v4i32(<4 x fp128> %f) ret <4 x i32> %x diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll index 60f961fa8f9443..3c19fca4a22aef 100644 --- a/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll +++ b/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll @@ -787,43 +787,38 @@ define i32 @test_unsigned_f128_i32(fp128 %f) { ; ; CHECK-GI-LABEL: test_unsigned_f128_i32: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #64 -; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: str x30, [sp, #48] // 8-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 -; CHECK-GI-NEXT: .cfi_offset w30, -16 -; CHECK-GI-NEXT: .cfi_offset b8, -24 -; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: sub sp, sp, #48 +; CHECK-GI-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 48 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w30, -32 ; CHECK-GI-NEXT: adrp x8, .LCPI30_1 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI30_1] -; CHECK-GI-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q3, q2, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: 
ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v3.d[1] -; CHECK-GI-NEXT: mov d1, v2.d[1] -; CHECK-GI-NEXT: fcsel d8, d3, d2, lt -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: fcsel d9, d0, d1, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d9 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, xzr, lt ; CHECK-GI-NEXT: adrp x8, .LCPI30_0 +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI30_0] -; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v1.d[1] -; CHECK-GI-NEXT: fcsel d1, d8, d1, gt -; CHECK-GI-NEXT: fmov x8, d1 -; CHECK-GI-NEXT: fcsel d2, d9, d0, gt -; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d2 +; CHECK-GI-NEXT: mov x8, #281474976579584 // =0xfffffffe0000 +; CHECK-GI-NEXT: movk x8, #16414, lsl #48 +; CHECK-GI-NEXT: csel x8, x20, x8, gt +; CHECK-GI-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.d[1], x8 -; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: add sp, sp, #48 ; CHECK-GI-NEXT: b __fixunstfsi %x = call i32 @llvm.fptoui.sat.i32.f128(fp128 %f) ret i32 %x diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll index 046ec0d0790296..e1670ad2dc053b 100644 --- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll +++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll @@ -481,46 +481,41 @@ define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) { ; ; CHECK-GI-LABEL: test_unsigned_v1f128_v1i32: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #64 -; CHECK-GI-NEXT: stp d9, d8, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: str x30, [sp, #48] // 8-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 -; CHECK-GI-NEXT: .cfi_offset w30, -16 -; CHECK-GI-NEXT: .cfi_offset b8, -24 -; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: sub sp, sp, #48 +; CHECK-GI-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 48 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w30, -32 ; CHECK-GI-NEXT: adrp x8, .LCPI14_1 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_1] -; CHECK-GI-NEXT: stp q0, q1, [sp] // 32-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q3, q2, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v3.d[1] -; CHECK-GI-NEXT: mov d1, v2.d[1] -; CHECK-GI-NEXT: fcsel d8, d3, d2, lt -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: fcsel d9, d0, d1, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d9 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, xzr, lt ; CHECK-GI-NEXT: adrp x8, .LCPI14_0 
+; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] -; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v1.d[1] -; CHECK-GI-NEXT: fcsel d1, d8, d1, gt -; CHECK-GI-NEXT: fmov x8, d1 -; CHECK-GI-NEXT: fcsel d2, d9, d0, gt +; CHECK-GI-NEXT: csel x8, x19, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d2 +; CHECK-GI-NEXT: mov x8, #281474976579584 // =0xfffffffe0000 +; CHECK-GI-NEXT: movk x8, #16414, lsl #48 +; CHECK-GI-NEXT: csel x8, x20, x8, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi -; CHECK-GI-NEXT: ldp d9, d8, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[0], w0 -; CHECK-GI-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: add sp, sp, #48 ; CHECK-GI-NEXT: ret %x = call <1 x i32> @llvm.fptoui.sat.v1f128.v1i32(<1 x fp128> %f) ret <1 x i32> %x @@ -579,75 +574,64 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) { ; CHECK-GI-LABEL: test_unsigned_v2f128_v2i32: ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: sub sp, sp, #96 -; CHECK-GI-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill ; CHECK-GI-NEXT: .cfi_def_cfa_offset 96 ; CHECK-GI-NEXT: .cfi_offset w19, -8 -; CHECK-GI-NEXT: .cfi_offset w30, -16 -; CHECK-GI-NEXT: .cfi_offset b8, -24 -; CHECK-GI-NEXT: .cfi_offset b9, -32 -; CHECK-GI-NEXT: .cfi_offset b10, -40 -; CHECK-GI-NEXT: .cfi_offset b11, -48 +; CHECK-GI-NEXT: .cfi_offset w20, -16 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w30, -48 ; CHECK-GI-NEXT: adrp x8, .LCPI15_1 -; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: ldr q2, [x8, :lo12:.LCPI15_1] -; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp q2, q1, [sp, #16] // 32-byte Folded Spill ; CHECK-GI-NEXT: mov v1.16b, v2.16b -; CHECK-GI-NEXT: str q2, [sp, #32] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q2, q1, [sp, #16] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: mov d8, v1.d[1] -; CHECK-GI-NEXT: fcsel d9, d2, d1, lt -; CHECK-GI-NEXT: fmov x8, d9 -; CHECK-GI-NEXT: fcsel d10, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, xzr, lt ; CHECK-GI-NEXT: adrp x8, .LCPI15_0 +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] -; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: 
ldr q0, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d11, v0.d[1] -; CHECK-GI-NEXT: fcsel d0, d9, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 -; CHECK-GI-NEXT: fcsel d1, d10, d11, gt +; CHECK-GI-NEXT: mov x21, #281474976579584 // =0xfffffffe0000 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt +; CHECK-GI-NEXT: movk x21, #16414, lsl #48 ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x20, x21, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi -; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp q1, q0, [sp, #16] // 32-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q3, q1, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp, #32] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v3.d[1] -; CHECK-GI-NEXT: fcsel d9, d3, d2, lt -; CHECK-GI-NEXT: fmov x8, d9 -; CHECK-GI-NEXT: fcsel d8, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x20, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x20 +; CHECK-GI-NEXT: csel x22, x8, xzr, lt +; CHECK-GI-NEXT: mov v0.d[1], x22 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d8, d11, gt -; CHECK-GI-NEXT: fcsel d0, d9, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x20, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x22, x21, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi ; CHECK-GI-NEXT: mov v0.s[0], w19 -; CHECK-GI-NEXT: ldp x30, x19, [sp, #80] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[1], w0 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: add sp, sp, #96 @@ -723,106 +707,87 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) { ; ; CHECK-GI-LABEL: test_unsigned_v3f128_v3i32: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: sub sp, sp, #128 -; CHECK-GI-NEXT: stp d11, d10, [sp, #64] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill -; CHECK-GI-NEXT: str x30, [sp, #96] // 8-byte Folded Spill -; CHECK-GI-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill -; CHECK-GI-NEXT: .cfi_def_cfa_offset 128 +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: stp x30, x23, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 ; CHECK-GI-NEXT: .cfi_offset w19, -8 ; CHECK-GI-NEXT: .cfi_offset w20, -16 -; CHECK-GI-NEXT: .cfi_offset w30, -32 -; CHECK-GI-NEXT: .cfi_offset b8, -40 -; CHECK-GI-NEXT: .cfi_offset b9, -48 -; CHECK-GI-NEXT: .cfi_offset b10, -56 -; CHECK-GI-NEXT: .cfi_offset b11, -64 +; CHECK-GI-NEXT: .cfi_offset w21, -24 +; 
CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w30, -48 ; CHECK-GI-NEXT: adrp x8, .LCPI16_1 -; CHECK-GI-NEXT: str q1, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_1] -; CHECK-GI-NEXT: str q0, [sp, #32] // 16-byte Folded Spill -; CHECK-GI-NEXT: str q2, [sp, #16] // 16-byte Folded Spill -; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp q1, q2, [sp, #32] // 32-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q2, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: mov d8, v1.d[1] -; CHECK-GI-NEXT: fcsel d10, d2, d1, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, xzr, lt ; CHECK-GI-NEXT: adrp x8, .LCPI16_0 +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] -; CHECK-GI-NEXT: str q1, [sp, #32] // 16-byte Folded Spill +; CHECK-GI-NEXT: str q1, [sp, #16] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d9, v0.d[1] -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: mov x21, #281474976579584 // =0xfffffffe0000 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt +; CHECK-GI-NEXT: movk x21, #16414, lsl #48 ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x20, x21, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi ; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr q1, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q1, q3, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: fcsel d10, d2, d3, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x20, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x20 +; CHECK-GI-NEXT: csel x22, x8, xzr, lt +; CHECK-GI-NEXT: mov v0.d[1], x22 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x20, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x22, x21, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi -; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp q1, q0, [sp, #32] // 32-byte Folded Reload ; CHECK-GI-NEXT: mov 
w20, w0 ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q4, q1, [sp, #16] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp, #48] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v4.d[1] -; CHECK-GI-NEXT: fcsel d10, d4, d2, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d8, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x22, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x22 +; CHECK-GI-NEXT: csel x23, x8, xzr, lt +; CHECK-GI-NEXT: mov v0.d[1], x23 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d8, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x22, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x23, x21, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi ; CHECK-GI-NEXT: mov v0.s[0], w19 -; CHECK-GI-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d11, d10, [sp, #64] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x30, x23, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[1], w20 -; CHECK-GI-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[2], w0 -; CHECK-GI-NEXT: add sp, sp, #128 +; CHECK-GI-NEXT: add sp, sp, #112 ; CHECK-GI-NEXT: ret %x = call <3 x i32> @llvm.fptoui.sat.v3f128.v3i32(<3 x fp128> %f) ret <3 x i32> %x @@ -912,19 +877,18 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) { ; CHECK-GI-LABEL: test_unsigned_v4f128_v4i32: ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: sub sp, sp, #144 -; CHECK-GI-NEXT: stp d11, d10, [sp, #80] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp d9, d8, [sp, #96] // 16-byte Folded Spill -; CHECK-GI-NEXT: stp x30, x21, [sp, #112] // 16-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #80] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x24, x23, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x22, x21, [sp, #112] // 16-byte Folded Spill ; CHECK-GI-NEXT: stp x20, x19, [sp, #128] // 16-byte Folded Spill ; CHECK-GI-NEXT: .cfi_def_cfa_offset 144 ; CHECK-GI-NEXT: .cfi_offset w19, -8 ; CHECK-GI-NEXT: .cfi_offset w20, -16 ; CHECK-GI-NEXT: .cfi_offset w21, -24 -; CHECK-GI-NEXT: .cfi_offset w30, -32 -; CHECK-GI-NEXT: .cfi_offset b8, -40 -; CHECK-GI-NEXT: .cfi_offset b9, -48 -; CHECK-GI-NEXT: .cfi_offset b10, -56 -; CHECK-GI-NEXT: .cfi_offset b11, -64 +; CHECK-GI-NEXT: .cfi_offset w22, -32 +; CHECK-GI-NEXT: .cfi_offset w23, -40 +; CHECK-GI-NEXT: .cfi_offset w24, -48 +; CHECK-GI-NEXT: .cfi_offset w30, -64 ; CHECK-GI-NEXT: adrp x8, .LCPI17_1 ; CHECK-GI-NEXT: stp q1, q2, [sp] // 32-byte Folded Spill ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI17_1] @@ -932,109 +896,92 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) { ; CHECK-GI-NEXT: str q3, [sp, #32] // 16-byte Folded Spill ; CHECK-GI-NEXT: str q1, [sp, #64] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q2, q1, [sp, #48] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, 
#48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: mov d8, v1.d[1] -; CHECK-GI-NEXT: fcsel d10, d2, d1, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x19, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x19 +; CHECK-GI-NEXT: csel x20, x8, xzr, lt ; CHECK-GI-NEXT: adrp x8, .LCPI17_0 +; CHECK-GI-NEXT: mov v0.d[1], x20 ; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] ; CHECK-GI-NEXT: str q1, [sp, #48] // 16-byte Folded Spill ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: mov d9, v0.d[1] -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt +; CHECK-GI-NEXT: mov x22, #281474976579584 // =0xfffffffe0000 +; CHECK-GI-NEXT: csel x8, x19, xzr, gt +; CHECK-GI-NEXT: movk x22, #16414, lsl #48 ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x20, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi ; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w19, w0 ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q1, q4, [sp, #48] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: fcsel d10, d2, d4, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x20, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x20 +; CHECK-GI-NEXT: csel x21, x8, xzr, lt +; CHECK-GI-NEXT: mov v0.d[1], x21 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x20, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x21, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi ; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w20, w0 ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q1, q5, [sp, #48] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v2.d[1] -; CHECK-GI-NEXT: fcsel d10, d2, d5, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d11, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d11 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x21, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x21 +; CHECK-GI-NEXT: csel x23, x8, xzr, lt +; CHECK-GI-NEXT: mov v0.d[1], x23 ; CHECK-GI-NEXT: bl __gttf2 -; 
CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d11, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x21, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x23, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi ; CHECK-GI-NEXT: ldr q0, [sp, #32] // 16-byte Folded Reload ; CHECK-GI-NEXT: ldr q1, [sp, #64] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov w21, w0 ; CHECK-GI-NEXT: bl __getf2 -; CHECK-GI-NEXT: ldp q6, q1, [sp, #32] // 32-byte Folded Reload +; CHECK-GI-NEXT: ldp q0, q1, [sp, #32] // 32-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: ldr q2, [sp, #64] // 16-byte Folded Reload -; CHECK-GI-NEXT: mov d0, v6.d[1] -; CHECK-GI-NEXT: fcsel d10, d6, d2, lt -; CHECK-GI-NEXT: fmov x8, d10 -; CHECK-GI-NEXT: fcsel d8, d0, d8, lt -; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d8 -; CHECK-GI-NEXT: mov v0.d[1], x8 +; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x23, x8, xzr, lt +; CHECK-GI-NEXT: mov x8, v0.d[1] +; CHECK-GI-NEXT: mov v0.d[0], x23 +; CHECK-GI-NEXT: csel x24, x8, xzr, lt +; CHECK-GI-NEXT: mov v0.d[1], x24 ; CHECK-GI-NEXT: bl __gttf2 -; CHECK-GI-NEXT: ldr q0, [sp, #48] // 16-byte Folded Reload ; CHECK-GI-NEXT: cmp w0, #0 -; CHECK-GI-NEXT: fcsel d1, d8, d9, gt -; CHECK-GI-NEXT: fcsel d0, d10, d0, gt -; CHECK-GI-NEXT: fmov x8, d0 +; CHECK-GI-NEXT: csel x8, x23, xzr, gt ; CHECK-GI-NEXT: mov v0.d[0], x8 -; CHECK-GI-NEXT: fmov x8, d1 +; CHECK-GI-NEXT: csel x8, x24, x22, gt ; CHECK-GI-NEXT: mov v0.d[1], x8 ; CHECK-GI-NEXT: bl __fixunstfsi ; CHECK-GI-NEXT: mov v0.s[0], w19 -; CHECK-GI-NEXT: ldp d9, d8, [sp, #96] // 16-byte Folded Reload -; CHECK-GI-NEXT: ldp d11, d10, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x24, x23, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[1], w20 ; CHECK-GI-NEXT: ldp x20, x19, [sp, #128] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[2], w21 -; CHECK-GI-NEXT: ldp x30, x21, [sp, #112] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp x22, x21, [sp, #112] // 16-byte Folded Reload ; CHECK-GI-NEXT: mov v0.s[3], w0 ; CHECK-GI-NEXT: add sp, sp, #144 ; CHECK-GI-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/load.ll b/llvm/test/CodeGen/AArch64/load.ll index 167e9d1c196435..70ab10e716875a 100644 --- a/llvm/test/CodeGen/AArch64/load.ll +++ b/llvm/test/CodeGen/AArch64/load.ll @@ -157,10 +157,9 @@ define <2 x i16> @load_v2i16(ptr %ptr){ ; ; CHECK-GI-LABEL: load_v2i16: ; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: ldr h0, [x0] -; CHECK-GI-NEXT: add x8, x0, #2 -; CHECK-GI-NEXT: ld1 { v0.h }[1], [x8] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: ld1 { v0.h }[0], [x0] +; CHECK-GI-NEXT: ldr h1, [x0, #2] +; CHECK-GI-NEXT: mov v0.s[1], v1.s[0] ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: ret %a = load <2 x i16>, ptr %ptr diff --git a/llvm/test/CodeGen/AArch64/mul.ll b/llvm/test/CodeGen/AArch64/mul.ll index 5e7f71c18c27a0..9ca975d9e742e1 100644 --- a/llvm/test/CodeGen/AArch64/mul.ll +++ b/llvm/test/CodeGen/AArch64/mul.ll @@ -167,21 +167,23 @@ define void @v4i8(ptr %p1, ptr %p2) { ; CHECK-GI-NEXT: fmov s0, w8 ; CHECK-GI-NEXT: fmov s1, w9 ; CHECK-GI-NEXT: mov b2, v0.b[1] -; CHECK-GI-NEXT: mov v3.b[0], v0.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[1] -; CHECK-GI-NEXT: mov v5.b[0], v1.b[0] -; CHECK-GI-NEXT: mov v3.b[1], v2.b[0] -; 
CHECK-GI-NEXT: mov b2, v0.b[2] -; CHECK-GI-NEXT: mov b0, v0.b[3] -; CHECK-GI-NEXT: mov v5.b[1], v4.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[2] -; CHECK-GI-NEXT: mov b1, v1.b[3] -; CHECK-GI-NEXT: mov v3.b[2], v2.b[0] -; CHECK-GI-NEXT: mov v5.b[2], v4.b[0] -; CHECK-GI-NEXT: mov v3.b[3], v0.b[0] -; CHECK-GI-NEXT: mov v5.b[3], v1.b[0] -; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0 +; CHECK-GI-NEXT: mov b3, v1.b[1] +; CHECK-GI-NEXT: mov b4, v0.b[2] +; CHECK-GI-NEXT: mov b5, v0.b[3] +; CHECK-GI-NEXT: fmov w8, s2 +; CHECK-GI-NEXT: mov b2, v1.b[2] +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov b3, v1.b[3] +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: mov v1.h[1], w9 +; CHECK-GI-NEXT: fmov w8, s4 +; CHECK-GI-NEXT: fmov w9, s2 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: mov v1.h[2], w9 +; CHECK-GI-NEXT: fmov w8, s5 +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov v0.h[3], w8 +; CHECK-GI-NEXT: mov v1.h[3], w9 ; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h ; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b ; CHECK-GI-NEXT: fmov w8, s0 @@ -250,14 +252,12 @@ define void @v2i16(ptr %p1, ptr %p2) { ; ; CHECK-GI-LABEL: v2i16: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr h0, [x0] -; CHECK-GI-NEXT: ldr h1, [x1] -; CHECK-GI-NEXT: add x8, x0, #2 -; CHECK-GI-NEXT: add x9, x1, #2 -; CHECK-GI-NEXT: ld1 { v0.h }[1], [x8] -; CHECK-GI-NEXT: ld1 { v1.h }[1], [x9] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: ld1 { v0.h }[0], [x0] +; CHECK-GI-NEXT: ld1 { v1.h }[0], [x1] +; CHECK-GI-NEXT: ldr h2, [x0, #2] +; CHECK-GI-NEXT: ldr h3, [x1, #2] +; CHECK-GI-NEXT: mov v0.s[1], v2.s[0] +; CHECK-GI-NEXT: mov v1.s[1], v3.s[0] ; CHECK-GI-NEXT: mul v0.2s, v0.2s, v1.2s ; CHECK-GI-NEXT: mov s1, v0.s[1] ; CHECK-GI-NEXT: str h0, [x0] diff --git a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll index dbb4270fb8002e..f6dbf5251fc272 100644 --- a/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll +++ b/llvm/test/CodeGen/AArch64/neon-bitwise-instructions.ll @@ -1120,10 +1120,9 @@ define <4 x i16> @vselect_constant_cond_zero_v4i16(<4 x i16> %a) { ; CHECK-GI-NEXT: mov w8, #1 // =0x1 ; CHECK-GI-NEXT: mov w9, #0 // =0x0 ; CHECK-GI-NEXT: fmov s1, w8 -; CHECK-GI-NEXT: mov v1.b[1], w9 -; CHECK-GI-NEXT: mov v1.b[2], w9 -; CHECK-GI-NEXT: mov v1.b[3], w8 -; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0 +; CHECK-GI-NEXT: mov v1.h[1], w9 +; CHECK-GI-NEXT: mov v1.h[2], w9 +; CHECK-GI-NEXT: mov v1.h[3], w8 ; CHECK-GI-NEXT: shl v1.4h, v1.4h, #15 ; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #15 ; CHECK-GI-NEXT: and v0.8b, v0.8b, v1.8b @@ -1144,13 +1143,10 @@ define <4 x i32> @vselect_constant_cond_zero_v4i32(<4 x i32> %a) { ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: mov w8, #1 // =0x1 ; CHECK-GI-NEXT: mov w9, #0 // =0x0 -; CHECK-GI-NEXT: fmov s1, w8 -; CHECK-GI-NEXT: fmov s2, w9 -; CHECK-GI-NEXT: mov v2.h[1], w8 -; CHECK-GI-NEXT: mov v1.h[1], w9 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 -; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0 -; CHECK-GI-NEXT: mov v1.d[1], v2.d[0] +; CHECK-GI-NEXT: mov v1.s[0], w8 +; CHECK-GI-NEXT: mov v1.s[1], w9 +; CHECK-GI-NEXT: mov v1.s[2], w9 +; CHECK-GI-NEXT: mov v1.s[3], w8 ; CHECK-GI-NEXT: shl v1.4s, v1.4s, #31 ; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #31 ; CHECK-GI-NEXT: and v0.16b, v0.16b, v1.16b @@ -1196,10 +1192,9 @@ define <4 x i16> @vselect_constant_cond_v4i16(<4 x i16> %a, <4 x i16> %b) { ; CHECK-GI-NEXT: mov w8, #1 // =0x1 ; CHECK-GI-NEXT: mov w9, #0 // =0x0 ; CHECK-GI-NEXT: fmov s2, 
w8 -; CHECK-GI-NEXT: mov v2.b[1], w9 -; CHECK-GI-NEXT: mov v2.b[2], w9 -; CHECK-GI-NEXT: mov v2.b[3], w8 -; CHECK-GI-NEXT: ushll v2.8h, v2.8b, #0 +; CHECK-GI-NEXT: mov v2.h[1], w9 +; CHECK-GI-NEXT: mov v2.h[2], w9 +; CHECK-GI-NEXT: mov v2.h[3], w8 ; CHECK-GI-NEXT: shl v2.4h, v2.4h, #15 ; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #15 ; CHECK-GI-NEXT: bif v0.8b, v1.8b, v2.8b @@ -1220,13 +1215,10 @@ define <4 x i32> @vselect_constant_cond_v4i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: mov w8, #1 // =0x1 ; CHECK-GI-NEXT: mov w9, #0 // =0x0 -; CHECK-GI-NEXT: fmov s2, w8 -; CHECK-GI-NEXT: fmov s3, w9 -; CHECK-GI-NEXT: mov v3.h[1], w8 -; CHECK-GI-NEXT: mov v2.h[1], w9 -; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0 -; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0 -; CHECK-GI-NEXT: mov v2.d[1], v3.d[0] +; CHECK-GI-NEXT: mov v2.s[0], w8 +; CHECK-GI-NEXT: mov v2.s[1], w9 +; CHECK-GI-NEXT: mov v2.s[2], w9 +; CHECK-GI-NEXT: mov v2.s[3], w8 ; CHECK-GI-NEXT: shl v2.4s, v2.4s, #31 ; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #31 ; CHECK-GI-NEXT: bif v0.16b, v1.16b, v2.16b diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll index adc89f7a0d99d8..8f7d5dd5588b97 100644 --- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll +++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll @@ -2672,14 +2672,9 @@ define <4 x i32> @fcmal4xfloat(<4 x float> %A, <4 x float> %B) { ; CHECK-GI-LABEL: fcmal4xfloat: ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: mov w8, #1 // =0x1 -; CHECK-GI-NEXT: fmov s0, w8 -; CHECK-GI-NEXT: mov v1.16b, v0.16b -; CHECK-GI-NEXT: mov v0.h[1], w8 -; CHECK-GI-NEXT: mov v1.h[1], w8 -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 -; CHECK-GI-NEXT: mov v1.d[1], v0.d[0] -; CHECK-GI-NEXT: shl v0.4s, v1.4s, #31 +; CHECK-GI-NEXT: dup v0.2s, w8 +; CHECK-GI-NEXT: mov v0.d[1], v0.d[0] +; CHECK-GI-NEXT: shl v0.4s, v0.4s, #31 ; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31 ; CHECK-GI-NEXT: ret %tmp3 = fcmp true <4 x float> %A, %B @@ -2723,14 +2718,10 @@ define <4 x i32> @fcmnv4xfloat(<4 x float> %A, <4 x float> %B) { ; CHECK-GI-LABEL: fcmnv4xfloat: ; CHECK-GI: // %bb.0: ; CHECK-GI-NEXT: mov w8, #0 // =0x0 -; CHECK-GI-NEXT: fmov s0, w8 -; CHECK-GI-NEXT: mov v1.16b, v0.16b -; CHECK-GI-NEXT: mov v0.h[1], w8 -; CHECK-GI-NEXT: mov v1.h[1], w8 -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 -; CHECK-GI-NEXT: mov v1.d[1], v0.d[0] -; CHECK-GI-NEXT: shl v0.4s, v1.4s, #31 +; CHECK-GI-NEXT: mov v0.s[0], w8 +; CHECK-GI-NEXT: mov v0.s[1], w8 +; CHECK-GI-NEXT: mov v0.d[1], v0.d[0] +; CHECK-GI-NEXT: shl v0.4s, v0.4s, #31 ; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #31 ; CHECK-GI-NEXT: ret %tmp3 = fcmp false <4 x float> %A, %B diff --git a/llvm/test/CodeGen/AArch64/sext.ll b/llvm/test/CodeGen/AArch64/sext.ll index 0f256c1f18f589..853ed92c91fbcd 100644 --- a/llvm/test/CodeGen/AArch64/sext.ll +++ b/llvm/test/CodeGen/AArch64/sext.ll @@ -1198,58 +1198,50 @@ define <16 x i64> @sext_v16i10_v16i64(<16 x i10> %a) { ; ; CHECK-GI-LABEL: sext_v16i10_v16i64: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr w8, [sp] -; CHECK-GI-NEXT: ldr w10, [sp, #32] -; CHECK-GI-NEXT: fmov s0, w0 -; CHECK-GI-NEXT: fmov s1, w4 -; CHECK-GI-NEXT: ldr w9, [sp, #8] -; CHECK-GI-NEXT: ldr w11, [sp, #40] -; CHECK-GI-NEXT: fmov s2, w8 -; CHECK-GI-NEXT: fmov s3, w10 -; CHECK-GI-NEXT: ldr w8, [sp, #16] -; CHECK-GI-NEXT: mov v0.h[1], w1 -; CHECK-GI-NEXT: mov v1.h[1], w5 -; CHECK-GI-NEXT: mov v2.h[1], w9 -; CHECK-GI-NEXT: mov v3.h[1], 
w11 -; CHECK-GI-NEXT: ldr w9, [sp, #48] -; CHECK-GI-NEXT: mov v0.h[2], w2 -; CHECK-GI-NEXT: mov v1.h[2], w6 -; CHECK-GI-NEXT: mov v2.h[2], w8 -; CHECK-GI-NEXT: mov v3.h[2], w9 -; CHECK-GI-NEXT: ldr w8, [sp, #24] -; CHECK-GI-NEXT: ldr w9, [sp, #56] -; CHECK-GI-NEXT: mov v0.h[3], w3 -; CHECK-GI-NEXT: mov v1.h[3], w7 -; CHECK-GI-NEXT: mov v2.h[3], w8 -; CHECK-GI-NEXT: mov v3.h[3], w9 -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 -; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0 -; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0 -; CHECK-GI-NEXT: ushll v4.2d, v0.2s, #0 -; CHECK-GI-NEXT: ushll2 v0.2d, v0.4s, #0 -; CHECK-GI-NEXT: ushll v5.2d, v1.2s, #0 -; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0 -; CHECK-GI-NEXT: ushll v6.2d, v2.2s, #0 -; CHECK-GI-NEXT: ushll2 v2.2d, v2.4s, #0 -; CHECK-GI-NEXT: ushll v7.2d, v3.2s, #0 -; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0 -; CHECK-GI-NEXT: shl v4.2d, v4.2d, #54 -; CHECK-GI-NEXT: shl v16.2d, v0.2d, #54 +; CHECK-GI-NEXT: mov v1.s[0], w0 +; CHECK-GI-NEXT: mov v2.s[0], w2 +; CHECK-GI-NEXT: ldr s0, [sp] +; CHECK-GI-NEXT: mov v3.s[0], w4 +; CHECK-GI-NEXT: mov v4.s[0], w6 +; CHECK-GI-NEXT: ldr s5, [sp, #8] +; CHECK-GI-NEXT: ldr s6, [sp, #16] +; CHECK-GI-NEXT: ldr s7, [sp, #24] +; CHECK-GI-NEXT: ldr s16, [sp, #32] +; CHECK-GI-NEXT: ldr s17, [sp, #40] +; CHECK-GI-NEXT: ldr s18, [sp, #48] +; CHECK-GI-NEXT: ldr s19, [sp, #56] +; CHECK-GI-NEXT: mov v1.s[1], w1 +; CHECK-GI-NEXT: mov v0.s[1], v5.s[0] +; CHECK-GI-NEXT: mov v2.s[1], w3 +; CHECK-GI-NEXT: mov v3.s[1], w5 +; CHECK-GI-NEXT: mov v4.s[1], w7 +; CHECK-GI-NEXT: mov v6.s[1], v7.s[0] +; CHECK-GI-NEXT: mov v16.s[1], v17.s[0] +; CHECK-GI-NEXT: mov v18.s[1], v19.s[0] +; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0 +; CHECK-GI-NEXT: ushll v1.2d, v1.2s, #0 +; CHECK-GI-NEXT: ushll v2.2d, v2.2s, #0 +; CHECK-GI-NEXT: ushll v3.2d, v3.2s, #0 +; CHECK-GI-NEXT: ushll v4.2d, v4.2s, #0 +; CHECK-GI-NEXT: ushll v5.2d, v6.2s, #0 +; CHECK-GI-NEXT: ushll v6.2d, v16.2s, #0 +; CHECK-GI-NEXT: ushll v7.2d, v18.2s, #0 +; CHECK-GI-NEXT: shl v0.2d, v0.2d, #54 +; CHECK-GI-NEXT: shl v1.2d, v1.2d, #54 +; CHECK-GI-NEXT: shl v2.2d, v2.2d, #54 +; CHECK-GI-NEXT: shl v3.2d, v3.2d, #54 +; CHECK-GI-NEXT: shl v16.2d, v4.2d, #54 ; CHECK-GI-NEXT: shl v5.2d, v5.2d, #54 -; CHECK-GI-NEXT: shl v17.2d, v1.2d, #54 ; CHECK-GI-NEXT: shl v6.2d, v6.2d, #54 -; CHECK-GI-NEXT: shl v18.2d, v2.2d, #54 ; CHECK-GI-NEXT: shl v7.2d, v7.2d, #54 -; CHECK-GI-NEXT: shl v19.2d, v3.2d, #54 -; CHECK-GI-NEXT: sshr v0.2d, v4.2d, #54 -; CHECK-GI-NEXT: sshr v1.2d, v16.2d, #54 -; CHECK-GI-NEXT: sshr v2.2d, v5.2d, #54 -; CHECK-GI-NEXT: sshr v3.2d, v17.2d, #54 -; CHECK-GI-NEXT: sshr v4.2d, v6.2d, #54 -; CHECK-GI-NEXT: sshr v5.2d, v18.2d, #54 -; CHECK-GI-NEXT: sshr v6.2d, v7.2d, #54 -; CHECK-GI-NEXT: sshr v7.2d, v19.2d, #54 +; CHECK-GI-NEXT: sshr v4.2d, v0.2d, #54 +; CHECK-GI-NEXT: sshr v0.2d, v1.2d, #54 +; CHECK-GI-NEXT: sshr v1.2d, v2.2d, #54 +; CHECK-GI-NEXT: sshr v2.2d, v3.2d, #54 +; CHECK-GI-NEXT: sshr v3.2d, v16.2d, #54 +; CHECK-GI-NEXT: sshr v5.2d, v5.2d, #54 +; CHECK-GI-NEXT: sshr v6.2d, v6.2d, #54 +; CHECK-GI-NEXT: sshr v7.2d, v7.2d, #54 ; CHECK-GI-NEXT: ret entry: %c = sext <16 x i10> %a to <16 x i64> diff --git a/llvm/test/CodeGen/AArch64/sub.ll b/llvm/test/CodeGen/AArch64/sub.ll index c298e6d8a1ff2a..8f35a69f52b85b 100644 --- a/llvm/test/CodeGen/AArch64/sub.ll +++ b/llvm/test/CodeGen/AArch64/sub.ll @@ -155,21 +155,23 @@ define void @v4i8(ptr %p1, ptr %p2) { ; CHECK-GI-NEXT: fmov s0, w8 ; CHECK-GI-NEXT: fmov s1, w9 ; CHECK-GI-NEXT: mov b2, v0.b[1] -; CHECK-GI-NEXT: mov 
v3.b[0], v0.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[1] -; CHECK-GI-NEXT: mov v5.b[0], v1.b[0] -; CHECK-GI-NEXT: mov v3.b[1], v2.b[0] -; CHECK-GI-NEXT: mov b2, v0.b[2] -; CHECK-GI-NEXT: mov b0, v0.b[3] -; CHECK-GI-NEXT: mov v5.b[1], v4.b[0] -; CHECK-GI-NEXT: mov b4, v1.b[2] -; CHECK-GI-NEXT: mov b1, v1.b[3] -; CHECK-GI-NEXT: mov v3.b[2], v2.b[0] -; CHECK-GI-NEXT: mov v5.b[2], v4.b[0] -; CHECK-GI-NEXT: mov v3.b[3], v0.b[0] -; CHECK-GI-NEXT: mov v5.b[3], v1.b[0] -; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0 -; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0 +; CHECK-GI-NEXT: mov b3, v1.b[1] +; CHECK-GI-NEXT: mov b4, v0.b[2] +; CHECK-GI-NEXT: mov b5, v0.b[3] +; CHECK-GI-NEXT: fmov w8, s2 +; CHECK-GI-NEXT: mov b2, v1.b[2] +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov b3, v1.b[3] +; CHECK-GI-NEXT: mov v0.h[1], w8 +; CHECK-GI-NEXT: mov v1.h[1], w9 +; CHECK-GI-NEXT: fmov w8, s4 +; CHECK-GI-NEXT: fmov w9, s2 +; CHECK-GI-NEXT: mov v0.h[2], w8 +; CHECK-GI-NEXT: mov v1.h[2], w9 +; CHECK-GI-NEXT: fmov w8, s5 +; CHECK-GI-NEXT: fmov w9, s3 +; CHECK-GI-NEXT: mov v0.h[3], w8 +; CHECK-GI-NEXT: mov v1.h[3], w9 ; CHECK-GI-NEXT: sub v0.4h, v0.4h, v1.4h ; CHECK-GI-NEXT: uzp1 v0.8b, v0.8b, v0.8b ; CHECK-GI-NEXT: fmov w8, s0 @@ -238,14 +240,12 @@ define void @v2i16(ptr %p1, ptr %p2) { ; ; CHECK-GI-LABEL: v2i16: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr h0, [x0] -; CHECK-GI-NEXT: ldr h1, [x1] -; CHECK-GI-NEXT: add x8, x0, #2 -; CHECK-GI-NEXT: add x9, x1, #2 -; CHECK-GI-NEXT: ld1 { v0.h }[1], [x8] -; CHECK-GI-NEXT: ld1 { v1.h }[1], [x9] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-GI-NEXT: ld1 { v0.h }[0], [x0] +; CHECK-GI-NEXT: ld1 { v1.h }[0], [x1] +; CHECK-GI-NEXT: ldr h2, [x0, #2] +; CHECK-GI-NEXT: ldr h3, [x1, #2] +; CHECK-GI-NEXT: mov v0.s[1], v2.s[0] +; CHECK-GI-NEXT: mov v1.s[1], v3.s[0] ; CHECK-GI-NEXT: sub v0.2s, v0.2s, v1.2s ; CHECK-GI-NEXT: mov s1, v0.s[1] ; CHECK-GI-NEXT: str h0, [x0] diff --git a/llvm/test/CodeGen/AArch64/xtn.ll b/llvm/test/CodeGen/AArch64/xtn.ll index fb3f8ebd7d1413..8a4d6b8c7b789f 100644 --- a/llvm/test/CodeGen/AArch64/xtn.ll +++ b/llvm/test/CodeGen/AArch64/xtn.ll @@ -174,9 +174,8 @@ define <2 x i16> @xtn_v2i128_v2i16(<2 x i128> %a) { ; ; CHECK-GI-LABEL: xtn_v2i128_v2i16: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: fmov s0, w0 -; CHECK-GI-NEXT: mov v0.h[1], w2 -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-GI-NEXT: mov v0.s[0], w0 +; CHECK-GI-NEXT: mov v0.s[1], w2 ; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-GI-NEXT: ret entry: diff --git a/llvm/test/CodeGen/AArch64/zext.ll b/llvm/test/CodeGen/AArch64/zext.ll index 7e95b6684e8211..0d5010113ce0b2 100644 --- a/llvm/test/CodeGen/AArch64/zext.ll +++ b/llvm/test/CodeGen/AArch64/zext.ll @@ -1169,52 +1169,44 @@ define <16 x i64> @zext_v16i10_v16i64(<16 x i10> %a) { ; ; CHECK-GI-LABEL: zext_v16i10_v16i64: ; CHECK-GI: // %bb.0: // %entry -; CHECK-GI-NEXT: ldr w8, [sp] -; CHECK-GI-NEXT: ldr w10, [sp, #32] -; CHECK-GI-NEXT: fmov s0, w0 -; CHECK-GI-NEXT: fmov s1, w4 -; CHECK-GI-NEXT: ldr w9, [sp, #8] -; CHECK-GI-NEXT: ldr w11, [sp, #40] -; CHECK-GI-NEXT: fmov s2, w8 -; CHECK-GI-NEXT: fmov s3, w10 -; CHECK-GI-NEXT: ldr w8, [sp, #16] -; CHECK-GI-NEXT: mov v0.h[1], w1 -; CHECK-GI-NEXT: mov v1.h[1], w5 -; CHECK-GI-NEXT: mov v2.h[1], w9 -; CHECK-GI-NEXT: mov v3.h[1], w11 -; CHECK-GI-NEXT: ldr w9, [sp, #48] -; CHECK-GI-NEXT: mov v0.h[2], w2 -; CHECK-GI-NEXT: mov v1.h[2], w6 -; CHECK-GI-NEXT: mov v2.h[2], w8 -; CHECK-GI-NEXT: mov v3.h[2], w9 -; CHECK-GI-NEXT: ldr w8, [sp, #24] -; 
CHECK-GI-NEXT: ldr w9, [sp, #56] -; CHECK-GI-NEXT: mov v0.h[3], w3 -; CHECK-GI-NEXT: mov v1.h[3], w7 -; CHECK-GI-NEXT: mov v2.h[3], w8 -; CHECK-GI-NEXT: mov v3.h[3], w9 +; CHECK-GI-NEXT: mov v0.s[0], w0 +; CHECK-GI-NEXT: mov v1.s[0], w2 +; CHECK-GI-NEXT: ldr s3, [sp] +; CHECK-GI-NEXT: mov v2.s[0], w4 +; CHECK-GI-NEXT: mov v5.s[0], w6 +; CHECK-GI-NEXT: ldr s4, [sp, #8] +; CHECK-GI-NEXT: ldr s6, [sp, #16] +; CHECK-GI-NEXT: ldr s7, [sp, #24] +; CHECK-GI-NEXT: ldr s16, [sp, #32] +; CHECK-GI-NEXT: ldr s17, [sp, #40] +; CHECK-GI-NEXT: ldr s18, [sp, #48] +; CHECK-GI-NEXT: ldr s19, [sp, #56] +; CHECK-GI-NEXT: mov v0.s[1], w1 +; CHECK-GI-NEXT: mov v1.s[1], w3 +; CHECK-GI-NEXT: mov v3.s[1], v4.s[0] +; CHECK-GI-NEXT: mov v2.s[1], w5 +; CHECK-GI-NEXT: mov v5.s[1], w7 +; CHECK-GI-NEXT: mov v6.s[1], v7.s[0] +; CHECK-GI-NEXT: mov v16.s[1], v17.s[0] +; CHECK-GI-NEXT: mov v18.s[1], v19.s[0] ; CHECK-GI-NEXT: adrp x8, .LCPI54_0 ; CHECK-GI-NEXT: ldr q7, [x8, :lo12:.LCPI54_0] -; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0 -; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0 -; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0 -; CHECK-GI-NEXT: ushll v4.2d, v0.2s, #0 -; CHECK-GI-NEXT: ushll2 v5.2d, v0.4s, #0 -; CHECK-GI-NEXT: ushll v6.2d, v1.2s, #0 -; CHECK-GI-NEXT: ushll2 v16.2d, v1.4s, #0 -; CHECK-GI-NEXT: ushll v17.2d, v2.2s, #0 -; CHECK-GI-NEXT: ushll2 v18.2d, v2.4s, #0 -; CHECK-GI-NEXT: ushll v19.2d, v3.2s, #0 -; CHECK-GI-NEXT: ushll2 v20.2d, v3.4s, #0 -; CHECK-GI-NEXT: and v0.16b, v4.16b, v7.16b -; CHECK-GI-NEXT: and v1.16b, v5.16b, v7.16b -; CHECK-GI-NEXT: and v2.16b, v6.16b, v7.16b -; CHECK-GI-NEXT: and v3.16b, v16.16b, v7.16b -; CHECK-GI-NEXT: and v4.16b, v17.16b, v7.16b -; CHECK-GI-NEXT: and v5.16b, v18.16b, v7.16b -; CHECK-GI-NEXT: and v6.16b, v19.16b, v7.16b -; CHECK-GI-NEXT: and v7.16b, v20.16b, v7.16b +; CHECK-GI-NEXT: ushll v0.2d, v0.2s, #0 +; CHECK-GI-NEXT: ushll v1.2d, v1.2s, #0 +; CHECK-GI-NEXT: ushll v2.2d, v2.2s, #0 +; CHECK-GI-NEXT: ushll v4.2d, v5.2s, #0 +; CHECK-GI-NEXT: ushll v5.2d, v3.2s, #0 +; CHECK-GI-NEXT: ushll v6.2d, v6.2s, #0 +; CHECK-GI-NEXT: ushll v16.2d, v16.2s, #0 +; CHECK-GI-NEXT: ushll v17.2d, v18.2s, #0 +; CHECK-GI-NEXT: and v0.16b, v0.16b, v7.16b +; CHECK-GI-NEXT: and v1.16b, v1.16b, v7.16b +; CHECK-GI-NEXT: and v2.16b, v2.16b, v7.16b +; CHECK-GI-NEXT: and v3.16b, v4.16b, v7.16b +; CHECK-GI-NEXT: and v4.16b, v5.16b, v7.16b +; CHECK-GI-NEXT: and v5.16b, v6.16b, v7.16b +; CHECK-GI-NEXT: and v6.16b, v16.16b, v7.16b +; CHECK-GI-NEXT: and v7.16b, v17.16b, v7.16b ; CHECK-GI-NEXT: ret entry: %c = zext <16 x i10> %a to <16 x i64> diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll index 3e4b43d9cfcd34..c5d4ef23070eb5 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll @@ -4705,8 +4705,7 @@ define amdgpu_ps void @large_offset() { ; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s0 ; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s1 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 -; GFX10-NEXT: s_movk_i32 s0, 0x810 -; GFX10-NEXT: s_addk_i32 s0, 0x3c0 +; GFX10-NEXT: s_movk_i32 s0, 0xbd0 ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v3, v0 @@ -4823,8 +4822,7 @@ define amdgpu_ps void @large_offset() { ; GFX10-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2 ; GFX10-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3 ; GFX10-PAL-NEXT: v_mov_b32_e32 v0, 0 -; GFX10-PAL-NEXT: s_movk_i32 s0, 0x810 -; GFX10-PAL-NEXT: s_addk_i32 s0, 
0x3c0 +; GFX10-PAL-NEXT: s_movk_i32 s0, 0xbd0 ; GFX10-PAL-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-PAL-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-PAL-NEXT: v_mov_b32_e32 v3, v0 diff --git a/llvm/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir b/llvm/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir index 2b5ec86244ec2a..8626ac0f23ec79 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-fi-operand-shrink.mir @@ -183,8 +183,7 @@ body: | bb.0: ; GCN-LABEL: name: shrink_vgpr_imm_vgpr_fi_v_add_i32_e64_no_carry_out_use - ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; GCN-NEXT: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 16, [[V_MOV_B32_e32_]], 0, implicit $exec + ; GCN: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 16, %stack.0, 0, implicit $exec ; GCN-NEXT: S_ENDPGM 0, implicit [[V_ADD_CO_U32_e64_]] %0:vgpr_32 = V_MOV_B32_e32 16, implicit $exec %1:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.gfx10.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.gfx10.mir index 0d6511cbfceb21..d10dec6ca8289f 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.gfx10.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.gfx10.mir @@ -13,8 +13,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e32__const_v_fi - ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, [[V_MOV_B32_e32_]], implicit $exec + ; CHECK: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, %stack.0, implicit $exec ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_U32_e32_]] ; CHECK-NEXT: SI_RETURN implicit $vgpr0 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec @@ -34,8 +33,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_const - ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; CHECK-NEXT: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_32 = V_ADD_CO_U32_e64 [[V_MOV_B32_e32_]], 128, 0, implicit $exec + ; CHECK: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_32 = V_ADD_CO_U32_e64 %stack.0, 128, 0, implicit $exec ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] ; CHECK-NEXT: SI_RETURN implicit $vgpr0 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec @@ -57,8 +55,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e64__const_v_fi - ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; CHECK-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 128, [[V_MOV_B32_e32_]], 0, implicit $exec + ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 128, %stack.0, 0, implicit $exec ; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec @@ -78,8 +75,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_const - ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; CHECK-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[V_MOV_B32_e32_]], 128, 0, implicit $exec + ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 %stack.0, 128, 0, implicit $exec ; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] ; CHECK-NEXT: 
SI_RETURN implicit $sgpr4 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec @@ -99,8 +95,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e64___fi_const_v - ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; CHECK-NEXT: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_32 = V_ADD_CO_U32_e64 128, [[V_MOV_B32_e32_]], 0, implicit $exec + ; CHECK: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_32 = V_ADD_CO_U32_e64 128, %stack.0, 0, implicit $exec ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] ; CHECK-NEXT: SI_RETURN implicit $vgpr0 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir index aa91a4f9f988fc..280126a0d7cd22 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir @@ -14,8 +14,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_const - ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 - ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_MOV_B32_]], 128, implicit-def $scc + ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, 128, implicit-def $scc ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:sreg_32 = S_MOV_B32 %stack.0 @@ -35,8 +34,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__s_add_i32__const_fi - ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 - ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 128, [[S_MOV_B32_]], implicit-def $scc + ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 128, %stack.0, implicit-def $scc ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:sreg_32 = S_MOV_B32 %stack.0 @@ -56,8 +54,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__s_add_i32__materializedconst_fi - ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 - ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, [[S_MOV_B32_]], implicit-def $scc + ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, %stack.0, implicit-def $scc ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:sreg_32 = S_MOV_B32 256 @@ -101,8 +98,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_materializedconst_1 - ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 - ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, [[S_MOV_B32_]], implicit-def $scc + ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, %stack.0, implicit-def $scc ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:sreg_32 = S_MOV_B32 256 @@ -173,8 +169,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e32__const_v_fi - ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, [[V_MOV_B32_e32_]], implicit $exec + ; CHECK: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, %stack.0, implicit $exec ; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e32_]] ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec @@ -215,21 +210,10 @@ stack: - { id: 0, size: 16384, alignment: 4, local-offset: 0 } body: | bb.0: - ; GFX9-LABEL: name: 
fold_frame_index__v_add_u32_e64__imm_v_fi - ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; GFX9-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 64, [[V_MOV_B32_e32_]], 0, implicit $exec - ; GFX9-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] - ; GFX9-NEXT: SI_RETURN implicit $sgpr4 - ; - ; GFX10-LABEL: name: fold_frame_index__v_add_u32_e64__imm_v_fi - ; GFX10: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 64, %stack.0, 0, implicit $exec - ; GFX10-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] - ; GFX10-NEXT: SI_RETURN implicit $sgpr4 - ; - ; GFX12-LABEL: name: fold_frame_index__v_add_u32_e64__imm_v_fi - ; GFX12: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 64, %stack.0, 0, implicit $exec - ; GFX12-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] - ; GFX12-NEXT: SI_RETURN implicit $sgpr4 + ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e64__imm_v_fi + ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 64, %stack.0, 0, implicit $exec + ; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] + ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec %1:vgpr_32 = V_ADD_U32_e64 64, %0, 0, implicit $exec $sgpr4 = COPY %1 @@ -246,21 +230,10 @@ stack: - { id: 0, size: 16384, alignment: 4, local-offset: 0 } body: | bb.0: - ; GFX9-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_imm - ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; GFX9-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[V_MOV_B32_e32_]], 64, 0, implicit $exec - ; GFX9-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] - ; GFX9-NEXT: SI_RETURN implicit $sgpr4 - ; - ; GFX10-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_imm - ; GFX10: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 %stack.0, 64, 0, implicit $exec - ; GFX10-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] - ; GFX10-NEXT: SI_RETURN implicit $sgpr4 - ; - ; GFX12-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_imm - ; GFX12: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 %stack.0, 64, 0, implicit $exec - ; GFX12-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] - ; GFX12-NEXT: SI_RETURN implicit $sgpr4 + ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_imm + ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 %stack.0, 64, 0, implicit $exec + ; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]] + ; CHECK-NEXT: SI_RETURN implicit $sgpr4 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec %1:vgpr_32 = V_ADD_U32_e64 %0, 64, 0, implicit $exec $sgpr4 = COPY %1 @@ -278,8 +251,7 @@ stack: body: | bb.0: ; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e32__const_v_fi - ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 128, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec + ; CHECK: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 128, %stack.0, implicit-def $vcc, implicit $exec ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e32_]] ; CHECK-NEXT: SI_RETURN implicit $vgpr0 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec @@ -298,21 +270,10 @@ stack: - { id: 0, size: 16384, alignment: 4, local-offset: 0 } body: | bb.0: - ; GFX9-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_imm - ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; GFX9-NEXT: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 [[V_MOV_B32_e32_]], 64, 0, implicit $exec - ; GFX9-NEXT: $vgpr0 = 
COPY [[V_ADD_CO_U32_e64_]] - ; GFX9-NEXT: SI_RETURN implicit $vgpr0 - ; - ; GFX10-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_imm - ; GFX10: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 %stack.0, 64, 0, implicit $exec - ; GFX10-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] - ; GFX10-NEXT: SI_RETURN implicit $vgpr0 - ; - ; GFX12-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_imm - ; GFX12: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 %stack.0, 64, 0, implicit $exec - ; GFX12-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] - ; GFX12-NEXT: SI_RETURN implicit $vgpr0 + ; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_imm + ; CHECK: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 %stack.0, 64, 0, implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec %1:vgpr_32, %2:sreg_64 = V_ADD_CO_U32_e64 %0, 64, 0, implicit $exec $vgpr0 = COPY %1 @@ -329,21 +290,10 @@ stack: - { id: 0, size: 16384, alignment: 4, local-offset: 0 } body: | bb.0: - ; GFX9-LABEL: name: fold_frame_index__v_add_co_u32_e64__imm_v_fi - ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec - ; GFX9-NEXT: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 64, [[V_MOV_B32_e32_]], 0, implicit $exec - ; GFX9-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] - ; GFX9-NEXT: SI_RETURN implicit $vgpr0 - ; - ; GFX10-LABEL: name: fold_frame_index__v_add_co_u32_e64__imm_v_fi - ; GFX10: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 64, %stack.0, 0, implicit $exec - ; GFX10-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] - ; GFX10-NEXT: SI_RETURN implicit $vgpr0 - ; - ; GFX12-LABEL: name: fold_frame_index__v_add_co_u32_e64__imm_v_fi - ; GFX12: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 64, %stack.0, 0, implicit $exec - ; GFX12-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] - ; GFX12-NEXT: SI_RETURN implicit $vgpr0 + ; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e64__imm_v_fi + ; CHECK: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 64, %stack.0, 0, implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec %1:vgpr_32, %2:sreg_64 = V_ADD_CO_U32_e64 64, %0, 0, implicit $exec $vgpr0 = COPY %1 diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll index 4215ae43345fde..e3cd8028422ddb 100644 --- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll +++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll @@ -64,8 +64,8 @@ define void @func_mov_fi_i32_offset() #0 { ; GFX9-MUBUF: v_lshrrev_b32_e64 [[SCALED:v[0-9]+]], 6, s32 ; GFX9-MUBUF-NEXT: v_add_u32_e32 v0, 4, [[SCALED]] -; GFX9-FLATSCR: v_mov_b32_e32 [[ADD:v[0-9]+]], s32 -; GFX9-FLATSCR-NEXT: v_add_u32_e32 v0, 4, [[ADD]] +; FIXME: Should commute and shrink +; GFX9-FLATSCR: v_add_u32_e64 v0, 4, s32 ; GCN-NOT: v_mov ; GCN: ds_write_b32 v0, v0 @@ -164,12 +164,12 @@ define void @void_func_byval_struct_i8_i32_ptr_value(ptr addrspace(5) byval({ i8 ; GFX9-FLATSCR: scratch_load_dword v{{[0-9]+}}, off, s32 offset:4 glc{{$}} ; CI: v_lshr_b32_e64 
[[SHIFT:v[0-9]+]], s32, 6 -; CI: v_add_i32_e32 [[GEP:v[0-9]+]], vcc, 4, [[SHIFT]] +; CI: v_add_i32_e64 [[GEP:v[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 4, [[SHIFT]] -; GFX9-MUBUF: v_lshrrev_b32_e64 [[SP:v[0-9]+]], 6, s32 -; GFX9-FLATSCR: v_mov_b32_e32 [[SP:v[0-9]+]], s32 +; GFX9-MUBUF: v_lshrrev_b32_e64 [[SP:v[0-9]+]], 6, s32 +; GFX9-MUBUF: v_add_u32_e32 [[GEP:v[0-9]+]], 4, [[SP]] -; GFX9: v_add_u32_e32 [[GEP:v[0-9]+]], 4, [[SP]] +; GFX9-FLATSCR: v_add_u32_e64 [[GEP:v[0-9]+]], 4, s32 ; GCN: ds_write_b32 v{{[0-9]+}}, [[GEP]] define void @void_func_byval_struct_i8_i32_ptr_nonentry_block(ptr addrspace(5) byval({ i8, i32 }) %arg0, i32 %arg2) #0 { diff --git a/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll b/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll index e86ef52e413b69..302b140e32f3aa 100644 --- a/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll +++ b/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll @@ -1426,17 +1426,16 @@ define void @scalar_mov_materializes_frame_index_unavailable_scc__gep_immoffset( ; GFX10_1-NEXT: buffer_store_dword v2, off, s[0:3], s5 ; 4-byte Folded Spill ; GFX10_1-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10_1-NEXT: s_mov_b32 exec_lo, s4 -; GFX10_1-NEXT: v_lshrrev_b32_e64 v0, 5, s32 +; GFX10_1-NEXT: v_lshrrev_b32_e64 v3, 5, s32 ; GFX10_1-NEXT: v_writelane_b32 v2, s59, 0 -; GFX10_1-NEXT: v_lshrrev_b32_e64 v1, 5, s32 +; GFX10_1-NEXT: v_lshrrev_b32_e64 v0, 5, s32 ; GFX10_1-NEXT: s_and_b32 s4, 0, exec_lo -; GFX10_1-NEXT: v_add_nc_u32_e32 v0, 0x4040, v0 -; GFX10_1-NEXT: v_add_nc_u32_e32 v1, 64, v1 +; GFX10_1-NEXT: v_add_nc_u32_e32 v1, 0x442c, v3 +; GFX10_1-NEXT: v_add_nc_u32_e32 v0, 64, v0 ; GFX10_1-NEXT: ;;#ASMSTART -; GFX10_1-NEXT: ; use alloca0 v1 +; GFX10_1-NEXT: ; use alloca0 v0 ; GFX10_1-NEXT: ;;#ASMEND -; GFX10_1-NEXT: v_add_nc_u32_e32 v0, 0x3ec, v0 -; GFX10_1-NEXT: v_readfirstlane_b32 s59, v0 +; GFX10_1-NEXT: v_readfirstlane_b32 s59, v1 ; GFX10_1-NEXT: ;;#ASMSTART ; GFX10_1-NEXT: ; use s59, scc ; GFX10_1-NEXT: ;;#ASMEND @@ -1456,17 +1455,16 @@ define void @scalar_mov_materializes_frame_index_unavailable_scc__gep_immoffset( ; GFX10_3-NEXT: s_add_i32 s5, s32, 0x100800 ; GFX10_3-NEXT: buffer_store_dword v2, off, s[0:3], s5 ; 4-byte Folded Spill ; GFX10_3-NEXT: s_mov_b32 exec_lo, s4 -; GFX10_3-NEXT: v_lshrrev_b32_e64 v0, 5, s32 +; GFX10_3-NEXT: v_lshrrev_b32_e64 v3, 5, s32 ; GFX10_3-NEXT: v_writelane_b32 v2, s59, 0 -; GFX10_3-NEXT: v_lshrrev_b32_e64 v1, 5, s32 +; GFX10_3-NEXT: v_lshrrev_b32_e64 v0, 5, s32 ; GFX10_3-NEXT: s_and_b32 s4, 0, exec_lo -; GFX10_3-NEXT: v_add_nc_u32_e32 v0, 0x4040, v0 -; GFX10_3-NEXT: v_add_nc_u32_e32 v1, 64, v1 +; GFX10_3-NEXT: v_add_nc_u32_e32 v1, 0x442c, v3 +; GFX10_3-NEXT: v_add_nc_u32_e32 v0, 64, v0 ; GFX10_3-NEXT: ;;#ASMSTART -; GFX10_3-NEXT: ; use alloca0 v1 +; GFX10_3-NEXT: ; use alloca0 v0 ; GFX10_3-NEXT: ;;#ASMEND -; GFX10_3-NEXT: v_add_nc_u32_e32 v0, 0x3ec, v0 -; GFX10_3-NEXT: v_readfirstlane_b32 s59, v0 +; GFX10_3-NEXT: v_readfirstlane_b32 s59, v1 ; GFX10_3-NEXT: ;;#ASMSTART ; GFX10_3-NEXT: ; use s59, scc ; GFX10_3-NEXT: ;;#ASMEND @@ -1485,19 +1483,16 @@ define void @scalar_mov_materializes_frame_index_unavailable_scc__gep_immoffset( ; GFX11-NEXT: s_add_i32 s1, s32, 0x8040 ; GFX11-NEXT: scratch_store_b32 off, v2, s1 ; 4-byte Folded Spill ; GFX11-NEXT: s_mov_b32 exec_lo, s0 -; GFX11-NEXT: s_add_i32 s0, s32, 0x4040 -; GFX11-NEXT: v_writelane_b32 v2, s59, 0 -; GFX11-NEXT: v_mov_b32_e32 v0, s0 ; GFX11-NEXT: s_add_i32 s0, s32, 64 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | 
instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_mov_b32_e32 v1, s0 +; GFX11-NEXT: v_writelane_b32 v2, s59, 0 +; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s32 ; GFX11-NEXT: s_and_b32 s0, 0, exec_lo -; GFX11-NEXT: v_add_nc_u32_e32 v0, 0x3ec, v0 ; GFX11-NEXT: ;;#ASMSTART -; GFX11-NEXT: ; use alloca0 v1 +; GFX11-NEXT: ; use alloca0 v0 ; GFX11-NEXT: ;;#ASMEND -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_readfirstlane_b32 s59, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x442c, v3 +; GFX11-NEXT: v_readfirstlane_b32 s59, v1 ; GFX11-NEXT: ;;#ASMSTART ; GFX11-NEXT: ; use s59, scc ; GFX11-NEXT: ;;#ASMEND @@ -1520,17 +1515,15 @@ define void @scalar_mov_materializes_frame_index_unavailable_scc__gep_immoffset( ; GFX12-NEXT: scratch_store_b32 off, v2, s32 offset:32768 ; 4-byte Folded Spill ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_mov_b32 exec_lo, s0 -; GFX12-NEXT: s_add_co_i32 s0, s32, 0x4000 +; GFX12-NEXT: v_dual_mov_b32 v0, s32 :: v_dual_mov_b32 v3, s32 ; GFX12-NEXT: v_writelane_b32 v2, s59, 0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s32 ; GFX12-NEXT: s_and_b32 s0, 0, exec_lo ; GFX12-NEXT: ;;#ASMSTART -; GFX12-NEXT: ; use alloca0 v1 +; GFX12-NEXT: ; use alloca0 v0 ; GFX12-NEXT: ;;#ASMEND -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_add_nc_u32_e32 v0, 0x3ec, v0 -; GFX12-NEXT: v_readfirstlane_b32 s59, v0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_add_nc_u32_e32 v1, 0x43ec, v3 +; GFX12-NEXT: v_readfirstlane_b32 s59, v1 ; GFX12-NEXT: ;;#ASMSTART ; GFX12-NEXT: ; use s59, scc ; GFX12-NEXT: ;;#ASMEND @@ -1550,10 +1543,8 @@ define void @scalar_mov_materializes_frame_index_unavailable_scc__gep_immoffset( ; GFX8-NEXT: s_add_i32 s6, s32, 0x201000 ; GFX8-NEXT: buffer_store_dword v2, off, s[0:3], s6 ; 4-byte Folded Spill ; GFX8-NEXT: s_mov_b64 exec, s[4:5] -; GFX8-NEXT: v_lshrrev_b32_e64 v0, 6, s32 -; GFX8-NEXT: s_movk_i32 vcc_lo, 0x4040 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, vcc_lo, v0 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0x3ec, v0 +; GFX8-NEXT: v_lshrrev_b32_e64 v1, 6, s32 +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0x442c, v1 ; GFX8-NEXT: v_writelane_b32 v2, s59, 0 ; GFX8-NEXT: v_lshrrev_b32_e64 v1, 6, s32 ; GFX8-NEXT: v_readfirstlane_b32 s59, v0 diff --git a/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll b/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll index e9cd94620a6b9a..308411fa225dae 100644 --- a/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll +++ b/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll @@ -1582,12 +1582,10 @@ define void @scalar_mov_materializes_frame_index_no_live_scc_no_live_sgprs_gep_i ; GFX7-NEXT: buffer_store_dword v15, v16, s[0:3], s32 offen offset:60 ; 4-byte Folded Spill ; GFX7-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; GFX7-NEXT: ; implicit-def: $vgpr22 : SGPR spill to VGPR lane -; GFX7-NEXT: v_lshr_b32_e64 v0, s32, 6 +; GFX7-NEXT: v_lshr_b32_e64 v1, s32, 6 ; GFX7-NEXT: v_writelane_b32 v22, vcc_lo, 0 ; GFX7-NEXT: v_writelane_b32 v22, vcc_hi, 1 -; GFX7-NEXT: s_movk_i32 vcc_lo, 0x4040 -; GFX7-NEXT: v_add_i32_e32 v0, vcc, vcc_lo, v0 -; GFX7-NEXT: v_add_i32_e32 v0, vcc, 0x200, v0 +; GFX7-NEXT: v_add_i32_e32 v0, vcc, 0x4240, v1 ; GFX7-NEXT: v_writelane_b32 v23, s59, 27 ; GFX7-NEXT: v_readfirstlane_b32 s59, v0 ; GFX7-NEXT: s_and_b64 vcc, 0, exec @@ -1723,12 +1721,10 
@@ define void @scalar_mov_materializes_frame_index_no_live_scc_no_live_sgprs_gep_i ; GFX8-NEXT: buffer_store_dword v15, v16, s[0:3], s32 offen offset:60 ; 4-byte Folded Spill ; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; GFX8-NEXT: ; implicit-def: $vgpr22 : SGPR spill to VGPR lane -; GFX8-NEXT: v_lshrrev_b32_e64 v0, 6, s32 +; GFX8-NEXT: v_lshrrev_b32_e64 v1, 6, s32 ; GFX8-NEXT: v_writelane_b32 v22, vcc_lo, 0 ; GFX8-NEXT: v_writelane_b32 v22, vcc_hi, 1 -; GFX8-NEXT: s_movk_i32 vcc_lo, 0x4040 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, vcc_lo, v0 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0x200, v0 +; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0x4240, v1 ; GFX8-NEXT: v_writelane_b32 v23, s59, 27 ; GFX8-NEXT: v_readfirstlane_b32 s59, v0 ; GFX8-NEXT: s_and_b64 vcc, 0, exec @@ -1983,17 +1979,16 @@ define void @scalar_mov_materializes_frame_index_no_live_scc_no_live_sgprs_gep_i ; GFX10_1-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10_1-NEXT: s_mov_b32 exec_lo, s4 ; GFX10_1-NEXT: v_writelane_b32 v23, s30, 0 -; GFX10_1-NEXT: v_lshrrev_b32_e64 v0, 5, s32 ; GFX10_1-NEXT: v_lshrrev_b32_e64 v1, 5, s32 +; GFX10_1-NEXT: v_lshrrev_b32_e64 v0, 5, s32 ; GFX10_1-NEXT: s_and_b32 s4, 0, exec_lo ; GFX10_1-NEXT: v_writelane_b32 v23, s31, 1 -; GFX10_1-NEXT: v_add_nc_u32_e32 v0, 0x4040, v0 -; GFX10_1-NEXT: v_add_nc_u32_e32 v1, 64, v1 +; GFX10_1-NEXT: v_add_nc_u32_e32 v22, 0x4240, v1 +; GFX10_1-NEXT: v_add_nc_u32_e32 v0, 64, v0 ; GFX10_1-NEXT: ;;#ASMSTART -; GFX10_1-NEXT: ; use alloca0 v1 +; GFX10_1-NEXT: ; use alloca0 v0 ; GFX10_1-NEXT: ;;#ASMEND ; GFX10_1-NEXT: v_writelane_b32 v23, s33, 2 -; GFX10_1-NEXT: v_add_nc_u32_e32 v22, 0x200, v0 ; GFX10_1-NEXT: v_writelane_b32 v23, s34, 3 ; GFX10_1-NEXT: v_writelane_b32 v23, s35, 4 ; GFX10_1-NEXT: v_writelane_b32 v23, s36, 5 @@ -2070,17 +2065,16 @@ define void @scalar_mov_materializes_frame_index_no_live_scc_no_live_sgprs_gep_i ; GFX10_3-NEXT: buffer_store_dword v23, off, s[0:3], s5 ; 4-byte Folded Spill ; GFX10_3-NEXT: s_mov_b32 exec_lo, s4 ; GFX10_3-NEXT: v_writelane_b32 v23, s30, 0 -; GFX10_3-NEXT: v_lshrrev_b32_e64 v0, 5, s32 ; GFX10_3-NEXT: v_lshrrev_b32_e64 v1, 5, s32 +; GFX10_3-NEXT: v_lshrrev_b32_e64 v0, 5, s32 ; GFX10_3-NEXT: s_and_b32 s4, 0, exec_lo ; GFX10_3-NEXT: v_writelane_b32 v23, s31, 1 -; GFX10_3-NEXT: v_add_nc_u32_e32 v0, 0x4040, v0 -; GFX10_3-NEXT: v_add_nc_u32_e32 v1, 64, v1 +; GFX10_3-NEXT: v_add_nc_u32_e32 v22, 0x4240, v1 +; GFX10_3-NEXT: v_add_nc_u32_e32 v0, 64, v0 ; GFX10_3-NEXT: ;;#ASMSTART -; GFX10_3-NEXT: ; use alloca0 v1 +; GFX10_3-NEXT: ; use alloca0 v0 ; GFX10_3-NEXT: ;;#ASMEND ; GFX10_3-NEXT: v_writelane_b32 v23, s33, 2 -; GFX10_3-NEXT: v_add_nc_u32_e32 v22, 0x200, v0 ; GFX10_3-NEXT: v_writelane_b32 v23, s34, 3 ; GFX10_3-NEXT: v_writelane_b32 v23, s35, 4 ; GFX10_3-NEXT: v_writelane_b32 v23, s36, 5 @@ -2156,17 +2150,15 @@ define void @scalar_mov_materializes_frame_index_no_live_scc_no_live_sgprs_gep_i ; GFX11-NEXT: scratch_store_b32 off, v23, s1 ; 4-byte Folded Spill ; GFX11-NEXT: s_mov_b32 exec_lo, s0 ; GFX11-NEXT: v_writelane_b32 v23, s30, 0 -; GFX11-NEXT: s_add_i32 s0, s32, 0x4040 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v0, s0 ; GFX11-NEXT: s_add_i32 s0, s32, 64 -; GFX11-NEXT: v_writelane_b32 v23, s31, 1 -; GFX11-NEXT: v_mov_b32_e32 v1, s0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, s32 :: v_dual_mov_b32 v0, s0 ; GFX11-NEXT: s_and_b32 s0, 0, exec_lo -; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x200, v0 +; GFX11-NEXT: v_writelane_b32 v23, s31, 1 ; 
GFX11-NEXT: ;;#ASMSTART -; GFX11-NEXT: ; use alloca0 v1 +; GFX11-NEXT: ; use alloca0 v0 ; GFX11-NEXT: ;;#ASMEND +; GFX11-NEXT: v_add_nc_u32_e32 v22, 0x4240, v1 ; GFX11-NEXT: v_writelane_b32 v23, s33, 2 ; GFX11-NEXT: v_writelane_b32 v23, s34, 3 ; GFX11-NEXT: v_writelane_b32 v23, s35, 4 @@ -2248,16 +2240,14 @@ define void @scalar_mov_materializes_frame_index_no_live_scc_no_live_sgprs_gep_i ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_mov_b32 exec_lo, s0 ; GFX12-NEXT: v_writelane_b32 v23, s30, 0 -; GFX12-NEXT: s_add_co_i32 s0, s32, 0x4000 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_dual_mov_b32 v1, s32 :: v_dual_mov_b32 v0, s0 +; GFX12-NEXT: v_dual_mov_b32 v0, s32 :: v_dual_mov_b32 v1, s32 ; GFX12-NEXT: s_and_b32 s0, 0, exec_lo -; GFX12-NEXT: v_writelane_b32 v23, s31, 1 ; GFX12-NEXT: ;;#ASMSTART -; GFX12-NEXT: ; use alloca0 v1 +; GFX12-NEXT: ; use alloca0 v0 ; GFX12-NEXT: ;;#ASMEND +; GFX12-NEXT: v_writelane_b32 v23, s31, 1 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-NEXT: v_add_nc_u32_e32 v22, 0x200, v0 +; GFX12-NEXT: v_add_nc_u32_e32 v22, 0x4200, v1 ; GFX12-NEXT: v_writelane_b32 v23, s33, 2 ; GFX12-NEXT: v_writelane_b32 v23, s34, 3 ; GFX12-NEXT: v_writelane_b32 v23, s35, 4 diff --git a/llvm/test/CodeGen/ARM/fmuladd-soft-float.ll b/llvm/test/CodeGen/ARM/fmuladd-soft-float.ll new file mode 100644 index 00000000000000..88c31325b64b76 --- /dev/null +++ b/llvm/test/CodeGen/ARM/fmuladd-soft-float.ll @@ -0,0 +1,406 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=arm < %s | FileCheck %s -check-prefix=SOFT-FLOAT +; RUN: llc -mtriple=arm -mattr=+vfp4d16sp < %s | FileCheck %s -check-prefix=SOFT-FLOAT-VFP32 +; RUN: llc -mtriple=arm -mattr=+vfp4d16sp,+fp64 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-VFP64 + +define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT: @ %bb.0: +; SOFT-FLOAT-NEXT: push {r4, lr} +; SOFT-FLOAT-NEXT: mov r4, r2 +; SOFT-FLOAT-NEXT: bl __mulsf3 +; SOFT-FLOAT-NEXT: mov r1, r4 +; SOFT-FLOAT-NEXT: bl __addsf3 +; SOFT-FLOAT-NEXT: pop {r4, lr} +; SOFT-FLOAT-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP32-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-VFP32: @ %bb.0: +; SOFT-FLOAT-VFP32-NEXT: push {r4, lr} +; SOFT-FLOAT-VFP32-NEXT: mov r4, r2 +; SOFT-FLOAT-VFP32-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP32-NEXT: mov r1, r4 +; SOFT-FLOAT-VFP32-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP32-NEXT: pop {r4, lr} +; SOFT-FLOAT-VFP32-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP64-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-VFP64: @ %bb.0: +; SOFT-FLOAT-VFP64-NEXT: push {r4, lr} +; SOFT-FLOAT-VFP64-NEXT: mov r4, r2 +; SOFT-FLOAT-VFP64-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP64-NEXT: mov r1, r4 +; SOFT-FLOAT-VFP64-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP64-NEXT: pop {r4, lr} +; SOFT-FLOAT-VFP64-NEXT: mov pc, lr + %result = call float @llvm.fmuladd.f32(float %a, float %b, float %c) + ret float %result +} + +define double @fmuladd_intrinsic_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT: @ %bb.0: +; SOFT-FLOAT-NEXT: push {r11, lr} +; SOFT-FLOAT-NEXT: bl __muldf3 +; SOFT-FLOAT-NEXT: ldr r2, [sp, #8] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #12] +; SOFT-FLOAT-NEXT: bl __adddf3 +; SOFT-FLOAT-NEXT: pop {r11, lr} +; SOFT-FLOAT-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP32-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-VFP32: @ %bb.0: +; SOFT-FLOAT-VFP32-NEXT: push {r11, lr} +; SOFT-FLOAT-VFP32-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP32-NEXT: ldr 
r2, [sp, #8] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #12] +; SOFT-FLOAT-VFP32-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP32-NEXT: pop {r11, lr} +; SOFT-FLOAT-VFP32-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP64-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-VFP64: @ %bb.0: +; SOFT-FLOAT-VFP64-NEXT: push {r11, lr} +; SOFT-FLOAT-VFP64-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #8] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #12] +; SOFT-FLOAT-VFP64-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP64-NEXT: pop {r11, lr} +; SOFT-FLOAT-VFP64-NEXT: mov pc, lr + %result = call double @llvm.fmuladd.f64(double %a, double %b, double %c) + ret double %result +} + +define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT: @ %bb.0: +; SOFT-FLOAT-NEXT: push {r4, lr} +; SOFT-FLOAT-NEXT: mov r4, r2 +; SOFT-FLOAT-NEXT: bl __mulsf3 +; SOFT-FLOAT-NEXT: mov r1, r4 +; SOFT-FLOAT-NEXT: bl __addsf3 +; SOFT-FLOAT-NEXT: pop {r4, lr} +; SOFT-FLOAT-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP32-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-VFP32: @ %bb.0: +; SOFT-FLOAT-VFP32-NEXT: push {r4, lr} +; SOFT-FLOAT-VFP32-NEXT: mov r4, r2 +; SOFT-FLOAT-VFP32-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP32-NEXT: mov r1, r4 +; SOFT-FLOAT-VFP32-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP32-NEXT: pop {r4, lr} +; SOFT-FLOAT-VFP32-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP64-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-VFP64: @ %bb.0: +; SOFT-FLOAT-VFP64-NEXT: push {r4, lr} +; SOFT-FLOAT-VFP64-NEXT: mov r4, r2 +; SOFT-FLOAT-VFP64-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP64-NEXT: mov r1, r4 +; SOFT-FLOAT-VFP64-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP64-NEXT: pop {r4, lr} +; SOFT-FLOAT-VFP64-NEXT: mov pc, lr + %product = fmul contract float %a, %b + %result = fadd contract float %product, %c + ret float %result +} + +define double @fmuladd_contract_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT: @ %bb.0: +; SOFT-FLOAT-NEXT: push {r11, lr} +; SOFT-FLOAT-NEXT: bl __muldf3 +; SOFT-FLOAT-NEXT: ldr r2, [sp, #8] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #12] +; SOFT-FLOAT-NEXT: bl __adddf3 +; SOFT-FLOAT-NEXT: pop {r11, lr} +; SOFT-FLOAT-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP32-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-VFP32: @ %bb.0: +; SOFT-FLOAT-VFP32-NEXT: push {r11, lr} +; SOFT-FLOAT-VFP32-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #8] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #12] +; SOFT-FLOAT-VFP32-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP32-NEXT: pop {r11, lr} +; SOFT-FLOAT-VFP32-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP64-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-VFP64: @ %bb.0: +; SOFT-FLOAT-VFP64-NEXT: push {r11, lr} +; SOFT-FLOAT-VFP64-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #8] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #12] +; SOFT-FLOAT-VFP64-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP64-NEXT: pop {r11, lr} +; SOFT-FLOAT-VFP64-NEXT: mov pc, lr + %product = fmul contract double %a, %b + %result = fadd contract double %product, %c + ret double %result +} + +define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT: @ %bb.0: +; SOFT-FLOAT-NEXT: push {r4, r5, r6, r7, r11, lr} +; SOFT-FLOAT-NEXT: mov r7, r1 +; SOFT-FLOAT-NEXT: ldr r1, [sp, #24] +; SOFT-FLOAT-NEXT: mov r4, r3 +; SOFT-FLOAT-NEXT: mov r6, r2 +; SOFT-FLOAT-NEXT: bl __mulsf3 +; SOFT-FLOAT-NEXT: ldr r1, [sp, #40] +; SOFT-FLOAT-NEXT: bl __addsf3 +; SOFT-FLOAT-NEXT: ldr r1, [sp, #28] +; SOFT-FLOAT-NEXT: mov r5, r0 +; 
SOFT-FLOAT-NEXT: mov r0, r7 +; SOFT-FLOAT-NEXT: bl __mulsf3 +; SOFT-FLOAT-NEXT: ldr r1, [sp, #44] +; SOFT-FLOAT-NEXT: bl __addsf3 +; SOFT-FLOAT-NEXT: ldr r1, [sp, #32] +; SOFT-FLOAT-NEXT: mov r7, r0 +; SOFT-FLOAT-NEXT: mov r0, r6 +; SOFT-FLOAT-NEXT: bl __mulsf3 +; SOFT-FLOAT-NEXT: ldr r1, [sp, #48] +; SOFT-FLOAT-NEXT: bl __addsf3 +; SOFT-FLOAT-NEXT: ldr r1, [sp, #36] +; SOFT-FLOAT-NEXT: mov r6, r0 +; SOFT-FLOAT-NEXT: mov r0, r4 +; SOFT-FLOAT-NEXT: bl __mulsf3 +; SOFT-FLOAT-NEXT: ldr r1, [sp, #52] +; SOFT-FLOAT-NEXT: bl __addsf3 +; SOFT-FLOAT-NEXT: mov r3, r0 +; SOFT-FLOAT-NEXT: mov r0, r5 +; SOFT-FLOAT-NEXT: mov r1, r7 +; SOFT-FLOAT-NEXT: mov r2, r6 +; SOFT-FLOAT-NEXT: pop {r4, r5, r6, r7, r11, lr} +; SOFT-FLOAT-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP32-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-VFP32: @ %bb.0: +; SOFT-FLOAT-VFP32-NEXT: push {r4, r5, r6, r7, r11, lr} +; SOFT-FLOAT-VFP32-NEXT: mov r7, r1 +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #24] +; SOFT-FLOAT-VFP32-NEXT: mov r4, r3 +; SOFT-FLOAT-VFP32-NEXT: mov r6, r2 +; SOFT-FLOAT-VFP32-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #40] +; SOFT-FLOAT-VFP32-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #28] +; SOFT-FLOAT-VFP32-NEXT: mov r5, r0 +; SOFT-FLOAT-VFP32-NEXT: mov r0, r7 +; SOFT-FLOAT-VFP32-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #44] +; SOFT-FLOAT-VFP32-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #32] +; SOFT-FLOAT-VFP32-NEXT: mov r7, r0 +; SOFT-FLOAT-VFP32-NEXT: mov r0, r6 +; SOFT-FLOAT-VFP32-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #48] +; SOFT-FLOAT-VFP32-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #36] +; SOFT-FLOAT-VFP32-NEXT: mov r6, r0 +; SOFT-FLOAT-VFP32-NEXT: mov r0, r4 +; SOFT-FLOAT-VFP32-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #52] +; SOFT-FLOAT-VFP32-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP32-NEXT: mov r3, r0 +; SOFT-FLOAT-VFP32-NEXT: mov r0, r5 +; SOFT-FLOAT-VFP32-NEXT: mov r1, r7 +; SOFT-FLOAT-VFP32-NEXT: mov r2, r6 +; SOFT-FLOAT-VFP32-NEXT: pop {r4, r5, r6, r7, r11, lr} +; SOFT-FLOAT-VFP32-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP64-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-VFP64: @ %bb.0: +; SOFT-FLOAT-VFP64-NEXT: push {r4, r5, r6, r7, r11, lr} +; SOFT-FLOAT-VFP64-NEXT: mov r7, r1 +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #24] +; SOFT-FLOAT-VFP64-NEXT: mov r4, r3 +; SOFT-FLOAT-VFP64-NEXT: mov r6, r2 +; SOFT-FLOAT-VFP64-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #40] +; SOFT-FLOAT-VFP64-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #28] +; SOFT-FLOAT-VFP64-NEXT: mov r5, r0 +; SOFT-FLOAT-VFP64-NEXT: mov r0, r7 +; SOFT-FLOAT-VFP64-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #44] +; SOFT-FLOAT-VFP64-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #32] +; SOFT-FLOAT-VFP64-NEXT: mov r7, r0 +; SOFT-FLOAT-VFP64-NEXT: mov r0, r6 +; SOFT-FLOAT-VFP64-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #48] +; SOFT-FLOAT-VFP64-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #36] +; SOFT-FLOAT-VFP64-NEXT: mov r6, r0 +; SOFT-FLOAT-VFP64-NEXT: mov r0, r4 +; SOFT-FLOAT-VFP64-NEXT: bl __mulsf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #52] +; SOFT-FLOAT-VFP64-NEXT: bl __addsf3 +; SOFT-FLOAT-VFP64-NEXT: mov r3, r0 +; SOFT-FLOAT-VFP64-NEXT: mov r0, r5 +; SOFT-FLOAT-VFP64-NEXT: mov r1, r7 +; SOFT-FLOAT-VFP64-NEXT: mov r2, r6 +; SOFT-FLOAT-VFP64-NEXT: pop {r4, r5, r6, r7, r11, lr} +; SOFT-FLOAT-VFP64-NEXT: mov pc, lr + %product = fmul contract <4 x float> %a, %b + %result = fadd contract 
<4 x float> %product, %c + ret <4 x float> %result +} + +define <4 x double> @fmuladd_contract_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT: @ %bb.0: +; SOFT-FLOAT-NEXT: push {r4, r5, r6, lr} +; SOFT-FLOAT-NEXT: mov r5, r3 +; SOFT-FLOAT-NEXT: mov r6, r2 +; SOFT-FLOAT-NEXT: mov r4, r0 +; SOFT-FLOAT-NEXT: ldr r0, [sp, #32] +; SOFT-FLOAT-NEXT: ldr r1, [sp, #36] +; SOFT-FLOAT-NEXT: ldr r2, [sp, #64] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #68] +; SOFT-FLOAT-NEXT: bl __muldf3 +; SOFT-FLOAT-NEXT: ldr r2, [sp, #96] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #100] +; SOFT-FLOAT-NEXT: bl __adddf3 +; SOFT-FLOAT-NEXT: str r0, [r4, #24] +; SOFT-FLOAT-NEXT: str r1, [r4, #28] +; SOFT-FLOAT-NEXT: ldr r0, [sp, #24] +; SOFT-FLOAT-NEXT: ldr r1, [sp, #28] +; SOFT-FLOAT-NEXT: ldr r2, [sp, #56] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #60] +; SOFT-FLOAT-NEXT: bl __muldf3 +; SOFT-FLOAT-NEXT: ldr r2, [sp, #88] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #92] +; SOFT-FLOAT-NEXT: bl __adddf3 +; SOFT-FLOAT-NEXT: str r0, [r4, #16] +; SOFT-FLOAT-NEXT: str r1, [r4, #20] +; SOFT-FLOAT-NEXT: ldr r0, [sp, #16] +; SOFT-FLOAT-NEXT: ldr r1, [sp, #20] +; SOFT-FLOAT-NEXT: ldr r2, [sp, #48] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #52] +; SOFT-FLOAT-NEXT: bl __muldf3 +; SOFT-FLOAT-NEXT: ldr r2, [sp, #80] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #84] +; SOFT-FLOAT-NEXT: bl __adddf3 +; SOFT-FLOAT-NEXT: ldr r2, [sp, #40] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #44] +; SOFT-FLOAT-NEXT: str r0, [r4, #8] +; SOFT-FLOAT-NEXT: mov r0, r6 +; SOFT-FLOAT-NEXT: str r1, [r4, #12] +; SOFT-FLOAT-NEXT: mov r1, r5 +; SOFT-FLOAT-NEXT: bl __muldf3 +; SOFT-FLOAT-NEXT: ldr r2, [sp, #72] +; SOFT-FLOAT-NEXT: ldr r3, [sp, #76] +; SOFT-FLOAT-NEXT: bl __adddf3 +; SOFT-FLOAT-NEXT: stm r4, {r0, r1} +; SOFT-FLOAT-NEXT: pop {r4, r5, r6, lr} +; SOFT-FLOAT-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP32-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-VFP32: @ %bb.0: +; SOFT-FLOAT-VFP32-NEXT: push {r4, r5, r6, lr} +; SOFT-FLOAT-VFP32-NEXT: mov r5, r3 +; SOFT-FLOAT-VFP32-NEXT: mov r6, r2 +; SOFT-FLOAT-VFP32-NEXT: mov r4, r0 +; SOFT-FLOAT-VFP32-NEXT: ldr r0, [sp, #32] +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #36] +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #64] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #68] +; SOFT-FLOAT-VFP32-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #96] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #100] +; SOFT-FLOAT-VFP32-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP32-NEXT: str r0, [r4, #24] +; SOFT-FLOAT-VFP32-NEXT: str r1, [r4, #28] +; SOFT-FLOAT-VFP32-NEXT: ldr r0, [sp, #24] +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #28] +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #56] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #60] +; SOFT-FLOAT-VFP32-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #88] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #92] +; SOFT-FLOAT-VFP32-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP32-NEXT: str r0, [r4, #16] +; SOFT-FLOAT-VFP32-NEXT: str r1, [r4, #20] +; SOFT-FLOAT-VFP32-NEXT: ldr r0, [sp, #16] +; SOFT-FLOAT-VFP32-NEXT: ldr r1, [sp, #20] +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #48] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #52] +; SOFT-FLOAT-VFP32-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #80] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #84] +; SOFT-FLOAT-VFP32-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #40] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #44] +; SOFT-FLOAT-VFP32-NEXT: str r0, [r4, #8] +; SOFT-FLOAT-VFP32-NEXT: mov r0, r6 +; SOFT-FLOAT-VFP32-NEXT: str r1, [r4, #12] +; SOFT-FLOAT-VFP32-NEXT: mov r1, r5 +; 
SOFT-FLOAT-VFP32-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP32-NEXT: ldr r2, [sp, #72] +; SOFT-FLOAT-VFP32-NEXT: ldr r3, [sp, #76] +; SOFT-FLOAT-VFP32-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP32-NEXT: stm r4, {r0, r1} +; SOFT-FLOAT-VFP32-NEXT: pop {r4, r5, r6, lr} +; SOFT-FLOAT-VFP32-NEXT: mov pc, lr +; +; SOFT-FLOAT-VFP64-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-VFP64: @ %bb.0: +; SOFT-FLOAT-VFP64-NEXT: push {r4, r5, r6, lr} +; SOFT-FLOAT-VFP64-NEXT: mov r5, r3 +; SOFT-FLOAT-VFP64-NEXT: mov r6, r2 +; SOFT-FLOAT-VFP64-NEXT: mov r4, r0 +; SOFT-FLOAT-VFP64-NEXT: ldr r0, [sp, #32] +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #36] +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #64] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #68] +; SOFT-FLOAT-VFP64-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #96] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #100] +; SOFT-FLOAT-VFP64-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP64-NEXT: str r0, [r4, #24] +; SOFT-FLOAT-VFP64-NEXT: str r1, [r4, #28] +; SOFT-FLOAT-VFP64-NEXT: ldr r0, [sp, #24] +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #28] +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #56] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #60] +; SOFT-FLOAT-VFP64-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #88] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #92] +; SOFT-FLOAT-VFP64-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP64-NEXT: str r0, [r4, #16] +; SOFT-FLOAT-VFP64-NEXT: str r1, [r4, #20] +; SOFT-FLOAT-VFP64-NEXT: ldr r0, [sp, #16] +; SOFT-FLOAT-VFP64-NEXT: ldr r1, [sp, #20] +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #48] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #52] +; SOFT-FLOAT-VFP64-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #80] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #84] +; SOFT-FLOAT-VFP64-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #40] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #44] +; SOFT-FLOAT-VFP64-NEXT: str r0, [r4, #8] +; SOFT-FLOAT-VFP64-NEXT: mov r0, r6 +; SOFT-FLOAT-VFP64-NEXT: str r1, [r4, #12] +; SOFT-FLOAT-VFP64-NEXT: mov r1, r5 +; SOFT-FLOAT-VFP64-NEXT: bl __muldf3 +; SOFT-FLOAT-VFP64-NEXT: ldr r2, [sp, #72] +; SOFT-FLOAT-VFP64-NEXT: ldr r3, [sp, #76] +; SOFT-FLOAT-VFP64-NEXT: bl __adddf3 +; SOFT-FLOAT-VFP64-NEXT: stm r4, {r0, r1} +; SOFT-FLOAT-VFP64-NEXT: pop {r4, r5, r6, lr} +; SOFT-FLOAT-VFP64-NEXT: mov pc, lr + %product = fmul contract <4 x double> %a, %b + %result = fadd contract <4 x double> %product, %c + ret <4 x double> %result +} + +attributes #0 = { "use-soft-float"="true" } + +declare float @llvm.fmuladd.f32(float %a, float %b, float %c) +declare double @llvm.fmuladd.f64(double %a, double %b, double %c) diff --git a/llvm/test/CodeGen/ARM/pr112710.ll b/llvm/test/CodeGen/ARM/pr112710.ll new file mode 100644 index 00000000000000..006d564a6d3acd --- /dev/null +++ b/llvm/test/CodeGen/ARM/pr112710.ll @@ -0,0 +1,40 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=armv7-- | FileCheck %s + +; Reduced regression test for infinite-loop due to #112710 +define void @test(i32 %bf.load.i) { +; CHECK-LABEL: test: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: vldr d16, .LCPI0_0 +; CHECK-NEXT: vmov.i64 q9, #0xffff +; CHECK-NEXT: vdup.32 d17, r0 +; CHECK-NEXT: vneg.s32 d16, d16 +; CHECK-NEXT: vshl.u32 d16, d17, d16 +; CHECK-NEXT: vldr d17, .LCPI0_1 +; CHECK-NEXT: vand d16, d16, d17 +; CHECK-NEXT: vmovl.u32 q8, d16 +; CHECK-NEXT: vand q8, q8, q9 +; CHECK-NEXT: vst1.64 {d16, d17}, [r0] +; CHECK-NEXT: bl use +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: 
.LCPI0_0: +; CHECK-NEXT: .long 8 @ 0x8 +; CHECK-NEXT: .long 24 @ 0x18 +; CHECK-NEXT: .LCPI0_1: +; CHECK-NEXT: .long 4095 @ 0xfff +; CHECK-NEXT: .long 1 @ 0x1 +entry: + %0 = insertelement <2 x i32> poison, i32 %bf.load.i, i64 0 + %1 = shufflevector <2 x i32> %0, <2 x i32> poison, <2 x i32> zeroinitializer + %2 = lshr <2 x i32> %1, + %arrayinit.element1.i = getelementptr inbounds i8, ptr poison, i32 16 + %3 = trunc <2 x i32> %2 to <2 x i16> + %4 = and <2 x i16> %3, + %5 = zext nneg <2 x i16> %4 to <2 x i64> + store <2 x i64> %5, ptr %arrayinit.element1.i, align 8 + call void @use() + unreachable +} +declare void @use() diff --git a/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll b/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll new file mode 100644 index 00000000000000..bbfb7cf9ca907a --- /dev/null +++ b/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll @@ -0,0 +1,932 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=mips < %s | FileCheck %s -check-prefix=SOFT-FLOAT-32 +; RUN: llc -mtriple=mips -mcpu mips32r2 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-32R2 +; RUN: llc -mtriple=mips64 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-64 +; RUN: llc -mtriple=mips64 -mcpu mips64r2 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-64R2 + +define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -24 +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $16, 16($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32-NEXT: .cfi_offset 16, -8 +; SOFT-FLOAT-32-NEXT: jal __mulsf3 +; SOFT-FLOAT-32-NEXT: move $16, $6 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: jal __addsf3 +; SOFT-FLOAT-32-NEXT: move $5, $16 +; SOFT-FLOAT-32-NEXT: lw $16, 16($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: jr $ra +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 24 +; +; SOFT-FLOAT-32R2-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-32R2: # %bb.0: +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, -24 +; SOFT-FLOAT-32R2-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $16, 16($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 16, -8 +; SOFT-FLOAT-32R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-32R2-NEXT: move $16, $6 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: jal __addsf3 +; SOFT-FLOAT-32R2-NEXT: move $5, $16 +; SOFT-FLOAT-32R2-NEXT: lw $16, 16($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: jr $ra +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, 24 +; +; SOFT-FLOAT-64-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64-NEXT: .cfi_offset 16, -16 +; SOFT-FLOAT-64-NEXT: move $16, $6 +; SOFT-FLOAT-64-NEXT: sll $4, $4, 0 +; SOFT-FLOAT-64-NEXT: jal __mulsf3 +; SOFT-FLOAT-64-NEXT: sll $5, $5, 0 +; SOFT-FLOAT-64-NEXT: sll $4, $2, 0 +; SOFT-FLOAT-64-NEXT: jal __addsf3 +; SOFT-FLOAT-64-NEXT: sll $5, $16, 0 +; 
SOFT-FLOAT-64-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: jr $ra +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16 +; +; SOFT-FLOAT-64R2-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-64R2: # %bb.0: +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, -16 +; SOFT-FLOAT-64R2-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 16, -16 +; SOFT-FLOAT-64R2-NEXT: move $16, $6 +; SOFT-FLOAT-64R2-NEXT: sll $4, $4, 0 +; SOFT-FLOAT-64R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-64R2-NEXT: sll $5, $5, 0 +; SOFT-FLOAT-64R2-NEXT: sll $4, $2, 0 +; SOFT-FLOAT-64R2-NEXT: jal __addsf3 +; SOFT-FLOAT-64R2-NEXT: sll $5, $16, 0 +; SOFT-FLOAT-64R2-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: jr $ra +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, 16 + %result = call float @llvm.fmuladd.f32(float %a, float %b, float %c) + ret float %result +} + +define double @fmuladd_intrinsic_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -24 +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32-NEXT: jal __muldf3 +; SOFT-FLOAT-32-NEXT: nop +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: lw $6, 40($sp) +; SOFT-FLOAT-32-NEXT: lw $7, 44($sp) +; SOFT-FLOAT-32-NEXT: jal __adddf3 +; SOFT-FLOAT-32-NEXT: move $5, $3 +; SOFT-FLOAT-32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: jr $ra +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 24 +; +; SOFT-FLOAT-32R2-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-32R2: # %bb.0: +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, -24 +; SOFT-FLOAT-32R2-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32R2-NEXT: jal __muldf3 +; SOFT-FLOAT-32R2-NEXT: nop +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: lw $6, 40($sp) +; SOFT-FLOAT-32R2-NEXT: lw $7, 44($sp) +; SOFT-FLOAT-32R2-NEXT: jal __adddf3 +; SOFT-FLOAT-32R2-NEXT: move $5, $3 +; SOFT-FLOAT-32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: jr $ra +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, 24 +; +; SOFT-FLOAT-64-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64-NEXT: .cfi_offset 16, -16 +; SOFT-FLOAT-64-NEXT: jal __muldf3 +; SOFT-FLOAT-64-NEXT: move $16, $6 +; SOFT-FLOAT-64-NEXT: move $4, $2 +; SOFT-FLOAT-64-NEXT: jal __adddf3 +; SOFT-FLOAT-64-NEXT: move $5, $16 +; SOFT-FLOAT-64-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: jr $ra +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16 +; +; SOFT-FLOAT-64R2-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-64R2: # %bb.0: +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, -16 +; SOFT-FLOAT-64R2-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; 
SOFT-FLOAT-64R2-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 16, -16 +; SOFT-FLOAT-64R2-NEXT: jal __muldf3 +; SOFT-FLOAT-64R2-NEXT: move $16, $6 +; SOFT-FLOAT-64R2-NEXT: move $4, $2 +; SOFT-FLOAT-64R2-NEXT: jal __adddf3 +; SOFT-FLOAT-64R2-NEXT: move $5, $16 +; SOFT-FLOAT-64R2-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: jr $ra +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, 16 + %result = call double @llvm.fmuladd.f64(double %a, double %b, double %c) + ret double %result +} + +define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -24 +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $16, 16($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32-NEXT: .cfi_offset 16, -8 +; SOFT-FLOAT-32-NEXT: jal __mulsf3 +; SOFT-FLOAT-32-NEXT: move $16, $6 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: jal __addsf3 +; SOFT-FLOAT-32-NEXT: move $5, $16 +; SOFT-FLOAT-32-NEXT: lw $16, 16($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: jr $ra +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 24 +; +; SOFT-FLOAT-32R2-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-32R2: # %bb.0: +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, -24 +; SOFT-FLOAT-32R2-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $16, 16($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 16, -8 +; SOFT-FLOAT-32R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-32R2-NEXT: move $16, $6 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: jal __addsf3 +; SOFT-FLOAT-32R2-NEXT: move $5, $16 +; SOFT-FLOAT-32R2-NEXT: lw $16, 16($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: jr $ra +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, 24 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64-NEXT: .cfi_offset 16, -16 +; SOFT-FLOAT-64-NEXT: move $16, $6 +; SOFT-FLOAT-64-NEXT: sll $4, $4, 0 +; SOFT-FLOAT-64-NEXT: jal __mulsf3 +; SOFT-FLOAT-64-NEXT: sll $5, $5, 0 +; SOFT-FLOAT-64-NEXT: sll $4, $2, 0 +; SOFT-FLOAT-64-NEXT: jal __addsf3 +; SOFT-FLOAT-64-NEXT: sll $5, $16, 0 +; SOFT-FLOAT-64-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: jr $ra +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16 +; +; SOFT-FLOAT-64R2-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-64R2: # %bb.0: +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, -16 +; SOFT-FLOAT-64R2-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 16, -16 +; SOFT-FLOAT-64R2-NEXT: move $16, $6 +; SOFT-FLOAT-64R2-NEXT: sll $4, $4, 0 +; SOFT-FLOAT-64R2-NEXT: jal 
__mulsf3 +; SOFT-FLOAT-64R2-NEXT: sll $5, $5, 0 +; SOFT-FLOAT-64R2-NEXT: sll $4, $2, 0 +; SOFT-FLOAT-64R2-NEXT: jal __addsf3 +; SOFT-FLOAT-64R2-NEXT: sll $5, $16, 0 +; SOFT-FLOAT-64R2-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: jr $ra +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, 16 + %product = fmul contract float %a, %b + %result = fadd contract float %product, %c + ret float %result +} + +define double @fmuladd_contract_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -24 +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32-NEXT: jal __muldf3 +; SOFT-FLOAT-32-NEXT: nop +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: lw $6, 40($sp) +; SOFT-FLOAT-32-NEXT: lw $7, 44($sp) +; SOFT-FLOAT-32-NEXT: jal __adddf3 +; SOFT-FLOAT-32-NEXT: move $5, $3 +; SOFT-FLOAT-32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: jr $ra +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 24 +; +; SOFT-FLOAT-32R2-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-32R2: # %bb.0: +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, -24 +; SOFT-FLOAT-32R2-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32R2-NEXT: jal __muldf3 +; SOFT-FLOAT-32R2-NEXT: nop +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: lw $6, 40($sp) +; SOFT-FLOAT-32R2-NEXT: lw $7, 44($sp) +; SOFT-FLOAT-32R2-NEXT: jal __adddf3 +; SOFT-FLOAT-32R2-NEXT: move $5, $3 +; SOFT-FLOAT-32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: jr $ra +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, 24 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -16 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64-NEXT: .cfi_offset 16, -16 +; SOFT-FLOAT-64-NEXT: jal __muldf3 +; SOFT-FLOAT-64-NEXT: move $16, $6 +; SOFT-FLOAT-64-NEXT: move $4, $2 +; SOFT-FLOAT-64-NEXT: jal __adddf3 +; SOFT-FLOAT-64-NEXT: move $5, $16 +; SOFT-FLOAT-64-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: jr $ra +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 16 +; +; SOFT-FLOAT-64R2-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-64R2: # %bb.0: +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, -16 +; SOFT-FLOAT-64R2-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 16, -16 +; SOFT-FLOAT-64R2-NEXT: jal __muldf3 +; SOFT-FLOAT-64R2-NEXT: move $16, $6 +; SOFT-FLOAT-64R2-NEXT: move $4, $2 +; SOFT-FLOAT-64R2-NEXT: jal __adddf3 +; SOFT-FLOAT-64R2-NEXT: move $5, $16 +; SOFT-FLOAT-64R2-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: jr $ra +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, 16 + %product = fmul contract double %a, %b + %result = fadd contract double %product, %c + ret double %result +} + +define <4 x float> 
@fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -48 +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-32-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $21, 40($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $20, 36($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $19, 32($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $18, 28($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $17, 24($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $16, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32-NEXT: .cfi_offset 21, -8 +; SOFT-FLOAT-32-NEXT: .cfi_offset 20, -12 +; SOFT-FLOAT-32-NEXT: .cfi_offset 19, -16 +; SOFT-FLOAT-32-NEXT: .cfi_offset 18, -20 +; SOFT-FLOAT-32-NEXT: .cfi_offset 17, -24 +; SOFT-FLOAT-32-NEXT: .cfi_offset 16, -28 +; SOFT-FLOAT-32-NEXT: move $17, $7 +; SOFT-FLOAT-32-NEXT: move $16, $4 +; SOFT-FLOAT-32-NEXT: lw $4, 64($sp) +; SOFT-FLOAT-32-NEXT: lw $5, 80($sp) +; SOFT-FLOAT-32-NEXT: jal __mulsf3 +; SOFT-FLOAT-32-NEXT: move $18, $6 +; SOFT-FLOAT-32-NEXT: lw $5, 96($sp) +; SOFT-FLOAT-32-NEXT: jal __addsf3 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: lw $4, 68($sp) +; SOFT-FLOAT-32-NEXT: lw $5, 84($sp) +; SOFT-FLOAT-32-NEXT: jal __mulsf3 +; SOFT-FLOAT-32-NEXT: move $19, $2 +; SOFT-FLOAT-32-NEXT: lw $5, 100($sp) +; SOFT-FLOAT-32-NEXT: jal __addsf3 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: move $20, $2 +; SOFT-FLOAT-32-NEXT: lw $5, 76($sp) +; SOFT-FLOAT-32-NEXT: jal __mulsf3 +; SOFT-FLOAT-32-NEXT: move $4, $17 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: lw $17, 88($sp) +; SOFT-FLOAT-32-NEXT: lw $21, 72($sp) +; SOFT-FLOAT-32-NEXT: lw $5, 92($sp) +; SOFT-FLOAT-32-NEXT: sw $20, 12($16) +; SOFT-FLOAT-32-NEXT: jal __addsf3 +; SOFT-FLOAT-32-NEXT: sw $19, 8($16) +; SOFT-FLOAT-32-NEXT: sw $2, 4($16) +; SOFT-FLOAT-32-NEXT: move $4, $18 +; SOFT-FLOAT-32-NEXT: jal __mulsf3 +; SOFT-FLOAT-32-NEXT: move $5, $21 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: jal __addsf3 +; SOFT-FLOAT-32-NEXT: move $5, $17 +; SOFT-FLOAT-32-NEXT: sw $2, 0($16) +; SOFT-FLOAT-32-NEXT: lw $16, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $17, 24($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $18, 28($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $19, 32($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $20, 36($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $21, 40($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: jr $ra +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 48 +; +; SOFT-FLOAT-32R2-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-32R2: # %bb.0: +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, -48 +; SOFT-FLOAT-32R2-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-32R2-NEXT: sw $ra, 44($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $21, 40($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $20, 36($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $19, 32($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $18, 28($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $17, 24($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $16, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 21, -8 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 20, -12 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 19, -16 
+; SOFT-FLOAT-32R2-NEXT: .cfi_offset 18, -20 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 17, -24 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 16, -28 +; SOFT-FLOAT-32R2-NEXT: move $17, $7 +; SOFT-FLOAT-32R2-NEXT: move $16, $4 +; SOFT-FLOAT-32R2-NEXT: lw $4, 64($sp) +; SOFT-FLOAT-32R2-NEXT: lw $5, 80($sp) +; SOFT-FLOAT-32R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-32R2-NEXT: move $18, $6 +; SOFT-FLOAT-32R2-NEXT: lw $5, 96($sp) +; SOFT-FLOAT-32R2-NEXT: jal __addsf3 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: lw $4, 68($sp) +; SOFT-FLOAT-32R2-NEXT: lw $5, 84($sp) +; SOFT-FLOAT-32R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-32R2-NEXT: move $19, $2 +; SOFT-FLOAT-32R2-NEXT: lw $5, 100($sp) +; SOFT-FLOAT-32R2-NEXT: jal __addsf3 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: move $20, $2 +; SOFT-FLOAT-32R2-NEXT: lw $5, 76($sp) +; SOFT-FLOAT-32R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-32R2-NEXT: move $4, $17 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: lw $17, 88($sp) +; SOFT-FLOAT-32R2-NEXT: lw $21, 72($sp) +; SOFT-FLOAT-32R2-NEXT: lw $5, 92($sp) +; SOFT-FLOAT-32R2-NEXT: sw $20, 12($16) +; SOFT-FLOAT-32R2-NEXT: jal __addsf3 +; SOFT-FLOAT-32R2-NEXT: sw $19, 8($16) +; SOFT-FLOAT-32R2-NEXT: sw $2, 4($16) +; SOFT-FLOAT-32R2-NEXT: move $4, $18 +; SOFT-FLOAT-32R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-32R2-NEXT: move $5, $21 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: jal __addsf3 +; SOFT-FLOAT-32R2-NEXT: move $5, $17 +; SOFT-FLOAT-32R2-NEXT: sw $2, 0($16) +; SOFT-FLOAT-32R2-NEXT: lw $16, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $17, 24($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $18, 28($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $19, 32($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $20, 36($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $21, 40($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $ra, 44($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: jr $ra +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, 48 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -64 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64-NEXT: sd $ra, 56($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $22, 48($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $21, 40($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $20, 32($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $19, 24($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $18, 16($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64-NEXT: .cfi_offset 22, -16 +; SOFT-FLOAT-64-NEXT: .cfi_offset 21, -24 +; SOFT-FLOAT-64-NEXT: .cfi_offset 20, -32 +; SOFT-FLOAT-64-NEXT: .cfi_offset 19, -40 +; SOFT-FLOAT-64-NEXT: .cfi_offset 18, -48 +; SOFT-FLOAT-64-NEXT: .cfi_offset 17, -56 +; SOFT-FLOAT-64-NEXT: .cfi_offset 16, -64 +; SOFT-FLOAT-64-NEXT: move $16, $9 +; SOFT-FLOAT-64-NEXT: move $17, $8 +; SOFT-FLOAT-64-NEXT: move $18, $7 +; SOFT-FLOAT-64-NEXT: move $19, $6 +; SOFT-FLOAT-64-NEXT: move $20, $5 +; SOFT-FLOAT-64-NEXT: move $21, $4 +; SOFT-FLOAT-64-NEXT: sll $4, $4, 0 +; SOFT-FLOAT-64-NEXT: jal __mulsf3 +; SOFT-FLOAT-64-NEXT: sll $5, $6, 0 +; SOFT-FLOAT-64-NEXT: move $22, $2 +; SOFT-FLOAT-64-NEXT: dsra $4, $21, 32 +; SOFT-FLOAT-64-NEXT: jal __mulsf3 +; SOFT-FLOAT-64-NEXT: dsra $5, $19, 32 +; SOFT-FLOAT-64-NEXT: sll $4, $2, 0 +; SOFT-FLOAT-64-NEXT: jal 
__addsf3 +; SOFT-FLOAT-64-NEXT: dsra $5, $17, 32 +; SOFT-FLOAT-64-NEXT: # kill: def $v0 killed $v0 def $v0_64 +; SOFT-FLOAT-64-NEXT: sll $4, $22, 0 +; SOFT-FLOAT-64-NEXT: sll $5, $17, 0 +; SOFT-FLOAT-64-NEXT: jal __addsf3 +; SOFT-FLOAT-64-NEXT: dsll $17, $2, 32 +; SOFT-FLOAT-64-NEXT: dsll $1, $2, 32 +; SOFT-FLOAT-64-NEXT: dsrl $1, $1, 32 +; SOFT-FLOAT-64-NEXT: sll $4, $20, 0 +; SOFT-FLOAT-64-NEXT: sll $5, $18, 0 +; SOFT-FLOAT-64-NEXT: jal __mulsf3 +; SOFT-FLOAT-64-NEXT: or $17, $1, $17 +; SOFT-FLOAT-64-NEXT: move $19, $2 +; SOFT-FLOAT-64-NEXT: dsra $4, $20, 32 +; SOFT-FLOAT-64-NEXT: jal __mulsf3 +; SOFT-FLOAT-64-NEXT: dsra $5, $18, 32 +; SOFT-FLOAT-64-NEXT: sll $4, $2, 0 +; SOFT-FLOAT-64-NEXT: jal __addsf3 +; SOFT-FLOAT-64-NEXT: dsra $5, $16, 32 +; SOFT-FLOAT-64-NEXT: # kill: def $v0 killed $v0 def $v0_64 +; SOFT-FLOAT-64-NEXT: dsll $18, $2, 32 +; SOFT-FLOAT-64-NEXT: sll $4, $19, 0 +; SOFT-FLOAT-64-NEXT: jal __addsf3 +; SOFT-FLOAT-64-NEXT: sll $5, $16, 0 +; SOFT-FLOAT-64-NEXT: dsll $1, $2, 32 +; SOFT-FLOAT-64-NEXT: dsrl $1, $1, 32 +; SOFT-FLOAT-64-NEXT: or $3, $1, $18 +; SOFT-FLOAT-64-NEXT: move $2, $17 +; SOFT-FLOAT-64-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $18, 16($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $19, 24($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $20, 32($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $21, 40($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $22, 48($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $ra, 56($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: jr $ra +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 64 +; +; SOFT-FLOAT-64R2-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-64R2: # %bb.0: +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, -64 +; SOFT-FLOAT-64R2-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64R2-NEXT: sd $ra, 56($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $22, 48($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $21, 40($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $20, 32($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $19, 24($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $18, 16($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 22, -16 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 21, -24 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 20, -32 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 19, -40 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 18, -48 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 17, -56 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 16, -64 +; SOFT-FLOAT-64R2-NEXT: move $16, $9 +; SOFT-FLOAT-64R2-NEXT: move $17, $8 +; SOFT-FLOAT-64R2-NEXT: move $18, $7 +; SOFT-FLOAT-64R2-NEXT: move $19, $6 +; SOFT-FLOAT-64R2-NEXT: move $20, $5 +; SOFT-FLOAT-64R2-NEXT: move $21, $4 +; SOFT-FLOAT-64R2-NEXT: dsra $4, $4, 32 +; SOFT-FLOAT-64R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-64R2-NEXT: dsra $5, $6, 32 +; SOFT-FLOAT-64R2-NEXT: move $22, $2 +; SOFT-FLOAT-64R2-NEXT: sll $4, $21, 0 +; SOFT-FLOAT-64R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-64R2-NEXT: sll $5, $19, 0 +; SOFT-FLOAT-64R2-NEXT: sll $4, $2, 0 +; SOFT-FLOAT-64R2-NEXT: jal __addsf3 +; SOFT-FLOAT-64R2-NEXT: sll $5, $17, 0 +; SOFT-FLOAT-64R2-NEXT: sll $4, $22, 0 +; SOFT-FLOAT-64R2-NEXT: dsra $5, $17, 32 +; SOFT-FLOAT-64R2-NEXT: jal __addsf3 +; SOFT-FLOAT-64R2-NEXT: dext $17, $2, 0, 32 +; SOFT-FLOAT-64R2-NEXT: # kill: def $v0 
killed $v0 def $v0_64 +; SOFT-FLOAT-64R2-NEXT: dsll $1, $2, 32 +; SOFT-FLOAT-64R2-NEXT: dsra $4, $20, 32 +; SOFT-FLOAT-64R2-NEXT: dsra $5, $18, 32 +; SOFT-FLOAT-64R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-64R2-NEXT: or $17, $17, $1 +; SOFT-FLOAT-64R2-NEXT: move $19, $2 +; SOFT-FLOAT-64R2-NEXT: sll $4, $20, 0 +; SOFT-FLOAT-64R2-NEXT: jal __mulsf3 +; SOFT-FLOAT-64R2-NEXT: sll $5, $18, 0 +; SOFT-FLOAT-64R2-NEXT: sll $4, $2, 0 +; SOFT-FLOAT-64R2-NEXT: jal __addsf3 +; SOFT-FLOAT-64R2-NEXT: sll $5, $16, 0 +; SOFT-FLOAT-64R2-NEXT: dext $18, $2, 0, 32 +; SOFT-FLOAT-64R2-NEXT: sll $4, $19, 0 +; SOFT-FLOAT-64R2-NEXT: jal __addsf3 +; SOFT-FLOAT-64R2-NEXT: dsra $5, $16, 32 +; SOFT-FLOAT-64R2-NEXT: # kill: def $v0 killed $v0 def $v0_64 +; SOFT-FLOAT-64R2-NEXT: dsll $1, $2, 32 +; SOFT-FLOAT-64R2-NEXT: or $3, $18, $1 +; SOFT-FLOAT-64R2-NEXT: move $2, $17 +; SOFT-FLOAT-64R2-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $18, 16($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $19, 24($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $20, 32($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $21, 40($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $22, 48($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $ra, 56($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: jr $ra +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, 64 + %product = fmul contract <4 x float> %a, %b + %result = fadd contract <4 x float> %product, %c + ret <4 x float> %result +} + +define <4 x double> @fmuladd_contract_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, -64 +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-32-NEXT: sw $ra, 60($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $fp, 56($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $23, 52($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $22, 48($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $21, 44($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $20, 40($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $19, 36($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $18, 32($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $17, 28($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $16, 24($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32-NEXT: .cfi_offset 30, -8 +; SOFT-FLOAT-32-NEXT: .cfi_offset 23, -12 +; SOFT-FLOAT-32-NEXT: .cfi_offset 22, -16 +; SOFT-FLOAT-32-NEXT: .cfi_offset 21, -20 +; SOFT-FLOAT-32-NEXT: .cfi_offset 20, -24 +; SOFT-FLOAT-32-NEXT: .cfi_offset 19, -28 +; SOFT-FLOAT-32-NEXT: .cfi_offset 18, -32 +; SOFT-FLOAT-32-NEXT: .cfi_offset 17, -36 +; SOFT-FLOAT-32-NEXT: .cfi_offset 16, -40 +; SOFT-FLOAT-32-NEXT: sw $7, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: sw $6, 16($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: move $16, $4 +; SOFT-FLOAT-32-NEXT: lw $4, 88($sp) +; SOFT-FLOAT-32-NEXT: lw $5, 92($sp) +; SOFT-FLOAT-32-NEXT: lw $6, 120($sp) +; SOFT-FLOAT-32-NEXT: lw $7, 124($sp) +; SOFT-FLOAT-32-NEXT: jal __muldf3 +; SOFT-FLOAT-32-NEXT: nop +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: lw $6, 152($sp) +; SOFT-FLOAT-32-NEXT: lw $7, 156($sp) +; SOFT-FLOAT-32-NEXT: jal __adddf3 +; SOFT-FLOAT-32-NEXT: move $5, $3 +; SOFT-FLOAT-32-NEXT: move $19, $2 +; SOFT-FLOAT-32-NEXT: lw $4, 96($sp) +; SOFT-FLOAT-32-NEXT: lw $5, 100($sp) +; 
SOFT-FLOAT-32-NEXT: lw $6, 128($sp) +; SOFT-FLOAT-32-NEXT: lw $7, 132($sp) +; SOFT-FLOAT-32-NEXT: jal __muldf3 +; SOFT-FLOAT-32-NEXT: move $20, $3 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: lw $6, 160($sp) +; SOFT-FLOAT-32-NEXT: lw $7, 164($sp) +; SOFT-FLOAT-32-NEXT: jal __adddf3 +; SOFT-FLOAT-32-NEXT: move $5, $3 +; SOFT-FLOAT-32-NEXT: move $21, $2 +; SOFT-FLOAT-32-NEXT: lw $4, 80($sp) +; SOFT-FLOAT-32-NEXT: lw $5, 84($sp) +; SOFT-FLOAT-32-NEXT: lw $6, 112($sp) +; SOFT-FLOAT-32-NEXT: lw $7, 116($sp) +; SOFT-FLOAT-32-NEXT: jal __muldf3 +; SOFT-FLOAT-32-NEXT: move $22, $3 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: move $5, $3 +; SOFT-FLOAT-32-NEXT: lw $23, 140($sp) +; SOFT-FLOAT-32-NEXT: lw $fp, 136($sp) +; SOFT-FLOAT-32-NEXT: lw $17, 108($sp) +; SOFT-FLOAT-32-NEXT: lw $18, 104($sp) +; SOFT-FLOAT-32-NEXT: lw $7, 148($sp) +; SOFT-FLOAT-32-NEXT: lw $6, 144($sp) +; SOFT-FLOAT-32-NEXT: sw $22, 28($16) +; SOFT-FLOAT-32-NEXT: sw $21, 24($16) +; SOFT-FLOAT-32-NEXT: sw $20, 20($16) +; SOFT-FLOAT-32-NEXT: jal __adddf3 +; SOFT-FLOAT-32-NEXT: sw $19, 16($16) +; SOFT-FLOAT-32-NEXT: sw $3, 12($16) +; SOFT-FLOAT-32-NEXT: sw $2, 8($16) +; SOFT-FLOAT-32-NEXT: lw $4, 16($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $5, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: move $6, $18 +; SOFT-FLOAT-32-NEXT: jal __muldf3 +; SOFT-FLOAT-32-NEXT: move $7, $17 +; SOFT-FLOAT-32-NEXT: move $4, $2 +; SOFT-FLOAT-32-NEXT: move $5, $3 +; SOFT-FLOAT-32-NEXT: move $6, $fp +; SOFT-FLOAT-32-NEXT: jal __adddf3 +; SOFT-FLOAT-32-NEXT: move $7, $23 +; SOFT-FLOAT-32-NEXT: sw $3, 4($16) +; SOFT-FLOAT-32-NEXT: sw $2, 0($16) +; SOFT-FLOAT-32-NEXT: lw $16, 24($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $17, 28($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $18, 32($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $19, 36($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $20, 40($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $21, 44($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $22, 48($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $23, 52($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $fp, 56($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: lw $ra, 60($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: jr $ra +; SOFT-FLOAT-32-NEXT: addiu $sp, $sp, 64 +; +; SOFT-FLOAT-32R2-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-32R2: # %bb.0: +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, -64 +; SOFT-FLOAT-32R2-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-32R2-NEXT: sw $ra, 60($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $fp, 56($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $23, 52($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $22, 48($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $21, 44($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $20, 40($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $19, 36($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $18, 32($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $17, 28($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $16, 24($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 31, -4 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 30, -8 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 23, -12 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 22, -16 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 21, -20 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 20, -24 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 19, -28 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 18, -32 +; SOFT-FLOAT-32R2-NEXT: .cfi_offset 17, -36 +; 
SOFT-FLOAT-32R2-NEXT: .cfi_offset 16, -40 +; SOFT-FLOAT-32R2-NEXT: sw $7, 20($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: sw $6, 16($sp) # 4-byte Folded Spill +; SOFT-FLOAT-32R2-NEXT: move $16, $4 +; SOFT-FLOAT-32R2-NEXT: lw $4, 88($sp) +; SOFT-FLOAT-32R2-NEXT: lw $5, 92($sp) +; SOFT-FLOAT-32R2-NEXT: lw $6, 120($sp) +; SOFT-FLOAT-32R2-NEXT: lw $7, 124($sp) +; SOFT-FLOAT-32R2-NEXT: jal __muldf3 +; SOFT-FLOAT-32R2-NEXT: nop +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: lw $6, 152($sp) +; SOFT-FLOAT-32R2-NEXT: lw $7, 156($sp) +; SOFT-FLOAT-32R2-NEXT: jal __adddf3 +; SOFT-FLOAT-32R2-NEXT: move $5, $3 +; SOFT-FLOAT-32R2-NEXT: move $19, $2 +; SOFT-FLOAT-32R2-NEXT: lw $4, 96($sp) +; SOFT-FLOAT-32R2-NEXT: lw $5, 100($sp) +; SOFT-FLOAT-32R2-NEXT: lw $6, 128($sp) +; SOFT-FLOAT-32R2-NEXT: lw $7, 132($sp) +; SOFT-FLOAT-32R2-NEXT: jal __muldf3 +; SOFT-FLOAT-32R2-NEXT: move $20, $3 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: lw $6, 160($sp) +; SOFT-FLOAT-32R2-NEXT: lw $7, 164($sp) +; SOFT-FLOAT-32R2-NEXT: jal __adddf3 +; SOFT-FLOAT-32R2-NEXT: move $5, $3 +; SOFT-FLOAT-32R2-NEXT: move $21, $2 +; SOFT-FLOAT-32R2-NEXT: lw $4, 80($sp) +; SOFT-FLOAT-32R2-NEXT: lw $5, 84($sp) +; SOFT-FLOAT-32R2-NEXT: lw $6, 112($sp) +; SOFT-FLOAT-32R2-NEXT: lw $7, 116($sp) +; SOFT-FLOAT-32R2-NEXT: jal __muldf3 +; SOFT-FLOAT-32R2-NEXT: move $22, $3 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: move $5, $3 +; SOFT-FLOAT-32R2-NEXT: lw $23, 140($sp) +; SOFT-FLOAT-32R2-NEXT: lw $fp, 136($sp) +; SOFT-FLOAT-32R2-NEXT: lw $17, 108($sp) +; SOFT-FLOAT-32R2-NEXT: lw $18, 104($sp) +; SOFT-FLOAT-32R2-NEXT: lw $7, 148($sp) +; SOFT-FLOAT-32R2-NEXT: lw $6, 144($sp) +; SOFT-FLOAT-32R2-NEXT: sw $22, 28($16) +; SOFT-FLOAT-32R2-NEXT: sw $21, 24($16) +; SOFT-FLOAT-32R2-NEXT: sw $20, 20($16) +; SOFT-FLOAT-32R2-NEXT: jal __adddf3 +; SOFT-FLOAT-32R2-NEXT: sw $19, 16($16) +; SOFT-FLOAT-32R2-NEXT: sw $3, 12($16) +; SOFT-FLOAT-32R2-NEXT: sw $2, 8($16) +; SOFT-FLOAT-32R2-NEXT: lw $4, 16($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $5, 20($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: move $6, $18 +; SOFT-FLOAT-32R2-NEXT: jal __muldf3 +; SOFT-FLOAT-32R2-NEXT: move $7, $17 +; SOFT-FLOAT-32R2-NEXT: move $4, $2 +; SOFT-FLOAT-32R2-NEXT: move $5, $3 +; SOFT-FLOAT-32R2-NEXT: move $6, $fp +; SOFT-FLOAT-32R2-NEXT: jal __adddf3 +; SOFT-FLOAT-32R2-NEXT: move $7, $23 +; SOFT-FLOAT-32R2-NEXT: sw $3, 4($16) +; SOFT-FLOAT-32R2-NEXT: sw $2, 0($16) +; SOFT-FLOAT-32R2-NEXT: lw $16, 24($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $17, 28($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $18, 32($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $19, 36($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $20, 40($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $21, 44($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $22, 48($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $23, 52($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $fp, 56($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: lw $ra, 60($sp) # 4-byte Folded Reload +; SOFT-FLOAT-32R2-NEXT: jr $ra +; SOFT-FLOAT-32R2-NEXT: addiu $sp, $sp, 64 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, -64 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64-NEXT: sd $ra, 56($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $22, 48($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $21, 40($sp) # 8-byte Folded Spill +; 
SOFT-FLOAT-64-NEXT: sd $20, 32($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $19, 24($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $18, 16($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64-NEXT: .cfi_offset 22, -16 +; SOFT-FLOAT-64-NEXT: .cfi_offset 21, -24 +; SOFT-FLOAT-64-NEXT: .cfi_offset 20, -32 +; SOFT-FLOAT-64-NEXT: .cfi_offset 19, -40 +; SOFT-FLOAT-64-NEXT: .cfi_offset 18, -48 +; SOFT-FLOAT-64-NEXT: .cfi_offset 17, -56 +; SOFT-FLOAT-64-NEXT: .cfi_offset 16, -64 +; SOFT-FLOAT-64-NEXT: move $17, $10 +; SOFT-FLOAT-64-NEXT: move $18, $9 +; SOFT-FLOAT-64-NEXT: move $19, $8 +; SOFT-FLOAT-64-NEXT: move $20, $6 +; SOFT-FLOAT-64-NEXT: move $21, $5 +; SOFT-FLOAT-64-NEXT: move $16, $4 +; SOFT-FLOAT-64-NEXT: move $4, $7 +; SOFT-FLOAT-64-NEXT: jal __muldf3 +; SOFT-FLOAT-64-NEXT: move $5, $11 +; SOFT-FLOAT-64-NEXT: ld $5, 88($sp) +; SOFT-FLOAT-64-NEXT: jal __adddf3 +; SOFT-FLOAT-64-NEXT: move $4, $2 +; SOFT-FLOAT-64-NEXT: move $22, $2 +; SOFT-FLOAT-64-NEXT: ld $5, 64($sp) +; SOFT-FLOAT-64-NEXT: jal __muldf3 +; SOFT-FLOAT-64-NEXT: move $4, $19 +; SOFT-FLOAT-64-NEXT: ld $5, 96($sp) +; SOFT-FLOAT-64-NEXT: jal __adddf3 +; SOFT-FLOAT-64-NEXT: move $4, $2 +; SOFT-FLOAT-64-NEXT: move $19, $2 +; SOFT-FLOAT-64-NEXT: move $4, $20 +; SOFT-FLOAT-64-NEXT: jal __muldf3 +; SOFT-FLOAT-64-NEXT: move $5, $17 +; SOFT-FLOAT-64-NEXT: move $4, $2 +; SOFT-FLOAT-64-NEXT: ld $17, 72($sp) +; SOFT-FLOAT-64-NEXT: ld $5, 80($sp) +; SOFT-FLOAT-64-NEXT: sd $19, 24($16) +; SOFT-FLOAT-64-NEXT: jal __adddf3 +; SOFT-FLOAT-64-NEXT: sd $22, 16($16) +; SOFT-FLOAT-64-NEXT: sd $2, 8($16) +; SOFT-FLOAT-64-NEXT: move $4, $21 +; SOFT-FLOAT-64-NEXT: jal __muldf3 +; SOFT-FLOAT-64-NEXT: move $5, $18 +; SOFT-FLOAT-64-NEXT: move $4, $2 +; SOFT-FLOAT-64-NEXT: jal __adddf3 +; SOFT-FLOAT-64-NEXT: move $5, $17 +; SOFT-FLOAT-64-NEXT: sd $2, 0($16) +; SOFT-FLOAT-64-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $18, 16($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $19, 24($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $20, 32($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $21, 40($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $22, 48($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: ld $ra, 56($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64-NEXT: jr $ra +; SOFT-FLOAT-64-NEXT: daddiu $sp, $sp, 64 +; +; SOFT-FLOAT-64R2-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-64R2: # %bb.0: +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, -64 +; SOFT-FLOAT-64R2-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64R2-NEXT: sd $ra, 56($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $22, 48($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $21, 40($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $20, 32($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $19, 24($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $18, 16($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $17, 8($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: sd $16, 0($sp) # 8-byte Folded Spill +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 31, -8 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 22, -16 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 21, -24 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 20, -32 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 19, -40 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 18, -48 +; SOFT-FLOAT-64R2-NEXT: .cfi_offset 17, -56 +; 
SOFT-FLOAT-64R2-NEXT: .cfi_offset 16, -64 +; SOFT-FLOAT-64R2-NEXT: move $17, $10 +; SOFT-FLOAT-64R2-NEXT: move $18, $9 +; SOFT-FLOAT-64R2-NEXT: move $19, $8 +; SOFT-FLOAT-64R2-NEXT: move $20, $6 +; SOFT-FLOAT-64R2-NEXT: move $21, $5 +; SOFT-FLOAT-64R2-NEXT: move $16, $4 +; SOFT-FLOAT-64R2-NEXT: move $4, $7 +; SOFT-FLOAT-64R2-NEXT: jal __muldf3 +; SOFT-FLOAT-64R2-NEXT: move $5, $11 +; SOFT-FLOAT-64R2-NEXT: ld $5, 88($sp) +; SOFT-FLOAT-64R2-NEXT: jal __adddf3 +; SOFT-FLOAT-64R2-NEXT: move $4, $2 +; SOFT-FLOAT-64R2-NEXT: move $22, $2 +; SOFT-FLOAT-64R2-NEXT: ld $5, 64($sp) +; SOFT-FLOAT-64R2-NEXT: jal __muldf3 +; SOFT-FLOAT-64R2-NEXT: move $4, $19 +; SOFT-FLOAT-64R2-NEXT: ld $5, 96($sp) +; SOFT-FLOAT-64R2-NEXT: jal __adddf3 +; SOFT-FLOAT-64R2-NEXT: move $4, $2 +; SOFT-FLOAT-64R2-NEXT: move $19, $2 +; SOFT-FLOAT-64R2-NEXT: move $4, $20 +; SOFT-FLOAT-64R2-NEXT: jal __muldf3 +; SOFT-FLOAT-64R2-NEXT: move $5, $17 +; SOFT-FLOAT-64R2-NEXT: move $4, $2 +; SOFT-FLOAT-64R2-NEXT: ld $17, 72($sp) +; SOFT-FLOAT-64R2-NEXT: ld $5, 80($sp) +; SOFT-FLOAT-64R2-NEXT: sd $19, 24($16) +; SOFT-FLOAT-64R2-NEXT: jal __adddf3 +; SOFT-FLOAT-64R2-NEXT: sd $22, 16($16) +; SOFT-FLOAT-64R2-NEXT: sd $2, 8($16) +; SOFT-FLOAT-64R2-NEXT: move $4, $21 +; SOFT-FLOAT-64R2-NEXT: jal __muldf3 +; SOFT-FLOAT-64R2-NEXT: move $5, $18 +; SOFT-FLOAT-64R2-NEXT: move $4, $2 +; SOFT-FLOAT-64R2-NEXT: jal __adddf3 +; SOFT-FLOAT-64R2-NEXT: move $5, $17 +; SOFT-FLOAT-64R2-NEXT: sd $2, 0($16) +; SOFT-FLOAT-64R2-NEXT: ld $16, 0($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $17, 8($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $18, 16($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $19, 24($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $20, 32($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $21, 40($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $22, 48($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: ld $ra, 56($sp) # 8-byte Folded Reload +; SOFT-FLOAT-64R2-NEXT: jr $ra +; SOFT-FLOAT-64R2-NEXT: daddiu $sp, $sp, 64 + %product = fmul contract <4 x double> %a, %b + %result = fadd contract <4 x double> %product, %c + ret <4 x double> %result +} + +attributes #0 = { "use-soft-float"="true" } + +declare float @llvm.fmuladd.f32(float %a, float %b, float %c) +declare double @llvm.fmuladd.f64(double %a, double %b, double %c) diff --git a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll index bc58a700cb9828..028fab7ae54d6a 100644 --- a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll +++ b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll @@ -19,7 +19,7 @@ define i32 @f(ptr %p) { ; ENABLED-NEXT: ld.param.u64 %rd1, [f_param_0]; ; ENABLED-NEXT: ld.v2.u32 {%r1, %r2}, [%rd1]; ; ENABLED-NEXT: add.s32 %r3, %r1, %r2; -; ENABLED-NEXT: st.param.b32 [func_retval0+0], %r3; +; ENABLED-NEXT: st.param.b32 [func_retval0], %r3; ; ENABLED-NEXT: ret; ; ; DISABLED-LABEL: f( @@ -32,7 +32,7 @@ define i32 @f(ptr %p) { ; DISABLED-NEXT: ld.u32 %r1, [%rd1]; ; DISABLED-NEXT: ld.u32 %r2, [%rd1+4]; ; DISABLED-NEXT: add.s32 %r3, %r1, %r2; -; DISABLED-NEXT: st.param.b32 [func_retval0+0], %r3; +; DISABLED-NEXT: st.param.b32 [func_retval0], %r3; ; DISABLED-NEXT: ret; %p.1 = getelementptr i32, ptr %p, i32 1 %v0 = load i32, ptr %p, align 8 @@ -68,7 +68,7 @@ define half @fh(ptr %p) { ; ENABLED-NEXT: cvt.f32.f16 %f11, %rs5; ; ENABLED-NEXT: add.rn.f32 %f12, %f10, %f11; ; ENABLED-NEXT: cvt.rn.f16.f32 %rs9, %f12; -; ENABLED-NEXT: st.param.b16 [func_retval0+0], %rs9; +; ENABLED-NEXT: st.param.b16 
[func_retval0], %rs9; ; ENABLED-NEXT: ret; ; ; DISABLED-LABEL: fh( @@ -100,7 +100,7 @@ define half @fh(ptr %p) { ; DISABLED-NEXT: cvt.f32.f16 %f11, %rs5; ; DISABLED-NEXT: add.rn.f32 %f12, %f10, %f11; ; DISABLED-NEXT: cvt.rn.f16.f32 %rs9, %f12; -; DISABLED-NEXT: st.param.b16 [func_retval0+0], %rs9; +; DISABLED-NEXT: st.param.b16 [func_retval0], %rs9; ; DISABLED-NEXT: ret; %p.1 = getelementptr half, ptr %p, i32 1 %p.2 = getelementptr half, ptr %p, i32 2 @@ -132,7 +132,7 @@ define float @ff(ptr %p) { ; ENABLED-NEXT: add.rn.f32 %f7, %f3, %f4; ; ENABLED-NEXT: add.rn.f32 %f8, %f6, %f7; ; ENABLED-NEXT: add.rn.f32 %f9, %f8, %f5; -; ENABLED-NEXT: st.param.f32 [func_retval0+0], %f9; +; ENABLED-NEXT: st.param.f32 [func_retval0], %f9; ; ENABLED-NEXT: ret; ; ; DISABLED-LABEL: ff( @@ -151,7 +151,7 @@ define float @ff(ptr %p) { ; DISABLED-NEXT: add.rn.f32 %f7, %f3, %f4; ; DISABLED-NEXT: add.rn.f32 %f8, %f6, %f7; ; DISABLED-NEXT: add.rn.f32 %f9, %f8, %f5; -; DISABLED-NEXT: st.param.f32 [func_retval0+0], %f9; +; DISABLED-NEXT: st.param.f32 [func_retval0], %f9; ; DISABLED-NEXT: ret; %p.1 = getelementptr float, ptr %p, i32 1 %p.2 = getelementptr float, ptr %p, i32 2 diff --git a/llvm/test/CodeGen/NVPTX/activemask.ll b/llvm/test/CodeGen/NVPTX/activemask.ll index 1496b2ebdd4427..e1d169d17c60e9 100644 --- a/llvm/test/CodeGen/NVPTX/activemask.ll +++ b/llvm/test/CodeGen/NVPTX/activemask.ll @@ -6,7 +6,7 @@ declare i32 @llvm.nvvm.activemask() ; CHECK-LABEL: activemask( ; ; CHECK: activemask.b32 %[[REG:.+]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %[[REG]]; +; CHECK-NEXT: st.param.b32 [func_retval0], %[[REG]]; ; CHECK-NEXT: ret; define dso_local i32 @activemask() { entry: @@ -18,7 +18,7 @@ entry: ; ; CHECK: activemask.b32 %[[REG:.+]]; ; CHECK: activemask.b32 %[[REG]]; -; CHECK: .param.b32 [func_retval0+0], %[[REG]]; +; CHECK: .param.b32 [func_retval0], %[[REG]]; ; CHECK-NEXT: ret; define dso_local i32 @convergent(i1 %cond) { entry: diff --git a/llvm/test/CodeGen/NVPTX/addr-mode.ll b/llvm/test/CodeGen/NVPTX/addr-mode.ll index a6a085c0e2e33e..ca2a74f7e54a3e 100644 --- a/llvm/test/CodeGen/NVPTX/addr-mode.ll +++ b/llvm/test/CodeGen/NVPTX/addr-mode.ll @@ -12,7 +12,7 @@ define i32 @test_addr_mode_i64(ptr %x) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u64 %rd1, [test_addr_mode_i64_param_0]; ; CHECK-NEXT: ld.u32 %r1, [%rd1+-4]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %addr = getelementptr i32, ptr %x, i64 -1 %res = load i32, ptr %addr @@ -28,7 +28,7 @@ define i32 @test_addr_mode_i32(ptr %x) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u64 %rd1, [test_addr_mode_i32_param_0]; ; CHECK-NEXT: ld.u32 %r1, [%rd1+-4]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %addr = getelementptr i32, ptr %x, i32 -1 %res = load i32, ptr %addr @@ -44,7 +44,7 @@ define i32 @test_addr_mode_i16(ptr %x) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u64 %rd1, [test_addr_mode_i16_param_0]; ; CHECK-NEXT: ld.u32 %r1, [%rd1+-4]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %addr = getelementptr i32, ptr %x, i16 -1 %res = load i32, ptr %addr @@ -60,7 +60,7 @@ define i32 @test_addr_mode_i8(ptr %x) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u64 %rd1, [test_addr_mode_i8_param_0]; ; CHECK-NEXT: ld.u32 %r1, [%rd1+-4]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], 
%r1; ; CHECK-NEXT: ret; %addr = getelementptr i32, ptr %x, i8 -1 %res = load i32, ptr %addr @@ -77,7 +77,7 @@ define i32 @test_addr_mode_i64_large(ptr %x) { ; CHECK-NEXT: ld.param.u64 %rd1, [test_addr_mode_i64_large_param_0]; ; CHECK-NEXT: add.s64 %rd2, %rd1, 17179869172; ; CHECK-NEXT: ld.u32 %r1, [%rd2]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %addr = getelementptr i32, ptr %x, i64 4294967293 %res = load i32, ptr %addr diff --git a/llvm/test/CodeGen/NVPTX/aggregate-return.ll b/llvm/test/CodeGen/NVPTX/aggregate-return.ll index 5983d71e065dd4..4bda8049b267b9 100644 --- a/llvm/test/CodeGen/NVPTX/aggregate-return.ll +++ b/llvm/test/CodeGen/NVPTX/aggregate-return.ll @@ -10,7 +10,7 @@ define void @test_v2f32(<2 x float> %input, ptr %output) { ; CHECK-LABEL: @test_v2f32 %call = tail call <2 x float> @barv(<2 x float> %input) ; CHECK: .param .align 8 .b8 retval0[8]; -; CHECK: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0+0]; +; CHECK: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0]; store <2 x float> %call, ptr %output, align 8 ; CHECK: st.v2.f32 [{{%rd[0-9]+}}], {[[E0]], [[E1]]} ret void @@ -21,7 +21,7 @@ define void @test_v3f32(<3 x float> %input, ptr %output) { ; %call = tail call <3 x float> @barv3(<3 x float> %input) ; CHECK: .param .align 16 .b8 retval0[16]; -; CHECK-DAG: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0+0]; +; CHECK-DAG: ld.param.v2.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]]}, [retval0]; ; CHECK-DAG: ld.param.f32 [[E2:%f[0-9]+]], [retval0+8]; ; Make sure we don't load more values than we need to. ; CHECK-NOT: ld.param.f32 [[E3:%f[0-9]+]], [retval0+12]; @@ -38,7 +38,7 @@ define void @test_a2f32([2 x float] %input, ptr %output) { ; CHECK-LABEL: @test_a2f32 %call = tail call [2 x float] @bara([2 x float] %input) ; CHECK: .param .align 4 .b8 retval0[8]; -; CHECK-DAG: ld.param.f32 [[ELEMA1:%f[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.f32 [[ELEMA1:%f[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.f32 [[ELEMA2:%f[0-9]+]], [retval0+4]; store [2 x float] %call, ptr %output, align 4 ; CHECK: } @@ -52,7 +52,7 @@ define void @test_s2f32({float, float} %input, ptr %output) { ; CHECK-LABEL: @test_s2f32 %call = tail call {float, float} @bars({float, float} %input) ; CHECK: .param .align 4 .b8 retval0[8]; -; CHECK-DAG: ld.param.f32 [[ELEMS1:%f[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.f32 [[ELEMS1:%f[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.f32 [[ELEMS2:%f[0-9]+]], [retval0+4]; store {float, float} %call, ptr %output, align 4 ; CHECK: } diff --git a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll index 95bca39c73ad73..80815b3ca37c05 100644 --- a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll @@ -37,7 +37,7 @@ define bfloat @test_fadd(bfloat %0, bfloat %1) { ; SM70-NEXT: or.b32 %r9, %r5, 4194304; ; SM70-NEXT: selp.b32 %r10, %r9, %r8, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r10; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM70-NEXT: st.param.b16 [func_retval0], %rs1; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_fadd( @@ -52,7 +52,7 @@ define bfloat @test_fadd(bfloat %0, bfloat %1) { ; SM80-NEXT: cvt.f32.bf16 %f2, %rs1; ; SM80-NEXT: add.rn.f32 %f3, %f2, %f1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs3, %f3; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fadd( @@ -67,7 +67,7 
@@ define bfloat @test_fadd(bfloat %0, bfloat %1) { ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f2, %rs1; ; SM80-FTZ-NEXT: add.rn.ftz.f32 %f3, %f2, %f1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs3, %f3; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fadd( @@ -78,7 +78,7 @@ define bfloat @test_fadd(bfloat %0, bfloat %1) { ; SM90-NEXT: ld.param.b16 %rs1, [test_fadd_param_0]; ; SM90-NEXT: ld.param.b16 %rs2, [test_fadd_param_1]; ; SM90-NEXT: add.rn.bf16 %rs3, %rs1, %rs2; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM90-NEXT: st.param.b16 [func_retval0], %rs3; ; SM90-NEXT: ret; %3 = fadd bfloat %0, %1 ret bfloat %3 @@ -108,7 +108,7 @@ define bfloat @test_fsub(bfloat %0, bfloat %1) { ; SM70-NEXT: or.b32 %r9, %r5, 4194304; ; SM70-NEXT: selp.b32 %r10, %r9, %r8, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r10; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM70-NEXT: st.param.b16 [func_retval0], %rs1; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_fsub( @@ -123,7 +123,7 @@ define bfloat @test_fsub(bfloat %0, bfloat %1) { ; SM80-NEXT: cvt.f32.bf16 %f2, %rs1; ; SM80-NEXT: sub.rn.f32 %f3, %f2, %f1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs3, %f3; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fsub( @@ -138,7 +138,7 @@ define bfloat @test_fsub(bfloat %0, bfloat %1) { ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f2, %rs1; ; SM80-FTZ-NEXT: sub.rn.ftz.f32 %f3, %f2, %f1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs3, %f3; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fsub( @@ -149,7 +149,7 @@ define bfloat @test_fsub(bfloat %0, bfloat %1) { ; SM90-NEXT: ld.param.b16 %rs1, [test_fsub_param_0]; ; SM90-NEXT: ld.param.b16 %rs2, [test_fsub_param_1]; ; SM90-NEXT: sub.rn.bf16 %rs3, %rs1, %rs2; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM90-NEXT: st.param.b16 [func_retval0], %rs3; ; SM90-NEXT: ret; %3 = fsub bfloat %0, %1 ret bfloat %3 @@ -199,7 +199,7 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM70-NEXT: selp.b32 %r22, %r21, %r20, %p2; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs11}, %r22; } ; SM70-NEXT: mov.b32 %r23, {%rs11, %rs7}; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r23; +; SM70-NEXT: st.param.b32 [func_retval0], %r23; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_faddx2( @@ -222,7 +222,7 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-NEXT: add.rn.f32 %f6, %f5, %f4; ; SM80-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM80-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM80-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_faddx2( @@ -245,7 +245,7 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-FTZ-NEXT: add.rn.ftz.f32 %f6, %f5, %f4; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM80-FTZ-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM80-FTZ-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-FTZ-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_faddx2( @@ -256,7 +256,7 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM90-NEXT: ld.param.b32 %r1, [test_faddx2_param_1]; ; SM90-NEXT: ld.param.b32 %r2, [test_faddx2_param_0]; ; SM90-NEXT: add.rn.bf16x2 %r3, %r2, %r1; -; SM90-NEXT: st.param.b32 
[func_retval0+0], %r3; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; ; SM90-NEXT: ret; %r = fadd <2 x bfloat> %a, %b ret <2 x bfloat> %r @@ -306,7 +306,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM70-NEXT: selp.b32 %r22, %r21, %r20, %p2; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs11}, %r22; } ; SM70-NEXT: mov.b32 %r23, {%rs11, %rs7}; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r23; +; SM70-NEXT: st.param.b32 [func_retval0], %r23; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_fsubx2( @@ -329,7 +329,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-NEXT: sub.rn.f32 %f6, %f5, %f4; ; SM80-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM80-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM80-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fsubx2( @@ -352,7 +352,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-FTZ-NEXT: sub.rn.ftz.f32 %f6, %f5, %f4; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM80-FTZ-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM80-FTZ-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-FTZ-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fsubx2( @@ -363,7 +363,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM90-NEXT: ld.param.b32 %r1, [test_fsubx2_param_1]; ; SM90-NEXT: ld.param.b32 %r2, [test_fsubx2_param_0]; ; SM90-NEXT: sub.rn.bf16x2 %r3, %r2, %r1; -; SM90-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; ; SM90-NEXT: ret; %r = fsub <2 x bfloat> %a, %b ret <2 x bfloat> %r @@ -413,7 +413,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM70-NEXT: selp.b32 %r22, %r21, %r20, %p2; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs11}, %r22; } ; SM70-NEXT: mov.b32 %r23, {%rs11, %rs7}; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r23; +; SM70-NEXT: st.param.b32 [func_retval0], %r23; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_fmulx2( @@ -436,7 +436,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-NEXT: mul.rn.f32 %f6, %f5, %f4; ; SM80-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM80-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM80-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fmulx2( @@ -459,7 +459,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-FTZ-NEXT: mul.rn.ftz.f32 %f6, %f5, %f4; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM80-FTZ-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM80-FTZ-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-FTZ-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fmulx2( @@ -470,7 +470,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM90-NEXT: ld.param.b32 %r1, [test_fmulx2_param_1]; ; SM90-NEXT: ld.param.b32 %r2, [test_fmulx2_param_0]; ; SM90-NEXT: mul.rn.bf16x2 %r3, %r2, %r1; -; SM90-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; ; SM90-NEXT: ret; %r = fmul <2 x bfloat> %a, %b ret <2 x bfloat> %r @@ -520,7 +520,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM70-NEXT: selp.b32 %r22, %r21, %r20, %p2; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs11}, %r22; } ; SM70-NEXT: mov.b32 %r23, {%rs11, %rs7}; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r23; +; SM70-NEXT: st.param.b32 [func_retval0], %r23; ; 
SM70-NEXT: ret; ; ; SM80-LABEL: test_fdiv( @@ -543,7 +543,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-NEXT: div.rn.f32 %f6, %f5, %f4; ; SM80-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM80-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM80-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fdiv( @@ -566,7 +566,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-FTZ-NEXT: div.rn.ftz.f32 %f6, %f5, %f4; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM80-FTZ-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM80-FTZ-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-FTZ-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fdiv( @@ -589,7 +589,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM90-NEXT: div.rn.f32 %f6, %f5, %f4; ; SM90-NEXT: cvt.rn.bf16.f32 %rs6, %f6; ; SM90-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; SM90-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; ; SM90-NEXT: ret; %r = fdiv <2 x bfloat> %a, %b ret <2 x bfloat> %r @@ -602,7 +602,7 @@ define bfloat @test_extract_0(<2 x bfloat> %a) #0 { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b16 %rs1, [test_extract_0_param_0]; -; CHECK-NEXT: st.param.b16 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs1; ; CHECK-NEXT: ret; %e = extractelement <2 x bfloat> %a, i32 0 ret bfloat %e @@ -615,7 +615,7 @@ define bfloat @test_extract_1(<2 x bfloat> %a) #0 { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.b16 %rs1, [test_extract_1_param_0+2]; -; CHECK-NEXT: st.param.b16 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs1; ; CHECK-NEXT: ret; %e = extractelement <2 x bfloat> %a, i32 1 ret bfloat %e @@ -631,7 +631,7 @@ define float @test_fpext_float(bfloat %a) #0 { ; SM70-NEXT: ld.param.u16 %r1, [test_fpext_float_param_0]; ; SM70-NEXT: shl.b32 %r2, %r1, 16; ; SM70-NEXT: mov.b32 %f1, %r2; -; SM70-NEXT: st.param.f32 [func_retval0+0], %f1; +; SM70-NEXT: st.param.f32 [func_retval0], %f1; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_fpext_float( @@ -642,7 +642,7 @@ define float @test_fpext_float(bfloat %a) #0 { ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.b16 %rs1, [test_fpext_float_param_0]; ; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM80-NEXT: st.param.f32 [func_retval0+0], %f1; +; SM80-NEXT: st.param.f32 [func_retval0], %f1; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fpext_float( @@ -653,7 +653,7 @@ define float @test_fpext_float(bfloat %a) #0 { ; SM80-FTZ-NEXT: // %bb.0: ; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_fpext_float_param_0]; ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f1, %rs1; -; SM80-FTZ-NEXT: st.param.f32 [func_retval0+0], %f1; +; SM80-FTZ-NEXT: st.param.f32 [func_retval0], %f1; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fpext_float( @@ -664,7 +664,7 @@ define float @test_fpext_float(bfloat %a) #0 { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.b16 %rs1, [test_fpext_float_param_0]; ; SM90-NEXT: cvt.f32.bf16 %f1, %rs1; -; SM90-NEXT: st.param.f32 [func_retval0+0], %f1; +; SM90-NEXT: st.param.f32 [func_retval0], %f1; ; SM90-NEXT: ret; %r = fpext bfloat %a to float ret float %r @@ -688,7 +688,7 @@ define bfloat @test_fptrunc_float(float %a) #0 { ; SM70-NEXT: or.b32 %r5, %r1, 4194304; ; SM70-NEXT: selp.b32 %r6, %r5, %r4, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r6; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM70-NEXT: st.param.b16 [func_retval0], %rs1; 
; SM70-NEXT: ret; ; ; SM80-LABEL: test_fptrunc_float( @@ -699,7 +699,7 @@ define bfloat @test_fptrunc_float(float %a) #0 { ; SM80-NEXT: // %bb.0: ; SM80-NEXT: ld.param.f32 %f1, [test_fptrunc_float_param_0]; ; SM80-NEXT: cvt.rn.bf16.f32 %rs1, %f1; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM80-NEXT: st.param.b16 [func_retval0], %rs1; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fptrunc_float( @@ -710,7 +710,7 @@ define bfloat @test_fptrunc_float(float %a) #0 { ; SM80-FTZ-NEXT: // %bb.0: ; SM80-FTZ-NEXT: ld.param.f32 %f1, [test_fptrunc_float_param_0]; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs1, %f1; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs1; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fptrunc_float( @@ -721,7 +721,7 @@ define bfloat @test_fptrunc_float(float %a) #0 { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.f32 %f1, [test_fptrunc_float_param_0]; ; SM90-NEXT: cvt.rn.bf16.f32 %rs1, %f1; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM90-NEXT: st.param.b16 [func_retval0], %rs1; ; SM90-NEXT: ret; %r = fptrunc float %a to bfloat ret bfloat %r @@ -748,7 +748,7 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 { ; SM70-NEXT: or.b32 %r7, %r3, 4194304; ; SM70-NEXT: selp.b32 %r8, %r7, %r6, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r8; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM70-NEXT: st.param.b16 [func_retval0], %rs1; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_fadd_imm_1( @@ -761,7 +761,7 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 { ; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; ; SM80-NEXT: add.rn.f32 %f2, %f1, 0f3F800000; ; SM80-NEXT: cvt.rn.bf16.f32 %rs2, %f2; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fadd_imm_1( @@ -774,7 +774,7 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 { ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f1, %rs1; ; SM80-FTZ-NEXT: add.rn.ftz.f32 %f2, %f1, 0f3F800000; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs2, %f2; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fadd_imm_1( @@ -785,7 +785,7 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 { ; SM90-NEXT: ld.param.b16 %rs1, [test_fadd_imm_1_param_0]; ; SM90-NEXT: mov.b16 %rs2, 0x3F80; ; SM90-NEXT: add.rn.bf16 %rs3, %rs1, %rs2; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM90-NEXT: st.param.b16 [func_retval0], %rs3; ; SM90-NEXT: ret; %r = fadd bfloat %a, 1.0 ret bfloat %r @@ -805,7 +805,7 @@ define bfloat @test_select_cc_bf16_f64(double %a, double %b, bfloat %c, bfloat % ; CHECK-NEXT: ld.param.b16 %rs1, [test_select_cc_bf16_f64_param_2]; ; CHECK-NEXT: ld.param.b16 %rs2, [test_select_cc_bf16_f64_param_3]; ; CHECK-NEXT: selp.b16 %rs3, %rs1, %rs2, %p1; -; CHECK-NEXT: st.param.b16 [func_retval0+0], %rs3; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-NEXT: ret; %cc = fcmp olt double %a, %b %r = select i1 %cc, bfloat %c, bfloat %d @@ -851,7 +851,7 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 { ; SM70-NEXT: cvt.u32.u16 %r19, %rs1; ; SM70-NEXT: shl.b32 %r20, %r19, 16; ; SM70-NEXT: mov.b32 %f8, %r20; -; SM70-NEXT: st.param.v4.f32 [func_retval0+0], {%f8, %f7, %f6, %f5}; +; SM70-NEXT: st.param.v4.f32 [func_retval0], {%f8, %f7, %f6, %f5}; ; SM70-NEXT: st.param.v4.f32 [func_retval0+16], {%f4, %f3, %f2, %f1}; ; SM70-NEXT: ret; ; @@ -877,7 +877,7 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) 
noundef %arg) #0 { ; SM80-NEXT: cvt.f32.bf16 %f6, %rs3; ; SM80-NEXT: cvt.f32.bf16 %f7, %rs2; ; SM80-NEXT: cvt.f32.bf16 %f8, %rs1; -; SM80-NEXT: st.param.v4.f32 [func_retval0+0], {%f8, %f7, %f6, %f5}; +; SM80-NEXT: st.param.v4.f32 [func_retval0], {%f8, %f7, %f6, %f5}; ; SM80-NEXT: st.param.v4.f32 [func_retval0+16], {%f4, %f3, %f2, %f1}; ; SM80-NEXT: ret; ; @@ -903,7 +903,7 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 { ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f6, %rs3; ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f7, %rs2; ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f8, %rs1; -; SM80-FTZ-NEXT: st.param.v4.f32 [func_retval0+0], {%f8, %f7, %f6, %f5}; +; SM80-FTZ-NEXT: st.param.v4.f32 [func_retval0], {%f8, %f7, %f6, %f5}; ; SM80-FTZ-NEXT: st.param.v4.f32 [func_retval0+16], {%f4, %f3, %f2, %f1}; ; SM80-FTZ-NEXT: ret; ; @@ -929,7 +929,7 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 { ; SM90-NEXT: cvt.f32.bf16 %f6, %rs3; ; SM90-NEXT: cvt.f32.bf16 %f7, %rs2; ; SM90-NEXT: cvt.f32.bf16 %f8, %rs1; -; SM90-NEXT: st.param.v4.f32 [func_retval0+0], {%f8, %f7, %f6, %f5}; +; SM90-NEXT: st.param.v4.f32 [func_retval0], {%f8, %f7, %f6, %f5}; ; SM90-NEXT: st.param.v4.f32 [func_retval0+16], {%f4, %f3, %f2, %f1}; ; SM90-NEXT: ret; %load = load <8 x bfloat>, ptr addrspace(3) %arg, align 16 @@ -950,7 +950,7 @@ define i16 @test_fptosi_i16(bfloat %a) { ; SM70-NEXT: mov.b32 %f1, %r2; ; SM70-NEXT: cvt.rzi.s16.f32 %rs1, %f1; ; SM70-NEXT: cvt.u32.u16 %r3, %rs1; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM70-NEXT: st.param.b32 [func_retval0], %r3; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_fptosi_i16( @@ -964,7 +964,7 @@ define i16 @test_fptosi_i16(bfloat %a) { ; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; ; SM80-NEXT: cvt.rzi.s16.f32 %rs2, %f1; ; SM80-NEXT: cvt.u32.u16 %r1, %rs2; -; SM80-NEXT: st.param.b32 [func_retval0+0], %r1; +; SM80-NEXT: st.param.b32 [func_retval0], %r1; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fptosi_i16( @@ -978,7 +978,7 @@ define i16 @test_fptosi_i16(bfloat %a) { ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f1, %rs1; ; SM80-FTZ-NEXT: cvt.rzi.ftz.s16.f32 %rs2, %f1; ; SM80-FTZ-NEXT: cvt.u32.u16 %r1, %rs2; -; SM80-FTZ-NEXT: st.param.b32 [func_retval0+0], %r1; +; SM80-FTZ-NEXT: st.param.b32 [func_retval0], %r1; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fptosi_i16( @@ -990,7 +990,7 @@ define i16 @test_fptosi_i16(bfloat %a) { ; SM90-NEXT: ld.param.b16 %rs1, [test_fptosi_i16_param_0]; ; SM90-NEXT: cvt.rzi.s16.bf16 %rs2, %rs1; ; SM90-NEXT: cvt.u32.u16 %r1, %rs2; -; SM90-NEXT: st.param.b32 [func_retval0+0], %r1; +; SM90-NEXT: st.param.b32 [func_retval0], %r1; ; SM90-NEXT: ret; %r = fptosi bfloat %a to i16 ret i16 %r @@ -1009,7 +1009,7 @@ define i16 @test_fptoui_i16(bfloat %a) { ; SM70-NEXT: mov.b32 %f1, %r2; ; SM70-NEXT: cvt.rzi.u16.f32 %rs1, %f1; ; SM70-NEXT: cvt.u32.u16 %r3, %rs1; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM70-NEXT: st.param.b32 [func_retval0], %r3; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_fptoui_i16( @@ -1023,7 +1023,7 @@ define i16 @test_fptoui_i16(bfloat %a) { ; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; ; SM80-NEXT: cvt.rzi.u16.f32 %rs2, %f1; ; SM80-NEXT: cvt.u32.u16 %r1, %rs2; -; SM80-NEXT: st.param.b32 [func_retval0+0], %r1; +; SM80-NEXT: st.param.b32 [func_retval0], %r1; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_fptoui_i16( @@ -1037,7 +1037,7 @@ define i16 @test_fptoui_i16(bfloat %a) { ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f1, %rs1; ; SM80-FTZ-NEXT: cvt.rzi.ftz.u16.f32 %rs2, %f1; ; SM80-FTZ-NEXT: cvt.u32.u16 %r1, %rs2; -; SM80-FTZ-NEXT: st.param.b32 
[func_retval0+0], %r1; +; SM80-FTZ-NEXT: st.param.b32 [func_retval0], %r1; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_fptoui_i16( @@ -1049,7 +1049,7 @@ define i16 @test_fptoui_i16(bfloat %a) { ; SM90-NEXT: ld.param.b16 %rs1, [test_fptoui_i16_param_0]; ; SM90-NEXT: cvt.rzi.u16.bf16 %rs2, %rs1; ; SM90-NEXT: cvt.u32.u16 %r1, %rs2; -; SM90-NEXT: st.param.b32 [func_retval0+0], %r1; +; SM90-NEXT: st.param.b32 [func_retval0], %r1; ; SM90-NEXT: ret; %r = fptoui bfloat %a to i16 ret i16 %r @@ -1074,7 +1074,7 @@ define bfloat @test_sitofp_i16(i16 %a) { ; SM70-NEXT: or.b32 %r5, %r1, 4194304; ; SM70-NEXT: selp.b32 %r6, %r5, %r4, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs2}, %r6; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM70-NEXT: st.param.b16 [func_retval0], %rs2; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_sitofp_i16( @@ -1086,7 +1086,7 @@ define bfloat @test_sitofp_i16(i16 %a) { ; SM80-NEXT: ld.param.u16 %rs1, [test_sitofp_i16_param_0]; ; SM80-NEXT: cvt.rn.f32.s16 %f1, %rs1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs2, %f1; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_sitofp_i16( @@ -1098,7 +1098,7 @@ define bfloat @test_sitofp_i16(i16 %a) { ; SM80-FTZ-NEXT: ld.param.u16 %rs1, [test_sitofp_i16_param_0]; ; SM80-FTZ-NEXT: cvt.rn.f32.s16 %f1, %rs1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs2, %f1; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_sitofp_i16( @@ -1108,7 +1108,7 @@ define bfloat @test_sitofp_i16(i16 %a) { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.u16 %rs1, [test_sitofp_i16_param_0]; ; SM90-NEXT: cvt.rn.bf16.s16 %rs2, %rs1; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM90-NEXT: st.param.b16 [func_retval0], %rs2; ; SM90-NEXT: ret; %r = sitofp i16 %a to bfloat ret bfloat %r @@ -1133,7 +1133,7 @@ define bfloat @test_uitofp_i8(i8 %a) { ; SM70-NEXT: or.b32 %r5, %r1, 4194304; ; SM70-NEXT: selp.b32 %r6, %r5, %r4, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs2}, %r6; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM70-NEXT: st.param.b16 [func_retval0], %rs2; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_uitofp_i8( @@ -1145,7 +1145,7 @@ define bfloat @test_uitofp_i8(i8 %a) { ; SM80-NEXT: ld.param.u8 %rs1, [test_uitofp_i8_param_0]; ; SM80-NEXT: cvt.rn.f32.u16 %f1, %rs1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs2, %f1; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_uitofp_i8( @@ -1157,7 +1157,7 @@ define bfloat @test_uitofp_i8(i8 %a) { ; SM80-FTZ-NEXT: ld.param.u8 %rs1, [test_uitofp_i8_param_0]; ; SM80-FTZ-NEXT: cvt.rn.f32.u16 %f1, %rs1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs2, %f1; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_uitofp_i8( @@ -1167,7 +1167,7 @@ define bfloat @test_uitofp_i8(i8 %a) { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.u8 %rs1, [test_uitofp_i8_param_0]; ; SM90-NEXT: cvt.rn.bf16.u16 %rs2, %rs1; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM90-NEXT: st.param.b16 [func_retval0], %rs2; ; SM90-NEXT: ret; %r = uitofp i8 %a to bfloat ret bfloat %r @@ -1195,7 +1195,7 @@ define bfloat @test_uitofp_i1(i1 %a) { ; SM70-NEXT: or.b32 %r6, %r2, 4194304; ; SM70-NEXT: selp.b32 %r7, %r6, %r5, %p2; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs3}, %r7; } -; SM70-NEXT: 
st.param.b16 [func_retval0+0], %rs3; +; SM70-NEXT: st.param.b16 [func_retval0], %rs3; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_uitofp_i1( @@ -1212,7 +1212,7 @@ define bfloat @test_uitofp_i1(i1 %a) { ; SM80-NEXT: selp.u32 %r1, 1, 0, %p1; ; SM80-NEXT: cvt.rn.f32.u32 %f1, %r1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs3, %f1; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_uitofp_i1( @@ -1229,7 +1229,7 @@ define bfloat @test_uitofp_i1(i1 %a) { ; SM80-FTZ-NEXT: selp.u32 %r1, 1, 0, %p1; ; SM80-FTZ-NEXT: cvt.rn.f32.u32 %f1, %r1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs3, %f1; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_uitofp_i1( @@ -1244,7 +1244,7 @@ define bfloat @test_uitofp_i1(i1 %a) { ; SM90-NEXT: setp.eq.b16 %p1, %rs2, 1; ; SM90-NEXT: selp.u32 %r1, 1, 0, %p1; ; SM90-NEXT: cvt.rn.bf16.u32 %rs3, %r1; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM90-NEXT: st.param.b16 [func_retval0], %rs3; ; SM90-NEXT: ret; %r = uitofp i1 %a to bfloat ret bfloat %r @@ -1269,7 +1269,7 @@ define bfloat @test_uitofp_i16(i16 %a) { ; SM70-NEXT: or.b32 %r5, %r1, 4194304; ; SM70-NEXT: selp.b32 %r6, %r5, %r4, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs2}, %r6; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM70-NEXT: st.param.b16 [func_retval0], %rs2; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_uitofp_i16( @@ -1281,7 +1281,7 @@ define bfloat @test_uitofp_i16(i16 %a) { ; SM80-NEXT: ld.param.u16 %rs1, [test_uitofp_i16_param_0]; ; SM80-NEXT: cvt.rn.f32.u16 %f1, %rs1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs2, %f1; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_uitofp_i16( @@ -1293,7 +1293,7 @@ define bfloat @test_uitofp_i16(i16 %a) { ; SM80-FTZ-NEXT: ld.param.u16 %rs1, [test_uitofp_i16_param_0]; ; SM80-FTZ-NEXT: cvt.rn.f32.u16 %f1, %rs1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs2, %f1; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_uitofp_i16( @@ -1303,7 +1303,7 @@ define bfloat @test_uitofp_i16(i16 %a) { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.u16 %rs1, [test_uitofp_i16_param_0]; ; SM90-NEXT: cvt.rn.bf16.u16 %rs2, %rs1; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM90-NEXT: st.param.b16 [func_retval0], %rs2; ; SM90-NEXT: ret; %r = uitofp i16 %a to bfloat ret bfloat %r @@ -1328,7 +1328,7 @@ define bfloat @test_uitofp_i32(i32 %a) { ; SM70-NEXT: or.b32 %r6, %r2, 4194304; ; SM70-NEXT: selp.b32 %r7, %r6, %r5, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM70-NEXT: st.param.b16 [func_retval0], %rs1; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_uitofp_i32( @@ -1341,7 +1341,7 @@ define bfloat @test_uitofp_i32(i32 %a) { ; SM80-NEXT: ld.param.u32 %r1, [test_uitofp_i32_param_0]; ; SM80-NEXT: cvt.rn.f32.u32 %f1, %r1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs1, %f1; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM80-NEXT: st.param.b16 [func_retval0], %rs1; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_uitofp_i32( @@ -1354,7 +1354,7 @@ define bfloat @test_uitofp_i32(i32 %a) { ; SM80-FTZ-NEXT: ld.param.u32 %r1, [test_uitofp_i32_param_0]; ; SM80-FTZ-NEXT: cvt.rn.f32.u32 %f1, %r1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs1, %f1; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], 
%rs1; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs1; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_uitofp_i32( @@ -1365,7 +1365,7 @@ define bfloat @test_uitofp_i32(i32 %a) { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.u32 %r1, [test_uitofp_i32_param_0]; ; SM90-NEXT: cvt.rn.bf16.u32 %rs1, %r1; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM90-NEXT: st.param.b16 [func_retval0], %rs1; ; SM90-NEXT: ret; %r = uitofp i32 %a to bfloat ret bfloat %r @@ -1391,7 +1391,7 @@ define bfloat @test_uitofp_i64(i64 %a) { ; SM70-NEXT: or.b32 %r5, %r1, 4194304; ; SM70-NEXT: selp.b32 %r6, %r5, %r4, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r6; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM70-NEXT: st.param.b16 [func_retval0], %rs1; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_uitofp_i64( @@ -1404,7 +1404,7 @@ define bfloat @test_uitofp_i64(i64 %a) { ; SM80-NEXT: ld.param.u64 %rd1, [test_uitofp_i64_param_0]; ; SM80-NEXT: cvt.rn.f32.u64 %f1, %rd1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs1, %f1; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM80-NEXT: st.param.b16 [func_retval0], %rs1; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_uitofp_i64( @@ -1417,7 +1417,7 @@ define bfloat @test_uitofp_i64(i64 %a) { ; SM80-FTZ-NEXT: ld.param.u64 %rd1, [test_uitofp_i64_param_0]; ; SM80-FTZ-NEXT: cvt.rn.f32.u64 %f1, %rd1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs1, %f1; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs1; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_uitofp_i64( @@ -1428,7 +1428,7 @@ define bfloat @test_uitofp_i64(i64 %a) { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.u64 %rd1, [test_uitofp_i64_param_0]; ; SM90-NEXT: cvt.rn.bf16.u64 %rs1, %rd1; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM90-NEXT: st.param.b16 [func_retval0], %rs1; ; SM90-NEXT: ret; %r = uitofp i64 %a to bfloat ret bfloat %r @@ -1455,7 +1455,7 @@ define bfloat @test_roundeven(bfloat %a) { ; SM70-NEXT: or.b32 %r7, %r3, 4194304; ; SM70-NEXT: selp.b32 %r8, %r7, %r6, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r8; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM70-NEXT: st.param.b16 [func_retval0], %rs1; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_roundeven( @@ -1468,7 +1468,7 @@ define bfloat @test_roundeven(bfloat %a) { ; SM80-NEXT: cvt.f32.bf16 %f1, %rs1; ; SM80-NEXT: cvt.rni.f32.f32 %f2, %f1; ; SM80-NEXT: cvt.rn.bf16.f32 %rs2, %f2; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_roundeven( @@ -1481,7 +1481,7 @@ define bfloat @test_roundeven(bfloat %a) { ; SM80-FTZ-NEXT: cvt.ftz.f32.bf16 %f1, %rs1; ; SM80-FTZ-NEXT: cvt.rni.ftz.f32.f32 %f2, %f1; ; SM80-FTZ-NEXT: cvt.rn.bf16.f32 %rs2, %f2; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs2; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_roundeven( @@ -1491,7 +1491,7 @@ define bfloat @test_roundeven(bfloat %a) { ; SM90-NEXT: // %bb.0: ; SM90-NEXT: ld.param.b16 %rs1, [test_roundeven_param_0]; ; SM90-NEXT: cvt.rni.bf16.bf16 %rs2, %rs1; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs2; +; SM90-NEXT: st.param.b16 [func_retval0], %rs2; ; SM90-NEXT: ret; %r = call bfloat @llvm.roundeven.bf16(bfloat %a) ret bfloat %r @@ -1527,7 +1527,7 @@ define bfloat @test_maximum(bfloat %a, bfloat %b) { ; SM70-NEXT: mov.b32 %f3, %r6; ; SM70-NEXT: setp.eq.f32 %p5, %f3, 0f00000000; ; SM70-NEXT: selp.b16 %rs10, %rs8, %rs6, %p5; -; SM70-NEXT: st.param.b16 
[func_retval0+0], %rs10; +; SM70-NEXT: st.param.b16 [func_retval0], %rs10; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_maximum( @@ -1538,7 +1538,7 @@ define bfloat @test_maximum(bfloat %a, bfloat %b) { ; SM80-NEXT: ld.param.b16 %rs1, [test_maximum_param_0]; ; SM80-NEXT: ld.param.b16 %rs2, [test_maximum_param_1]; ; SM80-NEXT: max.NaN.bf16 %rs3, %rs1, %rs2; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_maximum( @@ -1549,7 +1549,7 @@ define bfloat @test_maximum(bfloat %a, bfloat %b) { ; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_maximum_param_0]; ; SM80-FTZ-NEXT: ld.param.b16 %rs2, [test_maximum_param_1]; ; SM80-FTZ-NEXT: max.NaN.bf16 %rs3, %rs1, %rs2; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_maximum( @@ -1560,7 +1560,7 @@ define bfloat @test_maximum(bfloat %a, bfloat %b) { ; SM90-NEXT: ld.param.b16 %rs1, [test_maximum_param_0]; ; SM90-NEXT: ld.param.b16 %rs2, [test_maximum_param_1]; ; SM90-NEXT: max.NaN.bf16 %rs3, %rs1, %rs2; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM90-NEXT: st.param.b16 [func_retval0], %rs3; ; SM90-NEXT: ret; %r = call bfloat @llvm.maximum.bf16(bfloat %a, bfloat %b) ret bfloat %r @@ -1590,7 +1590,7 @@ define bfloat @test_maxnum(bfloat %a, bfloat %b) { ; SM70-NEXT: or.b32 %r9, %r5, 4194304; ; SM70-NEXT: selp.b32 %r10, %r9, %r8, %p1; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r10; } -; SM70-NEXT: st.param.b16 [func_retval0+0], %rs1; +; SM70-NEXT: st.param.b16 [func_retval0], %rs1; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_maxnum( @@ -1601,7 +1601,7 @@ define bfloat @test_maxnum(bfloat %a, bfloat %b) { ; SM80-NEXT: ld.param.b16 %rs1, [test_maxnum_param_0]; ; SM80-NEXT: ld.param.b16 %rs2, [test_maxnum_param_1]; ; SM80-NEXT: max.bf16 %rs3, %rs1, %rs2; -; SM80-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_maxnum( @@ -1612,7 +1612,7 @@ define bfloat @test_maxnum(bfloat %a, bfloat %b) { ; SM80-FTZ-NEXT: ld.param.b16 %rs1, [test_maxnum_param_0]; ; SM80-FTZ-NEXT: ld.param.b16 %rs2, [test_maxnum_param_1]; ; SM80-FTZ-NEXT: max.bf16 %rs3, %rs1, %rs2; -; SM80-FTZ-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM80-FTZ-NEXT: st.param.b16 [func_retval0], %rs3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_maxnum( @@ -1623,7 +1623,7 @@ define bfloat @test_maxnum(bfloat %a, bfloat %b) { ; SM90-NEXT: ld.param.b16 %rs1, [test_maxnum_param_0]; ; SM90-NEXT: ld.param.b16 %rs2, [test_maxnum_param_1]; ; SM90-NEXT: max.bf16 %rs3, %rs1, %rs2; -; SM90-NEXT: st.param.b16 [func_retval0+0], %rs3; +; SM90-NEXT: st.param.b16 [func_retval0], %rs3; ; SM90-NEXT: ret; %r = call bfloat @llvm.maxnum.bf16(bfloat %a, bfloat %b) ret bfloat %r @@ -1681,7 +1681,7 @@ define <2 x bfloat> @test_maximum_v2(<2 x bfloat> %a, <2 x bfloat> %b) { ; SM70-NEXT: setp.eq.f32 %p10, %f6, 0f00000000; ; SM70-NEXT: selp.b16 %rs20, %rs18, %rs16, %p10; ; SM70-NEXT: mov.b32 %r15, {%rs20, %rs12}; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r15; +; SM70-NEXT: st.param.b32 [func_retval0], %r15; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_maximum_v2( @@ -1692,7 +1692,7 @@ define <2 x bfloat> @test_maximum_v2(<2 x bfloat> %a, <2 x bfloat> %b) { ; SM80-NEXT: ld.param.b32 %r1, [test_maximum_v2_param_1]; ; SM80-NEXT: ld.param.b32 %r2, [test_maximum_v2_param_0]; ; SM80-NEXT: max.NaN.bf16x2 %r3, %r2, %r1; -; SM80-NEXT: st.param.b32 [func_retval0+0], 
%r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_maximum_v2( @@ -1703,7 +1703,7 @@ define <2 x bfloat> @test_maximum_v2(<2 x bfloat> %a, <2 x bfloat> %b) { ; SM80-FTZ-NEXT: ld.param.b32 %r1, [test_maximum_v2_param_1]; ; SM80-FTZ-NEXT: ld.param.b32 %r2, [test_maximum_v2_param_0]; ; SM80-FTZ-NEXT: max.NaN.bf16x2 %r3, %r2, %r1; -; SM80-FTZ-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-FTZ-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_maximum_v2( @@ -1714,7 +1714,7 @@ define <2 x bfloat> @test_maximum_v2(<2 x bfloat> %a, <2 x bfloat> %b) { ; SM90-NEXT: ld.param.b32 %r1, [test_maximum_v2_param_1]; ; SM90-NEXT: ld.param.b32 %r2, [test_maximum_v2_param_0]; ; SM90-NEXT: max.NaN.bf16x2 %r3, %r2, %r1; -; SM90-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; ; SM90-NEXT: ret; %r = call <2 x bfloat> @llvm.maximum.bf16(<2 x bfloat> %a, <2 x bfloat> %b) ret <2 x bfloat> %r @@ -1764,7 +1764,7 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) { ; SM70-NEXT: selp.b32 %r22, %r21, %r20, %p2; ; SM70-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs11}, %r22; } ; SM70-NEXT: mov.b32 %r23, {%rs11, %rs7}; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r23; +; SM70-NEXT: st.param.b32 [func_retval0], %r23; ; SM70-NEXT: ret; ; ; SM80-LABEL: test_maxnum_v2( @@ -1775,7 +1775,7 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) { ; SM80-NEXT: ld.param.b32 %r1, [test_maxnum_v2_param_1]; ; SM80-NEXT: ld.param.b32 %r2, [test_maxnum_v2_param_0]; ; SM80-NEXT: max.bf16x2 %r3, %r2, %r1; -; SM80-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-NEXT: ret; ; ; SM80-FTZ-LABEL: test_maxnum_v2( @@ -1786,7 +1786,7 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) { ; SM80-FTZ-NEXT: ld.param.b32 %r1, [test_maxnum_v2_param_1]; ; SM80-FTZ-NEXT: ld.param.b32 %r2, [test_maxnum_v2_param_0]; ; SM80-FTZ-NEXT: max.bf16x2 %r3, %r2, %r1; -; SM80-FTZ-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM80-FTZ-NEXT: st.param.b32 [func_retval0], %r3; ; SM80-FTZ-NEXT: ret; ; ; SM90-LABEL: test_maxnum_v2( @@ -1797,7 +1797,7 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) { ; SM90-NEXT: ld.param.b32 %r1, [test_maxnum_v2_param_1]; ; SM90-NEXT: ld.param.b32 %r2, [test_maxnum_v2_param_0]; ; SM90-NEXT: max.bf16x2 %r3, %r2, %r1; -; SM90-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM90-NEXT: st.param.b32 [func_retval0], %r3; ; SM90-NEXT: ret; %r = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) ret <2 x bfloat> %r diff --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll index f61205eb88fc24..a53c90ac6db8b6 100644 --- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll +++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll @@ -16,7 +16,7 @@ declare <2 x bfloat> @llvm.cos.f16(<2 x bfloat> %a) #0 ; CHECK-DAG: cvt.rn.bf16.f32 [[R0:%rs[0-9]+]], [[RF0]]; ; CHECK-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[RF1]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_sin(<2 x bfloat> %a) #0 #1 { %r = call <2 x bfloat> @llvm.sin.f16(<2 x bfloat> %a) @@ -33,7 +33,7 @@ define <2 x bfloat> @test_sin(<2 x bfloat> %a) #0 #1 { ; CHECK-DAG: cvt.rn.bf16.f32 [[R0:%rs[0-9]+]], [[RF0]]; ; CHECK-DAG: 
cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[RF1]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_cos(<2 x bfloat> %a) #0 #1 { %r = call <2 x bfloat> @llvm.cos.f16(<2 x bfloat> %a) diff --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll index 8d40a9ef54dca9..925ae4245a4c20 100644 --- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll @@ -7,7 +7,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" ; CHECK-LABEL: test_ret_const( ; CHECK: mov.b32 [[T:%r[0-9+]]], 1073758080; -; CHECK: st.param.b32 [func_retval0+0], [[T]]; +; CHECK: st.param.b32 [func_retval0], [[T]]; ; CHECK-NEXT: ret; define <2 x bfloat> @test_ret_const() #0 { @@ -30,7 +30,7 @@ define <2 x bfloat> @test_ret_const() #0 { ; SM80-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[FR1]] ; SM80-DAG: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} ; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define <2 x bfloat> @test_fadd_imm_0(<2 x bfloat> %a) #0 { @@ -47,7 +47,7 @@ define <2 x bfloat> @test_fadd_imm_0(<2 x bfloat> %a) #0 { ; SM80: add.rn.f32 [[FR:%f[0-9]+]], [[FA]], 0f3F800000; ; SM80: cvt.rn.bf16.f32 [[R:%rs[0-9]+]], [[FR]]; -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define bfloat @test_fadd_imm_1(bfloat %a) #0 { @@ -72,7 +72,7 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 { ; SM80-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[FR1]]; ; SM80: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}; -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { @@ -97,7 +97,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[FR1]]; ; SM80: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}; -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { @@ -119,7 +119,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; CHECK-DAG: cvt.rn.bf16.f32 [[R0:%rs[0-9]+]], [[FR0]]; ; CHECK-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[FR1]]; ; CHECK-NEXT: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 { @@ -131,7 +131,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; CHECK-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_fneg_param_0]; ; CHECK-DAG: xor.b32 [[IHH0:%r[0-9]+]], [[A]], -2147450880; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[IHH0]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[IHH0]]; ; CHECK-NEXT: ret; define <2 x bfloat> @test_fneg(<2 x bfloat> %a) #0 { %r = fneg <2 x bfloat> %a @@ -175,15 +175,15 @@ declare <2 x bfloat> @test_callee(<2 x bfloat> %a, <2 x bfloat> %b) #0 ; CHECK: { ; CHECK-DAG: .param .align 4 .b8 param0[4]; ; CHECK-DAG: .param .align 4 .b8 param1[4]; -; CHECK-DAG: st.param.b32 [param0+0], [[A]]; -; CHECK-DAG: st.param.b32 [param1+0], [[B]]; +; CHECK-DAG: st.param.b32 [param0], [[A]]; +; CHECK-DAG: st.param.b32 [param1], [[B]]; ; CHECK-DAG: 
.param .align 4 .b8 retval0[4]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: test_callee, ; CHECK: ); -; CHECK-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0+0]; +; CHECK-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0]; ; CHECK-NEXT: } -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define <2 x bfloat> @test_call(<2 x bfloat> %a, <2 x bfloat> %b) #0 { @@ -197,7 +197,7 @@ define <2 x bfloat> @test_call(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; CHECK-DAG: ld.param.u8 [[C:%rs[0-9]+]], [test_select_param_2] ; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1; ; CHECK-NEXT: selp.b32 [[R:%r[0-9]+]], [[A]], [[B]], [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define <2 x bfloat> @test_select(<2 x bfloat> %a, <2 x bfloat> %b, i1 zeroext %c) #0 { @@ -227,7 +227,7 @@ define <2 x bfloat> @test_select(<2 x bfloat> %a, <2 x bfloat> %b, i1 zeroext %c ; CHECK-DAG: selp.b16 [[R0:%rs[0-9]+]], [[A0]], [[B0]], [[P0]]; ; CHECK-DAG: selp.b16 [[R1:%rs[0-9]+]], [[A1]], [[B1]], [[P1]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define <2 x bfloat> @test_select_cc(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c, <2 x bfloat> %d) #0 { @@ -255,7 +255,7 @@ define <2 x bfloat> @test_select_cc(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloa ; ; CHECK-DAG: selp.f32 [[R0:%f[0-9]+]], [[A0]], [[B0]], [[P0]]; ; CHECK-DAG: selp.f32 [[R1:%f[0-9]+]], [[A1]], [[B1]], [[P1]]; -; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]}; +; CHECK-NEXT: st.param.v2.f32 [func_retval0], {[[R0]], [[R1]]}; ; CHECK-NEXT: ret; define <2 x float> @test_select_cc_f32_bf16(<2 x float> %a, <2 x float> %b, <2 x bfloat> %c, <2 x bfloat> %d) #0 { @@ -276,7 +276,7 @@ define <2 x float> @test_select_cc_f32_bf16(<2 x float> %a, <2 x float> %b, ; CHECK-DAG: selp.b16 [[R0:%rs[0-9]+]], [[A0]], [[B0]], [[P0]]; ; CHECK-DAG: selp.b16 [[R1:%rs[0-9]+]], [[A1]], [[B1]], [[P1]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define <2 x bfloat> @test_select_cc_bf16_f32(<2 x bfloat> %a, <2 x bfloat> %b, <2 x float> %c, <2 x float> %d) #0 { @@ -290,7 +290,7 @@ define <2 x bfloat> @test_select_cc_bf16_f32(<2 x bfloat> %a, <2 x bfloat> %b, ; CHECK-DAG: cvt.rn.bf16.f32 [[R0:%rs[0-9]+]], [[A0]]; ; CHECK-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[A1]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_fptrunc_2xfloat(<2 x float> %a) #0 { %r = fptrunc <2 x float> %a to <2 x bfloat> @@ -302,7 +302,7 @@ define <2 x bfloat> @test_fptrunc_2xfloat(<2 x float> %a) #0 { ; CHECK: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]] ; CHECK-DAG: cvt.f32.bf16 [[R0:%f[0-9]+]], [[A0]]; ; CHECK-DAG: cvt.f32.bf16 [[R1:%f[0-9]+]], [[A1]]; -; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]}; +; CHECK-NEXT: st.param.v2.f32 [func_retval0], {[[R0]], [[R1]]}; ; CHECK: ret; define <2 x float> @test_fpext_2xfloat(<2 x bfloat> %a) #0 { %r = fpext <2 x bfloat> %a to <2 x float> @@ -311,7 +311,7 @@ define <2 x float> @test_fpext_2xfloat(<2 x bfloat> %a) #0 { ; CHECK-LABEL: test_bitcast_2xbf16_to_2xi16( ; CHECK: ld.param.u32 
[[A:%r[0-9]+]], [test_bitcast_2xbf16_to_2xi16_param_0]; -; CHECK: st.param.b32 [func_retval0+0], [[A]] +; CHECK: st.param.b32 [func_retval0], [[A]] ; CHECK: ret; define <2 x i16> @test_bitcast_2xbf16_to_2xi16(<2 x bfloat> %a) #0 { %r = bitcast <2 x bfloat> %a to <2 x i16> @@ -321,7 +321,7 @@ define <2 x i16> @test_bitcast_2xbf16_to_2xi16(<2 x bfloat> %a) #0 { ; CHECK-LABEL: test_bitcast_2xi16_to_2xbf16( ; CHECK: ld.param.b32 [[R]], [test_bitcast_2xi16_to_2xbf16_param_0]; -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_bitcast_2xi16_to_2xbf16(<2 x i16> %a) #0 { %r = bitcast <2 x i16> %a to <2 x bfloat> @@ -362,7 +362,7 @@ declare <2 x bfloat> @llvm.fmuladd.f16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bf ; CHECK-DAG: cvt.rn.bf16.f32 [[R0:%rs[0-9]+]], [[RF0]]; ; CHECK-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[RF1]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_sqrt(<2 x bfloat> %a) #0 { %r = call <2 x bfloat> @llvm.sqrt.f16(<2 x bfloat> %a) @@ -375,7 +375,7 @@ define <2 x bfloat> @test_sqrt(<2 x bfloat> %a) #0 { ; CHECK-DAG: ld.param.b32 [[C:%r[0-9]+]], [test_fmuladd_param_2]; ; ; CHECK: fma.rn.bf16x2 [[RA:%r[0-9]+]], [[A]], [[B]], [[C]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[RA]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[RA]]; ; CHECK: ret; define <2 x bfloat> @test_fmuladd(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) #0 { %r = call <2 x bfloat> @llvm.fmuladd.f16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) @@ -385,7 +385,7 @@ define <2 x bfloat> @test_fmuladd(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> ; CHECK-LABEL: test_fabs( ; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_fabs_param_0]; ; CHECK: and.b32 [[R:%r[0-9]+]], [[A]], 2147450879; -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_fabs(<2 x bfloat> %a) #0 { %r = call <2 x bfloat> @llvm.fabs.f16(<2 x bfloat> %a) @@ -407,7 +407,7 @@ define <2 x bfloat> @test_fabs_add(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; CHECK-DAG: ld.param.b32 [[AF0:%r[0-9]+]], [test_minnum_param_0]; ; CHECK-DAG: ld.param.b32 [[BF0:%r[0-9]+]], [test_minnum_param_1]; ; CHECK-DAG: min.bf16x2 [[RF0:%r[0-9]+]], [[AF0]], [[BF0]]; -; CHECK: st.param.b32 [func_retval0+0], [[RF0]]; +; CHECK: st.param.b32 [func_retval0], [[RF0]]; ; CHECK: ret; define <2 x bfloat> @test_minnum(<2 x bfloat> %a, <2 x bfloat> %b) #0 { %r = call <2 x bfloat> @llvm.minnum.f16(<2 x bfloat> %a, <2 x bfloat> %b) @@ -418,7 +418,7 @@ define <2 x bfloat> @test_minnum(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; CHECK-DAG: ld.param.b32 [[AF0:%r[0-9]+]], [test_maxnum_param_0]; ; CHECK-DAG: ld.param.b32 [[BF0:%r[0-9]+]], [test_maxnum_param_1]; ; CHECK-DAG: max.bf16x2 [[RF0:%r[0-9]+]], [[AF0]], [[BF0]]; -; CHECK: st.param.b32 [func_retval0+0], [[RF0]]; +; CHECK: st.param.b32 [func_retval0], [[RF0]]; ; CHECK: ret; define <2 x bfloat> @test_maxnum(<2 x bfloat> %a, <2 x bfloat> %b) #0 { %r = call <2 x bfloat> @llvm.maxnum.f16(<2 x bfloat> %a, <2 x bfloat> %b) @@ -439,7 +439,7 @@ define <2 x bfloat> @test_maxnum(<2 x bfloat> %a, <2 x bfloat> %b) #0 { ; SM80-DAG: cvt.rn.bf16.f32 [[R0:%rs[0-9]+]], [[RF0]]; ; SM80-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[RF1]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 
[func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_floor(<2 x bfloat> %a) #0 { %r = call <2 x bfloat> @llvm.floor.f16(<2 x bfloat> %a) @@ -458,7 +458,7 @@ define <2 x bfloat> @test_floor(<2 x bfloat> %a) #0 { ; SM80-DAG: cvt.rn.bf16.f32 [[R0:%rs[0-9]+]], [[RF0]]; ; SM80-DAG: cvt.rn.bf16.f32 [[R1:%rs[0-9]+]], [[RF1]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_ceil(<2 x bfloat> %a) #0 { %r = call <2 x bfloat> @llvm.ceil.f16(<2 x bfloat> %a) @@ -471,7 +471,7 @@ define <2 x bfloat> @test_ceil(<2 x bfloat> %a) #0 { ; SM90: cvt.rzi.bf16.bf16 [[R1:%rs[0-9]+]], [[A1]]; ; SM90: cvt.rzi.bf16.bf16 [[R0:%rs[0-9]+]], [[A0]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_trunc(<2 x bfloat> %a) #0 { %r = call <2 x bfloat> @llvm.trunc.f16(<2 x bfloat> %a) @@ -484,7 +484,7 @@ define <2 x bfloat> @test_trunc(<2 x bfloat> %a) #0 { ; SM90: cvt.rni.bf16.bf16 [[R1:%rs[0-9]+]], [[A1]]; ; SM90: cvt.rni.bf16.bf16 [[R0:%rs[0-9]+]], [[A0]]; ; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]} -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_rint(<2 x bfloat> %a) #0 { %r = call <2 x bfloat> @llvm.rint.f16(<2 x bfloat> %a) @@ -498,7 +498,7 @@ define <2 x bfloat> @test_rint(<2 x bfloat> %a) #0 { ; CHECK: or.b32 {{.*}}, [[R1]], 1056964608; ; CHECK: and.b32 [[R2:%r[0-9]+]], {{.*}}, -2147483648; ; CHECK: or.b32 {{.*}}, [[R2]], 1056964608; -; CHECK: st.param.b32 [func_retval0+0], {{.*}}; +; CHECK: st.param.b32 [func_retval0], {{.*}}; ; CHECK: ret; define <2 x bfloat> @test_round(<2 x bfloat> %a) #0 { %r = call <2 x bfloat> @llvm.round.f16(<2 x bfloat> %a) @@ -526,7 +526,7 @@ define <2 x bfloat> @test_round(<2 x bfloat> %a) #0 { ; SM90-DAG: and.b32 [[R1:%r[0-9]+]], [[B]], -2147450880; ; SM90-DAG: and.b32 [[R2:%r[0-9]+]], [[A]], 2147450879; ; SM90-DAG: or.b32 [[R:%r[0-9]+]], [[R2]], [[R1]]; -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define <2 x bfloat> @test_copysign(<2 x bfloat> %a, <2 x bfloat> %b) #0 { %r = call <2 x bfloat> @llvm.copysign.f16(<2 x bfloat> %a, <2 x bfloat> %b) diff --git a/llvm/test/CodeGen/NVPTX/bswap.ll b/llvm/test/CodeGen/NVPTX/bswap.ll index 3f929ec6a75d0a..461cecf57270eb 100644 --- a/llvm/test/CodeGen/NVPTX/bswap.ll +++ b/llvm/test/CodeGen/NVPTX/bswap.ll @@ -16,7 +16,7 @@ define i16 @bswap16(i16 %a) { ; CHECK-NEXT: shl.b16 %rs3, %rs1, 8; ; CHECK-NEXT: or.b16 %rs4, %rs3, %rs2; ; CHECK-NEXT: cvt.u32.u16 %r1, %rs4; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %b = tail call i16 @llvm.bswap.i16(i16 %a) ret i16 %b @@ -31,7 +31,7 @@ define i32 @bswap32(i32 %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [bswap32_param_0]; ; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 291; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %b = tail call i32 @llvm.bswap.i32(i32 %a) ret i32 %b @@ -46,7 +46,7 @@ define <2 x i16> @bswapv2i16(<2 x i16> %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [bswapv2i16_param_0]; ; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 8961; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: 
st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %b = tail call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %a) ret <2 x i16> %b @@ -65,7 +65,7 @@ define i64 @bswap64(i64 %a) { ; CHECK-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r3}, %rd1; } ; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 291; ; CHECK-NEXT: mov.b64 %rd2, {%r4, %r2}; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd2; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd2; ; CHECK-NEXT: ret; %b = tail call i64 @llvm.bswap.i64(i64 %a) ret i64 %b diff --git a/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll b/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll index 3fbed871850bc3..0ce9a58b2e6ecb 100644 --- a/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll +++ b/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll @@ -45,9 +45,9 @@ entry: store float %3, ptr %arrayidx7, align 4 ; CHECK: .param .b64 param0; -; CHECK-NEXT: st.param.b64 [param0+0], %rd[[A_REG]] +; CHECK-NEXT: st.param.b64 [param0], %rd[[A_REG]] ; CHECK-NEXT: .param .b64 param1; -; CHECK-NEXT: st.param.b64 [param1+0], %rd[[SP_REG]] +; CHECK-NEXT: st.param.b64 [param1], %rd[[SP_REG]] ; CHECK-NEXT: call.uni ; CHECK-NEXT: callee, diff --git a/llvm/test/CodeGen/NVPTX/call_bitcast_byval.ll b/llvm/test/CodeGen/NVPTX/call_bitcast_byval.ll index bd723a296e620f..5cf70a6aea5c22 100644 --- a/llvm/test/CodeGen/NVPTX/call_bitcast_byval.ll +++ b/llvm/test/CodeGen/NVPTX/call_bitcast_byval.ll @@ -14,7 +14,7 @@ target triple = "nvptx64-nvidia-cuda" %complex_half = type { half, half } ; CHECK: .param .align 2 .b8 param2[4]; -; CHECK: st.param.b16 [param2+0], %rs1; +; CHECK: st.param.b16 [param2], %rs1; ; CHECK: st.param.b16 [param2+2], %rs2; ; CHECK: .param .align 2 .b8 retval0[4]; ; CHECK-NEXT: prototype_0 : .callprototype (.param .align 2 .b8 _[4]) _ (.param .b32 _, .param .b32 _, .param .align 2 .b8 _[4]); @@ -37,7 +37,7 @@ define internal void @callee(ptr byval(%"class.complex") %byval_arg) { define void @boom() { %fp = call ptr @usefp(ptr @callee) ; CHECK: .param .align 2 .b8 param0[4]; - ; CHECK: st.param.b16 [param0+0], %rs1; + ; CHECK: st.param.b16 [param0], %rs1; ; CHECK: st.param.b16 [param0+2], %rs2; ; CHECK: .callprototype ()_ (.param .align 2 .b8 _[4]); call void %fp(ptr byval(%"class.complex") null) diff --git a/llvm/test/CodeGen/NVPTX/chain-different-as.ll b/llvm/test/CodeGen/NVPTX/chain-different-as.ll index 18d06647cfe05f..293281e17dd36a 100644 --- a/llvm/test/CodeGen/NVPTX/chain-different-as.ll +++ b/llvm/test/CodeGen/NVPTX/chain-different-as.ll @@ -11,7 +11,7 @@ define i64 @test() nounwind readnone { ; CHECK-NEXT: mov.u64 %rd2, 42; ; CHECK-NEXT: st.u64 [%rd1], %rd2; ; CHECK-NEXT: ld.global.u64 %rd3, [%rd1]; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd3; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd3; ; CHECK-NEXT: ret; %addr0 = inttoptr i64 1 to ptr %addr1 = inttoptr i64 1 to ptr addrspace(1) diff --git a/llvm/test/CodeGen/NVPTX/cmpxchg.ll b/llvm/test/CodeGen/NVPTX/cmpxchg.ll index 85ae5f0c8f6013..f7cc32b962b9c8 100644 --- a/llvm/test/CodeGen/NVPTX/cmpxchg.ll +++ b/llvm/test/CodeGen/NVPTX/cmpxchg.ll @@ -47,7 +47,7 @@ define i8 @relaxed_sys_i8(ptr %addr, i8 %cmp, i8 %new) { ; SM30-NEXT: mov.u32 %r20, %r8; ; SM30-NEXT: @%p2 bra $L__BB0_1; ; SM30-NEXT: $L__BB0_3: // %partword.cmpxchg.end -; SM30-NEXT: st.param.b32 [func_retval0+0], %r13; +; SM30-NEXT: st.param.b32 [func_retval0], %r13; ; SM30-NEXT: ret; ; ; SM70-LABEL: relaxed_sys_i8( @@ -87,7 +87,7 @@ define i8 @relaxed_sys_i8(ptr %addr, i8 %cmp, i8 %new) { ; SM70-NEXT: @%p2 bra $L__BB0_1; ; SM70-NEXT: $L__BB0_3: // 
%partword.cmpxchg.end ; SM70-NEXT: cvt.u32.u16 %r2, %rs9; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r2; +; SM70-NEXT: st.param.b32 [func_retval0], %r2; ; SM70-NEXT: ret; %pairold = cmpxchg ptr %addr, i8 %cmp, i8 %new seq_cst seq_cst ret i8 %new @@ -132,7 +132,7 @@ define i16 @relaxed_sys_i16(ptr %addr, i16 %cmp, i16 %new) { ; SM30-NEXT: mov.u32 %r19, %r8; ; SM30-NEXT: @%p2 bra $L__BB1_1; ; SM30-NEXT: $L__BB1_3: // %partword.cmpxchg.end -; SM30-NEXT: st.param.b32 [func_retval0+0], %r14; +; SM30-NEXT: st.param.b32 [func_retval0], %r14; ; SM30-NEXT: ret; ; ; SM70-LABEL: relaxed_sys_i16( @@ -147,7 +147,7 @@ define i16 @relaxed_sys_i16(ptr %addr, i16 %cmp, i16 %new) { ; SM70-NEXT: ld.param.u16 %rs2, [relaxed_sys_i16_param_2]; ; SM70-NEXT: atom.cas.b16 %rs3, [%rd1], %rs1, %rs2; ; SM70-NEXT: cvt.u32.u16 %r1, %rs2; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r1; +; SM70-NEXT: st.param.b32 [func_retval0], %r1; ; SM70-NEXT: ret; %pairold = cmpxchg ptr %addr, i16 %cmp, i16 %new seq_cst seq_cst ret i16 %new @@ -165,7 +165,7 @@ define i32 @relaxed_sys_i32(ptr %addr, i32 %cmp, i32 %new) { ; SM30-NEXT: ld.param.u32 %r1, [relaxed_sys_i32_param_1]; ; SM30-NEXT: ld.param.u32 %r2, [relaxed_sys_i32_param_2]; ; SM30-NEXT: atom.cas.b32 %r3, [%rd1], %r1, %r2; -; SM30-NEXT: st.param.b32 [func_retval0+0], %r2; +; SM30-NEXT: st.param.b32 [func_retval0], %r2; ; SM30-NEXT: ret; ; ; SM70-LABEL: relaxed_sys_i32( @@ -178,7 +178,7 @@ define i32 @relaxed_sys_i32(ptr %addr, i32 %cmp, i32 %new) { ; SM70-NEXT: ld.param.u32 %r1, [relaxed_sys_i32_param_1]; ; SM70-NEXT: ld.param.u32 %r2, [relaxed_sys_i32_param_2]; ; SM70-NEXT: atom.cas.b32 %r3, [%rd1], %r1, %r2; -; SM70-NEXT: st.param.b32 [func_retval0+0], %r2; +; SM70-NEXT: st.param.b32 [func_retval0], %r2; ; SM70-NEXT: ret; %pairold = cmpxchg ptr %addr, i32 %cmp, i32 %new seq_cst seq_cst ret i32 %new @@ -195,7 +195,7 @@ define i64 @relaxed_sys_i64(ptr %addr, i64 %cmp, i64 %new) { ; SM30-NEXT: ld.param.u64 %rd2, [relaxed_sys_i64_param_1]; ; SM30-NEXT: ld.param.u64 %rd3, [relaxed_sys_i64_param_2]; ; SM30-NEXT: atom.cas.b64 %rd4, [%rd1], %rd2, %rd3; -; SM30-NEXT: st.param.b64 [func_retval0+0], %rd3; +; SM30-NEXT: st.param.b64 [func_retval0], %rd3; ; SM30-NEXT: ret; ; ; SM70-LABEL: relaxed_sys_i64( @@ -207,7 +207,7 @@ define i64 @relaxed_sys_i64(ptr %addr, i64 %cmp, i64 %new) { ; SM70-NEXT: ld.param.u64 %rd2, [relaxed_sys_i64_param_1]; ; SM70-NEXT: ld.param.u64 %rd3, [relaxed_sys_i64_param_2]; ; SM70-NEXT: atom.cas.b64 %rd4, [%rd1], %rd2, %rd3; -; SM70-NEXT: st.param.b64 [func_retval0+0], %rd3; +; SM70-NEXT: st.param.b64 [func_retval0], %rd3; ; SM70-NEXT: ret; %pairold = cmpxchg ptr %addr, i64 %cmp, i64 %new seq_cst seq_cst ret i64 %new diff --git a/llvm/test/CodeGen/NVPTX/combine-mad.ll b/llvm/test/CodeGen/NVPTX/combine-mad.ll index 56bfaa14c5877c..1b22cfde39725f 100644 --- a/llvm/test/CodeGen/NVPTX/combine-mad.ll +++ b/llvm/test/CodeGen/NVPTX/combine-mad.ll @@ -14,7 +14,7 @@ define i32 @test1(i32 %n, i32 %m) { ; CHECK-NEXT: ld.param.u32 %r1, [test1_param_0]; ; CHECK-NEXT: ld.param.u32 %r2, [test1_param_1]; ; CHECK-NEXT: mad.lo.s32 %r3, %r2, %r1, %r2; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %add = add i32 %n, 1 %mul = mul i32 %add, %m @@ -31,7 +31,7 @@ define i32 @test1_rev(i32 %n, i32 %m) { ; CHECK-NEXT: ld.param.u32 %r1, [test1_rev_param_0]; ; CHECK-NEXT: ld.param.u32 %r2, [test1_rev_param_1]; ; CHECK-NEXT: mad.lo.s32 %r3, %r2, %r1, %r2; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; 
CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %add = add i32 %n, 1 %mul = mul i32 %m, %add @@ -53,7 +53,7 @@ define i32 @test2(i32 %n, i32 %m, i32 %s) { ; CHECK-NEXT: setp.lt.s32 %p1, %r3, 1; ; CHECK-NEXT: mad.lo.s32 %r4, %r2, %r1, %r2; ; CHECK-NEXT: selp.b32 %r5, %r2, %r4, %p1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; %add = add i32 %n, 1 %cond = icmp slt i32 %s, 1 @@ -77,7 +77,7 @@ define i32 @test2_rev1(i32 %n, i32 %m, i32 %s) { ; CHECK-NEXT: setp.lt.s32 %p1, %r3, 1; ; CHECK-NEXT: mad.lo.s32 %r4, %r2, %r1, %r2; ; CHECK-NEXT: selp.b32 %r5, %r4, %r2, %p1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; %add = add i32 %n, 1 %cond = icmp slt i32 %s, 1 @@ -101,7 +101,7 @@ define i32 @test2_rev2(i32 %n, i32 %m, i32 %s) { ; CHECK-NEXT: setp.lt.s32 %p1, %r3, 1; ; CHECK-NEXT: mad.lo.s32 %r4, %r2, %r1, %r2; ; CHECK-NEXT: selp.b32 %r5, %r4, %r2, %p1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; %add = add i32 %n, 1 %cond = icmp slt i32 %s, 1 @@ -126,7 +126,7 @@ define i32 @test3(i32 %n, i32 %m, i32 %s) { ; CHECK-NEXT: setp.lt.s32 %p1, %r4, 1; ; CHECK-NEXT: selp.b32 %r5, 1, %r2, %p1; ; CHECK-NEXT: mul.lo.s32 %r6, %r5, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r6; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; ; CHECK-NEXT: ret; %add = add i32 %n, 3 %cond = icmp slt i32 %s, 1 @@ -152,7 +152,7 @@ define i32 @test4(i32 %a, i32 %b, i32 %c, i1 %p) { ; CHECK-NEXT: ld.param.u32 %r3, [test4_param_2]; ; CHECK-NEXT: mad.lo.s32 %r4, %r1, %r2, %r3; ; CHECK-NEXT: selp.b32 %r5, %r4, %r3, %p1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; %mul = mul i32 %a, %b %sel = select i1 %p, i32 %mul, i32 0 @@ -176,7 +176,7 @@ define i32 @test4_rev(i32 %a, i32 %b, i32 %c, i1 %p) { ; CHECK-NEXT: ld.param.u32 %r3, [test4_rev_param_2]; ; CHECK-NEXT: mad.lo.s32 %r4, %r1, %r2, %r3; ; CHECK-NEXT: selp.b32 %r5, %r3, %r4, %p1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; %mul = mul i32 %a, %b %sel = select i1 %p, i32 0, i32 %mul diff --git a/llvm/test/CodeGen/NVPTX/compute-ptx-value-vts.ll b/llvm/test/CodeGen/NVPTX/compute-ptx-value-vts.ll index a88c5637f089b1..5deafb3ceed784 100644 --- a/llvm/test/CodeGen/NVPTX/compute-ptx-value-vts.ll +++ b/llvm/test/CodeGen/NVPTX/compute-ptx-value-vts.ll @@ -10,7 +10,7 @@ define <6 x half> @half6() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b16 %rs1, 0x0000; -; CHECK-NEXT: st.param.v4.b16 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b16 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b16 [func_retval0+8], {%rs1, %rs1}; ; CHECK-NEXT: ret; ret <6 x half> zeroinitializer @@ -23,7 +23,7 @@ define <10 x half> @half10() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b16 %rs1, 0x0000; -; CHECK-NEXT: st.param.v4.b16 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b16 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b16 [func_retval0+8], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b16 [func_retval0+16], {%rs1, %rs1}; ; CHECK-NEXT: ret; @@ -37,7 +37,7 @@ define <12 x i8> @byte12() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b8 
[func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+4], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+8], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: ret; @@ -51,7 +51,7 @@ define <20 x i8> @byte20() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b8 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+4], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+8], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+12], {%rs1, %rs1, %rs1, %rs1}; diff --git a/llvm/test/CodeGen/NVPTX/convert-int-sm20.ll b/llvm/test/CodeGen/NVPTX/convert-int-sm20.ll index d7e2cede8a9915..b1850185f0c763 100644 --- a/llvm/test/CodeGen/NVPTX/convert-int-sm20.ll +++ b/llvm/test/CodeGen/NVPTX/convert-int-sm20.ll @@ -11,7 +11,7 @@ define i16 @cvt_i16_i32(i32 %x) { ; CHECK: ld.param.u16 %r[[R0:[0-9]+]], [cvt_i16_i32_param_{{[0-9]+}}] -; CHECK: st.param.b32 [func_retval{{[0-9]+}}+0], %r[[R0]] +; CHECK: st.param.b32 [func_retval{{[0-9]+}}], %r[[R0]] ; CHECK: ret %a = trunc i32 %x to i16 ret i16 %a @@ -19,7 +19,7 @@ define i16 @cvt_i16_i32(i32 %x) { define i16 @cvt_i16_i64(i64 %x) { ; CHECK: ld.param.u16 %r[[R0:[0-9]+]], [cvt_i16_i64_param_{{[0-9]+}}] -; CHECK: st.param.b32 [func_retval{{[0-9]+}}+0], %r[[R0]] +; CHECK: st.param.b32 [func_retval{{[0-9]+}}], %r[[R0]] ; CHECK: ret %a = trunc i64 %x to i16 ret i16 %a @@ -31,7 +31,7 @@ define i16 @cvt_i16_i64(i64 %x) { define i32 @cvt_i32_i16(i16 %x) { ; CHECK: ld.param.u16 %r[[R0:[0-9]+]], [cvt_i32_i16_param_{{[0-9]+}}] -; CHECK: st.param.b32 [func_retval{{[0-9]+}}+0], %r[[R0]] +; CHECK: st.param.b32 [func_retval{{[0-9]+}}], %r[[R0]] ; CHECK: ret %a = zext i16 %x to i32 ret i32 %a @@ -39,7 +39,7 @@ define i32 @cvt_i32_i16(i16 %x) { define i32 @cvt_i32_i64(i64 %x) { ; CHECK: ld.param.u32 %r[[R0:[0-9]+]], [cvt_i32_i64_param_{{[0-9]+}}] -; CHECK: st.param.b32 [func_retval{{[0-9]+}}+0], %r[[R0]] +; CHECK: st.param.b32 [func_retval{{[0-9]+}}], %r[[R0]] ; CHECK: ret %a = trunc i64 %x to i32 ret i32 %a @@ -51,7 +51,7 @@ define i32 @cvt_i32_i64(i64 %x) { define i64 @cvt_i64_i16(i16 %x) { ; CHECK: ld.param.u16 %rd[[R0:[0-9]+]], [cvt_i64_i16_param_{{[0-9]+}}] -; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rd[[R0]] +; CHECK: st.param.b64 [func_retval{{[0-9]+}}], %rd[[R0]] ; CHECK: ret %a = zext i16 %x to i64 ret i64 %a @@ -59,7 +59,7 @@ define i64 @cvt_i64_i16(i16 %x) { define i64 @cvt_i64_i32(i32 %x) { ; CHECK: ld.param.u32 %rd[[R0:[0-9]+]], [cvt_i64_i32_param_{{[0-9]+}}] -; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rd[[R0]] +; CHECK: st.param.b64 [func_retval{{[0-9]+}}], %rd[[R0]] ; CHECK: ret %a = zext i32 %x to i64 ret i64 %a diff --git a/llvm/test/CodeGen/NVPTX/copysign.ll b/llvm/test/CodeGen/NVPTX/copysign.ll index a6aad1c2f012b3..ba7db68b3977d0 100644 --- a/llvm/test/CodeGen/NVPTX/copysign.ll +++ b/llvm/test/CodeGen/NVPTX/copysign.ll @@ -14,7 +14,7 @@ define float @fcopysign_f_f(float %a, float %b) { ; CHECK-NEXT: ld.param.f32 %f1, [fcopysign_f_f_param_0]; ; CHECK-NEXT: ld.param.f32 %f2, [fcopysign_f_f_param_1]; ; CHECK-NEXT: copysign.f32 %f3, %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-NEXT: ret; %val = call float @llvm.copysign.f32(float %a, float %b) ret float %val @@ -29,7 +29,7 @@ 
define double @fcopysign_d_d(double %a, double %b) { ; CHECK-NEXT: ld.param.f64 %fd1, [fcopysign_d_d_param_0]; ; CHECK-NEXT: ld.param.f64 %fd2, [fcopysign_d_d_param_1]; ; CHECK-NEXT: copysign.f64 %fd3, %fd2, %fd1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd3; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd3; ; CHECK-NEXT: ret; %val = call double @llvm.copysign.f64(double %a, double %b) ret double %val @@ -51,7 +51,7 @@ define float @fcopysign_f_d(float %a, double %b) { ; CHECK-NEXT: and.b64 %rd3, %rd2, 1; ; CHECK-NEXT: setp.eq.b64 %p1, %rd3, 1; ; CHECK-NEXT: selp.f32 %f4, %f3, %f2, %p1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f4; +; CHECK-NEXT: st.param.f32 [func_retval0], %f4; ; CHECK-NEXT: ret; %c = fptrunc double %b to float %val = call float @llvm.copysign.f32(float %a, float %c) @@ -74,7 +74,7 @@ define float @fcopysign_f_h(float %a, half %b) { ; CHECK-NEXT: and.b16 %rs3, %rs2, 1; ; CHECK-NEXT: setp.eq.b16 %p1, %rs3, 1; ; CHECK-NEXT: selp.f32 %f4, %f3, %f2, %p1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f4; +; CHECK-NEXT: st.param.f32 [func_retval0], %f4; ; CHECK-NEXT: ret; %c = fpext half %b to float %val = call float @llvm.copysign.f32(float %a, float %c) @@ -97,7 +97,7 @@ define double @fcopysign_d_f(double %a, float %b) { ; CHECK-NEXT: and.b32 %r3, %r2, 1; ; CHECK-NEXT: setp.eq.b32 %p1, %r3, 1; ; CHECK-NEXT: selp.f64 %fd4, %fd3, %fd2, %p1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd4; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd4; ; CHECK-NEXT: ret; %c = fpext float %b to double %val = call double @llvm.copysign.f64(double %a, double %c) @@ -120,7 +120,7 @@ define double @fcopysign_d_h(double %a, half %b) { ; CHECK-NEXT: and.b16 %rs3, %rs2, 1; ; CHECK-NEXT: setp.eq.b16 %p1, %rs3, 1; ; CHECK-NEXT: selp.f64 %fd4, %fd3, %fd2, %p1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd4; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd4; ; CHECK-NEXT: ret; %c = fpext half %b to double %val = call double @llvm.copysign.f64(double %a, double %c) diff --git a/llvm/test/CodeGen/NVPTX/dot-product.ll b/llvm/test/CodeGen/NVPTX/dot-product.ll index 36529bbef90332..8d3d7238d36fd5 100644 --- a/llvm/test/CodeGen/NVPTX/dot-product.ll +++ b/llvm/test/CodeGen/NVPTX/dot-product.ll @@ -19,7 +19,7 @@ define i32 @test_dp4a_u32_u32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp4a_u32_u32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp4a_u32_u32_param_2]; ; CHECK-NEXT: dp4a.u32.u32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp4a.u.u(i32 %a, i32 %b, i32 %c) ret i32 %call @@ -34,7 +34,7 @@ define i32 @test_dp4a_u32imm_u32imm(i32 %c) { ; CHECK-NEXT: ld.param.u32 %r1, [test_dp4a_u32imm_u32imm_param_0]; ; CHECK-NEXT: mov.b32 %r2, 0; ; CHECK-NEXT: dp4a.u32.u32 %r3, %r2, %r2, %r1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp4a.u.u(i32 0, i32 0, i32 %c) ret i32 %call @@ -50,7 +50,7 @@ define i32 @test_dp4a_u32_s32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp4a_u32_s32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp4a_u32_s32_param_2]; ; CHECK-NEXT: dp4a.u32.s32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp4a.u.s(i32 %a, i32 %b, i32 %c) ret i32 %call @@ -66,7 +66,7 @@ define i32 
@test_dp4a_s32_u32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp4a_s32_u32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp4a_s32_u32_param_2]; ; CHECK-NEXT: dp4a.s32.u32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp4a.s.u(i32 %a, i32 %b, i32 %c) ret i32 %call @@ -82,7 +82,7 @@ define i32 @test_dp4a_s32_s32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp4a_s32_s32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp4a_s32_s32_param_2]; ; CHECK-NEXT: dp4a.s32.s32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp4a.s.s(i32 %a, i32 %b, i32 %c) ret i32 %call @@ -103,7 +103,7 @@ define i32 @test_dp2a_lo_u32_u32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp2a_lo_u32_u32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp2a_lo_u32_u32_param_2]; ; CHECK-NEXT: dp2a.lo.u32.u32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp2a.u.u(i32 %a, i32 %b, i1 0, i32 %c) ret i32 %call @@ -119,7 +119,7 @@ define i32 @test_dp2a_lo_u32_s32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp2a_lo_u32_s32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp2a_lo_u32_s32_param_2]; ; CHECK-NEXT: dp2a.lo.u32.s32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp2a.u.s(i32 %a, i32 %b, i1 0, i32 %c) ret i32 %call @@ -135,7 +135,7 @@ define i32 @test_dp2a_lo_s32_u32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp2a_lo_s32_u32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp2a_lo_s32_u32_param_2]; ; CHECK-NEXT: dp2a.lo.s32.u32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp2a.s.u(i32 %a, i32 %b, i1 0, i32 %c) ret i32 %call @@ -151,7 +151,7 @@ define i32 @test_dp2a_lo_s32_s32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp2a_lo_s32_s32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp2a_lo_s32_s32_param_2]; ; CHECK-NEXT: dp2a.lo.s32.s32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp2a.s.s(i32 %a, i32 %b, i1 0, i32 %c) ret i32 %call @@ -167,7 +167,7 @@ define i32 @test_dp2a_hi_u32_u32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp2a_hi_u32_u32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp2a_hi_u32_u32_param_2]; ; CHECK-NEXT: dp2a.hi.u32.u32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp2a.u.u(i32 %a, i32 %b, i1 1, i32 %c) ret i32 %call @@ -183,7 +183,7 @@ define i32 @test_dp2a_hi_u32_s32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp2a_hi_u32_s32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp2a_hi_u32_s32_param_2]; ; CHECK-NEXT: dp2a.hi.u32.s32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp2a.u.s(i32 
%a, i32 %b, i1 1, i32 %c) ret i32 %call @@ -199,7 +199,7 @@ define i32 @test_dp2a_hi_s32_u32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp2a_hi_s32_u32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp2a_hi_s32_u32_param_2]; ; CHECK-NEXT: dp2a.hi.s32.u32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp2a.s.u(i32 %a, i32 %b, i1 1, i32 %c) ret i32 %call @@ -215,7 +215,7 @@ define i32 @test_dp2a_hi_s32_s32(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [test_dp2a_hi_s32_s32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_dp2a_hi_s32_s32_param_2]; ; CHECK-NEXT: dp2a.hi.s32.s32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %call = call i32 @llvm.nvvm.idp2a.s.s(i32 %a, i32 %b, i1 1, i32 %c) ret i32 %call diff --git a/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll b/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll index ce81957f2a3934..44f39df0249008 100644 --- a/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll +++ b/llvm/test/CodeGen/NVPTX/dynamic_stackalloc.ll @@ -18,7 +18,7 @@ ; CHECK-32-NEXT: cvta.local.u32 %r[[ALLOCA]], %r[[ALLOCA]]; ; CHECK-32-NEXT: { // callseq 0, 0 ; CHECK-32-NEXT: .param .b32 param0; -; CHECK-32-NEXT: st.param.b32 [param0+0], %r[[ALLOCA]]; +; CHECK-32-NEXT: st.param.b32 [param0], %r[[ALLOCA]]; ; CHECK-64: ld.param.u64 %rd[[SIZE:[0-9]]], [test_dynamic_stackalloc_param_0]; ; CHECK-64-NEXT: add.s64 %rd[[SIZE2:[0-9]]], %rd[[SIZE]], 7; @@ -27,7 +27,7 @@ ; CHECK-64-NEXT: cvta.local.u64 %rd[[ALLOCA]], %rd[[ALLOCA]]; ; CHECK-64-NEXT: { // callseq 0, 0 ; CHECK-64-NEXT: .param .b64 param0; -; CHECK-64-NEXT: st.param.b64 [param0+0], %rd[[ALLOCA]]; +; CHECK-64-NEXT: st.param.b64 [param0], %rd[[ALLOCA]]; ; CHECK-NEXT: .param .b32 retval0; ; CHECK-NEXT: call.uni (retval0), diff --git a/llvm/test/CodeGen/NVPTX/elect.ll b/llvm/test/CodeGen/NVPTX/elect.ll index 358dfef9185238..71e1111562f26f 100644 --- a/llvm/test/CodeGen/NVPTX/elect.ll +++ b/llvm/test/CodeGen/NVPTX/elect.ll @@ -16,7 +16,7 @@ define {i32, i1} @elect_sync(i32 %mask) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [elect_sync_param_0]; ; CHECK-NEXT: elect.sync %r2|%p1, %r1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p1; ; CHECK-NEXT: st.param.b8 [func_retval0+4], %rs1; ; CHECK-NEXT: ret; @@ -33,7 +33,7 @@ define {i32, i1} @elect_sync_imm() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: elect.sync %r1|%p1, -1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p1; ; CHECK-NEXT: st.param.b8 [func_retval0+4], %rs1; ; CHECK-NEXT: ret; @@ -54,7 +54,7 @@ define {i32, i1} @elect_sync_twice(i32 %mask) { ; CHECK-NEXT: ld.param.u32 %r1, [elect_sync_twice_param_0]; ; CHECK-NEXT: elect.sync %r2|%p1, %r1; ; CHECK-NEXT: elect.sync %r3|%p2, %r1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: selp.u16 %rs1, -1, 0, %p1; ; CHECK-NEXT: st.param.b8 [func_retval0+4], %rs1; ; CHECK-NEXT: ret; diff --git a/llvm/test/CodeGen/NVPTX/extractelement.ll b/llvm/test/CodeGen/NVPTX/extractelement.ll index 367c20749a9f36..9b2d514f2a1cb1 100644 --- a/llvm/test/CodeGen/NVPTX/extractelement.ll +++ b/llvm/test/CodeGen/NVPTX/extractelement.ll @@ -16,7 +16,7 @@ 
define i16 @test_v2i8(i16 %a) { ; CHECK-NEXT: shr.s16 %rs3, %rs1, 8; ; CHECK-NEXT: add.s16 %rs4, %rs2, %rs3; ; CHECK-NEXT: cvt.u32.u16 %r1, %rs4; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %v = bitcast i16 %a to <2 x i8> %r0 = extractelement <2 x i8> %v, i64 0 @@ -42,7 +42,7 @@ define i1 @test_v2i8_load(ptr %a) { ; CHECK-NEXT: and.b16 %rs6, %rs5, 255; ; CHECK-NEXT: setp.eq.s16 %p1, %rs6, 0; ; CHECK-NEXT: selp.u32 %r1, 1, 0, %p1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %v = load <2 x i8>, ptr %a, align 4 %r0 = extractelement <2 x i8> %v, i64 0 @@ -72,7 +72,7 @@ define i16 @test_v4i8(i32 %a) { ; CHECK-NEXT: add.s16 %rs6, %rs3, %rs4; ; CHECK-NEXT: add.s16 %rs7, %rs5, %rs6; ; CHECK-NEXT: cvt.u32.u16 %r6, %rs7; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r6; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; ; CHECK-NEXT: ret; %v = bitcast i32 %a to <4 x i8> %r0 = extractelement <4 x i8> %v, i64 0 @@ -103,7 +103,7 @@ define i32 @test_v4i8_s32(i32 %a) { ; CHECK-NEXT: add.s32 %r6, %r2, %r3; ; CHECK-NEXT: add.s32 %r7, %r4, %r5; ; CHECK-NEXT: add.s32 %r8, %r6, %r7; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r8; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-NEXT: ret; %v = bitcast i32 %a to <4 x i8> %r0 = extractelement <4 x i8> %v, i64 0 @@ -134,7 +134,7 @@ define i32 @test_v4i8_u32(i32 %a) { ; CHECK-NEXT: add.s32 %r6, %r2, %r3; ; CHECK-NEXT: add.s32 %r7, %r4, %r5; ; CHECK-NEXT: add.s32 %r8, %r6, %r7; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r8; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-NEXT: ret; %v = bitcast i32 %a to <4 x i8> %r0 = extractelement <4 x i8> %v, i64 0 @@ -188,7 +188,7 @@ define i16 @test_v8i8(i64 %a) { ; CHECK-NEXT: add.s16 %rs14, %rs11, %rs12; ; CHECK-NEXT: add.s16 %rs15, %rs13, %rs14; ; CHECK-NEXT: cvt.u32.u16 %r13, %rs15; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r13; +; CHECK-NEXT: st.param.b32 [func_retval0], %r13; ; CHECK-NEXT: ret; %v = bitcast i64 %a to <8 x i8> %r0 = extractelement <8 x i8> %v, i64 0 diff --git a/llvm/test/CodeGen/NVPTX/f16-instructions.ll b/llvm/test/CodeGen/NVPTX/f16-instructions.ll index 14e02a49f6e5e4..f78cfc31726217 100644 --- a/llvm/test/CodeGen/NVPTX/f16-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/f16-instructions.ll @@ -44,7 +44,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" ; CHECK-LABEL: test_ret_const( ; CHECK: mov.b16 [[R:%rs[0-9]+]], 0x3C00; -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_ret_const() #0 { ret half 1.0 @@ -59,7 +59,7 @@ define half @test_ret_const() #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]] ; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]]; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_fadd(half %a, half %b) #0 { %r = fadd half %a, %b @@ -75,7 +75,7 @@ define half @test_fadd(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]] ; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]]; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define <1 x half> @test_fadd_v1f16(<1 x half> 
%a, <1 x half> %b) #0 { %r = fadd <1 x half> %a, %b @@ -92,7 +92,7 @@ define <1 x half> @test_fadd_v1f16(<1 x half> %a, <1 x half> %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]] ; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], 0f3F800000; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_fadd_imm_0(half %b) #0 { %r = fadd half 1.0, %b @@ -108,7 +108,7 @@ define half @test_fadd_imm_0(half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]] ; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], 0f3F800000; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_fadd_imm_1(half %a) #0 { %r = fadd half %a, 1.0 @@ -124,7 +124,7 @@ define half @test_fadd_imm_1(half %a) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]] ; CHECK-NOF16-NEXT: sub.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]]; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_fsub(half %a, half %b) #0 { %r = fsub half %a, %b @@ -141,7 +141,7 @@ define half @test_fsub(half %a, half %b) #0 { ; CHECK-NOF16-DAG: mov.f32 [[Z:%f[0-9]+]], 0f00000000; ; CHECK-NOF16-NEXT: sub.rn.f32 [[R32:%f[0-9]+]], [[Z]], [[A32]]; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_old_fneg(half %a) #0 { %r = fsub half 0.0, %a @@ -153,7 +153,7 @@ define half @test_old_fneg(half %a) #0 { ; CHECK-F16-NOFTZ-NEXT: neg.f16 [[R:%rs[0-9]+]], [[A]]; ; CHECK-F16-FTZ-NEXT: neg.ftz.f16 [[R:%rs[0-9]+]], [[A]]; ; CHECK-NOF16-NEXT: xor.b16 [[R:%rs[0-9]+]], [[A]], -32768; -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_fneg(half %a) #0 { %r = fneg half %a @@ -169,7 +169,7 @@ define half @test_fneg(half %a) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[B32:%f[0-9]+]], [[B]] ; CHECK-NOF16-NEXT: mul.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]]; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_fmul(half %a, half %b) #0 { %r = fmul half %a, %b @@ -186,7 +186,7 @@ define half @test_fmul(half %a, half %b) #0 { ; CHECK-F16-FTZ-DAG: cvt.ftz.f32.f16 [[F1:%f[0-9]+]], [[B]]; ; CHECK-F16-FTZ-NEXT: div.rn.ftz.f32 [[FR:%f[0-9]+]], [[F0]], [[F1]]; ; CHECK-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[FR]]; -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_fdiv(half %a, half %b) #0 { %r = fdiv half %a, %b @@ -211,7 +211,7 @@ define half @test_fdiv(half %a, half %b) #0 { ; CHECK-NEXT: testp.infinite.f32 [[ISBINF:%p[0-9]+]], [[FB]]; ; CHECK-NEXT: selp.f32 [[RESULT:%f[0-9]+]], [[FA]], [[RF]], [[ISBINF]]; ; CHECK-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[RESULT]]; -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_frem(half %a, half %b) #0 { %r = frem half %a, %b 
@@ -231,7 +231,7 @@ define void @test_store(half %a, ptr %b) #0 { ; CHECK-LABEL: test_load( ; CHECK: ld.param.u64 %[[PTR:rd[0-9]+]], [test_load_param_0]; ; CHECK-NEXT: ld.b16 [[R:%rs[0-9]+]], [%[[PTR]]]; -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_load(ptr %a) #0 { %r = load half, ptr %a @@ -260,8 +260,8 @@ declare half @test_callee(half %a, half %b) #0 ; CHECK: { ; CHECK-DAG: .param .align 2 .b8 param0[2]; ; CHECK-DAG: .param .align 2 .b8 param1[2]; -; CHECK-DAG: st.param.b16 [param0+0], [[A]]; -; CHECK-DAG: st.param.b16 [param1+0], [[B]]; +; CHECK-DAG: st.param.b16 [param0], [[A]]; +; CHECK-DAG: st.param.b16 [param1], [[B]]; ; CHECK-DAG: .param .align 2 .b8 retval0[2]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: test_callee, @@ -269,9 +269,9 @@ declare half @test_callee(half %a, half %b) #0 ; CHECK-NEXT: param0, ; CHECK-NEXT: param1 ; CHECK-NEXT: ); -; CHECK-NEXT: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0]; +; CHECK-NEXT: ld.param.b16 [[R:%rs[0-9]+]], [retval0]; ; CHECK-NEXT: } -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_call(half %a, half %b) #0 { %r = call half @test_callee(half %a, half %b) @@ -284,8 +284,8 @@ define half @test_call(half %a, half %b) #0 { ; CHECK: { ; CHECK-DAG: .param .align 2 .b8 param0[2]; ; CHECK-DAG: .param .align 2 .b8 param1[2]; -; CHECK-DAG: st.param.b16 [param0+0], [[B]]; -; CHECK-DAG: st.param.b16 [param1+0], [[A]]; +; CHECK-DAG: st.param.b16 [param0], [[B]]; +; CHECK-DAG: st.param.b16 [param1], [[A]]; ; CHECK-DAG: .param .align 2 .b8 retval0[2]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: test_callee, @@ -293,9 +293,9 @@ define half @test_call(half %a, half %b) #0 { ; CHECK-NEXT: param0, ; CHECK-NEXT: param1 ; CHECK-NEXT: ); -; CHECK-NEXT: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0]; +; CHECK-NEXT: ld.param.b16 [[R:%rs[0-9]+]], [retval0]; ; CHECK-NEXT: } -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_call_flipped(half %a, half %b) #0 { %r = call half @test_callee(half %b, half %a) @@ -308,8 +308,8 @@ define half @test_call_flipped(half %a, half %b) #0 { ; CHECK: { ; CHECK-DAG: .param .align 2 .b8 param0[2]; ; CHECK-DAG: .param .align 2 .b8 param1[2]; -; CHECK-DAG: st.param.b16 [param0+0], [[B]]; -; CHECK-DAG: st.param.b16 [param1+0], [[A]]; +; CHECK-DAG: st.param.b16 [param0], [[B]]; +; CHECK-DAG: st.param.b16 [param1], [[A]]; ; CHECK-DAG: .param .align 2 .b8 retval0[2]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: test_callee, @@ -317,9 +317,9 @@ define half @test_call_flipped(half %a, half %b) #0 { ; CHECK-NEXT: param0, ; CHECK-NEXT: param1 ; CHECK-NEXT: ); -; CHECK-NEXT: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0]; +; CHECK-NEXT: ld.param.b16 [[R:%rs[0-9]+]], [retval0]; ; CHECK-NEXT: } -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_tailcall_flipped(half %a, half %b) #0 { %r = tail call half @test_callee(half %b, half %a) @@ -331,7 +331,7 @@ define half @test_tailcall_flipped(half %a, half %b) #0 { ; CHECK-DAG: ld.param.b16 [[B:%rs[0-9]+]], [test_select_param_1]; ; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1; ; CHECK-NEXT: selp.b16 [[R:%rs[0-9]+]], [[A]], [[B]], [[PRED]]; -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; 
CHECK-NEXT: ret; define half @test_select(half %a, half %b, i1 zeroext %c) #0 { %r = select i1 %c, half %a, half %b @@ -348,7 +348,7 @@ define half @test_select(half %a, half %b, i1 zeroext %c) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[CF:%f[0-9]+]], [[C]]; ; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[CF]], [[DF]] ; CHECK: selp.b16 [[R:%rs[0-9]+]], [[A]], [[B]], [[PRED]]; -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_select_cc(half %a, half %b, half %c, half %d) #0 { %cc = fcmp une half %c, %d @@ -367,7 +367,7 @@ define half @test_select_cc(half %a, half %b, half %c, half %d) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[CF:%f[0-9]+]], [[C]]; ; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[CF]], [[DF]] ; CHECK-NEXT: selp.f32 [[R:%f[0-9]+]], [[A]], [[B]], [[PRED]]; -; CHECK-NEXT: st.param.f32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.f32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define float @test_select_cc_f32_f16(float %a, float %b, half %c, half %d) #0 { %cc = fcmp une half %c, %d @@ -383,7 +383,7 @@ define float @test_select_cc_f32_f16(float %a, float %b, half %c, half %d) #0 { ; CHECK-F16-FTZ-DAG: setp.neu.ftz.f32 [[PRED:%p[0-9]+]], [[C]], [[D]] ; CHECK-DAG: ld.param.b16 [[B:%rs[0-9]+]], [test_select_cc_f16_f32_param_1]; ; CHECK-NEXT: selp.b16 [[R:%rs[0-9]+]], [[A]], [[B]], [[PRED]]; -; CHECK-NEXT: st.param.b16 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b16 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define half @test_select_cc_f16_f32(half %a, half %b, float %c, float %d) #0 { %cc = fcmp une float %c, %d @@ -400,7 +400,7 @@ define half @test_select_cc_f16_f32(half %a, half %b, float %c, float %d) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.neu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_une(half %a, half %b) #0 { %r = fcmp une half %a, %b @@ -416,7 +416,7 @@ define i1 @test_fcmp_une(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.equ.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_ueq(half %a, half %b) #0 { %r = fcmp ueq half %a, %b @@ -432,7 +432,7 @@ define i1 @test_fcmp_ueq(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.gtu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_ugt(half %a, half %b) #0 { %r = fcmp ugt half %a, %b @@ -448,7 +448,7 @@ define i1 @test_fcmp_ugt(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.geu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_uge(half %a, half %b) #0 { %r = fcmp uge half %a, %b @@ -464,7 +464,7 @@ define i1 @test_fcmp_uge(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: 
setp.ltu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_ult(half %a, half %b) #0 { %r = fcmp ult half %a, %b @@ -480,7 +480,7 @@ define i1 @test_fcmp_ult(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.leu.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_ule(half %a, half %b) #0 { %r = fcmp ule half %a, %b @@ -497,7 +497,7 @@ define i1 @test_fcmp_ule(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.nan.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_uno(half %a, half %b) #0 { %r = fcmp uno half %a, %b @@ -513,7 +513,7 @@ define i1 @test_fcmp_uno(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.ne.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_one(half %a, half %b) #0 { %r = fcmp one half %a, %b @@ -529,7 +529,7 @@ define i1 @test_fcmp_one(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.eq.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_oeq(half %a, half %b) #0 { %r = fcmp oeq half %a, %b @@ -545,7 +545,7 @@ define i1 @test_fcmp_oeq(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.gt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_ogt(half %a, half %b) #0 { %r = fcmp ogt half %a, %b @@ -561,7 +561,7 @@ define i1 @test_fcmp_ogt(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.ge.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_oge(half %a, half %b) #0 { %r = fcmp oge half %a, %b @@ -577,7 +577,7 @@ define i1 @test_fcmp_oge(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.lt.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_olt(half %a, half %b) #0 { %r = fcmp olt half %a, %b @@ -593,7 +593,7 @@ define i1 @test_fcmp_olt(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.le.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: 
selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_ole(half %a, half %b) #0 { %r = fcmp ole half %a, %b @@ -609,7 +609,7 @@ define i1 @test_fcmp_ole(half %a, half %b) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[BF:%f[0-9]+]], [[B]]; ; CHECK-NOF16: setp.num.f32 [[PRED:%p[0-9]+]], [[AF]], [[BF]] ; CHECK-NEXT: selp.u32 [[R:%r[0-9]+]], 1, 0, [[PRED]]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]]; +; CHECK-NEXT: st.param.b32 [func_retval0], [[R]]; ; CHECK-NEXT: ret; define i1 @test_fcmp_ord(half %a, half %b) #0 { %r = fcmp ord half %a, %b @@ -649,13 +649,13 @@ else: ; CHECK: mov.u16 [[R:%rs[0-9]+]], [[AB:%rs[0-9]+]]; ; CHECK: ld.b16 [[AB:%rs[0-9]+]], [%[[P1]]]; ; CHECK: { -; CHECK: st.param.b64 [param0+0], %[[P1]]; +; CHECK: st.param.b64 [param0], %[[P1]]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: test_dummy ; CHECK: } ; CHECK: setp.eq.b32 [[PRED:%p[0-9]+]], %r{{[0-9]+}}, 1; ; CHECK: @[[PRED]] bra [[LOOP]]; -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_phi(ptr %p1) #0 { entry: @@ -674,7 +674,7 @@ declare i1 @test_dummy(ptr %p1) #0 ; CHECK-LABEL: test_fptosi_i32( ; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_fptosi_i32_param_0]; ; CHECK: cvt.rzi.s32.f16 [[R:%r[0-9]+]], [[A]]; -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define i32 @test_fptosi_i32(half %a) #0 { %r = fptosi half %a to i32 @@ -684,7 +684,7 @@ define i32 @test_fptosi_i32(half %a) #0 { ; CHECK-LABEL: test_fptosi_i64( ; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_fptosi_i64_param_0]; ; CHECK: cvt.rzi.s64.f16 [[R:%rd[0-9]+]], [[A]]; -; CHECK: st.param.b64 [func_retval0+0], [[R]]; +; CHECK: st.param.b64 [func_retval0], [[R]]; ; CHECK: ret; define i64 @test_fptosi_i64(half %a) #0 { %r = fptosi half %a to i64 @@ -694,7 +694,7 @@ define i64 @test_fptosi_i64(half %a) #0 { ; CHECK-LABEL: test_fptoui_i32( ; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_fptoui_i32_param_0]; ; CHECK: cvt.rzi.u32.f16 [[R:%r[0-9]+]], [[A]]; -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: st.param.b32 [func_retval0], [[R]]; ; CHECK: ret; define i32 @test_fptoui_i32(half %a) #0 { %r = fptoui half %a to i32 @@ -704,7 +704,7 @@ define i32 @test_fptoui_i32(half %a) #0 { ; CHECK-LABEL: test_fptoui_i64( ; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_fptoui_i64_param_0]; ; CHECK: cvt.rzi.u64.f16 [[R:%rd[0-9]+]], [[A]]; -; CHECK: st.param.b64 [func_retval0+0], [[R]]; +; CHECK: st.param.b64 [func_retval0], [[R]]; ; CHECK: ret; define i64 @test_fptoui_i64(half %a) #0 { %r = fptoui half %a to i64 @@ -714,7 +714,7 @@ define i64 @test_fptoui_i64(half %a) #0 { ; CHECK-LABEL: test_uitofp_i32( ; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_uitofp_i32_param_0]; ; CHECK: cvt.rn.f16.u32 [[R:%rs[0-9]+]], [[A]]; -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_uitofp_i32(i32 %a) #0 { %r = uitofp i32 %a to half @@ -724,7 +724,7 @@ define half @test_uitofp_i32(i32 %a) #0 { ; CHECK-LABEL: test_uitofp_i64( ; CHECK: ld.param.u64 [[A:%rd[0-9]+]], [test_uitofp_i64_param_0]; ; CHECK: cvt.rn.f16.u64 [[R:%rs[0-9]+]], [[A]]; -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_uitofp_i64(i64 %a) #0 { %r = uitofp i64 %a to half @@ -734,7 +734,7 @@ define half 
@test_uitofp_i64(i64 %a) #0 { ; CHECK-LABEL: test_sitofp_i32( ; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_sitofp_i32_param_0]; ; CHECK: cvt.rn.f16.s32 [[R:%rs[0-9]+]], [[A]]; -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_sitofp_i32(i32 %a) #0 { %r = sitofp i32 %a to half @@ -744,7 +744,7 @@ define half @test_sitofp_i32(i32 %a) #0 { ; CHECK-LABEL: test_sitofp_i64( ; CHECK: ld.param.u64 [[A:%rd[0-9]+]], [test_sitofp_i64_param_0]; ; CHECK: cvt.rn.f16.s64 [[R:%rs[0-9]+]], [[A]]; -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_sitofp_i64(i64 %a) #0 { %r = sitofp i64 %a to half @@ -761,7 +761,7 @@ define half @test_sitofp_i64(i64 %a) #0 { ; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]] ; CHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], [[C32]]; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_uitofp_i32_fadd(i32 %a, half %b) #0 { %c = uitofp i32 %a to half @@ -779,7 +779,7 @@ define half @test_uitofp_i32_fadd(i32 %a, half %b) #0 { ; XCHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]] ; XCHECK-NOF16-NEXT: add.rn.f32 [[R32:%f[0-9]+]], [[B32]], [[C32]]; ; XCHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]] -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 { %c = sitofp i32 %a to half @@ -790,7 +790,7 @@ define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 { ; CHECK-LABEL: test_fptrunc_float( ; CHECK: ld.param.f32 [[A:%f[0-9]+]], [test_fptrunc_float_param_0]; ; CHECK: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[A]]; -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_fptrunc_float(float %a) #0 { %r = fptrunc float %a to half @@ -800,7 +800,7 @@ define half @test_fptrunc_float(float %a) #0 { ; CHECK-LABEL: test_fptrunc_double( ; CHECK: ld.param.f64 [[A:%fd[0-9]+]], [test_fptrunc_double_param_0]; ; CHECK: cvt.rn.f16.f64 [[R:%rs[0-9]+]], [[A]]; -; CHECK: st.param.b16 [func_retval0+0], [[R]]; +; CHECK: st.param.b16 [func_retval0], [[R]]; ; CHECK: ret; define half @test_fptrunc_double(double %a) #0 { %r = fptrunc double %a to half @@ -811,7 +811,7 @@ define half @test_fptrunc_double(double %a) #0 { ; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_fpext_float_param_0]; ; CHECK-NOFTZ: cvt.f32.f16 [[R:%f[0-9]+]], [[A]]; ; CHECK-F16-FTZ: cvt.ftz.f32.f16 [[R:%f[0-9]+]], [[A]]; -; CHECK: st.param.f32 [func_retval0+0], [[R]]; +; CHECK: st.param.f32 [func_retval0], [[R]]; ; CHECK: ret; define float @test_fpext_float(half %a) #0 { %r = fpext half %a to float @@ -821,7 +821,7 @@ define float @test_fpext_float(half %a) #0 { ; CHECK-LABEL: test_fpext_double( ; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_fpext_double_param_0]; ; CHECK: cvt.f64.f16 [[R:%fd[0-9]+]], [[A]]; -; CHECK: st.param.f64 [func_retval0+0], [[R]]; +; CHECK: st.param.f64 [func_retval0], [[R]]; ; CHECK: ret; define double @test_fpext_double(half %a) #0 { %r = fpext half %a to double @@ -832,7 +832,7 @@ define double @test_fpext_double(half %a) #0 { ; CHECK-LABEL: test_bitcast_halftoi16( ; CHECK: ld.param.b16 [[AH:%rs[0-9]+]], [test_bitcast_halftoi16_param_0]; ; CHECK: cvt.u32.u16 [[R:%r[0-9]+]], [[AH]] -; CHECK: st.param.b32 [func_retval0+0], [[R]]; +; CHECK: 
st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define i16 @test_bitcast_halftoi16(half %a) #0 {
%r = bitcast half %a to i16
@@ -841,7 +841,7 @@ define i16 @test_bitcast_halftoi16(half %a) #0 {
; CHECK-LABEL: test_bitcast_i16tohalf(
; CHECK: ld.param.u16 [[AS:%rs[0-9]+]], [test_bitcast_i16tohalf_param_0];
-; CHECK: st.param.b16 [func_retval0+0], [[AS]];
+; CHECK: st.param.b16 [func_retval0], [[AS]];
; CHECK: ret;
define half @test_bitcast_i16tohalf(i16 %a) #0 {
%r = bitcast i16 %a to half
@@ -880,7 +880,7 @@ declare half @llvm.fmuladd.f16(half %a, half %b, half %c) #0
; CHECK-F16-FTZ: cvt.ftz.f32.f16 [[AF:%f[0-9]+]], [[A]];
; CHECK-F16-FTZ: sqrt.rn.ftz.f32 [[RF:%f[0-9]+]], [[AF]];
; CHECK: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_sqrt(half %a) #0 {
%r = call half @llvm.sqrt.f16(half %a)
@@ -900,7 +900,7 @@ define half @test_sqrt(half %a) #0 {
; CHECK-F16-FTZ: cvt.ftz.f32.f16 [[AF:%f[0-9]+]], [[A]];
; CHECK: sin.approx.f32 [[RF:%f[0-9]+]], [[AF]];
; CHECK: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_sin(half %a) #0 #1 {
%r = call half @llvm.sin.f16(half %a)
@@ -913,7 +913,7 @@ define half @test_sin(half %a) #0 #1 {
; CHECK-F16-FTZ: cvt.ftz.f32.f16 [[AF:%f[0-9]+]], [[A]];
; CHECK: cos.approx.f32 [[RF:%f[0-9]+]], [[AF]];
; CHECK: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_cos(half %a) #0 #1 {
%r = call half @llvm.cos.f16(half %a)
@@ -973,7 +973,7 @@ define half @test_cos(half %a) #0 #1 {
; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
; CHECK-NOF16-NEXT: fma.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]], [[C32]];
; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]]
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret
define half @test_fma(half %a, half %b, half %c) #0 {
%r = call half @llvm.fma.f16(half %a, half %b, half %c)
@@ -987,7 +987,7 @@ define half @test_fma(half %a, half %b, half %c) #0 {
; CHECK-F16-FTZ: cvt.ftz.f32.f16 [[AF:%f[0-9]+]], [[A]];
; CHECK-F16-FTZ: abs.ftz.f32 [[RF:%f[0-9]+]], [[AF]];
; CHECK: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_fabs(half %a) #0 {
%r = call half @llvm.fabs.f16(half %a)
@@ -1004,7 +1004,7 @@ define half @test_fabs(half %a) #0 {
; CHECK-F16-FTZ-DAG: cvt.ftz.f32.f16 [[BF:%f[0-9]+]], [[B]];
; CHECK-F16-FTZ: min.ftz.f32 [[RF:%f[0-9]+]], [[AF]], [[BF]];
; CHECK: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_minnum(half %a, half %b) #0 {
%r = call half @llvm.minnum.f16(half %a, half %b)
@@ -1021,7 +1021,7 @@ define half @test_minnum(half %a, half %b) #0 {
; CHECK-F16-FTZ-DAG: cvt.ftz.f32.f16 [[BF:%f[0-9]+]], [[B]];
; CHECK-F16-FTZ: max.ftz.f32 [[RF:%f[0-9]+]], [[AF]], [[BF]];
; CHECK: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[RF]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_maxnum(half %a, half %b) #0 {
%r = call half @llvm.maxnum.f16(half %a, half %b)
@@ -1034,7 +1034,7 @@ define half @test_maxnum(half %a, half %b) #0 {
; CHECK-DAG: and.b16 [[AX:%rs[0-9]+]], [[AH]], 32767;
; CHECK-DAG: and.b16 [[BX:%rs[0-9]+]], [[BH]], -32768;
; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX]];
-; CHECK: st.param.b16 [func_retval0+0], [[RX]];
+; CHECK: st.param.b16 [func_retval0], [[RX]];
; CHECK: ret;
define half @test_copysign(half %a, half %b) #0 {
%r = call half @llvm.copysign.f16(half %a, half %b)
@@ -1049,7 +1049,7 @@ define half @test_copysign(half %a, half %b) #0 {
; CHECK-DAG: and.b32 [[BX0:%r[0-9]+]], [[B]], -2147483648;
; CHECK-DAG: mov.b32 {tmp, [[BX2:%rs[0-9]+]]}, [[BX0]];
; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX2]];
-; CHECK: st.param.b16 [func_retval0+0], [[RX]];
+; CHECK: st.param.b16 [func_retval0], [[RX]];
; CHECK: ret;
define half @test_copysign_f32(half %a, float %b) #0 {
%tb = fptrunc float %b to half
@@ -1066,7 +1066,7 @@ define half @test_copysign_f32(half %a, float %b) #0 {
; CHECK-DAG: shr.u64 [[BX1:%rd[0-9]+]], [[BX0]], 48;
; CHECK-DAG: cvt.u16.u64 [[BX2:%rs[0-9]+]], [[BX1]];
; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX2]];
-; CHECK: st.param.b16 [func_retval0+0], [[RX]];
+; CHECK: st.param.b16 [func_retval0], [[RX]];
; CHECK: ret;
define half @test_copysign_f64(half %a, double %b) #0 {
%tb = fptrunc double %b to half
@@ -1082,7 +1082,7 @@ define half @test_copysign_f64(half %a, double %b) #0 {
; CHECK: or.b16 [[RX:%rs[0-9]+]], [[AX]], [[BX]];
; CHECK-NOFTZ: cvt.f32.f16 [[XR:%f[0-9]+]], [[RX]];
; CHECK-F16-FTZ: cvt.ftz.f32.f16 [[XR:%f[0-9]+]], [[RX]];
-; CHECK: st.param.f32 [func_retval0+0], [[XR]];
+; CHECK: st.param.f32 [func_retval0], [[XR]];
; CHECK: ret;
define float @test_copysign_extended(half %a, half %b) #0 {
%r = call half @llvm.copysign.f16(half %a, half %b)
@@ -1093,7 +1093,7 @@ define float @test_copysign_extended(half %a, half %b) #0 {
; CHECK-LABEL: test_floor(
; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_floor_param_0];
; CHECK: cvt.rmi.f16.f16 [[R:%rs[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_floor(half %a) #0 {
%r = call half @llvm.floor.f16(half %a)
@@ -1103,7 +1103,7 @@ define half @test_floor(half %a) #0 {
; CHECK-LABEL: test_ceil(
; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_ceil_param_0];
; CHECK: cvt.rpi.f16.f16 [[R:%rs[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_ceil(half %a) #0 {
%r = call half @llvm.ceil.f16(half %a)
@@ -1113,7 +1113,7 @@ define half @test_ceil(half %a) #0 {
; CHECK-LABEL: test_trunc(
; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_trunc_param_0];
; CHECK: cvt.rzi.f16.f16 [[R:%rs[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_trunc(half %a) #0 {
%r = call half @llvm.trunc.f16(half %a)
@@ -1123,7 +1123,7 @@ define half @test_trunc(half %a) #0 {
; CHECK-LABEL: test_rint(
; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_rint_param_0];
; CHECK: cvt.rni.f16.f16 [[R:%rs[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_rint(half %a) #0 {
%r = call half @llvm.rint.f16(half %a)
@@ -1133,7 +1133,7 @@ define half @test_rint(half %a) #0 {
; CHECK-LABEL: test_nearbyint(
; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_nearbyint_param_0];
; CHECK: cvt.rni.f16.f16 [[R:%rs[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_nearbyint(half %a) #0 {
%r = call half @llvm.nearbyint.f16(half %a)
@@ -1143,7 +1143,7 @@ define half @test_nearbyint(half %a) #0 {
; CHECK-LABEL: test_roundeven(
; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_roundeven_param_0];
; CHECK: cvt.rni.f16.f16 [[R:%rs[0-9]+]], [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_roundeven(half %a) #0 {
%r = call half @llvm.roundeven.f16(half %a)
@@ -1155,7 +1155,7 @@ define half @test_roundeven(half %a) #0 {
; check the use of sign mask and 0.5 to implement round
; CHECK: and.b32 [[R:%r[0-9]+]], {{.*}}, -2147483648;
; CHECK: or.b32 {{.*}}, [[R]], 1056964608;
-; CHECK: st.param.b16 [func_retval0+0], {{.*}};
+; CHECK: st.param.b16 [func_retval0], {{.*}};
; CHECK: ret;
define half @test_round(half %a) #0 {
%r = call half @llvm.round.f16(half %a)
@@ -1173,7 +1173,7 @@ define half @test_round(half %a) #0 {
; CHECK-NOF16-DAG: cvt.f32.f16 [[C32:%f[0-9]+]], [[C]]
; CHECK-NOF16-NEXT: fma.rn.f32 [[R32:%f[0-9]+]], [[A32]], [[B32]], [[C32]];
; CHECK-NOF16-NEXT: cvt.rn.f16.f32 [[R:%rs[0-9]+]], [[R32]]
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_fmuladd(half %a, half %b, half %c) #0 {
%r = call half @llvm.fmuladd.f16(half %a, half %b, half %c)
diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
index b41f63b783d390..b11c69e064c4a6 100644
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -32,7 +32,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
; CHECK-LABEL: test_ret_const(
; CHECK: mov.b32 [[R:%r[0-9+]]], 1073757184;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_ret_const() #0 {
ret <2 x half>
@@ -41,7 +41,7 @@ define <2 x half> @test_ret_const() #0 {
; CHECK-LABEL: test_extract_0(
; CHECK: ld.param.b32 [[A:%r[0-9]+]], [test_extract_0_param_0];
; CHECK: mov.b32 {[[R:%rs[0-9]+]], tmp}, [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_extract_0(<2 x half> %a) #0 {
%e = extractelement <2 x half> %a, i32 0
@@ -51,7 +51,7 @@ define half @test_extract_0(<2 x half> %a) #0 {
; CHECK-LABEL: test_extract_1(
; CHECK: ld.param.b32 [[A:%r[0-9]+]], [test_extract_1_param_0];
; CHECK: mov.b32 {tmp, [[R:%rs[0-9]+]]}, [[A]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_extract_1(<2 x half> %a) #0 {
%e = extractelement <2 x half> %a, i32 1
@@ -64,7 +64,7 @@ define half @test_extract_1(<2 x half> %a) #0 {
; CHECK-DAG: setp.eq.s64 [[PRED:%p[0-9]+]], [[IDX]], 0;
; CHECK-DAG: mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[A]];
; CHECK: selp.b16 [[R:%rs[0-9]+]], [[E0]], [[E1]], [[PRED]];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: st.param.b16 [func_retval0], [[R]];
; CHECK: ret;
define half @test_extract_i(<2 x half> %a, i64 %idx) #0 {
%e = extractelement <2 x half> %a, i64 %idx
@@ -89,7 +89,7 @@ define half @test_extract_i(<2 x half> %a, i64 %idx) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_fadd(<2 x half> %a, <2 x half> %b) #0 {
%r = fadd <2 x half> %a, %b
@@ -112,7 +112,7 @@ define <2 x half> @test_fadd(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_fadd_imm_0(<2 x half> %a) #0 {
%r = fadd <2 x half> , %a
@@ -134,7 +134,7 @@ define <2 x half> @test_fadd_imm_0(<2 x half> %a) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_fadd_imm_1(<2 x half> %a) #0 {
%r = fadd <2 x half> %a,
@@ -159,7 +159,7 @@ define <2 x half> @test_fadd_imm_1(<2 x half> %a) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_fsub(<2 x half> %a, <2 x half> %b) #0 {
%r = fsub <2 x half> %a, %b
@@ -182,7 +182,7 @@ define <2 x half> @test_fsub(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_fneg(<2 x half> %a) #0 {
%r = fsub <2 x half> , %a
@@ -206,7 +206,7 @@ define <2 x half> @test_fneg(<2 x half> %a) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_fmul(<2 x half> %a, <2 x half> %b) #0 {
%r = fmul <2 x half> %a, %b
@@ -227,7 +227,7 @@ define <2 x half> @test_fmul(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-DAG: cvt.rn.f16.f32 [[R0:%rs[0-9]+]], [[FR0]];
; CHECK-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]];
; CHECK-NEXT: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_fdiv(<2 x half> %a, <2 x half> %b) #0 {
%r = fdiv <2 x half> %a, %b
@@ -265,7 +265,7 @@ define <2 x half> @test_fdiv(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[RF1]];
; -- merge into f16x2 and return it.
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_frem(<2 x half> %a, <2 x half> %b) #0 {
%r = frem <2 x half> %a, %b
@@ -333,15 +333,15 @@ declare <2 x half> @test_callee(<2 x half> %a, <2 x half> %b) #0
; CHECK: {
; CHECK-DAG: .param .align 4 .b8 param0[4];
; CHECK-DAG: .param .align 4 .b8 param1[4];
-; CHECK-DAG: st.param.b32 [param0+0], [[A]];
-; CHECK-DAG: st.param.b32 [param1+0], [[B]];
+; CHECK-DAG: st.param.b32 [param0], [[A]];
+; CHECK-DAG: st.param.b32 [param1], [[B]];
; CHECK-DAG: .param .align 4 .b8 retval0[4];
; CHECK: call.uni (retval0),
; CHECK-NEXT: test_callee,
; CHECK: );
-; CHECK-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; CHECK-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0];
; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_call(<2 x half> %a, <2 x half> %b) #0 {
%r = call <2 x half> @test_callee(<2 x half> %a, <2 x half> %b)
@@ -354,15 +354,15 @@ define <2 x half> @test_call(<2 x half> %a, <2 x half> %b) #0 {
; CHECK: {
; CHECK-DAG: .param .align 4 .b8 param0[4];
; CHECK-DAG: .param .align 4 .b8 param1[4];
-; CHECK-DAG: st.param.b32 [param0+0], [[B]];
-; CHECK-DAG: st.param.b32 [param1+0], [[A]];
+; CHECK-DAG: st.param.b32 [param0], [[B]];
+; CHECK-DAG: st.param.b32 [param1], [[A]];
; CHECK-DAG: .param .align 4 .b8 retval0[4];
; CHECK: call.uni (retval0),
; CHECK-NEXT: test_callee,
; CHECK: );
-; CHECK-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; CHECK-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0];
; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_call_flipped(<2 x half> %a, <2 x half> %b) #0 {
%r = call <2 x half> @test_callee(<2 x half> %b, <2 x half> %a)
@@ -375,15 +375,15 @@ define <2 x half> @test_call_flipped(<2 x half> %a, <2 x half> %b) #0 {
; CHECK: {
; CHECK-DAG: .param .align 4 .b8 param0[4];
; CHECK-DAG: .param .align 4 .b8 param1[4];
-; CHECK-DAG: st.param.b32 [param0+0], [[B]];
-; CHECK-DAG: st.param.b32 [param1+0], [[A]];
+; CHECK-DAG: st.param.b32 [param0], [[B]];
+; CHECK-DAG: st.param.b32 [param1], [[A]];
; CHECK-DAG: .param .align 4 .b8 retval0[4];
; CHECK: call.uni (retval0),
; CHECK-NEXT: test_callee,
; CHECK: );
-; CHECK-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; CHECK-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0];
; CHECK-NEXT: }
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_tailcall_flipped(<2 x half> %a, <2 x half> %b) #0 {
%r = tail call <2 x half> @test_callee(<2 x half> %b, <2 x half> %a)
@@ -396,7 +396,7 @@ define <2 x half> @test_tailcall_flipped(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-DAG: ld.param.u8 [[C:%rs[0-9]+]], [test_select_param_2]
; CHECK-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
; CHECK-NEXT: selp.b32 [[R:%r[0-9]+]], [[A]], [[B]], [[PRED]];
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_select(<2 x half> %a, <2 x half> %b, i1 zeroext %c) #0 {
%r = select i1 %c, <2 x half> %a, <2 x half> %b
@@ -425,7 +425,7 @@ define <2 x half> @test_select(<2 x half> %a, <2 x half> %b, i1 zeroext %c) #0 {
; CHECK-DAG: selp.b16 [[R0:%rs[0-9]+]], [[A0]], [[B0]], [[P0]];
; CHECK-DAG: selp.b16 [[R1:%rs[0-9]+]], [[A1]], [[B1]], [[P1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_select_cc(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) #0 {
%cc = fcmp une <2 x half> %c, %d
@@ -451,7 +451,7 @@ define <2 x half> @test_select_cc(<2 x half> %a, <2 x half> %b, <2 x half> %c, <
;
; CHECK-DAG: selp.f32 [[R0:%f[0-9]+]], [[A0]], [[B0]], [[P0]];
; CHECK-DAG: selp.f32 [[R1:%f[0-9]+]], [[A1]], [[B1]], [[P1]];
-; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {[[R0]], [[R1]]};
; CHECK-NEXT: ret;
define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
<2 x half> %c, <2 x half> %d) #0 {
@@ -472,7 +472,7 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
; CHECK-DAG: selp.b16 [[R0:%rs[0-9]+]], [[A0]], [[B0]], [[P0]];
; CHECK-DAG: selp.b16 [[R1:%rs[0-9]+]], [[A1]], [[B1]], [[P1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; CHECK-NEXT: st.param.b32 [func_retval0], [[R]];
; CHECK-NEXT: ret;
define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
<2 x float> %c, <2 x float> %d) #0 {
@@ -494,7 +494,7 @@ define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -516,7 +516,7 @@ define <2 x i1> @test_fcmp_une(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.equ.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.equ.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -538,7 +538,7 @@ define <2 x i1> @test_fcmp_ueq(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.gtu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.gtu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -560,7 +560,7 @@ define <2 x i1> @test_fcmp_ugt(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.geu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.geu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -582,7 +582,7 @@ define <2 x i1> @test_fcmp_uge(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.ltu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.ltu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -604,7 +604,7 @@ define <2 x i1> @test_fcmp_ult(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.leu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.leu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -627,7 +627,7 @@ define <2 x i1> @test_fcmp_ule(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.nan.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.nan.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -649,7 +649,7 @@ define <2 x i1> @test_fcmp_uno(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.ne.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.ne.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -671,7 +671,7 @@ define <2 x i1> @test_fcmp_one(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.eq.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.eq.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -693,7 +693,7 @@ define <2 x i1> @test_fcmp_oeq(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.gt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.gt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -715,7 +715,7 @@ define <2 x i1> @test_fcmp_ogt(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.ge.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.ge.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -737,7 +737,7 @@ define <2 x i1> @test_fcmp_oge(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.lt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.lt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -759,7 +759,7 @@ define <2 x i1> @test_fcmp_olt(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.le.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.le.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -781,7 +781,7 @@ define <2 x i1> @test_fcmp_ole(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: setp.num.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
; CHECK-NOF16-DAG: setp.num.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
-; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
+; CHECK-NEXT: st.param.b8 [func_retval0], [[R0]];
; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
; CHECK-NEXT: ret;
@@ -795,7 +795,7 @@ define <2 x i1> @test_fcmp_ord(<2 x half> %a, <2 x half> %b) #0 {
; CHECK: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
; CHECK-DAG: cvt.rzi.s32.f16 [[R0:%r[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rzi.s32.f16 [[R1:%r[0-9]+]], [[A1]];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: st.param.v2.b32 [func_retval0], {[[R0]], [[R1]]}
; CHECK: ret;
define <2 x i32> @test_fptosi_i32(<2 x half> %a) #0 {
%r = fptosi <2 x half> %a to <2 x i32>
@@ -807,7 +807,7 @@ define <2 x i32> @test_fptosi_i32(<2 x half> %a) #0 {
; CHECK: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
; CHECK-DAG: cvt.rzi.s64.f16 [[R0:%rd[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rzi.s64.f16 [[R1:%rd[0-9]+]], [[A1]];
-; CHECK: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: st.param.v2.b64 [func_retval0], {[[R0]], [[R1]]}
; CHECK: ret;
define <2 x i64> @test_fptosi_i64(<2 x half> %a) #0 {
%r = fptosi <2 x half> %a to <2 x i64>
@@ -819,7 +819,7 @@ define <2 x i64> @test_fptosi_i64(<2 x half> %a) #0 {
; CHECK: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
; CHECK-DAG: cvt.rzi.u32.f16 [[R0:%r[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rzi.u32.f16 [[R1:%r[0-9]+]], [[A1]];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: st.param.v2.b32 [func_retval0], {[[R0]], [[R1]]}
; CHECK: ret;
define <2 x i32> @test_fptoui_2xi32(<2 x half> %a) #0 {
%r = fptoui <2 x half> %a to <2 x i32>
@@ -831,7 +831,7 @@ define <2 x i32> @test_fptoui_2xi32(<2 x half> %a) #0 {
; CHECK: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
; CHECK-DAG: cvt.rzi.u64.f16 [[R0:%rd[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rzi.u64.f16 [[R1:%rd[0-9]+]], [[A1]];
-; CHECK: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK: st.param.v2.b64 [func_retval0], {[[R0]], [[R1]]}
; CHECK: ret;
define <2 x i64> @test_fptoui_2xi64(<2 x half> %a) #0 {
%r = fptoui <2 x half> %a to <2 x i64>
@@ -843,7 +843,7 @@ define <2 x i64> @test_fptoui_2xi64(<2 x half> %a) #0 {
; CHECK-DAG: cvt.rn.f16.u32 [[R0:%rs[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rn.f16.u32 [[R1:%rs[0-9]+]], [[A1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_uitofp_2xi32(<2 x i32> %a) #0 {
%r = uitofp <2 x i32> %a to <2 x half>
@@ -855,7 +855,7 @@ define <2 x half> @test_uitofp_2xi32(<2 x i32> %a) #0 {
; CHECK-DAG: cvt.rn.f16.u64 [[R0:%rs[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rn.f16.u64 [[R1:%rs[0-9]+]], [[A1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_uitofp_2xi64(<2 x i64> %a) #0 {
%r = uitofp <2 x i64> %a to <2 x half>
@@ -867,7 +867,7 @@ define <2 x half> @test_uitofp_2xi64(<2 x i64> %a) #0 {
; CHECK-DAG: cvt.rn.f16.s32 [[R0:%rs[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rn.f16.s32 [[R1:%rs[0-9]+]], [[A1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_sitofp_2xi32(<2 x i32> %a) #0 {
%r = sitofp <2 x i32> %a to <2 x half>
@@ -879,7 +879,7 @@ define <2 x half> @test_sitofp_2xi32(<2 x i32> %a) #0 {
; CHECK-DAG: cvt.rn.f16.s64 [[R0:%rs[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rn.f16.s64 [[R1:%rs[0-9]+]], [[A1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_sitofp_2xi64(<2 x i64> %a) #0 {
%r = sitofp <2 x i64> %a to <2 x half>
@@ -906,7 +906,7 @@ define <2 x half> @test_sitofp_2xi64(<2 x i64> %a) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
%c = uitofp <2 x i32> %a to <2 x half>
@@ -934,7 +934,7 @@ define <2 x half> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_sitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
%c = sitofp <2 x i32> %a to <2 x half>
@@ -947,7 +947,7 @@ define <2 x half> @test_sitofp_2xi32_fadd(<2 x i32> %a, <2 x half> %b) #0 {
; CHECK-DAG: cvt.rn.f16.f32 [[R0:%rs[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[A1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
%r = fptrunc <2 x float> %a to <2 x half>
@@ -959,7 +959,7 @@ define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
; CHECK-DAG: cvt.rn.f16.f64 [[R0:%rs[0-9]+]], [[A0]];
; CHECK-DAG: cvt.rn.f16.f64 [[R1:%rs[0-9]+]], [[A1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
%r = fptrunc <2 x double> %a to <2 x half>
@@ -971,7 +971,7 @@ define <2 x half> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
; CHECK: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
; CHECK-DAG: cvt.f32.f16 [[R0:%f[0-9]+]], [[A0]];
; CHECK-DAG: cvt.f32.f16 [[R1:%f[0-9]+]], [[A1]];
-; CHECK-NEXT: st.param.v2.f32 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.v2.f32 [func_retval0], {[[R0]], [[R1]]};
; CHECK: ret;
define <2 x float> @test_fpext_2xfloat(<2 x half> %a) #0 {
%r = fpext <2 x half> %a to <2 x float>
@@ -983,7 +983,7 @@ define <2 x float> @test_fpext_2xfloat(<2 x half> %a) #0 {
; CHECK: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
; CHECK-DAG: cvt.f64.f16 [[R0:%fd[0-9]+]], [[A0]];
; CHECK-DAG: cvt.f64.f16 [[R1:%fd[0-9]+]], [[A1]];
-; CHECK-NEXT: st.param.v2.f64 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.v2.f64 [func_retval0], {[[R0]], [[R1]]};
; CHECK: ret;
define <2 x double> @test_fpext_2xdouble(<2 x half> %a) #0 {
%r = fpext <2 x half> %a to <2 x double>
@@ -993,7 +993,7 @@ define <2 x double> @test_fpext_2xdouble(<2 x half> %a) #0 {
; CHECK-LABEL: test_bitcast_2xhalf_to_2xi16(
; CHECK: ld.param.u32 [[A:%r[0-9]+]], [test_bitcast_2xhalf_to_2xi16_param_0];
-; CHECK: st.param.b32 [func_retval0+0], [[A]]
+; CHECK: st.param.b32 [func_retval0], [[A]]
; CHECK: ret;
define <2 x i16> @test_bitcast_2xhalf_to_2xi16(<2 x half> %a) #0 {
%r = bitcast <2 x half> %a to <2 x i16>
@@ -1002,7 +1002,7 @@ define <2 x i16> @test_bitcast_2xhalf_to_2xi16(<2 x half> %a) #0 {
; CHECK-LABEL: test_bitcast_2xi16_to_2xhalf(
; CHECK: ld.param.u32 [[R:%r[0-9]+]], [test_bitcast_2xi16_to_2xhalf_param_0];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_bitcast_2xi16_to_2xhalf(<2 x i16> %a) #0 {
%r = bitcast <2 x i16> %a to <2 x half>
@@ -1012,7 +1012,7 @@ define <2 x half> @test_bitcast_2xi16_to_2xhalf(<2 x i16> %a) #0 {
; CHECK-LABEL: test_bitcast_float_to_2xhalf(
; CHECK: ld.param.f32 [[AF1:%f[0-9]+]], [test_bitcast_float_to_2xhalf_param_0];
; CHECK: mov.b32 [[R:%r[0-9]+]], [[AF1]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_bitcast_float_to_2xhalf(float %a) #0 {
%r = bitcast float %a to <2 x half>
@@ -1022,7 +1022,7 @@ define <2 x half> @test_bitcast_float_to_2xhalf(float %a) #0 {
; CHECK-LABEL: test_bitcast_2xhalf_to_float(
; CHECK: ld.param.u32 [[R:%r[0-9]+]], [test_bitcast_2xhalf_to_float_param_0];
; CHECK: mov.b32 [[AF1:%f[0-9]+]], [[R]];
-; CHECK: st.param.f32 [func_retval0+0], [[AF1]];
+; CHECK: st.param.f32 [func_retval0], [[AF1]];
; CHECK: ret;
define float @test_bitcast_2xhalf_to_float(<2 x half> %a) #0 {
%r = bitcast <2 x half> %a to float
@@ -1063,7 +1063,7 @@ declare <2 x half> @llvm.fmuladd.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c
; CHECK-DAG: cvt.rn.f16.f32 [[R0:%rs[0-9]+]], [[RF0]];
; CHECK-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[RF1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_sqrt(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.sqrt.f16(<2 x half> %a)
@@ -1087,7 +1087,7 @@ define <2 x half> @test_sqrt(<2 x half> %a) #0 {
; CHECK-DAG: cvt.rn.f16.f32 [[R0:%rs[0-9]+]], [[RF0]];
; CHECK-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[RF1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_sin(<2 x half> %a) #0 #1 {
%r = call <2 x half> @llvm.sin.f16(<2 x half> %a)
@@ -1104,7 +1104,7 @@ define <2 x half> @test_sin(<2 x half> %a) #0 #1 {
; CHECK-DAG: cvt.rn.f16.f32 [[R0:%rs[0-9]+]], [[RF0]];
; CHECK-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[RF1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_cos(<2 x half> %a) #0 #1 {
%r = call <2 x half> @llvm.cos.f16(<2 x half> %a)
@@ -1175,7 +1175,7 @@ define <2 x half> @test_cos(<2 x half> %a) #0 #1 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret
define <2 x half> @test_fma(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
%r = call <2 x half> @llvm.fma.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
@@ -1193,7 +1193,7 @@ define <2 x half> @test_fma(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[RF1]];
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
; CHECK-F16: and.b32 [[R:%r[0-9]+]], [[A]], 2147450879;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_fabs(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.fabs.f16(<2 x half> %a)
@@ -1214,7 +1214,7 @@ define <2 x half> @test_fabs(<2 x half> %a) #0 {
; CHECK-DAG: cvt.rn.f16.f32 [[R0:%rs[0-9]+]], [[RF0]];
; CHECK-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[RF1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_minnum(<2 x half> %a, <2 x half> %b) #0 {
%r = call <2 x half> @llvm.minnum.f16(<2 x half> %a, <2 x half> %b)
@@ -1235,7 +1235,7 @@ define <2 x half> @test_minnum(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-DAG: cvt.rn.f16.f32 [[R0:%rs[0-9]+]], [[RF0]];
; CHECK-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[RF1]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_maxnum(<2 x half> %a, <2 x half> %b) #0 {
%r = call <2 x half> @llvm.maxnum.f16(<2 x half> %a, <2 x half> %b)
@@ -1257,7 +1257,7 @@ define <2 x half> @test_maxnum(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-F16-DAG: and.b32 [[R0:%r[0-9]+]], [[B]], -2147450880;
; CHECK-F16-DAG: and.b32 [[R1:%r[0-9]+]], [[A]], 2147450879;
; CHECK-F16-DAG: or.b32 [[R:%r[0-9]+]], [[R1]], [[R0]]
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_copysign(<2 x half> %a, <2 x half> %b) #0 {
%r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
@@ -1285,7 +1285,7 @@ define <2 x half> @test_copysign(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-F16-DAG: and.b32 [[R3:%r[0-9]+]], [[R2]], -2147450880;
; CHECK-F16-DAG: and.b32 [[R4:%r[0-9]+]], [[A]], 2147450879;
; CHECK-F16-DAG: or.b32 [[R:%r[0-9]+]], [[R4]], [[R3]]
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
%tb = fptrunc <2 x float> %b to <2 x half>
@@ -1316,7 +1316,7 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
; CHECK-F16-DAG: and.b32 [[R3:%r[0-9]+]], [[R2]], -2147450880;
; CHECK-F16-DAG: and.b32 [[R4:%r[0-9]+]], [[A]], 2147450879;
; CHECK-F16-DAG: or.b32 [[R:%r[0-9]+]], [[R4]], [[R3]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_copysign_f64(<2 x half> %a, <2 x double> %b) #0 {
%tb = fptrunc <2 x double> %b to <2 x half>
@@ -1343,7 +1343,7 @@ define <2 x half> @test_copysign_f64(<2 x half> %a, <2 x double> %b) #0 {
; CHECK-F16-DAG: mov.b32 {[[R3:%rs[0-9]+]], [[R4:%rs[0-9]+]]}, [[R2]]
; CHECK-F16-DAG: cvt.f32.f16 [[XR0:%f[0-9]+]], [[R3]]
; CHECK-F16-DAG: cvt.f32.f16 [[XR1:%f[0-9]+]], [[R4]]
-; CHECK: st.param.v2.f32 [func_retval0+0], {[[XR0]], [[XR1]]};
+; CHECK: st.param.v2.f32 [func_retval0], {[[XR0]], [[XR1]]};
; CHECK: ret;
define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
%r = call <2 x half> @llvm.copysign.f16(<2 x half> %a, <2 x half> %b)
@@ -1357,7 +1357,7 @@ define <2 x float> @test_copysign_extended(<2 x half> %a, <2 x half> %b) #0 {
; CHECK-DAG: cvt.rmi.f16.f16 [[R1:%rs[0-9]+]], [[A1]];
; CHECK-DAG: cvt.rmi.f16.f16 [[R0:%rs[0-9]+]], [[A0]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_floor(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.floor.f16(<2 x half> %a)
@@ -1370,7 +1370,7 @@ define <2 x half> @test_floor(<2 x half> %a) #0 {
; CHECK-DAG: cvt.rpi.f16.f16 [[R1:%rs[0-9]+]], [[A1]];
; CHECK-DAG: cvt.rpi.f16.f16 [[R0:%rs[0-9]+]], [[A0]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_ceil(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.ceil.f16(<2 x half> %a)
@@ -1383,7 +1383,7 @@ define <2 x half> @test_ceil(<2 x half> %a) #0 {
; CHECK-DAG: cvt.rzi.f16.f16 [[R1:%rs[0-9]+]], [[A1]];
; CHECK-DAG: cvt.rzi.f16.f16 [[R0:%rs[0-9]+]], [[A0]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_trunc(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.trunc.f16(<2 x half> %a)
@@ -1396,7 +1396,7 @@ define <2 x half> @test_trunc(<2 x half> %a) #0 {
; CHECK-DAG: cvt.rni.f16.f16 [[R1:%rs[0-9]+]], [[A1]];
; CHECK-DAG: cvt.rni.f16.f16 [[R0:%rs[0-9]+]], [[A0]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_rint(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.rint.f16(<2 x half> %a)
@@ -1409,7 +1409,7 @@ define <2 x half> @test_rint(<2 x half> %a) #0 {
; CHECK-DAG: cvt.rni.f16.f16 [[R1:%rs[0-9]+]], [[A1]];
; CHECK-DAG: cvt.rni.f16.f16 [[R0:%rs[0-9]+]], [[A0]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_nearbyint(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.nearbyint.f16(<2 x half> %a)
@@ -1422,7 +1422,7 @@ define <2 x half> @test_nearbyint(<2 x half> %a) #0 {
; CHECK-DAG: cvt.rni.f16.f16 [[R1:%rs[0-9]+]], [[A1]];
; CHECK-DAG: cvt.rni.f16.f16 [[R0:%rs[0-9]+]], [[A0]];
; CHECK: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_roundeven(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.roundeven.f16(<2 x half> %a)
@@ -1436,7 +1436,7 @@ define <2 x half> @test_roundeven(<2 x half> %a) #0 {
; CHECK: or.b32 {{.*}}, [[R1]], 1056964608;
; CHECK: and.b32 [[R2:%r[0-9]+]], {{.*}}, -2147483648;
; CHECK: or.b32 {{.*}}, [[R2]], 1056964608;
-; CHECK: st.param.b32 [func_retval0+0], {{.*}};
+; CHECK: st.param.b32 [func_retval0], {{.*}};
; CHECK: ret;
define <2 x half> @test_round(<2 x half> %a) #0 {
%r = call <2 x half> @llvm.round.f16(<2 x half> %a)
@@ -1465,7 +1465,7 @@ define <2 x half> @test_round(<2 x half> %a) #0 {
; CHECK-NOF16-DAG: cvt.rn.f16.f32 [[R1:%rs[0-9]+]], [[FR1]]
; CHECK-NOF16: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
; CHECK: ret;
define <2 x half> @test_fmuladd(<2 x half> %a, <2 x half> %b, <2 x half> %c) #0 {
%r = call <2 x half> @llvm.fmuladd.f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
diff --git a/llvm/test/CodeGen/NVPTX/i128-param.ll b/llvm/test/CodeGen/NVPTX/i128-param.ll
index c2f23124049cae..8ad5ab6a287523 100644
--- a/llvm/test/CodeGen/NVPTX/i128-param.ll
+++ b/llvm/test/CodeGen/NVPTX/i128-param.ll
@@ -30,9 +30,9 @@ start:
; CHECK: { // callseq [[CALLSEQ_ID:[0-9]]], 0
; CHECK: .param .align 16 .b8 param0[16];
- ; CHECK-NEXT: st.param.v2.b64 [param0+0], {%[[REG0]], %[[REG1]]}
+ ; CHECK-NEXT: st.param.v2.b64 [param0], {%[[REG0]], %[[REG1]]}
; CHECK: .param .align 16 .b8 param1[16];
- ; CHECK-NEXT: st.param.v2.b64 [param1+0], {%[[REG2]], %[[REG3]]}
+ ; CHECK-NEXT: st.param.v2.b64 [param1], {%[[REG2]], %[[REG3]]}
; CHECK: } // callseq [[CALLSEQ_ID]]
call void @callee(i128 %0, i128 %1, ptr %2)
@@ -49,9 +49,9 @@ start:
; CHECK: { // callseq [[CALLSEQ_ID:[0-9]]], 0
; CHECK: .param .align 16 .b8 param0[16];
- ; CHECK: st.param.v2.b64 [param0+0], {%[[REG0]], %[[REG1]]}
+ ; CHECK: st.param.v2.b64 [param0], {%[[REG0]], %[[REG1]]}
; CHECK: .param .align 16 .b8 param1[16];
- ; CHECK: st.param.v2.b64 [param1+0], {%[[REG2]], %[[REG3]]}
+ ; CHECK: st.param.v2.b64 [param1], {%[[REG2]], %[[REG3]]}
; CHECK: } // callseq [[CALLSEQ_ID]]
call void @callee(i128 %0, i128 %1, ptr %2)
diff --git a/llvm/test/CodeGen/NVPTX/i128-retval.ll b/llvm/test/CodeGen/NVPTX/i128-retval.ll
index df173536c297f8..554c43b52bf021 100644
--- a/llvm/test/CodeGen/NVPTX/i128-retval.ll
+++ b/llvm/test/CodeGen/NVPTX/i128-retval.ll
@@ -4,7 +4,7 @@
; CHECK-LABEL: .visible .func (.param .align 16 .b8 func_retval0[16]) callee(
define i128 @callee(i128) {
; CHECK: ld.param.v2.u64 {%[[REG0:rd[0-9]+]], %[[REG1:rd[0-9]+]]}, [callee_param_0];
- ; CHECK: st.param.v2.b64 [func_retval0+0], {%[[REG0]], %[[REG1]]}
+ ; CHECK: st.param.v2.b64 [func_retval0], {%[[REG0]], %[[REG1]]}
ret i128 %0
}
@@ -17,7 +17,7 @@ start:
; CHECK: { // callseq 0, 0
; CHECK: .param .align 16 .b8 retval0[16];
; CHECK: call.uni (retval0),
- ; CHECK: ld.param.v2.b64 {%[[REG2:rd[0-9]+]], %[[REG3:rd[0-9]+]]}, [retval0+0];
+ ; CHECK: ld.param.v2.b64 {%[[REG2:rd[0-9]+]], %[[REG3:rd[0-9]+]]}, [retval0];
; CHECK: } // callseq 0
%a = call i128 @callee(i128 %0)
diff --git a/llvm/test/CodeGen/NVPTX/i128-struct.ll b/llvm/test/CodeGen/NVPTX/i128-struct.ll
index cecfd4f6ce42ae..d7a00a66bf4486 100644
--- a/llvm/test/CodeGen/NVPTX/i128-struct.ll
+++ b/llvm/test/CodeGen/NVPTX/i128-struct.ll
@@ -8,7 +8,7 @@ define { i128, i128 } @foo(i64 %a, i32 %b) {
%3 = insertvalue { i128, i128 } undef, i128 %1, 0
%4 = insertvalue { i128, i128 } %3, i128 %2, 1
- ; CHECK: st.param.v2.b64 [func_retval0+0], {%[[REG1:rd[0-9]+]], %[[REG2:rd[0-9]+]]};
+ ; CHECK: st.param.v2.b64 [func_retval0], {%[[REG1:rd[0-9]+]], %[[REG2:rd[0-9]+]]};
; CHECK: st.param.v2.b64 [func_retval0+16], {%[[REG3:rd[0-9]+]], %[[REG4:rd[0-9]+]]};
ret { i128, i128 } %4
}
diff --git a/llvm/test/CodeGen/NVPTX/i128.ll b/llvm/test/CodeGen/NVPTX/i128.ll
index 396c29512933c1..895787d68adfee 100644
--- a/llvm/test/CodeGen/NVPTX/i128.ll
+++ b/llvm/test/CodeGen/NVPTX/i128.ll
@@ -145,7 +145,7 @@ define i128 @srem_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: xor.b64 %rd112, %rd110, %rd2;
; CHECK-NEXT: sub.cc.s64 %rd113, %rd111, %rd2;
; CHECK-NEXT: subc.cc.s64 %rd114, %rd112, %rd2;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd113, %rd114};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd113, %rd114};
; CHECK-NEXT: ret;
%div = srem i128 %lhs, %rhs
ret i128 %div
@@ -279,7 +279,7 @@ define i128 @urem_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: mul.lo.s64 %rd98, %rd3, %rd113;
; CHECK-NEXT: sub.cc.s64 %rd99, %rd41, %rd98;
; CHECK-NEXT: subc.cc.s64 %rd100, %rd42, %rd97;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd99, %rd100};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd99, %rd100};
; CHECK-NEXT: ret;
%div = urem i128 %lhs, %rhs
ret i128 %div
@@ -299,7 +299,7 @@ define i128 @srem_i128_pow2k(i128 %lhs) {
; CHECK-NEXT: and.b64 %rd7, %rd5, -8589934592;
; CHECK-NEXT: sub.cc.s64 %rd8, %rd1, %rd7;
; CHECK-NEXT: subc.cc.s64 %rd9, %rd2, %rd6;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd8, %rd9};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd9};
; CHECK-NEXT: ret;
%div = srem i128 %lhs, 8589934592
ret i128 %div
@@ -314,7 +314,7 @@ define i128 @urem_i128_pow2k(i128 %lhs) {
; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [urem_i128_pow2k_param_0];
; CHECK-NEXT: and.b64 %rd3, %rd1, 8589934591;
; CHECK-NEXT: mov.u64 %rd4, 0;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd3, %rd4};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd4};
; CHECK-NEXT: ret;
%div = urem i128 %lhs, 8589934592
ret i128 %div
@@ -456,7 +456,7 @@ define i128 @sdiv_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: xor.b64 %rd105, %rd121, %rd5;
; CHECK-NEXT: sub.cc.s64 %rd106, %rd104, %rd5;
; CHECK-NEXT: subc.cc.s64 %rd107, %rd105, %rd5;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd106, %rd107};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd106, %rd107};
; CHECK-NEXT: ret;
%div = sdiv i128 %lhs, %rhs
ret i128 %div
@@ -582,7 +582,7 @@ define i128 @udiv_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: or.b64 %rd105, %rd97, %rd92;
; CHECK-NEXT: or.b64 %rd106, %rd94, %rd91;
; CHECK-NEXT: $L__BB5_5: // %udiv-end
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd105, %rd106};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd105, %rd106};
; CHECK-NEXT: ret;
%div = udiv i128 %lhs, %rhs
ret i128 %div
@@ -603,7 +603,7 @@ define i128 @sdiv_i128_pow2k(i128 %lhs) {
; CHECK-NEXT: shr.u64 %rd8, %rd5, 33;
; CHECK-NEXT: or.b64 %rd9, %rd8, %rd7;
; CHECK-NEXT: shr.s64 %rd10, %rd6, 33;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd9, %rd10};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd9, %rd10};
; CHECK-NEXT: ret;
%div = sdiv i128 %lhs, 8589934592
ret i128 %div
@@ -620,7 +620,7 @@ define i128 @udiv_i128_pow2k(i128 %lhs) {
; CHECK-NEXT: shr.u64 %rd4, %rd1, 33;
; CHECK-NEXT: or.b64 %rd5, %rd4, %rd3;
; CHECK-NEXT: shr.u64 %rd6, %rd2, 33;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd5, %rd6};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd5, %rd6};
; CHECK-NEXT: ret;
%div = udiv i128 %lhs, 8589934592
ret i128 %div
@@ -636,7 +636,7 @@ define i128 @add_i128(i128 %lhs, i128 %rhs) {
; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [add_i128_param_1];
; CHECK-NEXT: add.cc.s64 %rd5, %rd1, %rd3;
; CHECK-NEXT: addc.cc.s64 %rd6, %rd2, %rd4;
-; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd5, %rd6};
+; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd5, %rd6};
; CHECK-NEXT: ret;
%result = add i128 %lhs, %rhs
ret i128 %result
diff --git a/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
index ce9adfc7aa4f19..988438bebea6d0 100644
--- a/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
@@ -21,7 +21,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
; COMMON-LABEL: test_ret_const(
; COMMON: mov.b32 [[R:%r[0-9+]]], 131073;
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_ret_const() #0 {
ret <2 x i16>
@@ -31,7 +31,7 @@ define <2 x i16> @test_ret_const() #0 {
; COMMON: ld.param.u32 [[A:%r[0-9]+]], [test_extract_0_param_0];
; COMMON: mov.b32 {[[RS:%rs[0-9]+]], tmp}, [[A]];
; COMMON: cvt.u32.u16 [[R:%r[0-9]+]], [[RS]];
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON: ret;
define i16 @test_extract_0(<2 x i16> %a) #0 {
%e = extractelement <2 x i16> %a, i32 0
@@ -42,7 +42,7 @@ define i16 @test_extract_0(<2 x i16> %a) #0 {
; COMMON: ld.param.u32 [[A:%r[0-9]+]], [test_extract_1_param_0];
; COMMON: mov.b32 {tmp, [[RS:%rs[0-9]+]]}, [[A]];
; COMMON: cvt.u32.u16 [[R:%r[0-9]+]], [[RS]];
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON: ret;
define i16 @test_extract_1(<2 x i16> %a) #0 {
%e = extractelement <2 x i16> %a, i32 1
@@ -56,7 +56,7 @@ define i16 @test_extract_1(<2 x i16> %a) #0 {
; COMMON-DAG: mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[A]];
; COMMON: selp.b16 [[RS:%rs[0-9]+]], [[E0]], [[E1]], [[PRED]];
; COMMON: cvt.u32.u16 [[R:%r[0-9]+]], [[RS]];
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON: ret;
define i16 @test_extract_i(<2 x i16> %a, i64 %idx) #0 {
%e = extractelement <2 x i16> %a, i64 %idx
@@ -75,7 +75,7 @@ define i16 @test_extract_i(<2 x i16> %a, i64 %idx) #0 {
; NO-I16x2-DAG: add.s16 [[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
; NO-I16x2-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_add(<2 x i16> %a, <2 x i16> %b) #0 {
%r = add <2 x i16> %a, %b
@@ -94,7 +94,7 @@ define <2 x i16> @test_add(<2 x i16> %a, <2 x i16> %b) #0 {
; NO-I16x2-DAG: add.s16 [[RS3:%rs[0-9]+]], [[RS1]], 2;
; NO-I16x2-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS2]], [[RS3]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_add_imm_0(<2 x i16> %a) #0 {
%r = add <2 x i16> , %a
@@ -112,7 +112,7 @@ define <2 x i16> @test_add_imm_0(<2 x i16> %a) #0 {
; NO-I16x2-DAG: add.s16 [[RS3:%rs[0-9]+]], [[RS1]], 2;
; NO-I16x2-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS2]], [[RS3]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_add_imm_1(<2 x i16> %a) #0 {
%r = add <2 x i16> %a,
@@ -130,7 +130,7 @@ define <2 x i16> @test_add_imm_1(<2 x i16> %a) #0 {
; COMMON-DAG: sub.s16 [[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
; COMMON-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_sub(<2 x i16> %a, <2 x i16> %b) #0 {
%r = sub <2 x i16> %a, %b
@@ -149,7 +149,7 @@ define <2 x i16> @test_sub(<2 x i16> %a, <2 x i16> %b) #0 {
; NO-I16x2-DAG: max.s16 [[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
; NO-I16x2-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_smax(<2 x i16> %a, <2 x i16> %b) #0 {
%cmp = icmp sgt <2 x i16> %a, %b
@@ -169,7 +169,7 @@ define <2 x i16> @test_smax(<2 x i16> %a, <2 x i16> %b) #0 {
; NO-I16x2-DAG: max.u16 [[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
; NO-I16x2-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_umax(<2 x i16> %a, <2 x i16> %b) #0 {
%cmp = icmp ugt <2 x i16> %a, %b
@@ -189,7 +189,7 @@ define <2 x i16> @test_umax(<2 x i16> %a, <2 x i16> %b) #0 {
; NO-I16x2-DAG: min.s16 [[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
; NO-I16x2-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_smin(<2 x i16> %a, <2 x i16> %b) #0 {
%cmp = icmp sle <2 x i16> %a, %b
@@ -209,7 +209,7 @@ define <2 x i16> @test_smin(<2 x i16> %a, <2 x i16> %b) #0 {
; NO-I16x2-DAG: min.u16 [[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
; NO-I16x2-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_umin(<2 x i16> %a, <2 x i16> %b) #0 {
%cmp = icmp ule <2 x i16> %a, %b
@@ -227,7 +227,7 @@ define <2 x i16> @test_umin(<2 x i16> %a, <2 x i16> %b) #0 {
; COMMON-DAG: mul.lo.s16 [[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
; COMMON-DAG: mov.b32 [[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_mul(<2 x i16> %a, <2 x i16> %b) #0 {
%r = mul <2 x i16> %a, %b
@@ -239,7 +239,7 @@ define <2 x i16> @test_mul(<2 x i16> %a, <2 x i16> %b) #0 {
; COMMON-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_or_param_0];
; COMMON-DAG: ld.param.u32 [[B:%r[0-9]+]], [test_or_param_1];
; COMMON-NEXT: or.b32 [[R:%r[0-9]+]], [[A]], [[B]];
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_or(<2 x i16> %a, <2 x i16> %b) #0 {
%r = or <2 x i16> %a, %b
@@ -255,7 +255,7 @@ define <2 x i16> @test_or(<2 x i16> %a, <2 x i16> %b) #0 {
; COMMON-DAG: mov.u16 [[C5:%rs[0-9]+]], 5;
; COMMON-DAG: mov.b32 [[R2:%r[0-9]+]], {[[A]], [[C5]]};
; COMMON: or.b32 [[R:%r[0-9]+]], [[R2]], [[R1]];
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
define <2 x i16> @test_or_computed(i16 %a) {
%ins.0 = insertelement <2 x i16> zeroinitializer, i16 %a, i32 0
%ins.1 = insertelement <2 x i16> %ins.0, i16 5, i32 1
@@ -267,7 +267,7 @@ define <2 x i16> @test_or_computed(i16 %a) {
; COMMON-LABEL: test_or_imm_0(
; COMMON-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_or_imm_0_param_0];
; COMMON-NEXT: or.b32 [[R:%r[0-9]+]], [[A]], 131073;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_or_imm_0(<2 x i16> %a) #0 {
%r = or <2 x i16> , %a
@@ -277,7 +277,7 @@ define <2 x i16> @test_or_imm_0(<2 x i16> %a) #0 {
; COMMON-LABEL: test_or_imm_1(
; COMMON-DAG: ld.param.u32 [[B:%r[0-9]+]], [test_or_imm_1_param_0];
; COMMON-NEXT: or.b32 [[R:%r[0-9]+]], [[A]], 131073;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_or_imm_1(<2 x i16> %a) #0 {
%r = or <2 x i16> %a,
@@ -288,7 +288,7 @@ define <2 x i16> @test_or_imm_1(<2 x i16> %a) #0 {
; COMMON-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_xor_param_0];
; COMMON-DAG: ld.param.u32 [[B:%r[0-9]+]], [test_xor_param_1];
; COMMON-NEXT: xor.b32 [[R:%r[0-9]+]], [[A]], [[B]];
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_xor(<2 x i16> %a, <2 x i16> %b) #0 {
%r = xor <2 x i16> %a, %b
@@ -302,7 +302,7 @@ define <2 x i16> @test_xor(<2 x i16> %a, <2 x i16> %b) #0 {
; COMMON-DAG: mov.u16 [[C5:%rs[0-9]+]], 5;
; COMMON-DAG: mov.b32 [[R2:%r[0-9]+]], {[[A]], [[C5]]};
; COMMON: xor.b32 [[R:%r[0-9]+]], [[R2]], [[R1]];
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
define <2 x i16> @test_xor_computed(i16 %a) {
%ins.0 = insertelement <2 x i16> zeroinitializer, i16 %a, i32 0
%ins.1 = insertelement <2 x i16> %ins.0, i16 5, i32 1
@@ -314,7 +314,7 @@ define <2 x i16> @test_xor_computed(i16 %a) {
; COMMON-LABEL: test_xor_imm_0(
; COMMON-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_xor_imm_0_param_0];
; COMMON-NEXT: xor.b32 [[R:%r[0-9]+]], [[A]], 131073;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_xor_imm_0(<2 x i16> %a) #0 {
%r = xor <2 x i16> , %a
@@ -324,7 +324,7 @@ define <2 x i16> @test_xor_imm_0(<2 x i16> %a) #0 {
; COMMON-LABEL: test_xor_imm_1(
; COMMON-DAG: ld.param.u32 [[B:%r[0-9]+]], [test_xor_imm_1_param_0];
; COMMON-NEXT: xor.b32 [[R:%r[0-9]+]], [[A]], 131073;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_xor_imm_1(<2 x i16> %a) #0 {
%r = xor <2 x i16> %a,
@@ -335,7 +335,7 @@ define <2 x i16> @test_xor_imm_1(<2 x i16> %a) #0 {
; COMMON-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_and_param_0];
; COMMON-DAG: ld.param.u32 [[B:%r[0-9]+]], [test_and_param_1];
; COMMON-NEXT: and.b32 [[R:%r[0-9]+]], [[A]], [[B]];
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_and(<2 x i16> %a, <2 x i16> %b) #0 {
%r = and <2 x i16> %a, %b
@@ -351,7 +351,7 @@ define <2 x i16> @test_and(<2 x i16> %a, <2 x i16> %b) #0 {
; COMMON-DAG: mov.u16 [[C5:%rs[0-9]+]], 5;
; COMMON-DAG: mov.b32 [[R2:%r[0-9]+]], {[[A]], [[C5]]};
; COMMON: and.b32 [[R:%r[0-9]+]], [[R2]], [[R1]];
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
define <2 x i16> @test_and_computed(i16 %a) {
%ins.0 = insertelement <2 x i16> zeroinitializer, i16 %a, i32 0
%ins.1 = insertelement <2 x i16> %ins.0, i16 5, i32 1
@@ -363,7 +363,7 @@ define <2 x i16> @test_and_computed(i16 %a) {
; COMMON-LABEL: test_and_imm_0(
; COMMON-DAG: ld.param.u32 [[A:%r[0-9]+]], [test_and_imm_0_param_0];
; COMMON-NEXT: and.b32 [[R:%r[0-9]+]], [[A]], 131073;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_and_imm_0(<2 x i16> %a) #0 {
%r = and <2 x i16> , %a
@@ -373,7 +373,7 @@ define <2 x i16> @test_and_imm_0(<2 x i16> %a) #0 {
; COMMON-LABEL: test_and_imm_1(
; COMMON-DAG: ld.param.u32 [[B:%r[0-9]+]], [test_and_imm_1_param_0];
; COMMON-NEXT: and.b32 [[R:%r[0-9]+]], [[A]], 131073;
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_and_imm_1(<2 x i16> %a) #0 {
%r = and <2 x i16> %a,
@@ -441,15 +441,15 @@ declare <2 x i16> @test_callee(<2 x i16> %a, <2 x i16> %b) #0
; COMMON: {
; COMMON-DAG: .param .align 4 .b8 param0[4];
; COMMON-DAG: .param .align 4 .b8 param1[4];
-; COMMON-DAG: st.param.b32 [param0+0], [[A]];
-; COMMON-DAG: st.param.b32 [param1+0], [[B]];
+; COMMON-DAG: st.param.b32 [param0], [[A]];
+; COMMON-DAG: st.param.b32 [param1], [[B]];
; COMMON-DAG: .param .align 4 .b8 retval0[4];
; COMMON: call.uni (retval0),
; COMMON-NEXT: test_callee,
; COMMON: );
-; COMMON-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; COMMON-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0];
; COMMON-NEXT: }
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_call(<2 x i16> %a, <2 x i16> %b) #0 {
%r = call <2 x i16> @test_callee(<2 x i16> %a, <2 x i16> %b)
@@ -462,15 +462,15 @@ define <2 x i16> @test_call(<2 x i16> %a, <2 x i16> %b) #0 {
; COMMON: {
; COMMON-DAG: .param .align 4 .b8 param0[4];
; COMMON-DAG: .param .align 4 .b8 param1[4];
-; COMMON-DAG: st.param.b32 [param0+0], [[B]];
-; COMMON-DAG: st.param.b32 [param1+0], [[A]];
+; COMMON-DAG: st.param.b32 [param0], [[B]];
+; COMMON-DAG: st.param.b32 [param1], [[A]];
; COMMON-DAG: .param .align 4 .b8 retval0[4];
; COMMON: call.uni (retval0),
; COMMON-NEXT: test_callee,
; COMMON: );
-; COMMON-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; COMMON-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0];
; COMMON-NEXT: }
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_call_flipped(<2 x i16> %a, <2 x i16> %b) #0 {
%r = call <2 x i16> @test_callee(<2 x i16> %b, <2 x i16> %a)
@@ -483,15 +483,15 @@ define <2 x i16> @test_call_flipped(<2 x i16> %a, <2 x i16> %b) #0 {
; COMMON: {
; COMMON-DAG: .param .align 4 .b8 param0[4];
; COMMON-DAG: .param .align 4 .b8 param1[4];
-; COMMON-DAG: st.param.b32 [param0+0], [[B]];
-; COMMON-DAG: st.param.b32 [param1+0], [[A]];
+; COMMON-DAG: st.param.b32 [param0], [[B]];
+; COMMON-DAG: st.param.b32 [param1], [[A]];
; COMMON-DAG: .param .align 4 .b8 retval0[4];
; COMMON: call.uni (retval0),
; COMMON-NEXT: test_callee,
; COMMON: );
-; COMMON-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
+; COMMON-NEXT: ld.param.b32 [[R:%r[0-9]+]], [retval0];
; COMMON-NEXT: }
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_tailcall_flipped(<2 x i16> %a, <2 x i16> %b) #0 {
%r = tail call <2 x i16> @test_callee(<2 x i16> %b, <2 x i16> %a)
@@ -504,7 +504,7 @@ define <2 x i16> @test_tailcall_flipped(<2 x i16> %a, <2 x i16> %b) #0 {
; COMMON-DAG: ld.param.u8 [[C:%rs[0-9]+]], [test_select_param_2]
; COMMON-DAG: setp.eq.b16 [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
; COMMON-NEXT: selp.b32 [[R:%r[0-9]+]], [[A]], [[B]], [[PRED]];
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_select(<2 x i16> %a, <2 x i16> %b, i1 zeroext %c) #0 {
%r = select i1 %c, <2 x i16> %a, <2 x i16> %b
@@ -525,7 +525,7 @@ define <2 x i16> @test_select(<2 x i16> %a, <2 x i16> %b, i1 zeroext %c) #0 {
; COMMON-DAG: selp.b16 [[R0:%rs[0-9]+]], [[A0]], [[B0]], [[P0]];
; COMMON-DAG: selp.b16 [[R1:%rs[0-9]+]], [[A1]], [[B1]], [[P1]];
; COMMON: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_select_cc(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) #0 {
%cc = icmp ne <2 x i16> %c, %d
@@ -544,7 +544,7 @@ define <2 x i16> @test_select_cc(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x
; COMMON-DAG: setp.ne.s16 [[P1:%p[0-9]+]], [[C1]], [[D1]]
; COMMON-DAG: selp.b32 [[R0:%r[0-9]+]], [[A0]], [[B0]], [[P0]];
; COMMON-DAG: selp.b32 [[R1:%r[0-9]+]], [[A1]], [[B1]], [[P1]];
-; COMMON-NEXT: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]};
+; COMMON-NEXT: st.param.v2.b32 [func_retval0], {[[R0]], [[R1]]};
; COMMON-NEXT: ret;
define <2 x i32> @test_select_cc_i32_i16(<2 x i32> %a, <2 x i32> %b,
<2 x i16> %c, <2 x i16> %d) #0 {
@@ -565,7 +565,7 @@ define <2 x i32> @test_select_cc_i32_i16(<2 x i32> %a, <2 x i32> %b,
; COMMON-DAG: selp.b16 [[R0:%rs[0-9]+]], [[A0]], [[B0]], [[P0]];
; COMMON-DAG: selp.b16 [[R1:%rs[0-9]+]], [[A1]], [[B1]], [[P1]];
; COMMON: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; COMMON-NEXT: st.param.b32 [func_retval0+0], [[R]];
+; COMMON-NEXT: st.param.b32 [func_retval0], [[R]];
; COMMON-NEXT: ret;
define <2 x i16> @test_select_cc_i16_i32(<2 x i16> %a, <2 x i16> %b,
<2 x i32> %c, <2 x i32> %d) #0 {
@@ -580,7 +580,7 @@ define <2 x i16> @test_select_cc_i16_i32(<2 x i16> %a, <2 x i16> %b,
; COMMON-DAG: cvt.u16.u32 [[R0:%rs[0-9]+]], [[A0]];
; COMMON-DAG: cvt.u16.u32 [[R1:%rs[0-9]+]], [[A1]];
; COMMON: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON: ret;
define <2 x i16> @test_trunc_2xi32(<2 x i32> %a) #0 {
%r = trunc <2 x i32> %a to <2 x i16>
@@ -592,7 +592,7 @@ define <2 x i16> @test_trunc_2xi32(<2 x i32> %a) #0 {
; COMMON-DAG: cvt.u16.u64 [[R0:%rs[0-9]+]], [[A0]];
; COMMON-DAG: cvt.u16.u64 [[R1:%rs[0-9]+]], [[A1]];
; COMMON: mov.b32 [[R:%r[0-9]+]], {[[R0]], [[R1]]}
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON: ret;
define <2 x i16> @test_trunc_2xi64(<2 x i64> %a) #0 {
%r = trunc <2 x i64> %a to <2 x i16>
@@ -604,7 +604,7 @@ define <2 x i16> @test_trunc_2xi64(<2 x i64> %a) #0 {
; COMMON: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
; COMMON-DAG: cvt.u32.u16 [[R0:%r[0-9]+]], [[A0]];
; COMMON-DAG: cvt.u32.u16 [[R1:%r[0-9]+]], [[A1]];
-; COMMON-NEXT: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]};
+; COMMON-NEXT: st.param.v2.b32 [func_retval0], {[[R0]], [[R1]]};
; COMMON: ret;
define <2 x i32> @test_zext_2xi32(<2 x i16> %a) #0 {
%r = zext <2 x i16> %a to <2 x i32>
@@ -616,7 +616,7 @@ define <2 x i32> @test_zext_2xi32(<2 x i16> %a) #0 {
; COMMON: mov.b32 {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
; COMMON-DAG: cvt.u64.u16 [[R0:%rd[0-9]+]], [[A0]];
; COMMON-DAG: cvt.u64.u16 [[R1:%rd[0-9]+]], [[A1]];
-; COMMON-NEXT: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]};
+; COMMON-NEXT: st.param.v2.b64 [func_retval0], {[[R0]], [[R1]]};
; COMMON: ret;
define <2 x i64> @test_zext_2xi64(<2 x i16> %a) #0 {
%r = zext <2 x i16> %a to <2 x i64>
@@ -625,7 +625,7 @@ define <2 x i64> @test_zext_2xi64(<2 x i16> %a) #0 {
; COMMON-LABEL: test_bitcast_i32_to_2xi16(
; COMMON: ld.param.u32 [[R:%r[0-9]+]], [test_bitcast_i32_to_2xi16_param_0];
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON: ret;
define <2 x i16> @test_bitcast_i32_to_2xi16(i32 %a) #0 {
%r = bitcast i32 %a to <2 x i16>
@@ -634,7 +634,7 @@ define <2 x i16> @test_bitcast_i32_to_2xi16(i32 %a) #0 {
; COMMON-LABEL: test_bitcast_2xi16_to_i32(
; COMMON: ld.param.u32 [[R:%r[0-9]+]], [test_bitcast_2xi16_to_i32_param_0];
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON: ret;
define i32 @test_bitcast_2xi16_to_i32(<2 x i16> %a) #0 {
%r = bitcast <2 x i16> %a to i32
@@ -645,7 +645,7 @@ define i32 @test_bitcast_2xi16_to_i32(<2 x i16> %a) #0 {
; COMMON: ld.param.u16 [[RS1:%rs[0-9]+]], [test_bitcast_2xi16_to_2xhalf_param_0];
; COMMON: mov.u16 [[RS2:%rs[0-9]+]], 5;
; COMMON: mov.b32 [[R:%r[0-9]+]], {[[RS1]], [[RS2]]};
-; COMMON: st.param.b32 [func_retval0+0], [[R]];
+; COMMON: st.param.b32 [func_retval0], [[R]];
; COMMON: ret;
define <2 x half> @test_bitcast_2xi16_to_2xhalf(i16 %a) #0 {
%ins.0 = insertelement <2 x i16> undef, i16 %a, i32 0
@@ -659,7 +659,7 @@ define <2 x half> @test_bitcast_2xi16_to_2xhalf(i16 %a) #0 {
; COMMON: ld.param.u32 [[R:%r[0-9]+]], [test_shufflevector_param_0];
; COMMON: mov.b32 {[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[R]];
; COMMON: mov.b32 [[R1:%r[0-9]+]], {[[RS1]], [[RS0]]};
-; COMMON: st.param.b32 [func_retval0+0], [[R1]];
+; COMMON: st.param.b32 [func_retval0], [[R1]];
; COMMON: ret;
define <2 x i16> @test_shufflevector(<2 x i16> %a) #0 {
%s = shufflevector <2 x i16> %a, <2 x i16> undef, <2 x i32>
@@ -671,7 +671,7 @@ define <2 x i16> @test_shufflevector(<2 x i16> %a) #0 {
; COMMON: ld.param.u32 [[A:%r[0-9]+]], [test_insertelement_param_0];
; COMMON: { .reg .b16 tmp; mov.b32 {[[R0:%rs[0-9]+]], tmp}, [[A]]; }
; COMMON: mov.b32 [[R1:%r[0-9]+]], {[[R0]], [[B]]};
-; COMMON: st.param.b32 [func_retval0+0], [[R1]];
+; COMMON: st.param.b32 [func_retval0], [[R1]];
; COMMON: ret;
define <2 x i16> @test_insertelement(<2 x i16> %a, i16 %x) #0 {
%i = insertelement <2 x i16> %a, i16 %x, i64 1
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index 96a4359d0ec43e..5b5662a1eea766 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -18,7 +18,7 @@ define <4 x i8> @test_ret_const() #0 {
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b32 %r1, -66911489;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
; CHECK-NEXT: ret;
ret <4 x i8>
}
@@ -31,7 +31,7 @@ define i8 @test_extract_0(<4 x i8> %a) #0 {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u32 %r1, [test_extract_0_param_0];
; CHECK-NEXT: bfe.u32 %r2, %r1, 0, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r2;
; CHECK-NEXT: ret;
%e = extractelement <4 x i8> %a, i32 0
ret i8 %e
@@ -45,7 +45,7 @@ define i8 @test_extract_1(<4 x i8> %a) #0 {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u32 %r1, [test_extract_1_param_0];
; CHECK-NEXT: bfe.u32 %r2, %r1, 8, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r2;
; CHECK-NEXT: ret;
%e = extractelement <4 x i8> %a, i32 1
ret i8 %e
@@ -59,7 +59,7 @@ define i8 @test_extract_2(<4 x i8> %a) #0 {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u32 %r1, [test_extract_2_param_0];
; CHECK-NEXT: bfe.u32 %r2, %r1, 16, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r2;
; CHECK-NEXT: ret;
%e = extractelement <4 x i8> %a, i32 2
ret i8 %e
@@ -73,7 +73,7 @@ define i8 @test_extract_3(<4 x i8> %a) #0 {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.u32 %r1, [test_extract_3_param_0];
; CHECK-NEXT: bfe.u32 %r2, %r1, 24, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r2;
; CHECK-NEXT: ret;
%e = extractelement <4 x i8> %a, i32 3
ret i8 %e
@@ -91,7 +91,7 @@ define i8 @test_extract_i(<4 x i8> %a, i64 %idx) #0 {
; CHECK-NEXT: cvt.u32.u64 %r2, %rd1;
; CHECK-NEXT: shl.b32 %r3, %r2, 3;
; CHECK-NEXT: bfe.u32 %r4, %r1, %r3, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r4;
; CHECK-NEXT: ret;
%e = extractelement <4 x i8> %a, i64 %idx
ret i8 %e
@@ -133,7 +133,7 @@ define <4 x i8> @test_add(<4 x i8> %a, <4 x i8> %b) #0 {
; CHECK-NEXT: add.s16 %rs12, %rs11, %rs10;
; CHECK-NEXT: cvt.u32.u16 %r16, %rs12;
; CHECK-NEXT: bfi.b32 %r17, %r16, %r13, 24, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r17;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r17;
; CHECK-NEXT: ret;
%r = add <4 x i8> %a, %b
ret <4 x i8> %r
@@ -166,7 +166,7 @@ define <4 x i8> @test_add_imm_0(<4 x i8> %a) #0 {
; CHECK-NEXT: add.s16 %rs8, %rs7, 4;
; CHECK-NEXT: cvt.u32.u16 %r11, %rs8;
; CHECK-NEXT: bfi.b32 %r12, %r11, %r9, 24, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r12;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r12;
; CHECK-NEXT: ret;
%r = add <4 x i8> , %a
ret <4 x i8> %r
@@ -199,7 +199,7 @@ define <4 x i8> @test_add_imm_1(<4 x i8> %a) #0 {
; CHECK-NEXT: add.s16 %rs8, %rs7, 4;
; CHECK-NEXT: cvt.u32.u16 %r11, %rs8;
; CHECK-NEXT: bfi.b32 %r12, %r11, %r9, 24, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r12;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r12;
; CHECK-NEXT: ret;
%r = add <4 x i8> %a,
ret <4 x i8> %r
@@ -241,7 +241,7 @@ define <4 x i8> @test_sub(<4 x i8> %a, <4 x i8> %b) #0 {
; CHECK-NEXT: sub.s16 %rs12, %rs11, %rs10;
; CHECK-NEXT: cvt.u32.u16 %r16, %rs12;
; CHECK-NEXT: bfi.b32 %r17, %r16, %r13, 24, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r17;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r17;
; CHECK-NEXT: ret;
%r = sub <4 x i8> %a, %b
ret <4 x i8> %r
@@ -283,7 +283,7 @@ define <4 x i8> @test_smax(<4 x i8> %a, <4 x i8> %b) #0 {
; CHECK-NEXT: bfe.u32 %r23, %r2, 24, 8;
; CHECK-NEXT: selp.b32 %r24, %r11, %r23, %p1;
; CHECK-NEXT: bfi.b32 %r25, %r24, %r22, 24, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r25;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r25;
; CHECK-NEXT: ret;
%cmp = icmp sgt <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
@@ -318,7 +318,7 @@ define <4 x i8> @test_umax(<4 x i8> %a, <4 x i8> %b) #0 {
; CHECK-NEXT: bfi.b32 %r15, %r14, %r13, 16, 8;
; CHECK-NEXT: selp.b32 %r16, %r4, %r3, %p1;
; CHECK-NEXT: bfi.b32 %r17, %r16, %r15, 24, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r17;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r17;
; CHECK-NEXT: ret;
%cmp = icmp ugt <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
@@ -361,7 +361,7 @@ define <4 x i8> @test_smin(<4 x i8> %a, <4 x i8> %b) #0 {
; CHECK-NEXT: bfe.u32 %r23, %r2, 24, 8;
; CHECK-NEXT: selp.b32 %r24, %r11, %r23, %p1;
; CHECK-NEXT: bfi.b32 %r25, %r24, %r22, 24, 8;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r25;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r25;
; CHECK-NEXT: ret;
%cmp = icmp sle <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x
i8> %b @@ -396,7 +396,7 @@ define <4 x i8> @test_umin(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: bfi.b32 %r15, %r14, %r13, 16, 8; ; CHECK-NEXT: selp.b32 %r16, %r4, %r3, %p1; ; CHECK-NEXT: bfi.b32 %r17, %r16, %r15, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r17; +; CHECK-NEXT: st.param.b32 [func_retval0], %r17; ; CHECK-NEXT: ret; %cmp = icmp ule <4 x i8> %a, %b %r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b @@ -436,7 +436,7 @@ define <4 x i8> @test_eq(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c) #0 { ; CHECK-NEXT: bfe.u32 %r20, %r3, 24, 8; ; CHECK-NEXT: selp.b32 %r21, %r5, %r20, %p1; ; CHECK-NEXT: bfi.b32 %r22, %r21, %r19, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r22; +; CHECK-NEXT: st.param.b32 [func_retval0], %r22; ; CHECK-NEXT: ret; %cmp = icmp eq <4 x i8> %a, %b %r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %c @@ -476,7 +476,7 @@ define <4 x i8> @test_ne(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c) #0 { ; CHECK-NEXT: bfe.u32 %r20, %r3, 24, 8; ; CHECK-NEXT: selp.b32 %r21, %r5, %r20, %p1; ; CHECK-NEXT: bfi.b32 %r22, %r21, %r19, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r22; +; CHECK-NEXT: st.param.b32 [func_retval0], %r22; ; CHECK-NEXT: ret; %cmp = icmp ne <4 x i8> %a, %b %r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %c @@ -519,7 +519,7 @@ define <4 x i8> @test_mul(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: mul.lo.s16 %rs12, %rs11, %rs10; ; CHECK-NEXT: cvt.u32.u16 %r16, %rs12; ; CHECK-NEXT: bfi.b32 %r17, %r16, %r13, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r17; +; CHECK-NEXT: st.param.b32 [func_retval0], %r17; ; CHECK-NEXT: ret; %r = mul <4 x i8> %a, %b ret <4 x i8> %r @@ -534,7 +534,7 @@ define <4 x i8> @test_or(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: ld.param.u32 %r3, [test_or_param_1]; ; CHECK-NEXT: ld.param.u32 %r4, [test_or_param_0]; ; CHECK-NEXT: or.b32 %r5, %r4, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; %r = or <4 x i8> %a, %b ret <4 x i8> %r @@ -554,7 +554,7 @@ define <4 x i8> @test_or_computed(i8 %a) { ; CHECK-NEXT: bfi.b32 %r4, 0, %r3, 24, 8; ; CHECK-NEXT: bfi.b32 %r6, 5, %r4, 8, 8; ; CHECK-NEXT: or.b32 %r8, %r6, %r4; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r8; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-NEXT: ret; %ins.0 = insertelement <4 x i8> zeroinitializer, i8 %a, i32 0 %ins.1 = insertelement <4 x i8> %ins.0, i8 5, i32 1 @@ -570,7 +570,7 @@ define <4 x i8> @test_or_imm_0(<4 x i8> %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [test_or_imm_0_param_0]; ; CHECK-NEXT: or.b32 %r2, %r1, 67305985; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %r = or <4 x i8> , %a ret <4 x i8> %r @@ -584,7 +584,7 @@ define <4 x i8> @test_or_imm_1(<4 x i8> %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [test_or_imm_1_param_0]; ; CHECK-NEXT: or.b32 %r2, %r1, 67305985; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %r = or <4 x i8> %a, ret <4 x i8> %r @@ -599,7 +599,7 @@ define <4 x i8> @test_xor(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: ld.param.u32 %r3, [test_xor_param_1]; ; CHECK-NEXT: ld.param.u32 %r4, [test_xor_param_0]; ; CHECK-NEXT: xor.b32 %r5, %r4, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; %r = xor <4 x i8> %a, %b ret <4 x i8> %r @@ -619,7 +619,7 @@ define <4 x i8> 
@test_xor_computed(i8 %a) { ; CHECK-NEXT: bfi.b32 %r4, 0, %r3, 24, 8; ; CHECK-NEXT: bfi.b32 %r6, 5, %r4, 8, 8; ; CHECK-NEXT: xor.b32 %r8, %r6, %r4; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r8; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-NEXT: ret; %ins.0 = insertelement <4 x i8> zeroinitializer, i8 %a, i32 0 %ins.1 = insertelement <4 x i8> %ins.0, i8 5, i32 1 @@ -635,7 +635,7 @@ define <4 x i8> @test_xor_imm_0(<4 x i8> %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [test_xor_imm_0_param_0]; ; CHECK-NEXT: xor.b32 %r2, %r1, 67305985; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %r = xor <4 x i8> , %a ret <4 x i8> %r @@ -649,7 +649,7 @@ define <4 x i8> @test_xor_imm_1(<4 x i8> %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [test_xor_imm_1_param_0]; ; CHECK-NEXT: xor.b32 %r2, %r1, 67305985; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %r = xor <4 x i8> %a, ret <4 x i8> %r @@ -664,7 +664,7 @@ define <4 x i8> @test_and(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: ld.param.u32 %r3, [test_and_param_1]; ; CHECK-NEXT: ld.param.u32 %r4, [test_and_param_0]; ; CHECK-NEXT: and.b32 %r5, %r4, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; %r = and <4 x i8> %a, %b ret <4 x i8> %r @@ -684,7 +684,7 @@ define <4 x i8> @test_and_computed(i8 %a) { ; CHECK-NEXT: bfi.b32 %r4, 0, %r3, 24, 8; ; CHECK-NEXT: bfi.b32 %r6, 5, %r4, 8, 8; ; CHECK-NEXT: and.b32 %r8, %r6, %r4; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r8; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-NEXT: ret; %ins.0 = insertelement <4 x i8> zeroinitializer, i8 %a, i32 0 %ins.1 = insertelement <4 x i8> %ins.0, i8 5, i32 1 @@ -700,7 +700,7 @@ define <4 x i8> @test_and_imm_0(<4 x i8> %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [test_and_imm_0_param_0]; ; CHECK-NEXT: and.b32 %r2, %r1, 67305985; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %r = and <4 x i8> , %a ret <4 x i8> %r @@ -714,7 +714,7 @@ define <4 x i8> @test_and_imm_1(<4 x i8> %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [test_and_imm_1_param_0]; ; CHECK-NEXT: and.b32 %r2, %r1, 67305985; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %r = and <4 x i8> %a, ret <4 x i8> %r @@ -828,9 +828,9 @@ define <4 x i8> @test_call(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: ld.param.u32 %r1, [test_call_param_0]; ; CHECK-NEXT: { // callseq 0, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.b32 [param0+0], %r1; +; CHECK-NEXT: st.param.b32 [param0], %r1; ; CHECK-NEXT: .param .align 4 .b8 param1[4]; -; CHECK-NEXT: st.param.b32 [param1+0], %r2; +; CHECK-NEXT: st.param.b32 [param1], %r2; ; CHECK-NEXT: .param .align 4 .b8 retval0[4]; ; CHECK-NEXT: call.uni (retval0), ; CHECK-NEXT: test_callee, @@ -838,9 +838,9 @@ define <4 x i8> @test_call(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: param0, ; CHECK-NEXT: param1 ; CHECK-NEXT: ); -; CHECK-NEXT: ld.param.b32 %r3, [retval0+0]; +; CHECK-NEXT: ld.param.b32 %r3, [retval0]; ; CHECK-NEXT: } // callseq 0 -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %r = call <4 x i8> @test_callee(<4 x i8> %a, <4 x i8> %b) 
ret <4 x i8> %r @@ -856,9 +856,9 @@ define <4 x i8> @test_call_flipped(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: ld.param.u32 %r1, [test_call_flipped_param_0]; ; CHECK-NEXT: { // callseq 1, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.b32 [param0+0], %r2; +; CHECK-NEXT: st.param.b32 [param0], %r2; ; CHECK-NEXT: .param .align 4 .b8 param1[4]; -; CHECK-NEXT: st.param.b32 [param1+0], %r1; +; CHECK-NEXT: st.param.b32 [param1], %r1; ; CHECK-NEXT: .param .align 4 .b8 retval0[4]; ; CHECK-NEXT: call.uni (retval0), ; CHECK-NEXT: test_callee, @@ -866,9 +866,9 @@ define <4 x i8> @test_call_flipped(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: param0, ; CHECK-NEXT: param1 ; CHECK-NEXT: ); -; CHECK-NEXT: ld.param.b32 %r3, [retval0+0]; +; CHECK-NEXT: ld.param.b32 %r3, [retval0]; ; CHECK-NEXT: } // callseq 1 -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %r = call <4 x i8> @test_callee(<4 x i8> %b, <4 x i8> %a) ret <4 x i8> %r @@ -884,9 +884,9 @@ define <4 x i8> @test_tailcall_flipped(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: ld.param.u32 %r1, [test_tailcall_flipped_param_0]; ; CHECK-NEXT: { // callseq 2, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.b32 [param0+0], %r2; +; CHECK-NEXT: st.param.b32 [param0], %r2; ; CHECK-NEXT: .param .align 4 .b8 param1[4]; -; CHECK-NEXT: st.param.b32 [param1+0], %r1; +; CHECK-NEXT: st.param.b32 [param1], %r1; ; CHECK-NEXT: .param .align 4 .b8 retval0[4]; ; CHECK-NEXT: call.uni (retval0), ; CHECK-NEXT: test_callee, @@ -894,9 +894,9 @@ define <4 x i8> @test_tailcall_flipped(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: param0, ; CHECK-NEXT: param1 ; CHECK-NEXT: ); -; CHECK-NEXT: ld.param.b32 %r3, [retval0+0]; +; CHECK-NEXT: ld.param.b32 %r3, [retval0]; ; CHECK-NEXT: } // callseq 2 -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %r = tail call <4 x i8> @test_callee(<4 x i8> %b, <4 x i8> %a) ret <4 x i8> %r @@ -916,7 +916,7 @@ define <4 x i8> @test_select(<4 x i8> %a, <4 x i8> %b, i1 zeroext %c) #0 { ; CHECK-NEXT: ld.param.u32 %r2, [test_select_param_1]; ; CHECK-NEXT: ld.param.u32 %r1, [test_select_param_0]; ; CHECK-NEXT: selp.b32 %r3, %r1, %r2, %p1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %r = select i1 %c, <4 x i8> %a, <4 x i8> %b ret <4 x i8> %r @@ -960,7 +960,7 @@ define <4 x i8> @test_select_cc(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> ; CHECK-NEXT: bfe.u32 %r25, %r1, 24, 8; ; CHECK-NEXT: selp.b32 %r26, %r25, %r24, %p1; ; CHECK-NEXT: bfi.b32 %r27, %r26, %r23, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r27; +; CHECK-NEXT: st.param.b32 [func_retval0], %r27; ; CHECK-NEXT: ret; %cc = icmp ne <4 x i8> %c, %d %r = select <4 x i1> %cc, <4 x i8> %a, <4 x i8> %b @@ -994,7 +994,7 @@ define <4 x i32> @test_select_cc_i32_i8(<4 x i32> %a, <4 x i32> %b, ; CHECK-NEXT: selp.b32 %r20, %r3, %r7, %p3; ; CHECK-NEXT: selp.b32 %r21, %r2, %r6, %p2; ; CHECK-NEXT: selp.b32 %r22, %r1, %r5, %p1; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r22, %r21, %r20, %r19}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r22, %r21, %r20, %r19}; ; CHECK-NEXT: ret; <4 x i8> %c, <4 x i8> %d) #0 { %cc = icmp ne <4 x i8> %c, %d @@ -1032,7 +1032,7 @@ define <4 x i8> @test_select_cc_i8_i32(<4 x i8> %a, <4 x i8> %b, ; CHECK-NEXT: bfe.u32 %r23, %r1, 24, 8; ; CHECK-NEXT: selp.b32 %r24, %r23, %r22, %p1; ; CHECK-NEXT: 
bfi.b32 %r25, %r24, %r21, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r25; +; CHECK-NEXT: st.param.b32 [func_retval0], %r25; ; CHECK-NEXT: ret; <4 x i32> %c, <4 x i32> %d) #0 { %cc = icmp ne <4 x i32> %c, %d @@ -1051,7 +1051,7 @@ define <4 x i8> @test_trunc_2xi32(<4 x i32> %a) #0 { ; CHECK-NEXT: bfi.b32 %r5, %r2, %r1, 8, 8; ; CHECK-NEXT: bfi.b32 %r6, %r3, %r5, 16, 8; ; CHECK-NEXT: bfi.b32 %r7, %r4, %r6, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r7; +; CHECK-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-NEXT: ret; %r = trunc <4 x i32> %a to <4 x i8> ret <4 x i8> %r @@ -1073,7 +1073,7 @@ define <4 x i8> @test_trunc_2xi64(<4 x i64> %a) #0 { ; CHECK-NEXT: bfi.b32 %r5, %r4, %r3, 16, 8; ; CHECK-NEXT: cvt.u32.u64 %r6, %rd4; ; CHECK-NEXT: bfi.b32 %r7, %r6, %r5, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r7; +; CHECK-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-NEXT: ret; %r = trunc <4 x i64> %a to <4 x i8> ret <4 x i8> %r @@ -1090,7 +1090,7 @@ define <4 x i32> @test_zext_2xi32(<4 x i8> %a) #0 { ; CHECK-NEXT: bfe.u32 %r3, %r1, 16, 8; ; CHECK-NEXT: bfe.u32 %r4, %r1, 8, 8; ; CHECK-NEXT: bfe.u32 %r5, %r1, 0, 8; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r5, %r4, %r3, %r2}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r5, %r4, %r3, %r2}; ; CHECK-NEXT: ret; %r = zext <4 x i8> %a to <4 x i32> ret <4 x i32> %r @@ -1116,7 +1116,7 @@ define <4 x i64> @test_zext_2xi64(<4 x i8> %a) #0 { ; CHECK-NEXT: bfe.u32 %r5, %r1, 0, 8; ; CHECK-NEXT: cvt.u64.u32 %rd7, %r5; ; CHECK-NEXT: and.b64 %rd8, %rd7, 255; -; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd8, %rd6}; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd8, %rd6}; ; CHECK-NEXT: st.param.v2.b64 [func_retval0+16], {%rd4, %rd2}; ; CHECK-NEXT: ret; %r = zext <4 x i8> %a to <4 x i64> @@ -1130,7 +1130,7 @@ define <4 x i8> @test_bitcast_i32_to_4xi8(i32 %a) #0 { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r1, [test_bitcast_i32_to_4xi8_param_0]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %r = bitcast i32 %a to <4 x i8> ret <4 x i8> %r @@ -1145,7 +1145,7 @@ define <4 x i8> @test_bitcast_float_to_4xi8(float %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [test_bitcast_float_to_4xi8_param_0]; ; CHECK-NEXT: mov.b32 %r1, %f1; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %r = bitcast float %a to <4 x i8> ret <4 x i8> %r @@ -1158,7 +1158,7 @@ define i32 @test_bitcast_4xi8_to_i32(<4 x i8> %a) #0 { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r2, [test_bitcast_4xi8_to_i32_param_0]; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %r = bitcast <4 x i8> %a to i32 ret i32 %r @@ -1173,7 +1173,7 @@ define float @test_bitcast_4xi8_to_float(<4 x i8> %a) #0 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.u32 %r2, [test_bitcast_4xi8_to_float_param_0]; ; CHECK-NEXT: mov.b32 %f1, %r2; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f1; +; CHECK-NEXT: st.param.f32 [func_retval0], %f1; ; CHECK-NEXT: ret; %r = bitcast <4 x i8> %a to float ret float %r @@ -1192,7 +1192,7 @@ define <2 x half> @test_bitcast_4xi8_to_2xhalf(i8 %a) #0 { ; CHECK-NEXT: bfi.b32 %r2, 5, %r1, 8, 8; ; CHECK-NEXT: bfi.b32 %r3, 6, %r2, 16, 8; ; CHECK-NEXT: bfi.b32 %r4, 7, %r3, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; 
CHECK-NEXT: ret; %ins.0 = insertelement <4 x i8> undef, i8 %a, i32 0 %ins.1 = insertelement <4 x i8> %ins.0, i8 5, i32 1 @@ -1212,7 +1212,7 @@ define <4 x i8> @test_shufflevector(<4 x i8> %a) #0 { ; CHECK-NEXT: ld.param.u32 %r1, [test_shufflevector_param_0]; ; CHECK-NEXT: // implicit-def: %r3 ; CHECK-NEXT: prmt.b32 %r2, %r1, %r3, 291; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; %s = shufflevector <4 x i8> %a, <4 x i8> undef, <4 x i32> ret <4 x i8> %s @@ -1227,7 +1227,7 @@ define <4 x i8> @test_shufflevector_2(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-NEXT: ld.param.u32 %r2, [test_shufflevector_2_param_1]; ; CHECK-NEXT: ld.param.u32 %r1, [test_shufflevector_2_param_0]; ; CHECK-NEXT: prmt.b32 %r3, %r1, %r2, 9527; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %s = shufflevector <4 x i8> %a, <4 x i8> %b, <4 x i32> ret <4 x i8> %s @@ -1245,7 +1245,7 @@ define <4 x i8> @test_insertelement(<4 x i8> %a, i8 %x) #0 { ; CHECK-NEXT: ld.param.u32 %r1, [test_insertelement_param_0]; ; CHECK-NEXT: cvt.u32.u16 %r2, %rs1; ; CHECK-NEXT: bfi.b32 %r3, %r2, %r1, 8, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; %i = insertelement <4 x i8> %a, i8 %x, i64 1 ret <4 x i8> %i @@ -1276,7 +1276,7 @@ define <4 x i8> @test_fptosi_4xhalf_to_4xi8(<4 x half> %a) #0 { ; CHECK-NEXT: bfi.b32 %r11, %r10, %r8, 16, 8; ; CHECK-NEXT: cvt.u32.u16 %r12, %rs12; ; CHECK-NEXT: bfi.b32 %r13, %r12, %r11, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r13; +; CHECK-NEXT: st.param.b32 [func_retval0], %r13; ; CHECK-NEXT: ret; %r = fptosi <4 x half> %a to <4 x i8> ret <4 x i8> %r @@ -1307,7 +1307,7 @@ define <4 x i8> @test_fptoui_4xhalf_to_4xi8(<4 x half> %a) #0 { ; CHECK-NEXT: bfi.b32 %r11, %r10, %r8, 16, 8; ; CHECK-NEXT: cvt.u32.u16 %r12, %rs12; ; CHECK-NEXT: bfi.b32 %r13, %r12, %r11, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r13; +; CHECK-NEXT: st.param.b32 [func_retval0], %r13; ; CHECK-NEXT: ret; %r = fptoui <4 x half> %a to <4 x i8> ret <4 x i8> %r diff --git a/llvm/test/CodeGen/NVPTX/indirect_byval.ll b/llvm/test/CodeGen/NVPTX/indirect_byval.ll index ac6c4e262fd60e..1799c86deda76d 100644 --- a/llvm/test/CodeGen/NVPTX/indirect_byval.ll +++ b/llvm/test/CodeGen/NVPTX/indirect_byval.ll @@ -27,9 +27,9 @@ define internal i32 @foo() { ; CHECK-NEXT: add.u64 %rd2, %SP, 0; ; CHECK-NEXT: { // callseq 0, 0 ; CHECK-NEXT: .param .align 1 .b8 param0[1]; -; CHECK-NEXT: st.param.b8 [param0+0], %rs1; +; CHECK-NEXT: st.param.b8 [param0], %rs1; ; CHECK-NEXT: .param .b64 param1; -; CHECK-NEXT: st.param.b64 [param1+0], %rd2; +; CHECK-NEXT: st.param.b64 [param1], %rd2; ; CHECK-NEXT: .param .b32 retval0; ; CHECK-NEXT: prototype_0 : .callprototype (.param .b32 _) _ (.param .align 1 .b8 _[1], .param .b64 _); ; CHECK-NEXT: call (retval0), @@ -39,9 +39,9 @@ define internal i32 @foo() { ; CHECK-NEXT: param1 ; CHECK-NEXT: ) ; CHECK-NEXT: , prototype_0; -; CHECK-NEXT: ld.param.b32 %r1, [retval0+0]; +; CHECK-NEXT: ld.param.b32 %r1, [retval0]; ; CHECK-NEXT: } // callseq 0 -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; entry: %s = alloca %struct.S, align 1 @@ -69,9 +69,9 @@ define internal i32 @bar() { ; CHECK-NEXT: add.u64 %rd3, %SP, 0; ; CHECK-NEXT: { // callseq 1, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.b64 [param0+0], %rd2; +; 
CHECK-NEXT: st.param.b64 [param0], %rd2; ; CHECK-NEXT: .param .b64 param1; -; CHECK-NEXT: st.param.b64 [param1+0], %rd3; +; CHECK-NEXT: st.param.b64 [param1], %rd3; ; CHECK-NEXT: .param .b32 retval0; ; CHECK-NEXT: prototype_1 : .callprototype (.param .b32 _) _ (.param .align 8 .b8 _[8], .param .b64 _); ; CHECK-NEXT: call (retval0), @@ -81,9 +81,9 @@ define internal i32 @bar() { ; CHECK-NEXT: param1 ; CHECK-NEXT: ) ; CHECK-NEXT: , prototype_1; -; CHECK-NEXT: ld.param.b32 %r1, [retval0+0]; +; CHECK-NEXT: ld.param.b32 %r1, [retval0]; ; CHECK-NEXT: } // callseq 1 -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; entry: %s = alloca %struct.U, align 8 diff --git a/llvm/test/CodeGen/NVPTX/jump-table.ll b/llvm/test/CodeGen/NVPTX/jump-table.ll index b201fb98f3e6bb..dbd4f8a55facfd 100644 --- a/llvm/test/CodeGen/NVPTX/jump-table.ll +++ b/llvm/test/CodeGen/NVPTX/jump-table.ll @@ -101,7 +101,7 @@ define i32 @test2(i32 %tmp158) { ; CHECK-NEXT: brx.idx %r2, $L_brx_0; ; CHECK-NEXT: $L__BB1_7: // %bb339 ; CHECK-NEXT: mov.b32 %r7, 12; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r7; +; CHECK-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-NEXT: ret; ; CHECK-NEXT: $L__BB1_5: // %entry ; CHECK-NEXT: setp.eq.s32 %p3, %r1, 1024; @@ -109,27 +109,27 @@ define i32 @test2(i32 %tmp158) { ; CHECK-NEXT: bra.uni $L__BB1_6; ; CHECK-NEXT: $L__BB1_3: // %bb338 ; CHECK-NEXT: mov.b32 %r8, 11; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r8; +; CHECK-NEXT: st.param.b32 [func_retval0], %r8; ; CHECK-NEXT: ret; ; CHECK-NEXT: $L__BB1_10: // %bb342 ; CHECK-NEXT: mov.b32 %r4, 15; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; ; CHECK-NEXT: $L__BB1_6: // %bb336 ; CHECK-NEXT: mov.b32 %r9, 10; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r9; +; CHECK-NEXT: st.param.b32 [func_retval0], %r9; ; CHECK-NEXT: ret; ; CHECK-NEXT: $L__BB1_8: // %bb340 ; CHECK-NEXT: mov.b32 %r6, 13; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r6; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; ; CHECK-NEXT: ret; ; CHECK-NEXT: $L__BB1_9: // %bb341 ; CHECK-NEXT: mov.b32 %r5, 14; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r5; +; CHECK-NEXT: st.param.b32 [func_retval0], %r5; ; CHECK-NEXT: ret; ; CHECK-NEXT: $L__BB1_11: // %bb343 ; CHECK-NEXT: mov.b32 %r3, 18; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NEXT: ret; entry: switch i32 %tmp158, label %bb336 [ diff --git a/llvm/test/CodeGen/NVPTX/ldparam-v4.ll b/llvm/test/CodeGen/NVPTX/ldparam-v4.ll index dc20441a67a8bf..47f65ecbcfa6d5 100644 --- a/llvm/test/CodeGen/NVPTX/ldparam-v4.ll +++ b/llvm/test/CodeGen/NVPTX/ldparam-v4.ll @@ -6,7 +6,7 @@ declare <4 x float> @bar() ; CHECK-LABEL: .func foo( define void @foo(ptr %ptr) { ; CHECK: ld.param.u64 %[[PTR:rd[0-9]+]], [foo_param_0]; -; CHECK: ld.param.v4.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]], [[E2:%f[0-9]+]], [[E3:%f[0-9]+]]}, [retval0+0]; +; CHECK: ld.param.v4.f32 {[[E0:%f[0-9]+]], [[E1:%f[0-9]+]], [[E2:%f[0-9]+]], [[E3:%f[0-9]+]]}, [retval0]; ; CHECK: st.v4.f32 [%[[PTR]]], {[[E0]], [[E1]], [[E2]], [[E3]]} %val = tail call <4 x float> @bar() store <4 x float> %val, ptr %ptr diff --git a/llvm/test/CodeGen/NVPTX/local-stack-frame.ll b/llvm/test/CodeGen/NVPTX/local-stack-frame.ll index d702ede61addf4..cac49b49970b73 100644 --- a/llvm/test/CodeGen/NVPTX/local-stack-frame.ll +++ b/llvm/test/CodeGen/NVPTX/local-stack-frame.ll @@ -8,11 +8,11 @@ ; PTX32: mov.u32 
%SPL, __local_depot{{[0-9]+}}; ; PTX32: cvta.local.u32 %SP, %SPL; ; PTX32: ld.param.u32 %r{{[0-9]+}}, [foo_param_0]; -; PTX32: st.volatile.u32 [%SP+0], %r{{[0-9]+}}; +; PTX32: st.volatile.u32 [%SP], %r{{[0-9]+}}; ; PTX64: mov.u64 %SPL, __local_depot{{[0-9]+}}; ; PTX64: cvta.local.u64 %SP, %SPL; ; PTX64: ld.param.u32 %r{{[0-9]+}}, [foo_param_0]; -; PTX64: st.volatile.u32 [%SP+0], %r{{[0-9]+}}; +; PTX64: st.volatile.u32 [%SP], %r{{[0-9]+}}; define void @foo(i32 %a) { %local = alloca i32, align 4 store volatile i32 %a, ptr %local diff --git a/llvm/test/CodeGen/NVPTX/lower-alloca.ll b/llvm/test/CodeGen/NVPTX/lower-alloca.ll index 400184aaefb211..e09fb938ef0864 100644 --- a/llvm/test/CodeGen/NVPTX/lower-alloca.ll +++ b/llvm/test/CodeGen/NVPTX/lower-alloca.ll @@ -26,7 +26,7 @@ define void @alloca_in_explicit_local_as() { ; PTX-LABEL: .visible .func alloca_in_explicit_local_as( %A = alloca i32, addrspace(5) ; CHECK: store i32 0, ptr addrspace(5) {{%.+}} -; PTX: st.local.u32 [%SP+0], {{%r[0-9]+}} +; PTX: st.local.u32 [%SP], {{%r[0-9]+}} ; LOWERALLOCAONLY: [[V1:%.*]] = addrspacecast ptr addrspace(5) %A to ptr ; LOWERALLOCAONLY: store i32 0, ptr [[V1]], align 4 store i32 0, ptr addrspace(5) %A diff --git a/llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll b/llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll index 33fa3afc94b89d..9cfe9192772b89 100644 --- a/llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll +++ b/llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll @@ -43,7 +43,7 @@ define dso_local noundef i32 @non_kernel_function(ptr nocapture noundef readonly ; PTX-NEXT: ld.param.u64 %rd4, [non_kernel_function_param_0+8]; ; PTX-NEXT: st.u64 [%rd3], %rd4; ; PTX-NEXT: ld.param.u64 %rd5, [non_kernel_function_param_0]; -; PTX-NEXT: st.u64 [%SP+0], %rd5; +; PTX-NEXT: st.u64 [%SP], %rd5; ; PTX-NEXT: mov.u64 %rd6, gi; ; PTX-NEXT: cvta.global.u64 %rd7, %rd6; ; PTX-NEXT: selp.b64 %rd8, %rd2, %rd7, %p1; @@ -58,7 +58,7 @@ define dso_local noundef i32 @non_kernel_function(ptr nocapture noundef readonly ; PTX-NEXT: shl.b32 %r8, %r7, 24; ; PTX-NEXT: or.b32 %r9, %r8, %r6; ; PTX-NEXT: or.b32 %r10, %r9, %r4; -; PTX-NEXT: st.param.b32 [func_retval0+0], %r10; +; PTX-NEXT: st.param.b32 [func_retval0], %r10; ; PTX-NEXT: ret; entry: %a. 
= select i1 %b, ptr %a, ptr addrspacecast (ptr addrspace(1) @gi to ptr), !dbg !17 @@ -147,7 +147,7 @@ define void @grid_const_escape(ptr byval(%struct.s) align 4 %input) { ; PTX-NEXT: mov.u64 %rd1, escape; ; PTX-NEXT: { // callseq 0, 0 ; PTX-NEXT: .param .b64 param0; -; PTX-NEXT: st.param.b64 [param0+0], %rd4; +; PTX-NEXT: st.param.b64 [param0], %rd4; ; PTX-NEXT: .param .b32 retval0; ; PTX-NEXT: prototype_0 : .callprototype (.param .b32 _) _ (.param .b64 _); ; PTX-NEXT: call (retval0), @@ -156,7 +156,7 @@ define void @grid_const_escape(ptr byval(%struct.s) align 4 %input) { ; PTX-NEXT: param0 ; PTX-NEXT: ) ; PTX-NEXT: , prototype_0; -; PTX-NEXT: ld.param.b32 %r1, [retval0+0]; +; PTX-NEXT: ld.param.b32 %r1, [retval0]; ; PTX-NEXT: } // callseq 0 ; PTX-NEXT: ret; ; OPT-LABEL: define void @grid_const_escape( @@ -194,11 +194,11 @@ define void @multiple_grid_const_escape(ptr byval(%struct.s) align 4 %input, i32 ; PTX-NEXT: mov.u64 %rd1, escape3; ; PTX-NEXT: { // callseq 1, 0 ; PTX-NEXT: .param .b64 param0; -; PTX-NEXT: st.param.b64 [param0+0], %rd7; +; PTX-NEXT: st.param.b64 [param0], %rd7; ; PTX-NEXT: .param .b64 param1; -; PTX-NEXT: st.param.b64 [param1+0], %rd8; +; PTX-NEXT: st.param.b64 [param1], %rd8; ; PTX-NEXT: .param .b64 param2; -; PTX-NEXT: st.param.b64 [param2+0], %rd5; +; PTX-NEXT: st.param.b64 [param2], %rd5; ; PTX-NEXT: .param .b32 retval0; ; PTX-NEXT: prototype_1 : .callprototype (.param .b32 _) _ (.param .b64 _, .param .b64 _, .param .b64 _); ; PTX-NEXT: call (retval0), @@ -209,7 +209,7 @@ define void @multiple_grid_const_escape(ptr byval(%struct.s) align 4 %input, i32 ; PTX-NEXT: param2 ; PTX-NEXT: ) ; PTX-NEXT: , prototype_1; -; PTX-NEXT: ld.param.b32 %r2, [retval0+0]; +; PTX-NEXT: ld.param.b32 %r2, [retval0]; ; PTX-NEXT: } // callseq 1 ; PTX-NEXT: ret; ; OPT-LABEL: define void @multiple_grid_const_escape( @@ -307,7 +307,7 @@ define void @grid_const_partial_escape(ptr byval(i32) %input, ptr %output) { ; PTX-NEXT: mov.u64 %rd1, escape; ; PTX-NEXT: { // callseq 2, 0 ; PTX-NEXT: .param .b64 param0; -; PTX-NEXT: st.param.b64 [param0+0], %rd6; +; PTX-NEXT: st.param.b64 [param0], %rd6; ; PTX-NEXT: .param .b32 retval0; ; PTX-NEXT: prototype_2 : .callprototype (.param .b32 _) _ (.param .b64 _); ; PTX-NEXT: call (retval0), @@ -316,7 +316,7 @@ define void @grid_const_partial_escape(ptr byval(i32) %input, ptr %output) { ; PTX-NEXT: param0 ; PTX-NEXT: ) ; PTX-NEXT: , prototype_2; -; PTX-NEXT: ld.param.b32 %r3, [retval0+0]; +; PTX-NEXT: ld.param.b32 %r3, [retval0]; ; PTX-NEXT: } // callseq 2 ; PTX-NEXT: ret; ; OPT-LABEL: define void @grid_const_partial_escape( @@ -356,7 +356,7 @@ define i32 @grid_const_partial_escapemem(ptr byval(%struct.s) %input, ptr %outpu ; PTX-NEXT: mov.u64 %rd1, escape; ; PTX-NEXT: { // callseq 3, 0 ; PTX-NEXT: .param .b64 param0; -; PTX-NEXT: st.param.b64 [param0+0], %rd6; +; PTX-NEXT: st.param.b64 [param0], %rd6; ; PTX-NEXT: .param .b32 retval0; ; PTX-NEXT: prototype_3 : .callprototype (.param .b32 _) _ (.param .b64 _); ; PTX-NEXT: call (retval0), @@ -365,9 +365,9 @@ define i32 @grid_const_partial_escapemem(ptr byval(%struct.s) %input, ptr %outpu ; PTX-NEXT: param0 ; PTX-NEXT: ) ; PTX-NEXT: , prototype_3; -; PTX-NEXT: ld.param.b32 %r4, [retval0+0]; +; PTX-NEXT: ld.param.b32 %r4, [retval0]; ; PTX-NEXT: } // callseq 3 -; PTX-NEXT: st.param.b32 [func_retval0+0], %r3; +; PTX-NEXT: st.param.b32 [func_retval0], %r3; ; PTX-NEXT: ret; ; OPT-LABEL: define i32 @grid_const_partial_escapemem( ; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) [[INPUT:%.*]], ptr [[OUTPUT:%.*]]) #[[ATTR0]] 
{ @@ -574,7 +574,7 @@ define i32 @grid_const_ptrtoint(ptr byval(i32) %input) { ; PTX-NEXT: cvta.param.u64 %rd3, %rd2; ; PTX-NEXT: cvt.u32.u64 %r2, %rd3; ; PTX-NEXT: add.s32 %r3, %r1, %r2; -; PTX-NEXT: st.param.b32 [func_retval0+0], %r3; +; PTX-NEXT: st.param.b32 [func_retval0], %r3; ; PTX-NEXT: ret; ; OPT-LABEL: define i32 @grid_const_ptrtoint( ; OPT-SAME: ptr byval(i32) align 4 [[INPUT:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/CodeGen/NVPTX/lower-args.ll b/llvm/test/CodeGen/NVPTX/lower-args.ll index d1bec032ec3a98..eba4f273fa709d 100644 --- a/llvm/test/CodeGen/NVPTX/lower-args.ll +++ b/llvm/test/CodeGen/NVPTX/lower-args.ll @@ -46,18 +46,18 @@ define void @load_padding(ptr nocapture readonly byval(%class.padded) %arg) { ; PTX-NEXT: mov.u64 %SPL, __local_depot1; ; PTX-NEXT: cvta.local.u64 %SP, %SPL; ; PTX-NEXT: ld.param.u64 %rd1, [load_padding_param_0]; -; PTX-NEXT: st.u64 [%SP+0], %rd1; +; PTX-NEXT: st.u64 [%SP], %rd1; ; PTX-NEXT: add.u64 %rd2, %SP, 0; ; PTX-NEXT: { // callseq 1, 0 ; PTX-NEXT: .param .b64 param0; -; PTX-NEXT: st.param.b64 [param0+0], %rd2; +; PTX-NEXT: st.param.b64 [param0], %rd2; ; PTX-NEXT: .param .b64 retval0; ; PTX-NEXT: call.uni (retval0), ; PTX-NEXT: escape, ; PTX-NEXT: ( ; PTX-NEXT: param0 ; PTX-NEXT: ); -; PTX-NEXT: ld.param.b64 %rd3, [retval0+0]; +; PTX-NEXT: ld.param.b64 %rd3, [retval0]; ; PTX-NEXT: } // callseq 1 ; PTX-NEXT: ret; %tmp = call ptr @escape(ptr nonnull align 16 %arg) diff --git a/llvm/test/CodeGen/NVPTX/math-intrins.ll b/llvm/test/CodeGen/NVPTX/math-intrins.ll index bdd6c914384601..5161e5d029777e 100644 --- a/llvm/test/CodeGen/NVPTX/math-intrins.ll +++ b/llvm/test/CodeGen/NVPTX/math-intrins.ll @@ -55,7 +55,7 @@ define float @ceil_float(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [ceil_float_param_0]; ; CHECK-NEXT: cvt.rpi.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.ceil.f32(float %a) ret float %b @@ -69,7 +69,7 @@ define float @ceil_float_ftz(float %a) #1 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [ceil_float_ftz_param_0]; ; CHECK-NEXT: cvt.rpi.ftz.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.ceil.f32(float %a) ret float %b @@ -83,7 +83,7 @@ define double @ceil_double(double %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f64 %fd1, [ceil_double_param_0]; ; CHECK-NEXT: cvt.rpi.f64.f64 %fd2, %fd1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd2; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd2; ; CHECK-NEXT: ret; %b = call double @llvm.ceil.f64(double %a) ret double %b @@ -99,7 +99,7 @@ define float @floor_float(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [floor_float_param_0]; ; CHECK-NEXT: cvt.rmi.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.floor.f32(float %a) ret float %b @@ -113,7 +113,7 @@ define float @floor_float_ftz(float %a) #1 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [floor_float_ftz_param_0]; ; CHECK-NEXT: cvt.rmi.ftz.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.floor.f32(float %a) ret float %b @@ -127,7 +127,7 @@ define double @floor_double(double %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f64 
%fd1, [floor_double_param_0]; ; CHECK-NEXT: cvt.rmi.f64.f64 %fd2, %fd1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd2; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd2; ; CHECK-NEXT: ret; %b = call double @llvm.floor.f64(double %a) ret double %b @@ -157,7 +157,7 @@ define float @round_float(float %a) { ; CHECK-NEXT: cvt.rzi.f32.f32 %f7, %f1; ; CHECK-NEXT: setp.lt.f32 %p2, %f5, 0f3F000000; ; CHECK-NEXT: selp.f32 %f8, %f7, %f6, %p2; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f8; +; CHECK-NEXT: st.param.f32 [func_retval0], %f8; ; CHECK-NEXT: ret; %b = call float @llvm.round.f32(float %a) ret float %b @@ -185,7 +185,7 @@ define float @round_float_ftz(float %a) #1 { ; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %f7, %f1; ; CHECK-NEXT: setp.lt.ftz.f32 %p2, %f5, 0f3F000000; ; CHECK-NEXT: selp.f32 %f8, %f7, %f6, %p2; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f8; +; CHECK-NEXT: st.param.f32 [func_retval0], %f8; ; CHECK-NEXT: ret; %b = call float @llvm.round.f32(float %a) ret float %b @@ -208,7 +208,7 @@ define double @round_double(double %a) { ; CHECK-NEXT: copysign.f64 %fd6, %fd1, %fd5; ; CHECK-NEXT: setp.gt.f64 %p2, %fd2, 0d4330000000000000; ; CHECK-NEXT: selp.f64 %fd7, %fd1, %fd6, %p2; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd7; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd7; ; CHECK-NEXT: ret; %b = call double @llvm.round.f64(double %a) ret double %b @@ -224,7 +224,7 @@ define float @nearbyint_float(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [nearbyint_float_param_0]; ; CHECK-NEXT: cvt.rni.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.nearbyint.f32(float %a) ret float %b @@ -238,7 +238,7 @@ define float @nearbyint_float_ftz(float %a) #1 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [nearbyint_float_ftz_param_0]; ; CHECK-NEXT: cvt.rni.ftz.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.nearbyint.f32(float %a) ret float %b @@ -252,7 +252,7 @@ define double @nearbyint_double(double %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f64 %fd1, [nearbyint_double_param_0]; ; CHECK-NEXT: cvt.rni.f64.f64 %fd2, %fd1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd2; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd2; ; CHECK-NEXT: ret; %b = call double @llvm.nearbyint.f64(double %a) ret double %b @@ -268,7 +268,7 @@ define float @rint_float(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [rint_float_param_0]; ; CHECK-NEXT: cvt.rni.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.rint.f32(float %a) ret float %b @@ -282,7 +282,7 @@ define float @rint_float_ftz(float %a) #1 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [rint_float_ftz_param_0]; ; CHECK-NEXT: cvt.rni.ftz.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.rint.f32(float %a) ret float %b @@ -296,7 +296,7 @@ define double @rint_double(double %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f64 %fd1, [rint_double_param_0]; ; CHECK-NEXT: cvt.rni.f64.f64 %fd2, %fd1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd2; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd2; ; CHECK-NEXT: ret; %b = call double @llvm.rint.f64(double %a) 
ret double %b @@ -312,7 +312,7 @@ define float @roundeven_float(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [roundeven_float_param_0]; ; CHECK-NEXT: cvt.rni.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.roundeven.f32(float %a) ret float %b @@ -326,7 +326,7 @@ define float @roundeven_float_ftz(float %a) #1 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [roundeven_float_ftz_param_0]; ; CHECK-NEXT: cvt.rni.ftz.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.roundeven.f32(float %a) ret float %b @@ -340,7 +340,7 @@ define double @roundeven_double(double %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f64 %fd1, [roundeven_double_param_0]; ; CHECK-NEXT: cvt.rni.f64.f64 %fd2, %fd1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd2; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd2; ; CHECK-NEXT: ret; %b = call double @llvm.roundeven.f64(double %a) ret double %b @@ -356,7 +356,7 @@ define float @trunc_float(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [trunc_float_param_0]; ; CHECK-NEXT: cvt.rzi.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.trunc.f32(float %a) ret float %b @@ -370,7 +370,7 @@ define float @trunc_float_ftz(float %a) #1 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [trunc_float_ftz_param_0]; ; CHECK-NEXT: cvt.rzi.ftz.f32.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.trunc.f32(float %a) ret float %b @@ -384,7 +384,7 @@ define double @trunc_double(double %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f64 %fd1, [trunc_double_param_0]; ; CHECK-NEXT: cvt.rzi.f64.f64 %fd2, %fd1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd2; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd2; ; CHECK-NEXT: ret; %b = call double @llvm.trunc.f64(double %a) ret double %b @@ -400,7 +400,7 @@ define float @abs_float(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [abs_float_param_0]; ; CHECK-NEXT: abs.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.fabs.f32(float %a) ret float %b @@ -414,7 +414,7 @@ define float @abs_float_ftz(float %a) #1 { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [abs_float_ftz_param_0]; ; CHECK-NEXT: abs.ftz.f32 %f2, %f1; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %b = call float @llvm.fabs.f32(float %a) ret float %b @@ -428,7 +428,7 @@ define double @abs_double(double %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f64 %fd1, [abs_double_param_0]; ; CHECK-NEXT: abs.f64 %fd2, %fd1; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd2; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd2; ; CHECK-NEXT: ret; %b = call double @llvm.fabs.f64(double %a) ret double %b @@ -449,7 +449,7 @@ define half @minnum_half(half %a, half %b) { ; CHECK-NOF16-NEXT: cvt.f32.f16 %f2, %rs1; ; CHECK-NOF16-NEXT: min.f32 %f3, %f2, %f1; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs3, %f3; -; CHECK-NOF16-NEXT: st.param.b16 [func_retval0+0], %rs3; +; CHECK-NOF16-NEXT: st.param.b16 
[func_retval0], %rs3; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minnum_half( @@ -460,7 +460,7 @@ define half @minnum_half(half %a, half %b) { ; CHECK-F16-NEXT: ld.param.b16 %rs1, [minnum_half_param_0]; ; CHECK-F16-NEXT: ld.param.b16 %rs2, [minnum_half_param_1]; ; CHECK-F16-NEXT: min.f16 %rs3, %rs1, %rs2; -; CHECK-F16-NEXT: st.param.b16 [func_retval0+0], %rs3; +; CHECK-F16-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-F16-NEXT: ret; ; ; CHECK-SM80-NOF16-LABEL: minnum_half( @@ -475,7 +475,7 @@ define half @minnum_half(half %a, half %b) { ; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %f2, %rs1; ; CHECK-SM80-NOF16-NEXT: min.f32 %f3, %f2, %f1; ; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs3, %f3; -; CHECK-SM80-NOF16-NEXT: st.param.b16 [func_retval0+0], %rs3; +; CHECK-SM80-NOF16-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-SM80-NOF16-NEXT: ret; %x = call half @llvm.minnum.f16(half %a, half %b) ret half %x @@ -490,7 +490,7 @@ define float @minnum_float(float %a, float %b) { ; CHECK-NEXT: ld.param.f32 %f1, [minnum_float_param_0]; ; CHECK-NEXT: ld.param.f32 %f2, [minnum_float_param_1]; ; CHECK-NEXT: min.f32 %f3, %f1, %f2; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-NEXT: ret; %x = call float @llvm.minnum.f32(float %a, float %b) ret float %x @@ -504,7 +504,7 @@ define float @minnum_imm1(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [minnum_imm1_param_0]; ; CHECK-NEXT: min.f32 %f2, %f1, 0f00000000; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %x = call float @llvm.minnum.f32(float %a, float 0.0) ret float %x @@ -518,7 +518,7 @@ define float @minnum_imm2(float %a) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f32 %f1, [minnum_imm2_param_0]; ; CHECK-NEXT: min.f32 %f2, %f1, 0f00000000; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-NEXT: ret; %x = call float @llvm.minnum.f32(float 0.0, float %a) ret float %x @@ -533,7 +533,7 @@ define float @minnum_float_ftz(float %a, float %b) #1 { ; CHECK-NEXT: ld.param.f32 %f1, [minnum_float_ftz_param_0]; ; CHECK-NEXT: ld.param.f32 %f2, [minnum_float_ftz_param_1]; ; CHECK-NEXT: min.ftz.f32 %f3, %f1, %f2; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-NEXT: ret; %x = call float @llvm.minnum.f32(float %a, float %b) ret float %x @@ -548,7 +548,7 @@ define double @minnum_double(double %a, double %b) { ; CHECK-NEXT: ld.param.f64 %fd1, [minnum_double_param_0]; ; CHECK-NEXT: ld.param.f64 %fd2, [minnum_double_param_1]; ; CHECK-NEXT: min.f64 %fd3, %fd1, %fd2; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd3; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd3; ; CHECK-NEXT: ret; %x = call double @llvm.minnum.f64(double %a, double %b) ret double %x @@ -575,7 +575,7 @@ define <2 x half> @minnum_v2half(<2 x half> %a, <2 x half> %b) { ; CHECK-NOF16-NEXT: min.f32 %f6, %f5, %f4; ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %f6; ; CHECK-NOF16-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; CHECK-NOF16-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-NOF16-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minnum_v2half( @@ -586,7 +586,7 @@ define <2 x half> @minnum_v2half(<2 x half> %a, <2 x half> %b) { ; CHECK-F16-NEXT: ld.param.b32 %r1, [minnum_v2half_param_1]; ; CHECK-F16-NEXT: ld.param.b32 %r2, [minnum_v2half_param_0]; ; CHECK-F16-NEXT: min.f16x2 %r3, %r2, %r1; -; CHECK-F16-NEXT: 
st.param.b32 [func_retval0+0], %r3; +; CHECK-F16-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-F16-NEXT: ret; ; ; CHECK-SM80-NOF16-LABEL: minnum_v2half( @@ -609,7 +609,7 @@ define <2 x half> @minnum_v2half(<2 x half> %a, <2 x half> %b) { ; CHECK-SM80-NOF16-NEXT: min.f32 %f6, %f5, %f4; ; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %f6; ; CHECK-SM80-NOF16-NEXT: mov.b32 %r3, {%rs6, %rs5}; -; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0+0], %r3; +; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0], %r3; ; CHECK-SM80-NOF16-NEXT: ret; %x = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b) ret <2 x half> %x @@ -640,7 +640,7 @@ define half @minimum_half(half %a, half %b) { ; CHECK-NOF16-NEXT: cvt.f32.f16 %f3, %rs5; ; CHECK-NOF16-NEXT: setp.eq.f32 %p5, %f3, 0f00000000; ; CHECK-NOF16-NEXT: selp.b16 %rs9, %rs8, %rs5, %p5; -; CHECK-NOF16-NEXT: st.param.b16 [func_retval0+0], %rs9; +; CHECK-NOF16-NEXT: st.param.b16 [func_retval0], %rs9; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minimum_half( @@ -651,7 +651,7 @@ define half @minimum_half(half %a, half %b) { ; CHECK-F16-NEXT: ld.param.b16 %rs1, [minimum_half_param_0]; ; CHECK-F16-NEXT: ld.param.b16 %rs2, [minimum_half_param_1]; ; CHECK-F16-NEXT: min.NaN.f16 %rs3, %rs1, %rs2; -; CHECK-F16-NEXT: st.param.b16 [func_retval0+0], %rs3; +; CHECK-F16-NEXT: st.param.b16 [func_retval0], %rs3; ; CHECK-F16-NEXT: ret; ; ; CHECK-SM80-NOF16-LABEL: minimum_half( @@ -676,7 +676,7 @@ define half @minimum_half(half %a, half %b) { ; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %f3, %rs5; ; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p5, %f3, 0f00000000; ; CHECK-SM80-NOF16-NEXT: selp.b16 %rs9, %rs8, %rs5, %p5; -; CHECK-SM80-NOF16-NEXT: st.param.b16 [func_retval0+0], %rs9; +; CHECK-SM80-NOF16-NEXT: st.param.b16 [func_retval0], %rs9; ; CHECK-SM80-NOF16-NEXT: ret; %x = call half @llvm.minimum.f16(half %a, half %b) ret half %x @@ -703,7 +703,7 @@ define float @minimum_float(float %a, float %b) { ; CHECK-NOF16-NEXT: selp.f32 %f6, %f2, %f5, %p3; ; CHECK-NOF16-NEXT: setp.eq.f32 %p4, %f4, 0f00000000; ; CHECK-NOF16-NEXT: selp.f32 %f7, %f6, %f4, %p4; -; CHECK-NOF16-NEXT: st.param.f32 [func_retval0+0], %f7; +; CHECK-NOF16-NEXT: st.param.f32 [func_retval0], %f7; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minimum_float( @@ -714,7 +714,7 @@ define float @minimum_float(float %a, float %b) { ; CHECK-F16-NEXT: ld.param.f32 %f1, [minimum_float_param_0]; ; CHECK-F16-NEXT: ld.param.f32 %f2, [minimum_float_param_1]; ; CHECK-F16-NEXT: min.NaN.f32 %f3, %f1, %f2; -; CHECK-F16-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-F16-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-F16-NEXT: ret; ; ; CHECK-SM80-NOF16-LABEL: minimum_float( @@ -725,7 +725,7 @@ define float @minimum_float(float %a, float %b) { ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [minimum_float_param_0]; ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f2, [minimum_float_param_1]; ; CHECK-SM80-NOF16-NEXT: min.NaN.f32 %f3, %f1, %f2; -; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-SM80-NOF16-NEXT: ret; %x = call float @llvm.minimum.f32(float %a, float %b) ret float %x @@ -748,7 +748,7 @@ define float @minimum_imm1(float %a) { ; CHECK-NOF16-NEXT: selp.f32 %f4, %f1, %f3, %p2; ; CHECK-NOF16-NEXT: setp.eq.f32 %p3, %f3, 0f00000000; ; CHECK-NOF16-NEXT: selp.f32 %f5, %f4, %f3, %p3; -; CHECK-NOF16-NEXT: st.param.f32 [func_retval0+0], %f5; +; CHECK-NOF16-NEXT: st.param.f32 [func_retval0], %f5; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minimum_imm1( @@ -758,7 
+758,7 @@ define float @minimum_imm1(float %a) { ; CHECK-F16-NEXT: // %bb.0: ; CHECK-F16-NEXT: ld.param.f32 %f1, [minimum_imm1_param_0]; ; CHECK-F16-NEXT: min.NaN.f32 %f2, %f1, 0f00000000; -; CHECK-F16-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-F16-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-F16-NEXT: ret; ; ; CHECK-SM80-NOF16-LABEL: minimum_imm1( @@ -768,7 +768,7 @@ define float @minimum_imm1(float %a) { ; CHECK-SM80-NOF16-NEXT: // %bb.0: ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [minimum_imm1_param_0]; ; CHECK-SM80-NOF16-NEXT: min.NaN.f32 %f2, %f1, 0f00000000; -; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-SM80-NOF16-NEXT: ret; %x = call float @llvm.minimum.f32(float %a, float 0.0) ret float %x @@ -791,7 +791,7 @@ define float @minimum_imm2(float %a) { ; CHECK-NOF16-NEXT: selp.f32 %f4, %f1, %f3, %p2; ; CHECK-NOF16-NEXT: setp.eq.f32 %p3, %f3, 0f00000000; ; CHECK-NOF16-NEXT: selp.f32 %f5, %f4, %f3, %p3; -; CHECK-NOF16-NEXT: st.param.f32 [func_retval0+0], %f5; +; CHECK-NOF16-NEXT: st.param.f32 [func_retval0], %f5; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minimum_imm2( @@ -801,7 +801,7 @@ define float @minimum_imm2(float %a) { ; CHECK-F16-NEXT: // %bb.0: ; CHECK-F16-NEXT: ld.param.f32 %f1, [minimum_imm2_param_0]; ; CHECK-F16-NEXT: min.NaN.f32 %f2, %f1, 0f00000000; -; CHECK-F16-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-F16-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-F16-NEXT: ret; ; ; CHECK-SM80-NOF16-LABEL: minimum_imm2( @@ -811,7 +811,7 @@ define float @minimum_imm2(float %a) { ; CHECK-SM80-NOF16-NEXT: // %bb.0: ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [minimum_imm2_param_0]; ; CHECK-SM80-NOF16-NEXT: min.NaN.f32 %f2, %f1, 0f00000000; -; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0+0], %f2; +; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0], %f2; ; CHECK-SM80-NOF16-NEXT: ret; %x = call float @llvm.minimum.f32(float 0.0, float %a) ret float %x @@ -838,7 +838,7 @@ define float @minimum_float_ftz(float %a, float %b) #1 { ; CHECK-NOF16-NEXT: selp.f32 %f6, %f2, %f5, %p3; ; CHECK-NOF16-NEXT: setp.eq.ftz.f32 %p4, %f4, 0f00000000; ; CHECK-NOF16-NEXT: selp.f32 %f7, %f6, %f4, %p4; -; CHECK-NOF16-NEXT: st.param.f32 [func_retval0+0], %f7; +; CHECK-NOF16-NEXT: st.param.f32 [func_retval0], %f7; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minimum_float_ftz( @@ -849,7 +849,7 @@ define float @minimum_float_ftz(float %a, float %b) #1 { ; CHECK-F16-NEXT: ld.param.f32 %f1, [minimum_float_ftz_param_0]; ; CHECK-F16-NEXT: ld.param.f32 %f2, [minimum_float_ftz_param_1]; ; CHECK-F16-NEXT: min.NaN.ftz.f32 %f3, %f1, %f2; -; CHECK-F16-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-F16-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-F16-NEXT: ret; ; ; CHECK-SM80-NOF16-LABEL: minimum_float_ftz( @@ -860,7 +860,7 @@ define float @minimum_float_ftz(float %a, float %b) #1 { ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [minimum_float_ftz_param_0]; ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f2, [minimum_float_ftz_param_1]; ; CHECK-SM80-NOF16-NEXT: min.NaN.ftz.f32 %f3, %f1, %f2; -; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-SM80-NOF16-NEXT: ret; %x = call float @llvm.minimum.f32(float %a, float %b) ret float %x @@ -887,7 +887,7 @@ define double @minimum_double(double %a, double %b) { ; CHECK-NEXT: selp.f64 %fd6, %fd2, %fd5, %p3; ; CHECK-NEXT: setp.eq.f64 %p4, %fd4, 0d0000000000000000; ; CHECK-NEXT: selp.f64 %fd7, %fd6, %fd4, 
%p4;
-; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd7;
+; CHECK-NEXT: st.param.f64 [func_retval0], %fd7;
 ; CHECK-NEXT: ret;
 %x = call double @llvm.minimum.f64(double %a, double %b)
 ret double %x
@@ -933,7 +933,7 @@ define <2 x half> @minimum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-NOF16-NEXT: setp.eq.f32 %p10, %f6, 0f00000000;
 ; CHECK-NOF16-NEXT: selp.b16 %rs18, %rs17, %rs13, %p10;
 ; CHECK-NOF16-NEXT: mov.b32 %r3, {%rs18, %rs11};
-; CHECK-NOF16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-NOF16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: minimum_v2half(
@@ -944,7 +944,7 @@ define <2 x half> @minimum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-F16-NEXT: ld.param.b32 %r1, [minimum_v2half_param_1];
 ; CHECK-F16-NEXT: ld.param.b32 %r2, [minimum_v2half_param_0];
 ; CHECK-F16-NEXT: min.NaN.f16x2 %r3, %r2, %r1;
-; CHECK-F16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-F16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: minimum_v2half(
@@ -986,7 +986,7 @@ define <2 x half> @minimum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p10, %f6, 0f00000000;
 ; CHECK-SM80-NOF16-NEXT: selp.b16 %rs18, %rs17, %rs13, %p10;
 ; CHECK-SM80-NOF16-NEXT: mov.b32 %r3, {%rs18, %rs11};
-; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b)
 ret <2 x half> %x
@@ -1007,7 +1007,7 @@ define half @maxnum_half(half %a, half %b) {
 ; CHECK-NOF16-NEXT: cvt.f32.f16 %f2, %rs1;
 ; CHECK-NOF16-NEXT: max.f32 %f3, %f2, %f1;
 ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs3, %f3;
-; CHECK-NOF16-NEXT: st.param.b16 [func_retval0+0], %rs3;
+; CHECK-NOF16-NEXT: st.param.b16 [func_retval0], %rs3;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: maxnum_half(
@@ -1018,7 +1018,7 @@ define half @maxnum_half(half %a, half %b) {
 ; CHECK-F16-NEXT: ld.param.b16 %rs1, [maxnum_half_param_0];
 ; CHECK-F16-NEXT: ld.param.b16 %rs2, [maxnum_half_param_1];
 ; CHECK-F16-NEXT: max.f16 %rs3, %rs1, %rs2;
-; CHECK-F16-NEXT: st.param.b16 [func_retval0+0], %rs3;
+; CHECK-F16-NEXT: st.param.b16 [func_retval0], %rs3;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: maxnum_half(
@@ -1033,7 +1033,7 @@ define half @maxnum_half(half %a, half %b) {
 ; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %f2, %rs1;
 ; CHECK-SM80-NOF16-NEXT: max.f32 %f3, %f2, %f1;
 ; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs3, %f3;
-; CHECK-SM80-NOF16-NEXT: st.param.b16 [func_retval0+0], %rs3;
+; CHECK-SM80-NOF16-NEXT: st.param.b16 [func_retval0], %rs3;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call half @llvm.maxnum.f16(half %a, half %b)
 ret half %x
@@ -1047,7 +1047,7 @@ define float @maxnum_imm1(float %a) {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT: ld.param.f32 %f1, [maxnum_imm1_param_0];
 ; CHECK-NEXT: max.f32 %f2, %f1, 0f00000000;
-; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f2;
 ; CHECK-NEXT: ret;
 %x = call float @llvm.maxnum.f32(float %a, float 0.0)
 ret float %x
@@ -1061,7 +1061,7 @@ define float @maxnum_imm2(float %a) {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT: ld.param.f32 %f1, [maxnum_imm2_param_0];
 ; CHECK-NEXT: max.f32 %f2, %f1, 0f00000000;
-; CHECK-NEXT: st.param.f32 [func_retval0+0], %f2;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f2;
 ; CHECK-NEXT: ret;
 %x = call float @llvm.maxnum.f32(float 0.0, float %a)
 ret float %x
@@ -1076,7 +1076,7 @@ define float @maxnum_float(float %a, float %b) {
 ; CHECK-NEXT: ld.param.f32 %f1, [maxnum_float_param_0];
 ; CHECK-NEXT: ld.param.f32 %f2, [maxnum_float_param_1];
 ; CHECK-NEXT: max.f32 %f3, %f1, %f2;
-; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f3;
 ; CHECK-NEXT: ret;
 %x = call float @llvm.maxnum.f32(float %a, float %b)
 ret float %x
@@ -1091,7 +1091,7 @@ define float @maxnum_float_ftz(float %a, float %b) #1 {
 ; CHECK-NEXT: ld.param.f32 %f1, [maxnum_float_ftz_param_0];
 ; CHECK-NEXT: ld.param.f32 %f2, [maxnum_float_ftz_param_1];
 ; CHECK-NEXT: max.ftz.f32 %f3, %f1, %f2;
-; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f3;
 ; CHECK-NEXT: ret;
 %x = call float @llvm.maxnum.f32(float %a, float %b)
 ret float %x
@@ -1106,7 +1106,7 @@ define double @maxnum_double(double %a, double %b) {
 ; CHECK-NEXT: ld.param.f64 %fd1, [maxnum_double_param_0];
 ; CHECK-NEXT: ld.param.f64 %fd2, [maxnum_double_param_1];
 ; CHECK-NEXT: max.f64 %fd3, %fd1, %fd2;
-; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd3;
+; CHECK-NEXT: st.param.f64 [func_retval0], %fd3;
 ; CHECK-NEXT: ret;
 %x = call double @llvm.maxnum.f64(double %a, double %b)
 ret double %x
@@ -1133,7 +1133,7 @@ define <2 x half> @maxnum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-NOF16-NEXT: max.f32 %f6, %f5, %f4;
 ; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %f6;
 ; CHECK-NOF16-NEXT: mov.b32 %r3, {%rs6, %rs5};
-; CHECK-NOF16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-NOF16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: maxnum_v2half(
@@ -1144,7 +1144,7 @@ define <2 x half> @maxnum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-F16-NEXT: ld.param.b32 %r1, [maxnum_v2half_param_1];
 ; CHECK-F16-NEXT: ld.param.b32 %r2, [maxnum_v2half_param_0];
 ; CHECK-F16-NEXT: max.f16x2 %r3, %r2, %r1;
-; CHECK-F16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-F16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: maxnum_v2half(
@@ -1167,7 +1167,7 @@ define <2 x half> @maxnum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-SM80-NOF16-NEXT: max.f32 %f6, %f5, %f4;
 ; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %f6;
 ; CHECK-SM80-NOF16-NEXT: mov.b32 %r3, {%rs6, %rs5};
-; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
 ret <2 x half> %x
@@ -1198,7 +1198,7 @@ define half @maximum_half(half %a, half %b) {
 ; CHECK-NOF16-NEXT: cvt.f32.f16 %f3, %rs5;
 ; CHECK-NOF16-NEXT: setp.eq.f32 %p5, %f3, 0f00000000;
 ; CHECK-NOF16-NEXT: selp.b16 %rs9, %rs8, %rs5, %p5;
-; CHECK-NOF16-NEXT: st.param.b16 [func_retval0+0], %rs9;
+; CHECK-NOF16-NEXT: st.param.b16 [func_retval0], %rs9;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: maximum_half(
@@ -1209,7 +1209,7 @@ define half @maximum_half(half %a, half %b) {
 ; CHECK-F16-NEXT: ld.param.b16 %rs1, [maximum_half_param_0];
 ; CHECK-F16-NEXT: ld.param.b16 %rs2, [maximum_half_param_1];
 ; CHECK-F16-NEXT: max.NaN.f16 %rs3, %rs1, %rs2;
-; CHECK-F16-NEXT: st.param.b16 [func_retval0+0], %rs3;
+; CHECK-F16-NEXT: st.param.b16 [func_retval0], %rs3;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: maximum_half(
@@ -1234,7 +1234,7 @@ define half @maximum_half(half %a, half %b) {
 ; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %f3, %rs5;
 ; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p5, %f3, 0f00000000;
 ; CHECK-SM80-NOF16-NEXT: selp.b16 %rs9, %rs8, %rs5, %p5;
-; CHECK-SM80-NOF16-NEXT: st.param.b16 [func_retval0+0], %rs9;
+; CHECK-SM80-NOF16-NEXT: st.param.b16 [func_retval0], %rs9;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call half @llvm.maximum.f16(half %a, half %b)
 ret half %x
@@ -1253,7 +1253,7 @@ define float @maximum_imm1(float %a) {
 ; CHECK-NOF16-NEXT: selp.f32 %f3, 0f7FC00000, %f2, %p1;
 ; CHECK-NOF16-NEXT: setp.eq.f32 %p2, %f3, 0f00000000;
 ; CHECK-NOF16-NEXT: selp.f32 %f4, 0f00000000, %f3, %p2;
-; CHECK-NOF16-NEXT: st.param.f32 [func_retval0+0], %f4;
+; CHECK-NOF16-NEXT: st.param.f32 [func_retval0], %f4;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: maximum_imm1(
@@ -1263,7 +1263,7 @@ define float @maximum_imm1(float %a) {
 ; CHECK-F16-NEXT: // %bb.0:
 ; CHECK-F16-NEXT: ld.param.f32 %f1, [maximum_imm1_param_0];
 ; CHECK-F16-NEXT: max.NaN.f32 %f2, %f1, 0f00000000;
-; CHECK-F16-NEXT: st.param.f32 [func_retval0+0], %f2;
+; CHECK-F16-NEXT: st.param.f32 [func_retval0], %f2;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: maximum_imm1(
@@ -1273,7 +1273,7 @@ define float @maximum_imm1(float %a) {
 ; CHECK-SM80-NOF16-NEXT: // %bb.0:
 ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [maximum_imm1_param_0];
 ; CHECK-SM80-NOF16-NEXT: max.NaN.f32 %f2, %f1, 0f00000000;
-; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0+0], %f2;
+; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0], %f2;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call float @llvm.maximum.f32(float %a, float 0.0)
 ret float %x
@@ -1292,7 +1292,7 @@ define float @maximum_imm2(float %a) {
 ; CHECK-NOF16-NEXT: selp.f32 %f3, 0f7FC00000, %f2, %p1;
 ; CHECK-NOF16-NEXT: setp.eq.f32 %p2, %f3, 0f00000000;
 ; CHECK-NOF16-NEXT: selp.f32 %f4, 0f00000000, %f3, %p2;
-; CHECK-NOF16-NEXT: st.param.f32 [func_retval0+0], %f4;
+; CHECK-NOF16-NEXT: st.param.f32 [func_retval0], %f4;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: maximum_imm2(
@@ -1302,7 +1302,7 @@ define float @maximum_imm2(float %a) {
 ; CHECK-F16-NEXT: // %bb.0:
 ; CHECK-F16-NEXT: ld.param.f32 %f1, [maximum_imm2_param_0];
 ; CHECK-F16-NEXT: max.NaN.f32 %f2, %f1, 0f00000000;
-; CHECK-F16-NEXT: st.param.f32 [func_retval0+0], %f2;
+; CHECK-F16-NEXT: st.param.f32 [func_retval0], %f2;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: maximum_imm2(
@@ -1312,7 +1312,7 @@ define float @maximum_imm2(float %a) {
 ; CHECK-SM80-NOF16-NEXT: // %bb.0:
 ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [maximum_imm2_param_0];
 ; CHECK-SM80-NOF16-NEXT: max.NaN.f32 %f2, %f1, 0f00000000;
-; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0+0], %f2;
+; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0], %f2;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call float @llvm.maximum.f32(float 0.0, float %a)
 ret float %x
@@ -1339,7 +1339,7 @@ define float @maximum_float(float %a, float %b) {
 ; CHECK-NOF16-NEXT: selp.f32 %f6, %f2, %f5, %p3;
 ; CHECK-NOF16-NEXT: setp.eq.f32 %p4, %f4, 0f00000000;
 ; CHECK-NOF16-NEXT: selp.f32 %f7, %f6, %f4, %p4;
-; CHECK-NOF16-NEXT: st.param.f32 [func_retval0+0], %f7;
+; CHECK-NOF16-NEXT: st.param.f32 [func_retval0], %f7;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: maximum_float(
@@ -1350,7 +1350,7 @@ define float @maximum_float(float %a, float %b) {
 ; CHECK-F16-NEXT: ld.param.f32 %f1, [maximum_float_param_0];
 ; CHECK-F16-NEXT: ld.param.f32 %f2, [maximum_float_param_1];
 ; CHECK-F16-NEXT: max.NaN.f32 %f3, %f1, %f2;
-; CHECK-F16-NEXT: st.param.f32 [func_retval0+0], %f3;
+; CHECK-F16-NEXT: st.param.f32 [func_retval0], %f3;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: maximum_float(
@@ -1361,7 +1361,7 @@ define float @maximum_float(float %a, float %b) {
 ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [maximum_float_param_0];
 ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f2, [maximum_float_param_1];
 ; CHECK-SM80-NOF16-NEXT: max.NaN.f32 %f3, %f1, %f2;
-; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0+0], %f3;
+; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0], %f3;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call float @llvm.maximum.f32(float %a, float %b)
 ret float %x
@@ -1388,7 +1388,7 @@ define float @maximum_float_ftz(float %a, float %b) #1 {
 ; CHECK-NOF16-NEXT: selp.f32 %f6, %f2, %f5, %p3;
 ; CHECK-NOF16-NEXT: setp.eq.ftz.f32 %p4, %f4, 0f00000000;
 ; CHECK-NOF16-NEXT: selp.f32 %f7, %f6, %f4, %p4;
-; CHECK-NOF16-NEXT: st.param.f32 [func_retval0+0], %f7;
+; CHECK-NOF16-NEXT: st.param.f32 [func_retval0], %f7;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: maximum_float_ftz(
@@ -1399,7 +1399,7 @@ define float @maximum_float_ftz(float %a, float %b) #1 {
 ; CHECK-F16-NEXT: ld.param.f32 %f1, [maximum_float_ftz_param_0];
 ; CHECK-F16-NEXT: ld.param.f32 %f2, [maximum_float_ftz_param_1];
 ; CHECK-F16-NEXT: max.NaN.ftz.f32 %f3, %f1, %f2;
-; CHECK-F16-NEXT: st.param.f32 [func_retval0+0], %f3;
+; CHECK-F16-NEXT: st.param.f32 [func_retval0], %f3;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: maximum_float_ftz(
@@ -1410,7 +1410,7 @@ define float @maximum_float_ftz(float %a, float %b) #1 {
 ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f1, [maximum_float_ftz_param_0];
 ; CHECK-SM80-NOF16-NEXT: ld.param.f32 %f2, [maximum_float_ftz_param_1];
 ; CHECK-SM80-NOF16-NEXT: max.NaN.ftz.f32 %f3, %f1, %f2;
-; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0+0], %f3;
+; CHECK-SM80-NOF16-NEXT: st.param.f32 [func_retval0], %f3;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call float @llvm.maximum.f32(float %a, float %b)
 ret float %x
@@ -1437,7 +1437,7 @@ define double @maximum_double(double %a, double %b) {
 ; CHECK-NEXT: selp.f64 %fd6, %fd2, %fd5, %p3;
 ; CHECK-NEXT: setp.eq.f64 %p4, %fd4, 0d0000000000000000;
 ; CHECK-NEXT: selp.f64 %fd7, %fd6, %fd4, %p4;
-; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd7;
+; CHECK-NEXT: st.param.f64 [func_retval0], %fd7;
 ; CHECK-NEXT: ret;
 %x = call double @llvm.maximum.f64(double %a, double %b)
 ret double %x
@@ -1483,7 +1483,7 @@ define <2 x half> @maximum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-NOF16-NEXT: setp.eq.f32 %p10, %f6, 0f00000000;
 ; CHECK-NOF16-NEXT: selp.b16 %rs18, %rs17, %rs13, %p10;
 ; CHECK-NOF16-NEXT: mov.b32 %r3, {%rs18, %rs11};
-; CHECK-NOF16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-NOF16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-NOF16-NEXT: ret;
 ;
 ; CHECK-F16-LABEL: maximum_v2half(
@@ -1494,7 +1494,7 @@ define <2 x half> @maximum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-F16-NEXT: ld.param.b32 %r1, [maximum_v2half_param_1];
 ; CHECK-F16-NEXT: ld.param.b32 %r2, [maximum_v2half_param_0];
 ; CHECK-F16-NEXT: max.NaN.f16x2 %r3, %r2, %r1;
-; CHECK-F16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-F16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-F16-NEXT: ret;
 ;
 ; CHECK-SM80-NOF16-LABEL: maximum_v2half(
@@ -1536,7 +1536,7 @@ define <2 x half> @maximum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p10, %f6, 0f00000000;
 ; CHECK-SM80-NOF16-NEXT: selp.b16 %rs18, %rs17, %rs13, %p10;
 ; CHECK-SM80-NOF16-NEXT: mov.b32 %r3, {%rs18, %rs11};
-; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-SM80-NOF16-NEXT: ret;
 %x = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b)
 ret <2 x half> %x
@@ -1554,7 +1554,7 @@ define float @fma_float(float %a, float %b, float %c) {
 ; CHECK-NEXT: ld.param.f32 %f2, [fma_float_param_1];
 ; CHECK-NEXT: ld.param.f32 %f3, [fma_float_param_2];
 ; CHECK-NEXT: fma.rn.f32 %f4, %f1, %f2, %f3;
-; CHECK-NEXT: st.param.f32 [func_retval0+0], %f4;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f4;
 ; CHECK-NEXT: ret;
 %x = call float @llvm.fma.f32(float %a, float %b, float %c)
 ret float %x
@@ -1570,7 +1570,7 @@ define float @fma_float_ftz(float %a, float %b, float %c) #1 {
 ; CHECK-NEXT: ld.param.f32 %f2, [fma_float_ftz_param_1];
 ; CHECK-NEXT: ld.param.f32 %f3, [fma_float_ftz_param_2];
 ; CHECK-NEXT: fma.rn.ftz.f32 %f4, %f1, %f2, %f3;
-; CHECK-NEXT: st.param.f32 [func_retval0+0], %f4;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f4;
 ; CHECK-NEXT: ret;
 %x = call float @llvm.fma.f32(float %a, float %b, float %c)
 ret float %x
@@ -1586,7 +1586,7 @@ define double @fma_double(double %a, double %b, double %c) {
 ; CHECK-NEXT: ld.param.f64 %fd2, [fma_double_param_1];
 ; CHECK-NEXT: ld.param.f64 %fd3, [fma_double_param_2];
 ; CHECK-NEXT: fma.rn.f64 %fd4, %fd1, %fd2, %fd3;
-; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd4;
+; CHECK-NEXT: st.param.f64 [func_retval0], %fd4;
 ; CHECK-NEXT: ret;
 %x = call double @llvm.fma.f64(double %a, double %b, double %c)
 ret double %x
diff --git a/llvm/test/CodeGen/NVPTX/mulhi-intrins.ll b/llvm/test/CodeGen/NVPTX/mulhi-intrins.ll
index efa99462b9b11c..21fce55fcbc242 100644
--- a/llvm/test/CodeGen/NVPTX/mulhi-intrins.ll
+++ b/llvm/test/CodeGen/NVPTX/mulhi-intrins.ll
@@ -13,7 +13,7 @@ define i16 @test_mulhi_i16(i16 %x, i16 %y) {
 ; CHECK-NEXT: ld.param.u16 %rs2, [test_mulhi_i16_param_1];
 ; CHECK-NEXT: mul.hi.s16 %rs3, %rs1, %rs2;
 ; CHECK-NEXT: cvt.u32.u16 %r1, %rs3;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
 ; CHECK-NEXT: ret;
 %1 = call i16 @llvm.nvvm.mulhi.s(i16 %x, i16 %y)
 ret i16 %1
@@ -30,7 +30,7 @@ define i16 @test_mulhi_u16(i16 %x, i16 %y) {
 ; CHECK-NEXT: ld.param.u16 %rs2, [test_mulhi_u16_param_1];
 ; CHECK-NEXT: mul.hi.u16 %rs3, %rs1, %rs2;
 ; CHECK-NEXT: cvt.u32.u16 %r1, %rs3;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
 ; CHECK-NEXT: ret;
 %1 = call i16 @llvm.nvvm.mulhi.us(i16 %x, i16 %y)
 ret i16 %1
@@ -45,7 +45,7 @@ define i32 @test_mulhi_i32(i32 %x, i32 %y) {
 ; CHECK-NEXT: ld.param.u32 %r1, [test_mulhi_i32_param_0];
 ; CHECK-NEXT: ld.param.u32 %r2, [test_mulhi_i32_param_1];
 ; CHECK-NEXT: mul.hi.s32 %r3, %r1, %r2;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-NEXT: ret;
 %1 = call i32 @llvm.nvvm.mulhi.i(i32 %x, i32 %y)
 ret i32 %1
@@ -60,7 +60,7 @@ define i32 @test_mulhi_u32(i32 %x, i32 %y) {
 ; CHECK-NEXT: ld.param.u32 %r1, [test_mulhi_u32_param_0];
 ; CHECK-NEXT: ld.param.u32 %r2, [test_mulhi_u32_param_1];
 ; CHECK-NEXT: mul.hi.u32 %r3, %r1, %r2;
-; CHECK-NEXT: st.param.b32 [func_retval0+0], %r3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r3;
 ; CHECK-NEXT: ret;
 %1 = call i32 @llvm.nvvm.mulhi.ui(i32 %x, i32 %y)
 ret i32 %1
@@ -75,7 +75,7 @@ define i64 @test_mulhi_i64(i64 %x, i64 %y) {
 ; CHECK-NEXT: ld.param.u64 %rd1, [test_mulhi_i64_param_0];
 ; CHECK-NEXT: ld.param.u64 %rd2, [test_mulhi_i64_param_1];
 ; CHECK-NEXT: mul.hi.s64 %rd3, %rd1, %rd2;
-; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd3;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
 ; CHECK-NEXT: ret;
 %1 = call i64 @llvm.nvvm.mulhi.ll(i64 %x, i64 %y)
 ret i64 %1
@@ -90,7 +90,7 @@ define i64 @test_mulhi_u64(i64 %x, i64 %y) {
 ; CHECK-NEXT: ld.param.u64 %rd1, [test_mulhi_u64_param_0];
 ; CHECK-NEXT: ld.param.u64 %rd2, [test_mulhi_u64_param_1];
 ; CHECK-NEXT: mul.hi.u64 %rd3, %rd1, %rd2;
-; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd3;
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd3;
 ; CHECK-NEXT: ret;
 %1 = call i64 @llvm.nvvm.mulhi.ull(i64 %x, i64 %y)
 ret i64 %1
diff --git a/llvm/test/CodeGen/NVPTX/nvvm-reflect-arch-O0.ll b/llvm/test/CodeGen/NVPTX/nvvm-reflect-arch-O0.ll
index 0088d6c64205d2..1e45df5efcf538 100644
--- a/llvm/test/CodeGen/NVPTX/nvvm-reflect-arch-O0.ll
+++ b/llvm/test/CodeGen/NVPTX/nvvm-reflect-arch-O0.ll
@@ -9,17 +9,17 @@ declare i32 @__nvvm_reflect(ptr)
 ; SM_52: .visible .func (.param .b32 func_retval0) foo()
 ; SM_52: mov.b32 %[[REG:.+]], 3;
-; SM_52-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_52-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_52-NEXT: ret;
 ;
 ; SM_70: .visible .func (.param .b32 func_retval0) foo()
 ; SM_70: mov.b32 %[[REG:.+]], 2;
-; SM_70-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_70-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_70-NEXT: ret;
 ;
 ; SM_90: .visible .func (.param .b32 func_retval0) foo()
 ; SM_90: mov.b32 %[[REG:.+]], 1;
-; SM_90-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_90-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_90-NEXT: ret;
 define i32 @foo() {
 entry:
@@ -56,17 +56,17 @@ return:
 ; SM_52: .visible .func (.param .b32 func_retval0) bar()
 ; SM_52: mov.b32 %[[REG:.+]], 2;
-; SM_52-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_52-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_52-NEXT: ret;
 ;
 ; SM_70: .visible .func (.param .b32 func_retval0) bar()
 ; SM_70: mov.b32 %[[REG:.+]], 1;
-; SM_70-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_70-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_70-NEXT: ret;
 ;
 ; SM_90: .visible .func (.param .b32 func_retval0) bar()
 ; SM_90: mov.b32 %[[REG:.+]], 1;
-; SM_90-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_90-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_90-NEXT: ret;
 define i32 @bar() {
 entry:
@@ -104,17 +104,17 @@ if.end:
 ; SM_52: .visible .func (.param .b32 func_retval0) qux()
 ; SM_52: mov.b32 %[[REG:.+]], 3;
-; SM_52-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_52-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_52-NEXT: ret;
 ;
 ; SM_70: .visible .func (.param .b32 func_retval0) qux()
 ; SM_70: mov.b32 %[[REG:.+]], 2;
-; SM_70-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_70-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_70-NEXT: ret;
 ;
 ; SM_90: .visible .func (.param .b32 func_retval0) qux()
 ; SM_90: mov.b32 %[[REG:.+]], 1;
-; SM_90-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_90-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_90-NEXT: ret;
 define i32 @qux() {
 entry:
@@ -144,15 +144,15 @@ return:
 ; SM_52: .visible .func (.param .b32 func_retval0) phi()
 ; SM_52: mov.f32 %[[REG:.+]], 0f00000000;
-; SM_52-NEXT: st.param.f32 [func_retval0+0], %[[REG]];
+; SM_52-NEXT: st.param.f32 [func_retval0], %[[REG]];
 ; SM_52-NEXT: ret;
 ; SM_70: .visible .func (.param .b32 func_retval0) phi()
 ; SM_70: mov.f32 %[[REG:.+]], 0f00000000;
-; SM_70-NEXT: st.param.f32 [func_retval0+0], %[[REG]];
+; SM_70-NEXT: st.param.f32 [func_retval0], %[[REG]];
 ; SM_70-NEXT: ret;
 ; SM_90: .visible .func (.param .b32 func_retval0) phi()
 ; SM_90: mov.f32 %[[REG:.+]], 0f00000000;
-; SM_90-NEXT: st.param.f32 [func_retval0+0], %[[REG]];
+; SM_90-NEXT: st.param.f32 [func_retval0], %[[REG]];
 ; SM_90-NEXT: ret;
 define float @phi() {
 entry:
@@ -177,17 +177,17 @@ exit:
 ; SM_52: .visible .func (.param .b32 func_retval0) prop()
 ; SM_52: mov.b32 %[[REG:.+]], 3;
-; SM_52-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_52-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_52-NEXT: ret;
 ;
 ; SM_70: .visible .func (.param .b32 func_retval0) prop()
 ; SM_70: mov.b32 %[[REG:.+]], 2;
-; SM_70-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_70-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_70-NEXT: ret;
 ;
 ; SM_90: .visible .func (.param .b32 func_retval0) prop()
 ; SM_90: mov.b32 %[[REG:.+]], 1;
-; SM_90-NEXT: st.param.b32 [func_retval0+0], %[[REG:.+]];
+; SM_90-NEXT: st.param.b32 [func_retval0], %[[REG:.+]];
 ; SM_90-NEXT: ret;
 define i32 @prop() {
 entry:
diff --git a/llvm/test/CodeGen/NVPTX/param-load-store.ll b/llvm/test/CodeGen/NVPTX/param-load-store.ll
index a29d4e1875cd7b..bb95f88e999d29 100644
--- a/llvm/test/CodeGen/NVPTX/param-load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/param-load-store.ll
@@ -30,13 +30,13 @@
 ; CHECK: cvt.u32.u16 [[B:%r[0-9]+]], [[A8]]
 ; CHECK: and.b32 [[C:%r[0-9]+]], [[B]], 1;
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[C]]
+; CHECK: st.param.b32 [param0], [[C]]
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni
 ; CHECK-NEXT: test_i1,
-; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0+0];
+; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0];
 ; CHECK: and.b32 [[R:%r[0-9]+]], [[R8]], 1;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
 ; CHECK: ret;
 define i1 @test_i1(i1 %a) {
 %r = tail call i1 @test_i1(i1 %a);
@@ -53,13 +53,13 @@ define i1 @test_i1(i1 %a) {
 ; CHECK: and.b32 [[A1:%r[0-9]+]], [[A32]], 1;
 ; CHECK: neg.s32 [[A:%r[0-9]+]], [[A1]];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: st.param.b32 [param0], [[A]];
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni
-; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0+0];
+; CHECK: ld.param.b32 [[R8:%r[0-9]+]], [retval0];
 ; CHECK: and.b32 [[R1:%r[0-9]+]], [[R8]], 1;
 ; CHECK: neg.s32 [[R:%r[0-9]+]], [[R1]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define signext i1 @test_i1s(i1 signext %a) {
 %r = tail call signext i1 @test_i1s(i1 signext %a);
@@ -73,14 +73,14 @@ define signext i1 @test_i1s(i1 signext %a) {
 ; CHECK-DAG: ld.param.u8 [[E2:%rs[0-9]+]], [test_v3i1_param_0+2];
 ; CHECK-DAG: ld.param.u8 [[E0:%rs[0-9]+]], [test_v3i1_param_0]
 ; CHECK: .param .align 1 .b8 param0[1];
-; CHECK-DAG: st.param.b8 [param0+0], [[E0]];
+; CHECK-DAG: st.param.b8 [param0], [[E0]];
 ; CHECK-DAG: st.param.b8 [param0+2], [[E2]];
 ; CHECK: .param .align 1 .b8 retval0[1];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v3i1,
-; CHECK-DAG: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0];
 ; CHECK-DAG: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
-; CHECK-DAG: st.param.b8 [func_retval0+0], [[RE0]]
+; CHECK-DAG: st.param.b8 [func_retval0], [[RE0]]
 ; CHECK-DAG: st.param.b8 [func_retval0+2], [[RE2]];
 ; CHECK-NEXT: ret;
 define <3 x i1> @test_v3i1(<3 x i1> %a) {
@@ -93,15 +93,15 @@ define <3 x i1> @test_v3i1(<3 x i1> %a) {
 ; CHECK-NEXT: .param .align 1 .b8 test_v4i1_param_0[1]
 ; CHECK: ld.param.u8 [[E0:%rs[0-9]+]], [test_v4i1_param_0]
 ; CHECK: .param .align 1 .b8 param0[1];
-; CHECK: st.param.b8 [param0+0], [[E0]];
+; CHECK: st.param.b8 [param0], [[E0]];
 ; CHECK: .param .align 1 .b8 retval0[1];
 ; CHECK: call.uni (retval0),
 ; CHECK: test_v4i1,
-; CHECK: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0+0];
+; CHECK: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0];
 ; CHECK: ld.param.b8 [[RE1:%rs[0-9]+]], [retval0+1];
 ; CHECK: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
 ; CHECK: ld.param.b8 [[RE3:%rs[0-9]+]], [retval0+3];
-; CHECK: st.param.b8 [func_retval0+0], [[RE0]];
+; CHECK: st.param.b8 [func_retval0], [[RE0]];
 ; CHECK: st.param.b8 [func_retval0+1], [[RE1]];
 ; CHECK: st.param.b8 [func_retval0+2], [[RE2]];
 ; CHECK: st.param.b8 [func_retval0+3], [[RE3]];
@@ -117,14 +117,14 @@ define <4 x i1> @test_v4i1(<4 x i1> %a) {
 ; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i1_param_0+4];
 ; CHECK-DAG: ld.param.u8 [[E0:%rs[0-9]+]], [test_v5i1_param_0]
 ; CHECK: .param .align 1 .b8 param0[1];
-; CHECK-DAG: st.param.b8 [param0+0], [[E0]];
+; CHECK-DAG: st.param.b8 [param0], [[E0]];
 ; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
 ; CHECK: .param .align 1 .b8 retval0[1];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v5i1,
-; CHECK-DAG: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0];
 ; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.b8 [func_retval0+0], [[RE0]]
+; CHECK-DAG: st.param.b8 [func_retval0], [[RE0]]
 ; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
 ; CHECK-NEXT: ret;
 define <5 x i1> @test_v5i1(<5 x i1> %a) {
@@ -137,12 +137,12 @@ define <5 x i1> @test_v5i1(<5 x i1> %a) {
 ; CHECK-NEXT: .param .b32 test_i2_param_0
 ; CHECK: ld.param.u8 {{%rs[0-9]+}}, [test_i2_param_0];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], {{%r[0-9]+}};
+; CHECK: st.param.b32 [param0], {{%r[0-9]+}};
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK: test_i2,
-; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], {{%r[0-9]+}};
+; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0];
+; CHECK: st.param.b32 [func_retval0], {{%r[0-9]+}};
 ; CHECK-NEXT: ret;
 define i2 @test_i2(i2 %a) {
 %r = tail call i2 @test_i2(i2 %a);
@@ -154,12 +154,12 @@ define i2 @test_i2(i2 %a) {
 ; CHECK-NEXT: .param .b32 test_i3_param_0
 ; CHECK: ld.param.u8 {{%rs[0-9]+}}, [test_i3_param_0];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], {{%r[0-9]+}};
+; CHECK: st.param.b32 [param0], {{%r[0-9]+}};
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK: test_i3,
-; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], {{%r[0-9]+}};
+; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0];
+; CHECK: st.param.b32 [func_retval0], {{%r[0-9]+}};
 ; CHECK-NEXT: ret;
 define i3 @test_i3(i3 %a) {
 %r = tail call i3 @test_i3(i3 %a);
@@ -174,13 +174,13 @@ define i3 @test_i3(i3 %a) {
 ; CHECK: cvt.u32.u16 [[A32:%r[0-9]+]], [[A8]];
 ; CHECK: and.b32 [[A:%r[0-9]+]], [[A32]], 255;
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: st.param.b32 [param0], [[A]];
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK: test_i8,
-; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0+0];
+; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0];
 ; CHECK: and.b32 [[R:%r[0-9]+]], [[R32]], 255;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define i8 @test_i8(i8 %a) {
 %r = tail call i8 @test_i8(i8 %a);
@@ -194,15 +194,15 @@ define i8 @test_i8(i8 %a) {
 ; CHECK: ld.param.s8 [[A8:%rs[0-9]+]], [test_i8s_param_0];
 ; CHECK: cvt.s32.s16 [[A:%r[0-9]+]], [[A8]];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[A]];
+; CHECK: st.param.b32 [param0], [[A]];
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK: test_i8s,
-; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0+0];
+; CHECK: ld.param.b32 [[R32:%r[0-9]+]], [retval0];
 ; -- This is suspicious (though correct) -- why not cvt.u8.u32, cvt.s8.s32 ?
 ; CHECK: cvt.u16.u32 [[R16:%rs[0-9]+]], [[R32]];
 ; CHECK: cvt.s32.s16 [[R:%r[0-9]+]], [[R16]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define signext i8 @test_i8s(i8 signext %a) {
 %r = tail call signext i8 @test_i8s(i8 signext %a);
@@ -214,14 +214,14 @@ define signext i8 @test_i8s(i8 signext %a) {
 ; CHECK-NEXT: .param .align 4 .b8 test_v3i8_param_0[4]
 ; CHECK: ld.param.u32 [[R:%r[0-9]+]], [test_v3i8_param_0];
 ; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.b32 [param0+0], [[R]]
+; CHECK: st.param.b32 [param0], [[R]]
 ; CHECK: .param .align 4 .b8 retval0[4];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v3i8,
-; CHECK: ld.param.b32 [[RE:%r[0-9]+]], [retval0+0];
+; CHECK: ld.param.b32 [[RE:%r[0-9]+]], [retval0];
 ; v4i8/i32->{v3i8 elements}->v4i8/i32 conversion is messy and not very
 ; interesting here, so it's skipped.
-; CHECK: st.param.b32 [func_retval0+0],
+; CHECK: st.param.b32 [func_retval0],
 ; CHECK-NEXT: ret;
 define <3 x i8> @test_v3i8(<3 x i8> %a) {
 %r = tail call <3 x i8> @test_v3i8(<3 x i8> %a);
@@ -233,12 +233,12 @@ define <3 x i8> @test_v3i8(<3 x i8> %a) {
 ; CHECK-NEXT: .param .align 4 .b8 test_v4i8_param_0[4]
 ; CHECK: ld.param.u32 [[R:%r[0-9]+]], [test_v4i8_param_0]
 ; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.b32 [param0+0], [[R]];
+; CHECK: st.param.b32 [param0], [[R]];
 ; CHECK: .param .align 4 .b8 retval0[4];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v4i8,
-; CHECK: ld.param.b32 [[RET:%r[0-9]+]], [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], [[RET]];
+; CHECK: ld.param.b32 [[RET:%r[0-9]+]], [retval0];
+; CHECK: st.param.b32 [func_retval0], [[RET]];
 ; CHECK-NEXT: ret;
 define <4 x i8> @test_v4i8(<4 x i8> %a) {
 %r = tail call <4 x i8> @test_v4i8(<4 x i8> %a);
@@ -251,14 +251,14 @@ define <4 x i8> @test_v4i8(<4 x i8> %a) {
 ; CHECK-DAG: ld.param.u32 [[E0:%r[0-9]+]], [test_v5i8_param_0]
 ; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i8_param_0+4];
 ; CHECK: .param .align 8 .b8 param0[8];
-; CHECK-DAG: st.param.v4.b8 [param0+0],
+; CHECK-DAG: st.param.v4.b8 [param0],
 ; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
 ; CHECK: .param .align 8 .b8 retval0[8];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v5i8,
-; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0];
 ; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.v4.b8 [func_retval0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
 ; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
 ; CHECK-NEXT: ret;
 define <5 x i8> @test_v5i8(<5 x i8> %a) {
@@ -270,12 +270,12 @@ define <5 x i8> @test_v5i8(<5 x i8> %a) {
 ; CHECK-LABEL: test_i11(
 ; CHECK-NEXT: .param .b32 test_i11_param_0
 ; CHECK: ld.param.u16 {{%rs[0-9]+}}, [test_i11_param_0];
-; CHECK: st.param.b32 [param0+0], {{%r[0-9]+}};
+; CHECK: st.param.b32 [param0], {{%r[0-9]+}};
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i11,
-; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], {{%r[0-9]+}};
+; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0];
+; CHECK: st.param.b32 [func_retval0], {{%r[0-9]+}};
 ; CHECK-NEXT: ret;
 define i11 @test_i11(i11 %a) {
 %r = tail call i11 @test_i11(i11 %a);
@@ -288,13 +288,13 @@ define i11 @test_i11(i11 %a) {
 ; CHECK: ld.param.u16 [[E16:%rs[0-9]+]], [test_i16_param_0];
 ; CHECK: cvt.u32.u16 [[E32:%r[0-9]+]], [[E16]];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[E32]];
+; CHECK: st.param.b32 [param0], [[E32]];
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i16,
-; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0+0];
+; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0];
 ; CHECK: and.b32 [[R:%r[0-9]+]], [[RE32]], 65535;
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define i16 @test_i16(i16 %a) {
 %r = tail call i16 @test_i16(i16 %a);
@@ -307,13 +307,13 @@ define i16 @test_i16(i16 %a) {
 ; CHECK: ld.param.u16 [[E16:%rs[0-9]+]], [test_i16s_param_0];
 ; CHECK: cvt.s32.s16 [[E32:%r[0-9]+]], [[E16]];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[E32]];
+; CHECK: st.param.b32 [param0], [[E32]];
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i16s,
-; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0+0];
+; CHECK: ld.param.b32 [[RE32:%r[0-9]+]], [retval0];
 ; CHECK: cvt.s32.s16 [[R:%r[0-9]+]], [[RE32]];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: st.param.b32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define signext i16 @test_i16s(i16 signext %a) {
 %r = tail call signext i16 @test_i16s(i16 signext %a);
@@ -327,14 +327,14 @@ define signext i16 @test_i16s(i16 signext %a) {
 ; CHECK-DAG: ld.param.u32 [[R:%r[0-9]+]], [test_v3i16_param_0];
 ; CHECK-DAG: mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[R]];
 ; CHECK: .param .align 8 .b8 param0[8];
-; CHECK: st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b16 [param0], {[[E0]], [[E1]]};
 ; CHECK: st.param.b16 [param0+4], [[E2]];
 ; CHECK: .param .align 8 .b8 retval0[8];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v3i16,
-; CHECK: ld.param.v2.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0];
 ; CHECK: ld.param.b16 [[RE2:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.v2.b16 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.v2.b16 [func_retval0], {[[RE0]], [[RE1]]};
 ; CHECK-DAG: st.param.b16 [func_retval0+4], [[RE2]];
 ; CHECK-NEXT: ret;
 define <3 x i16> @test_v3i16(<3 x i16> %a) {
@@ -347,12 +347,12 @@ define <3 x i16> @test_v3i16(<3 x i16> %a) {
 ; CHECK-NEXT: .param .align 8 .b8 test_v4i16_param_0[8]
 ; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_v4i16_param_0]
 ; CHECK: .param .align 8 .b8 param0[8];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b32 [param0], {[[E0]], [[E1]]};
 ; CHECK: .param .align 8 .b8 retval0[8];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v4i16,
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]}
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0];
+; CHECK: st.param.v2.b32 [func_retval0], {[[RE0]], [[RE1]]}
 ; CHECK-NEXT: ret;
 define <4 x i16> @test_v4i16(<4 x i16> %a) {
 %r = tail call <4 x i16> @test_v4i16(<4 x i16> %a);
@@ -365,14 +365,14 @@ define <4 x i16> @test_v4i16(<4 x i16> %a) {
 ; CHECK-DAG: ld.param.u16 [[E4:%rs[0-9]+]], [test_v5i16_param_0+8];
 ; CHECK-DAG: ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i16_param_0]
 ; CHECK: .param .align 16 .b8 param0[16];
-; CHECK-DAG: st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.v4.b16 [param0], {[[E0]], [[E1]], [[E2]], [[E3]]};
 ; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
 ; CHECK: .param .align 16 .b8 retval0[16];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v5i16,
-; CHECK-DAG: ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0];
 ; CHECK-DAG: ld.param.b16 [[RE4:%rs[0-9]+]], [retval0+8];
-; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.v4.b16 [func_retval0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
 ; CHECK-DAG: st.param.b16 [func_retval0+8], [[RE4]];
 ; CHECK-NEXT: ret;
 define <5 x i16> @test_v5i16(<5 x i16> %a) {
@@ -385,12 +385,12 @@ define <5 x i16> @test_v5i16(<5 x i16> %a) {
 ; CHECK-NEXT: .param .align 2 .b8 test_f16_param_0[2]
 ; CHECK: ld.param.b16 [[E:%rs[0-9]+]], [test_f16_param_0];
 ; CHECK: .param .align 2 .b8 param0[2];
-; CHECK: st.param.b16 [param0+0], [[E]];
+; CHECK: st.param.b16 [param0], [[E]];
 ; CHECK: .param .align 2 .b8 retval0[2];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_f16,
-; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b16 [func_retval0+0], [[R]]
+; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0];
+; CHECK: st.param.b16 [func_retval0], [[R]]
 ; CHECK-NEXT: ret;
 define half @test_f16(half %a) {
 %r = tail call half @test_f16(half %a);
@@ -402,12 +402,12 @@ define half @test_f16(half %a) {
 ; CHECK-NEXT: .param .align 4 .b8 test_v2f16_param_0[4]
 ; CHECK: ld.param.b32 [[E:%r[0-9]+]], [test_v2f16_param_0];
 ; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: st.param.b32 [param0], [[E]];
 ; CHECK: .param .align 4 .b8 retval0[4];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v2f16,
-; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], [[R]]
+; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0];
+; CHECK: st.param.b32 [func_retval0], [[R]]
 ; CHECK-NEXT: ret;
 define <2 x half> @test_v2f16(<2 x half> %a) {
 %r = tail call <2 x half> @test_v2f16(<2 x half> %a);
@@ -419,12 +419,12 @@ define <2 x half> @test_v2f16(<2 x half> %a) {
 ; CHECK-NEXT: .param .align 2 .b8 test_bf16_param_0[2]
 ; CHECK: ld.param.b16 [[E:%rs[0-9]+]], [test_bf16_param_0];
 ; CHECK: .param .align 2 .b8 param0[2];
-; CHECK: st.param.b16 [param0+0], [[E]];
+; CHECK: st.param.b16 [param0], [[E]];
 ; CHECK: .param .align 2 .b8 retval0[2];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_bf16,
-; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b16 [func_retval0+0], [[R]]
+; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0];
+; CHECK: st.param.b16 [func_retval0], [[R]]
 ; CHECK-NEXT: ret;
 define bfloat @test_bf16(bfloat %a) {
 %r = tail call bfloat @test_bf16(bfloat %a);
@@ -436,12 +436,12 @@ define bfloat @test_bf16(bfloat %a) {
 ; CHECK-NEXT: .param .align 4 .b8 test_v2bf16_param_0[4]
 ; CHECK: ld.param.b32 [[E:%r[0-9]+]], [test_v2bf16_param_0];
 ; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: st.param.b32 [param0], [[E]];
 ; CHECK: .param .align 4 .b8 retval0[4];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v2bf16,
-; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], [[R]]
+; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0];
+; CHECK: st.param.b32 [func_retval0], [[R]]
 ; CHECK-NEXT: ret;
 define <2 x bfloat> @test_v2bf16(<2 x bfloat> %a) {
 %r = tail call <2 x bfloat> @test_v2bf16(<2 x bfloat> %a);
@@ -456,14 +456,14 @@ define <2 x bfloat> @test_v2bf16(<2 x bfloat> %a) {
 ; CHECK-DAG: mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[HH01]];
 ; CHECK-DAG: ld.param.b16 [[E2:%rs[0-9]+]], [test_v3f16_param_0+4];
 ; CHECK: .param .align 8 .b8 param0[8];
-; CHECK-DAG: st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG: st.param.v2.b16 [param0], {[[E0]], [[E1]]};
 ; CHECK-DAG: st.param.b16 [param0+4], [[E2]];
 ; CHECK: .param .align 8 .b8 retval0[8];
 ; CHECK: call.uni (retval0),
 ; CHECK: test_v3f16,
-; CHECK-DAG: ld.param.v2.b16 {[[R0:%rs[0-9]+]], [[R1:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.v2.b16 {[[R0:%rs[0-9]+]], [[R1:%rs[0-9]+]]}, [retval0];
 ; CHECK-DAG: ld.param.b16 [[R2:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.v2.b16 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-DAG: st.param.v2.b16 [func_retval0], {[[R0]], [[R1]]};
 ; CHECK-DAG: st.param.b16 [func_retval0+4], [[R2]];
 ; CHECK: ret;
 define <3 x half> @test_v3f16(<3 x half> %a) {
@@ -476,12 +476,12 @@ define <3 x half> @test_v3f16(<3 x half> %a) {
 ; CHECK: .param .align 8 .b8 test_v4f16_param_0[8]
 ; CHECK: ld.param.v2.u32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]]}, [test_v4f16_param_0];
 ; CHECK: .param .align 8 .b8 param0[8];
-; CHECK: st.param.v2.b32 [param0+0], {[[R01]], [[R23]]};
+; CHECK: st.param.v2.b32 [param0], {[[R01]], [[R23]]};
 ; CHECK: .param .align 8 .b8 retval0[8];
 ; CHECK: call.uni (retval0),
 ; CHECK: test_v4f16,
-; CHECK: ld.param.v2.b32 {[[RH01:%r[0-9]+]], [[RH23:%r[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[RH01]], [[RH23]]};
+; CHECK: ld.param.v2.b32 {[[RH01:%r[0-9]+]], [[RH23:%r[0-9]+]]}, [retval0];
+; CHECK: st.param.v2.b32 [func_retval0], {[[RH01]], [[RH23]]};
 ; CHECK: ret;
 define <4 x half> @test_v4f16(<4 x half> %a) {
 %r = tail call <4 x half> @test_v4f16(<4 x half> %a);
@@ -494,14 +494,14 @@ define <4 x half> @test_v4f16(<4 x half> %a) {
 ; CHECK-DAG: ld.param.v4.b16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5f16_param_0];
 ; CHECK-DAG: ld.param.b16 [[E4:%rs[0-9]+]], [test_v5f16_param_0+8];
 ; CHECK: .param .align 16 .b8 param0[16];
-; CHECK-DAG: st.param.v4.b16 [param0+0],
+; CHECK-DAG: st.param.v4.b16 [param0],
 ; CHECK-DAG: st.param.b16 [param0+8], [[E4]];
 ; CHECK: .param .align 16 .b8 retval0[16];
 ; CHECK: call.uni (retval0),
 ; CHECK: test_v5f16,
-; CHECK-DAG: ld.param.v4.b16 {[[R0:%rs[0-9]+]], [[R1:%rs[0-9]+]], [[R2:%rs[0-9]+]], [[R3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.v4.b16 {[[R0:%rs[0-9]+]], [[R1:%rs[0-9]+]], [[R2:%rs[0-9]+]], [[R3:%rs[0-9]+]]}, [retval0];
 ; CHECK-DAG: ld.param.b16 [[R4:%rs[0-9]+]], [retval0+8];
-; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[R0]], [[R1]], [[R2]], [[R3]]};
+; CHECK-DAG: st.param.v4.b16 [func_retval0], {[[R0]], [[R1]], [[R2]], [[R3]]};
 ; CHECK-DAG: st.param.b16 [func_retval0+8], [[R4]];
 ; CHECK: ret;
 define <5 x half> @test_v5f16(<5 x half> %a) {
@@ -514,12 +514,12 @@ define <5 x half> @test_v5f16(<5 x half> %a) {
 ; CHECK: .param .align 16 .b8 test_v8f16_param_0[16]
 ; CHECK: ld.param.v4.u32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]], [[R45:%r[0-9]+]], [[R67:%r[0-9]+]]}, [test_v8f16_param_0];
 ; CHECK: .param .align 16 .b8 param0[16];
-; CHECK: st.param.v4.b32 [param0+0], {[[R01]], [[R23]], [[R45]], [[R67]]};
+; CHECK: st.param.v4.b32 [param0], {[[R01]], [[R23]], [[R45]], [[R67]]};
 ; CHECK: .param .align 16 .b8 retval0[16];
 ; CHECK: call.uni (retval0),
 ; CHECK: test_v8f16,
-; CHECK: ld.param.v4.b32 {[[RH01:%r[0-9]+]], [[RH23:%r[0-9]+]], [[RH45:%r[0-9]+]], [[RH67:%r[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v4.b32 [func_retval0+0], {[[RH01]], [[RH23]], [[RH45]], [[RH67]]};
+; CHECK: ld.param.v4.b32 {[[RH01:%r[0-9]+]], [[RH23:%r[0-9]+]], [[RH45:%r[0-9]+]], [[RH67:%r[0-9]+]]}, [retval0];
+; CHECK: st.param.v4.b32 [func_retval0], {[[RH01]], [[RH23]], [[RH45]], [[RH67]]};
 ; CHECK: ret;
 define <8 x half> @test_v8f16(<8 x half> %a) {
 %r = tail call <8 x half> @test_v8f16(<8 x half> %a);
@@ -533,16 +533,16 @@ define <8 x half> @test_v8f16(<8 x half> %a) {
 ; CHECK-DAG: ld.param.v4.b16 {[[E4:%rs[0-9]+]], [[E5:%rs[0-9]+]], [[E6:%rs[0-9]+]], [[E7:%rs[0-9]+]]}, [test_v9f16_param_0+8];
 ; CHECK-DAG: ld.param.b16 [[E8:%rs[0-9]+]], [test_v9f16_param_0+16];
 ; CHECK: .param .align 32 .b8 param0[32];
-; CHECK-DAG: st.param.v4.b16 [param0+0],
+; CHECK-DAG: st.param.v4.b16 [param0],
 ; CHECK-DAG: st.param.v4.b16 [param0+8],
 ; CHECK-DAG: st.param.b16 [param0+16], [[E8]];
 ; CHECK: .param .align 32 .b8 retval0[32];
 ; CHECK: call.uni (retval0),
 ; CHECK: test_v9f16,
-; CHECK-DAG: ld.param.v4.b16 {[[R0:%rs[0-9]+]], [[R1:%rs[0-9]+]], [[R2:%rs[0-9]+]], [[R3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.v4.b16 {[[R0:%rs[0-9]+]], [[R1:%rs[0-9]+]], [[R2:%rs[0-9]+]], [[R3:%rs[0-9]+]]}, [retval0];
 ; CHECK-DAG: ld.param.v4.b16 {[[R4:%rs[0-9]+]], [[R5:%rs[0-9]+]], [[R6:%rs[0-9]+]], [[R7:%rs[0-9]+]]}, [retval0+8];
 ; CHECK-DAG: ld.param.b16 [[R8:%rs[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.v4.b16 [func_retval0+0], {[[R0]], [[R1]], [[R2]], [[R3]]};
+; CHECK-DAG: st.param.v4.b16 [func_retval0], {[[R0]], [[R1]], [[R2]], [[R3]]};
 ; CHECK-DAG: st.param.v4.b16 [func_retval0+8], {[[R4]], [[R5]], [[R6]], [[R7]]};
 ; CHECK-DAG: st.param.b16 [func_retval0+16], [[R8]];
 ; CHECK: ret;
@@ -557,12 +557,12 @@ define <9 x half> @test_v9f16(<9 x half> %a) {
 ; CHECK-DAG: ld.param.u16 {{%r[0-9]+}}, [test_i19_param_0];
 ; CHECK-DAG: ld.param.u8 {{%r[0-9]+}}, [test_i19_param_0+2];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], {{%r[0-9]+}};
+; CHECK: st.param.b32 [param0], {{%r[0-9]+}};
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i19,
-; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], {{%r[0-9]+}};
+; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0];
+; CHECK: st.param.b32 [func_retval0], {{%r[0-9]+}};
 ; CHECK-NEXT: ret;
 define i19 @test_i19(i19 %a) {
 %r = tail call i19 @test_i19(i19 %a);
@@ -575,12 +575,12 @@ define i19 @test_i19(i19 %a) {
 ; CHECK-DAG: ld.param.u16 {{%r[0-9]+}}, [test_i23_param_0];
 ; CHECK-DAG: ld.param.u8 {{%r[0-9]+}}, [test_i23_param_0+2];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], {{%r[0-9]+}};
+; CHECK: st.param.b32 [param0], {{%r[0-9]+}};
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i23,
-; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], {{%r[0-9]+}};
+; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0];
+; CHECK: st.param.b32 [func_retval0], {{%r[0-9]+}};
 ; CHECK-NEXT: ret;
 define i23 @test_i23(i23 %a) {
 %r = tail call i23 @test_i23(i23 %a);
@@ -593,12 +593,12 @@ define i23 @test_i23(i23 %a) {
 ; CHECK-DAG: ld.param.u8 {{%r[0-9]+}}, [test_i24_param_0+2];
 ; CHECK-DAG: ld.param.u16 {{%r[0-9]+}}, [test_i24_param_0];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], {{%r[0-9]+}};
+; CHECK: st.param.b32 [param0], {{%r[0-9]+}};
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i24,
-; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], {{%r[0-9]+}};
+; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0];
+; CHECK: st.param.b32 [func_retval0], {{%r[0-9]+}};
 ; CHECK-NEXT: ret;
 define i24 @test_i24(i24 %a) {
 %r = tail call i24 @test_i24(i24 %a);
@@ -610,12 +610,12 @@ define i24 @test_i24(i24 %a) {
 ; CHECK-NEXT: .param .b32 test_i29_param_0
 ; CHECK: ld.param.u32 {{%r[0-9]+}}, [test_i29_param_0];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], {{%r[0-9]+}};
+; CHECK: st.param.b32 [param0], {{%r[0-9]+}};
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i29,
-; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], {{%r[0-9]+}};
+; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0];
+; CHECK: st.param.b32 [func_retval0], {{%r[0-9]+}};
 ; CHECK-NEXT: ret;
 define i29 @test_i29(i29 %a) {
 %r = tail call i29 @test_i29(i29 %a);
@@ -627,12 +627,12 @@ define i29 @test_i29(i29 %a) {
 ; CHECK-NEXT: .param .b32 test_i32_param_0
 ; CHECK: ld.param.u32 [[E:%r[0-9]+]], [test_i32_param_0];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: st.param.b32 [param0], [[E]];
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i32,
-; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0];
+; CHECK: st.param.b32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define i32 @test_i32(i32 %a) {
 %r = tail call i32 @test_i32(i32 %a);
@@ -645,14 +645,14 @@ define i32 @test_i32(i32 %a) {
 ; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [test_v3i32_param_0+8];
 ; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_v3i32_param_0];
 ; CHECK: .param .align 16 .b8 param0[16];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b32 [param0], {[[E0]], [[E1]]};
 ; CHECK: st.param.b32 [param0+8], [[E2]];
 ; CHECK: .param .align 16 .b8 retval0[16];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v3i32,
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0];
 ; CHECK: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
-; CHECK-DAG: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.v2.b32 [func_retval0], {[[RE0]], [[RE1]]};
 ; CHECK-DAG: st.param.b32 [func_retval0+8], [[RE2]];
 ; CHECK-NEXT: ret;
 define <3 x i32> @test_v3i32(<3 x i32> %a) {
@@ -665,12 +665,12 @@ define <3 x i32> @test_v3i32(<3 x i32> %a) {
 ; CHECK-NEXT: .param .align 16 .b8 test_v4i32_param_0[16]
 ; CHECK: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_v4i32_param_0]
 ; CHECK: .param .align 16 .b8 param0[16];
-; CHECK: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK: st.param.v4.b32 [param0], {[[E0]], [[E1]], [[E2]], [[E3]]};
 ; CHECK: .param .align 16 .b8 retval0[16];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v4i32,
-; CHECK: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v4.b32 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0];
+; CHECK: st.param.v4.b32 [func_retval0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
 ; CHECK-NEXT: ret;
 define <4 x i32> @test_v4i32(<4 x i32> %a) {
 %r = tail call <4 x i32> @test_v4i32(<4 x i32> %a);
@@ -683,14 +683,14 @@ define <4 x i32> @test_v4i32(<4 x i32> %a) {
 ; CHECK-DAG: ld.param.u32 [[E4:%r[0-9]+]], [test_v5i32_param_0+16];
 ; CHECK-DAG: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_v5i32_param_0]
 ; CHECK: .param .align 32 .b8 param0[32];
-; CHECK-DAG: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: st.param.v4.b32 [param0], {[[E0]], [[E1]], [[E2]], [[E3]]};
 ; CHECK-DAG: st.param.b32 [param0+16], [[E4]];
 ; CHECK: .param .align 32 .b8 retval0[32];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v5i32,
-; CHECK-DAG: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.v4.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]], [[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0];
 ; CHECK-DAG: ld.param.b32 [[RE4:%r[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.v4.b32 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.v4.b32 [func_retval0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
 ; CHECK-DAG: st.param.b32 [func_retval0+16], [[RE4]];
 ; CHECK-NEXT: ret;
 define <5 x i32> @test_v5i32(<5 x i32> %a) {
@@ -703,12 +703,12 @@ define <5 x i32> @test_v5i32(<5 x i32> %a) {
 ; CHECK-NEXT: .param .b32 test_f32_param_0
 ; CHECK: ld.param.f32 [[E:%f[0-9]+]], [test_f32_param_0];
 ; CHECK: .param .b32 param0;
-; CHECK: st.param.f32 [param0+0], [[E]];
+; CHECK: st.param.f32 [param0], [[E]];
 ; CHECK: .param .b32 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_f32,
-; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0+0];
-; CHECK: st.param.f32 [func_retval0+0], [[R]];
+; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0];
+; CHECK: st.param.f32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define float @test_f32(float %a) {
 %r = tail call float @test_f32(float %a);
@@ -721,12 +721,12 @@ define float @test_f32(float %a) {
 ; CHECK-DAG: ld.param.u8 {{%rd[0-9]+}}, [test_i40_param_0+4];
 ; CHECK-DAG: ld.param.u32 {{%rd[0-9]+}}, [test_i40_param_0];
 ; CHECK: .param .b64 param0;
-; CHECK: st.param.b64 [param0+0], {{%rd[0-9]+}};
+; CHECK: st.param.b64 [param0], {{%rd[0-9]+}};
 ; CHECK: .param .b64 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i40,
-; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], {{%rd[0-9]+}};
+; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0];
+; CHECK: st.param.b64 [func_retval0], {{%rd[0-9]+}};
 ; CHECK-NEXT: ret;
 define i40 @test_i40(i40 %a) {
 %r = tail call i40 @test_i40(i40 %a);
@@ -739,12 +739,12 @@ define i40 @test_i40(i40 %a) {
 ; CHECK-DAG: ld.param.u16 {{%rd[0-9]+}}, [test_i47_param_0+4];
 ; CHECK-DAG: ld.param.u32 {{%rd[0-9]+}}, [test_i47_param_0];
 ; CHECK: .param .b64 param0;
-; CHECK: st.param.b64 [param0+0], {{%rd[0-9]+}};
+; CHECK: st.param.b64 [param0], {{%rd[0-9]+}};
 ; CHECK: .param .b64 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i47,
-; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], {{%rd[0-9]+}};
+; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0];
+; CHECK: st.param.b64 [func_retval0], {{%rd[0-9]+}};
 ; CHECK-NEXT: ret;
 define i47 @test_i47(i47 %a) {
 %r = tail call i47 @test_i47(i47 %a);
@@ -757,12 +757,12 @@ define i47 @test_i47(i47 %a) {
 ; CHECK-DAG: ld.param.u16 {{%rd[0-9]+}}, [test_i48_param_0+4];
 ; CHECK-DAG: ld.param.u32 {{%rd[0-9]+}}, [test_i48_param_0];
 ; CHECK: .param .b64 param0;
-; CHECK: st.param.b64 [param0+0], {{%rd[0-9]+}};
+; CHECK: st.param.b64 [param0], {{%rd[0-9]+}};
 ; CHECK: .param .b64 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i48,
-; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], {{%rd[0-9]+}};
+; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0];
+; CHECK: st.param.b64 [func_retval0], {{%rd[0-9]+}};
 ; CHECK-NEXT: ret;
 define i48 @test_i48(i48 %a) {
 %r = tail call i48 @test_i48(i48 %a);
@@ -776,12 +776,12 @@ define i48 @test_i48(i48 %a) {
 ; CHECK-DAG: ld.param.u16 {{%rd[0-9]+}}, [test_i51_param_0+4];
 ; CHECK-DAG: ld.param.u32 {{%rd[0-9]+}}, [test_i51_param_0];
 ; CHECK: .param .b64 param0;
-; CHECK: st.param.b64 [param0+0], {{%rd[0-9]+}};
+; CHECK: st.param.b64 [param0], {{%rd[0-9]+}};
 ; CHECK: .param .b64 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i51,
-; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], {{%rd[0-9]+}};
+; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0];
+; CHECK: st.param.b64 [func_retval0], {{%rd[0-9]+}};
 ; CHECK-NEXT: ret;
 define i51 @test_i51(i51 %a) {
 %r = tail call i51 @test_i51(i51 %a);
@@ -795,12 +795,12 @@ define i51 @test_i51(i51 %a) {
 ; CHECK-DAG: ld.param.u16 {{%rd[0-9]+}}, [test_i56_param_0+4];
 ; CHECK-DAG: ld.param.u32 {{%rd[0-9]+}}, [test_i56_param_0];
 ; CHECK: .param .b64 param0;
-; CHECK: st.param.b64 [param0+0], {{%rd[0-9]+}};
+; CHECK: st.param.b64 [param0], {{%rd[0-9]+}};
 ; CHECK: .param .b64 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i56,
-; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], {{%rd[0-9]+}};
+; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0];
+; CHECK: st.param.b64 [func_retval0], {{%rd[0-9]+}};
 ; CHECK-NEXT: ret;
 define i56 @test_i56(i56 %a) {
 %r = tail call i56 @test_i56(i56 %a);
@@ -812,12 +812,12 @@ define i56 @test_i56(i56 %a) {
 ; CHECK-NEXT: .param .b64 test_i57_param_0
 ; CHECK: ld.param.u64 {{%rd[0-9]+}}, [test_i57_param_0];
 ; CHECK: .param .b64 param0;
-; CHECK: st.param.b64 [param0+0], {{%rd[0-9]+}};
+; CHECK: st.param.b64 [param0], {{%rd[0-9]+}};
 ; CHECK: .param .b64 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i57,
-; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], {{%rd[0-9]+}};
+; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0];
+; CHECK: st.param.b64 [func_retval0], {{%rd[0-9]+}};
 ; CHECK-NEXT: ret;
 define i57 @test_i57(i57 %a) {
 %r = tail call i57 @test_i57(i57 %a);
@@ -829,12 +829,12 @@ define i57 @test_i57(i57 %a) {
 ; CHECK-NEXT: .param .b64 test_i64_param_0
 ; CHECK: ld.param.u64 [[E:%rd[0-9]+]], [test_i64_param_0];
 ; CHECK: .param .b64 param0;
-; CHECK: st.param.b64 [param0+0], [[E]];
+; CHECK: st.param.b64 [param0], [[E]];
 ; CHECK: .param .b64 retval0;
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_i64,
-; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0];
+; CHECK: st.param.b64 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define i64 @test_i64(i64 %a) {
 %r = tail call i64 @test_i64(i64 %a);
@@ -847,16 +847,16 @@ define i64 @test_i64(i64 %a) {
 ; CHECK-DAG: ld.param.u64 [[E2:%rd[0-9]+]], [test_v3i64_param_0+16];
 ; CHECK-DAG: ld.param.v2.u64 {[[E0:%rd[0-9]+]], [[E1:%rd[0-9]+]]}, [test_v3i64_param_0];
 ; CHECK: .param .align 32 .b8 param0[32];
-; CHECK: st.param.v2.b64 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b64 [param0], {[[E0]], [[E1]]};
 ; CHECK: st.param.b64 [param0+16], [[E2]];
 ; CHECK: .param .align 32 .b8 retval0[32];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v3i64,
-; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0];
 ; CHECK: ld.param.b64 [[RE2:%rd[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[RE0]], [[RE1]]};
 ; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE2]];
-; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[RE0]], [[RE1]]};
 ; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE2]];
 ; CHECK-NEXT: ret;
 define <3 x i64> @test_v3i64(<3 x i64> %a) {
@@ -871,15 +871,15 @@ define <3 x i64> @test_v3i64(<3 x i64> %a) {
 ; CHECK-DAG: ld.param.v2.u64 {[[E2:%rd[0-9]+]], [[E3:%rd[0-9]+]]}, [test_v4i64_param_0+16];
 ; CHECK-DAG: ld.param.v2.u64 {[[E0:%rd[0-9]+]], [[E1:%rd[0-9]+]]}, [test_v4i64_param_0];
 ; CHECK: .param .align 32 .b8 param0[32];
-; CHECK: st.param.v2.b64 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b64 [param0], {[[E0]], [[E1]]};
 ; CHECK: st.param.v2.b64 [param0+16], {[[E2]], [[E3]]};
 ; CHECK: .param .align 32 .b8 retval0[32];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v4i64,
-; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b64 {[[RE0:%rd[0-9]+]], [[RE1:%rd[0-9]+]]}, [retval0];
 ; CHECK: ld.param.v2.b64 {[[RE2:%rd[0-9]+]], [[RE3:%rd[0-9]+]]}, [retval0+16];
 ; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[RE2]], [[RE3]]};
-; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[RE0]], [[RE1]]};
 ; CHECK-NEXT: ret;
 define <4 x i64> @test_v4i64(<4 x i64> %a) {
 %r = tail call <4 x i64> @test_v4i64(<4 x i64> %a);
@@ -893,12 +893,12 @@ define <4 x i64> @test_v4i64(<4 x i64> %a) {
 ; CHECK-NEXT: .align 1 .b8 test_s_i1_param_0[1]
 ; CHECK: ld.param.u8 [[A:%rs[0-9]+]], [test_s_i1_param_0];
 ; CHECK: .param .align 1 .b8 param0[1];
-; CHECK: st.param.b8 [param0+0], [[A]]
+; CHECK: st.param.b8 [param0], [[A]]
 ; CHECK: .param .align 1 .b8 retval0[1];
 ; CHECK: call.uni
 ; CHECK-NEXT: test_s_i1,
-; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b8 [func_retval0+0], [[R]];
+; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0];
+; CHECK: st.param.b8 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define %s_i1 @test_s_i1(%s_i1 %a) {
 %r = tail call %s_i1 @test_s_i1(%s_i1 %a);
@@ -910,12 +910,12 @@ define %s_i1 @test_s_i1(%s_i1 %a) {
 ; CHECK-NEXT: .param .align 1 .b8 test_s_i8_param_0[1]
 ; CHECK: ld.param.u8 [[A:%rs[0-9]+]], [test_s_i8_param_0];
 ; CHECK: .param .align 1 .b8 param0[1];
-; CHECK: st.param.b8 [param0+0], [[A]]
+; CHECK: st.param.b8 [param0], [[A]]
 ; CHECK: .param .align 1 .b8 retval0[1];
 ; CHECK: call.uni
 ; CHECK-NEXT: test_s_i8,
-; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b8 [func_retval0+0], [[R]];
+; CHECK: ld.param.b8 [[R:%rs[0-9]+]], [retval0];
+; CHECK: st.param.b8 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define %s_i8 @test_s_i8(%s_i8 %a) {
 %r = tail call %s_i8 @test_s_i8(%s_i8 %a);
@@ -927,12 +927,12 @@ define %s_i8 @test_s_i8(%s_i8 %a) {
 ; CHECK-NEXT: .param .align 2 .b8 test_s_i16_param_0[2]
 ; CHECK: ld.param.u16 [[A:%rs[0-9]+]], [test_s_i16_param_0];
 ; CHECK: .param .align 2 .b8 param0[2];
-; CHECK: st.param.b16 [param0+0], [[A]]
+; CHECK: st.param.b16 [param0], [[A]]
 ; CHECK: .param .align 2 .b8 retval0[2];
 ; CHECK: call.uni
 ; CHECK-NEXT: test_s_i16,
-; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0];
+; CHECK: st.param.b16 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define %s_i16 @test_s_i16(%s_i16 %a) {
 %r = tail call %s_i16 @test_s_i16(%s_i16 %a);
@@ -944,12 +944,12 @@ define %s_i16 @test_s_i16(%s_i16 %a) {
 ; CHECK-NEXT: .param .align 2 .b8 test_s_f16_param_0[2]
 ; CHECK: ld.param.b16 [[A:%rs[0-9]+]], [test_s_f16_param_0];
 ; CHECK: .param .align 2 .b8 param0[2];
-; CHECK: st.param.b16 [param0+0], [[A]]
+; CHECK: st.param.b16 [param0], [[A]]
 ; CHECK: .param .align 2 .b8 retval0[2];
 ; CHECK: call.uni
 ; CHECK-NEXT: test_s_f16,
-; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0+0];
-; CHECK: st.param.b16 [func_retval0+0], [[R]];
+; CHECK: ld.param.b16 [[R:%rs[0-9]+]], [retval0];
+; CHECK: st.param.b16 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define %s_f16 @test_s_f16(%s_f16 %a) {
 %r = tail call %s_f16 @test_s_f16(%s_f16 %a);
@@ -961,12 +961,12 @@ define %s_f16 @test_s_f16(%s_f16 %a) {
 ; CHECK-NEXT: .param .align 4 .b8 test_s_i32_param_0[4]
 ; CHECK: ld.param.u32 [[E:%r[0-9]+]], [test_s_i32_param_0];
 ; CHECK: .param .align 4 .b8 param0[4]
-; CHECK: st.param.b32 [param0+0], [[E]];
+; CHECK: st.param.b32 [param0], [[E]];
 ; CHECK: .param .align 4 .b8 retval0[4];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_s_i32,
-; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0+0];
-; CHECK: st.param.b32 [func_retval0+0], [[R]];
+; CHECK: ld.param.b32 [[R:%r[0-9]+]], [retval0];
+; CHECK: st.param.b32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define %s_i32 @test_s_i32(%s_i32 %a) {
 %r = tail call %s_i32 @test_s_i32(%s_i32 %a);
@@ -978,12 +978,12 @@ define %s_i32 @test_s_i32(%s_i32 %a) {
 ; CHECK-NEXT: .param .align 4 .b8 test_s_f32_param_0[4]
 ; CHECK: ld.param.f32 [[E:%f[0-9]+]], [test_s_f32_param_0];
 ; CHECK: .param .align 4 .b8 param0[4]
-; CHECK: st.param.f32 [param0+0], [[E]];
+; CHECK: st.param.f32 [param0], [[E]];
 ; CHECK: .param .align 4 .b8 retval0[4];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_s_f32,
-; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0+0];
-; CHECK: st.param.f32 [func_retval0+0], [[R]];
+; CHECK: ld.param.f32 [[R:%f[0-9]+]], [retval0];
+; CHECK: st.param.f32 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define %s_f32 @test_s_f32(%s_f32 %a) {
 %r = tail call %s_f32 @test_s_f32(%s_f32 %a);
@@ -995,12 +995,12 @@ define %s_f32 @test_s_f32(%s_f32 %a) {
 ; CHECK-NEXT: .param .align 8 .b8 test_s_i64_param_0[8]
 ; CHECK: ld.param.u64 [[E:%rd[0-9]+]], [test_s_i64_param_0];
 ; CHECK: .param .align 8 .b8 param0[8];
-; CHECK: st.param.b64 [param0+0], [[E]];
+; CHECK: st.param.b64 [param0], [[E]];
 ; CHECK: .param .align 8 .b8 retval0[8];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_s_i64,
-; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0+0];
-; CHECK: st.param.b64 [func_retval0+0], [[R]];
+; CHECK: ld.param.b64 [[R:%rd[0-9]+]], [retval0];
+; CHECK: st.param.b64 [func_retval0], [[R]];
 ; CHECK-NEXT: ret;
 define %s_i64 @test_s_i64(%s_i64 %a) {
 %r = tail call %s_i64 @test_s_i64(%s_i64 %a);
@@ -1017,7 +1017,7 @@ define %s_i64 @test_s_i64(%s_i64 %a) {
 ; CHECK-DAG: ld.param.f32 [[E1:%f[0-9]+]], [test_s_i32f32_param_0+4];
 ; CHECK-DAG: ld.param.u32 [[E0:%r[0-9]+]], [test_s_i32f32_param_0];
 ; CHECK: .param .align 8 .b8 param0[24];
-; CHECK-DAG: st.param.b32 [param0+0], [[E0]];
+; CHECK-DAG: st.param.b32 [param0], [[E0]];
 ; CHECK-DAG: st.param.f32 [param0+4], [[E1]];
 ; CHECK-DAG: st.param.b32 [param0+8], [[E2]];
 ; CHECK-DAG: st.param.f32 [param0+12], [[E3]];
@@ -1025,12 +1025,12 @@ define %s_i64 @test_s_i64(%s_i64 %a) {
 ; CHECK: .param .align 8 .b8 retval0[24];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_s_i32f32,
-; CHECK-DAG: ld.param.b32 [[RE0:%r[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b32 [[RE0:%r[0-9]+]], [retval0];
 ; CHECK-DAG: ld.param.f32 [[RE1:%f[0-9]+]], [retval0+4];
 ; CHECK-DAG: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
 ; CHECK-DAG: ld.param.f32 [[RE3:%f[0-9]+]], [retval0+12];
 ; CHECK-DAG: ld.param.b64 [[RE4:%rd[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.b32 [func_retval0+0], [[RE0]];
+; CHECK-DAG: st.param.b32 [func_retval0], [[RE0]];
 ; CHECK-DAG: st.param.f32 [func_retval0+4], [[RE1]];
 ; CHECK-DAG: st.param.b32 [func_retval0+8], [[RE2]];
 ; CHECK-DAG: st.param.f32 [func_retval0+12], [[RE3]];
@@ -1049,16 +1049,16 @@ define %s_i32f32 @test_s_i32f32(%s_i32f32 %a) {
 ; CHECK-DAG: ld.param.v2.u32 {[[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [test_s_i32x4_param_0+8];
 ; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_i32x4_param_0];
 ; CHECK: .param .align 8 .b8 param0[24];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b32 [param0], {[[E0]], [[E1]]};
 ; CHECK: st.param.v2.b32 [param0+8], {[[E2]], [[E3]]};
 ; CHECK: st.param.b64 [param0+16], [[E4]];
 ; CHECK: .param .align 8 .b8 retval0[24];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_s_i32x4,
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0];
 ; CHECK: ld.param.v2.b32 {[[RE2:%r[0-9]+]], [[RE3:%r[0-9]+]]}, [retval0+8];
 ; CHECK: ld.param.b64 [[RE4:%rd[0-9]+]], [retval0+16];
-; CHECK-DAG: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK-DAG: st.param.v2.b32 [func_retval0], {[[RE0]], [[RE1]]};
 ; CHECK-DAG: st.param.v2.b32 [func_retval0+8], {[[RE2]], [[RE3]]};
 ; CHECK-DAG: st.param.b64 [func_retval0+16], [[RE4]];
 ; CHECK: ret;
@@ -1077,7 +1077,7 @@ define %s_i32x4 @test_s_i32x4(%s_i32x4 %a) {
 ; CHECK: ld.param.u8 [[E2:%rs[0-9]+]], [test_s_i1i32x4_param_0+8];
 ; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_i1i32x4_param_0];
 ; CHECK: .param .align 8 .b8 param0[32];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b32 [param0], {[[E0]], [[E1]]};
 ; CHECK: st.param.b8 [param0+8], [[E2]];
 ; CHECK: st.param.b32 [param0+12], [[E3]];
 ; CHECK: st.param.b32 [param0+16], [[E4]];
@@ -1088,12 +1088,12 @@ define %s_i32x4 @test_s_i32x4(%s_i32x4 %a) {
 ; CHECK: (
 ; CHECK: param0
 ; CHECK: );
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0];
 ; CHECK: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+8];
 ; CHECK: ld.param.b32 [[RE3:%r[0-9]+]], [retval0+12];
 ; CHECK: ld.param.b32 [[RE4:%r[0-9]+]], [retval0+16];
 ; CHECK: ld.param.b64 [[RE5:%rd[0-9]+]], [retval0+24];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK: st.param.v2.b32 [func_retval0], {[[RE0]], [[RE1]]};
 ; CHECK: st.param.b8 [func_retval0+8], [[RE2]];
 ; CHECK: st.param.b32 [func_retval0+12], [[RE3]];
 ; CHECK: st.param.b32 [func_retval0+16], [[RE4]];
@@ -1136,7 +1136,7 @@ define %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a) {
 ; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+1];
 ; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0];
 ; CHECK: .param .align 1 .b8 param0[25];
-; CHECK-DAG: st.param.b8 [param0+0],
+; CHECK-DAG: st.param.b8 [param0],
 ; CHECK-DAG: st.param.b8 [param0+1],
 ; CHECK-DAG: st.param.b8 [param0+2],
 ; CHECK-DAG: st.param.b8 [param0+3],
@@ -1164,7 +1164,7 @@ define %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a) {
 ; CHECK: .param .align 1 .b8 retval0[25];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_s_i1i32x4p,
-; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+0];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0];
 ; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+1];
 ; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+2];
 ; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+3];
@@ -1190,7 +1190,7 @@ define %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a) {
 ; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+23];
 ; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+24];
 ; CHECK: } // callseq
-; CHECK-DAG: st.param.b8 [func_retval0+0],
+; CHECK-DAG: st.param.b8 [func_retval0],
 ; CHECK-DAG: st.param.b8 [func_retval0+1],
 ; CHECK-DAG: st.param.b8 [func_retval0+2],
 ; CHECK-DAG: st.param.b8 [func_retval0+3],
@@ -1232,7 +1232,7 @@ define %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a) {
 ; CHECK: ld.param.u32 [[E2:%r[0-9]+]], [test_s_crossfield_param_0+8];
 ; CHECK: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_s_crossfield_param_0];
 ; CHECK: .param .align 16 .b8 param0[80];
-; CHECK: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
+; CHECK: st.param.v2.b32 [param0], {[[E0]], [[E1]]};
 ; CHECK: st.param.b32 [param0+8], [[E2]];
 ; CHECK: st.param.v4.b32 [param0+16], {[[E3]], [[E4]], [[E5]], [[E6]]};
 ; CHECK: st.param.v4.b32 [param0+32], {[[E7]], [[E8]], [[E9]], [[E10]]};
@@ -1241,13 +1241,13 @@ define %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a) {
 ; CHECK: .param .align 16 .b8 retval0[80];
 ; CHECK: call.uni (retval0),
 ; CHECK: test_s_crossfield,
-; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK: ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0];
 ; CHECK: ld.param.b32 [[RE2:%r[0-9]+]], [retval0+8];
 ; CHECK: ld.param.v4.b32 {[[RE3:%r[0-9]+]], [[RE4:%r[0-9]+]], [[RE5:%r[0-9]+]], [[RE6:%r[0-9]+]]}, [retval0+16];
 ; CHECK: ld.param.v4.b32 {[[RE7:%r[0-9]+]], [[RE8:%r[0-9]+]], [[RE9:%r[0-9]+]], [[RE10:%r[0-9]+]]}, [retval0+32];
 ; CHECK: ld.param.v4.b32 {[[RE11:%r[0-9]+]], [[RE12:%r[0-9]+]], [[RE13:%r[0-9]+]], [[RE14:%r[0-9]+]]}, [retval0+48];
 ; CHECK: ld.param.b32 [[RE15:%r[0-9]+]], [retval0+64];
-; CHECK: st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]};
+; CHECK: st.param.v2.b32 [func_retval0], {[[RE0]], [[RE1]]};
 ; CHECK: st.param.b32 [func_retval0+8], [[RE2]];
 ; CHECK: st.param.v4.b32 [func_retval0+16], {[[RE3]], [[RE4]], [[RE5]], [[RE6]]};
 ; CHECK: st.param.v4.b32 [func_retval0+32], {[[RE7]], [[RE8]], [[RE9]], [[RE10]]};
diff --git a/llvm/test/CodeGen/NVPTX/param-overalign.ll b/llvm/test/CodeGen/NVPTX/param-overalign.ll
index 5c09bb8e1a5d72..8c506fb0f75abe 100644
--- a/llvm/test/CodeGen/NVPTX/param-overalign.ll
+++ b/llvm/test/CodeGen/NVPTX/param-overalign.ll
@@ -28,16 +28,16 @@ define float @caller_md(float %a, float %b) {
 ; CHECK-NEXT: ld.param.f32 %f2, [caller_md_param_1];
 ; CHECK-NEXT: {
 ; CHECK-NEXT: .param .align 8 .b8 param0[8];
-; CHECK-NEXT: st.param.v2.f32 [param0+0], {%f1, %f2};
+; CHECK-NEXT: st.param.v2.f32 [param0], {%f1, %f2};
 ; CHECK-NEXT: .param .b32 retval0;
 ; CHECK-NEXT: call.uni (retval0),
 ; CHECK-NEXT: callee_md,
 ; CHECK-NEXT: (
 ; CHECK-NEXT: param0
 ; CHECK-NEXT: );
-; CHECK-NEXT: ld.param.f32 %f3, [retval0+0];
+; CHECK-NEXT: ld.param.f32 %f3, [retval0];
 ; CHECK-NEXT: }
-; CHECK-NEXT: st.param.f32
[func_retval0], %f3; ; CHECK-NEXT: ret; %s1 = insertvalue %struct.float2 poison, float %a, 0 %s2 = insertvalue %struct.float2 %s1, float %b, 1 @@ -53,7 +53,7 @@ define float @callee_md(%struct.float2 %a) { ; CHECK: ld.param.v2.f32 {%f1, %f2}, [callee_md_param_0]; ; CHECK-NEXT: add.rn.f32 %f3, %f1, %f2; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-NEXT: ret; %v0 = extractvalue %struct.float2 %a, 0 %v1 = extractvalue %struct.float2 %a, 1 @@ -72,16 +72,16 @@ define float @caller(float %a, float %b) { ; CHECK-NEXT: ld.param.f32 %f2, [caller_param_1]; ; CHECK-NEXT: { ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v2.f32 [param0+0], {%f1, %f2}; +; CHECK-NEXT: st.param.v2.f32 [param0], {%f1, %f2}; ; CHECK-NEXT: .param .b32 retval0; ; CHECK-NEXT: call.uni (retval0), ; CHECK-NEXT: callee, ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-NEXT: ld.param.f32 %f3, [retval0+0]; +; CHECK-NEXT: ld.param.f32 %f3, [retval0]; ; CHECK-NEXT: } -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-NEXT: ret; %s1 = insertvalue %struct.float2 poison, float %a, 0 %s2 = insertvalue %struct.float2 %s1, float %b, 1 @@ -97,7 +97,7 @@ define float @callee(%struct.float2 alignstack(8) %a ) { ; CHECK: ld.param.v2.f32 {%f1, %f2}, [callee_param_0]; ; CHECK-NEXT: add.rn.f32 %f3, %f1, %f2; -; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-NEXT: st.param.f32 [func_retval0], %f3; ; CHECK-NEXT: ret; %v0 = extractvalue %struct.float2 %a, 0 %v1 = extractvalue %struct.float2 %a, 1 diff --git a/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll b/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll index 55fadf10f8d6d3..db8b1a6f53d13c 100644 --- a/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll +++ b/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll @@ -84,14 +84,14 @@ define dso_local void @caller_St4x1(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St4x1_param_1 ; CHECK: ) ; CHECK: .param .b32 param0; - ; CHECK: st.param.b32 [param0+0], {{%r[0-9]+}}; + ; CHECK: st.param.b32 [param0], {{%r[0-9]+}}; ; CHECK: .param .align 16 .b8 retval0[4]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: callee_St4x1, ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+0]; + ; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0]; %1 = load i32, ptr %in, align 4 %call = tail call fastcc [1 x i32] @callee_St4x1(i32 %1) #2 %.fca.0.extract = extractvalue [1 x i32] %call, 0 @@ -104,7 +104,7 @@ define internal fastcc [1 x i32] @callee_St4x1(i32 %in.0.val) { ; CHECK-LABEL: callee_St4x1( ; CHECK-NEXT: .param .b32 callee_St4x1_param_0 ; CHECK: ld.param.u32 [[R1:%r[0-9]+]], [callee_St4x1_param_0]; - ; CHECK: st.param.b32 [func_retval0+0], [[R1]]; + ; CHECK: st.param.b32 [func_retval0], [[R1]]; ; CHECK-NEXT: ret; %oldret = insertvalue [1 x i32] poison, i32 %in.0.val, 0 ret [1 x i32] %oldret @@ -116,14 +116,14 @@ define dso_local void @caller_St4x2(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St4x2_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[8]; - ; CHECK: st.param.v2.b32 [param0+0], {{{%r[0-9]+}}, {{%r[0-9]+}}}; + ; CHECK: st.param.v2.b32 [param0], {{{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: .param .align 16 .b8 retval0[8]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: callee_St4x2, ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v2.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+0]; + ; 
CHECK: ld.param.v2.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0]; %agg.tmp = alloca %struct.St4x2, align 8 %1 = load i64, ptr %in, align 4 store i64 %1, ptr %agg.tmp, align 8 @@ -141,7 +141,7 @@ define internal fastcc [2 x i32] @callee_St4x2(ptr nocapture noundef readonly by ; CHECK-LABEL: callee_St4x2( ; CHECK-NEXT: .param .align 16 .b8 callee_St4x2_param_0[8] ; CHECK: ld.param.v2.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]]}, [callee_St4x2_param_0]; - ; CHECK: st.param.v2.b32 [func_retval0+0], {[[R1]], [[R2]]}; + ; CHECK: st.param.v2.b32 [func_retval0], {[[R1]], [[R2]]}; ; CHECK-NEXT: ret; %1 = load i32, ptr %in, align 4 %arrayidx.1 = getelementptr inbounds [2 x i32], ptr %in, i64 0, i64 1 @@ -157,7 +157,7 @@ define dso_local void @caller_St4x3(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St4x3_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[12]; - ; CHECK: st.param.v2.b32 [param0+0], {{{%r[0-9]+}}, {{%r[0-9]+}}}; + ; CHECK: st.param.v2.b32 [param0], {{{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: st.param.b32 [param0+8], {{%r[0-9]+}}; ; CHECK: .param .align 16 .b8 retval0[12]; ; CHECK: call.uni (retval0), @@ -165,7 +165,7 @@ define dso_local void @caller_St4x3(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v2.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v2.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0]; ; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+8]; %call = tail call fastcc [3 x i32] @callee_St4x3(ptr noundef nonnull byval(%struct.St4x3) align 4 %in) #2 %.fca.0.extract = extractvalue [3 x i32] %call, 0 @@ -185,7 +185,7 @@ define internal fastcc [3 x i32] @callee_St4x3(ptr nocapture noundef readonly by ; CHECK-NEXT: .param .align 16 .b8 callee_St4x3_param_0[12] ; CHECK: ld.param.v2.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]]}, [callee_St4x3_param_0]; ; CHECK: ld.param.u32 [[R3:%r[0-9]+]], [callee_St4x3_param_0+8]; - ; CHECK: st.param.v2.b32 [func_retval0+0], {[[R1]], [[R2]]}; + ; CHECK: st.param.v2.b32 [func_retval0], {[[R1]], [[R2]]}; ; CHECK: st.param.b32 [func_retval0+8], [[R3]]; ; CHECK-NEXT: ret; %1 = load i32, ptr %in, align 4 @@ -205,14 +205,14 @@ define dso_local void @caller_St4x4(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St4x4_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[16]; - ; CHECK: st.param.v4.b32 [param0+0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; + ; CHECK: st.param.v4.b32 [param0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: .param .align 16 .b8 retval0[16]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: callee_St4x4, ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0]; %call = tail call fastcc [4 x i32] @callee_St4x4(ptr noundef nonnull byval(%struct.St4x4) align 4 %in) #2 %.fca.0.extract = extractvalue [4 x i32] %call, 0 %.fca.1.extract = extractvalue [4 x i32] %call, 1 @@ -233,7 +233,7 @@ define internal fastcc [4 x i32] @callee_St4x4(ptr nocapture noundef readonly by ; CHECK-LABEL: callee_St4x4( ; CHECK-NEXT: .param .align 16 .b8 callee_St4x4_param_0[16] ; CHECK: ld.param.v4.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x4_param_0]; - ; CHECK: st.param.v4.b32 [func_retval0+0], {[[R1]], [[R2]], [[R3]], [[R4]]}; + ; CHECK: st.param.v4.b32 [func_retval0], 
{[[R1]], [[R2]], [[R3]], [[R4]]}; ; CHECK-NEXT: ret; %1 = load i32, ptr %in, align 4 %arrayidx.1 = getelementptr inbounds [4 x i32], ptr %in, i64 0, i64 1 @@ -255,7 +255,7 @@ define dso_local void @caller_St4x5(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St4x5_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[20]; - ; CHECK: st.param.v4.b32 [param0+0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; + ; CHECK: st.param.v4.b32 [param0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: st.param.b32 [param0+16], {{%r[0-9]+}}; ; CHECK: .param .align 16 .b8 retval0[20]; ; CHECK: call.uni (retval0), @@ -263,7 +263,7 @@ define dso_local void @caller_St4x5(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0]; ; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+16]; %call = tail call fastcc [5 x i32] @callee_St4x5(ptr noundef nonnull byval(%struct.St4x5) align 4 %in) #2 %.fca.0.extract = extractvalue [5 x i32] %call, 0 @@ -289,7 +289,7 @@ define internal fastcc [5 x i32] @callee_St4x5(ptr nocapture noundef readonly by ; CHECK-NEXT: .param .align 16 .b8 callee_St4x5_param_0[20] ; CHECK: ld.param.v4.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x5_param_0]; ; CHECK: ld.param.u32 [[R5:%r[0-9]+]], [callee_St4x5_param_0+16]; - ; CHECK: st.param.v4.b32 [func_retval0+0], {[[R1]], [[R2]], [[R3]], [[R4]]}; + ; CHECK: st.param.v4.b32 [func_retval0], {[[R1]], [[R2]], [[R3]], [[R4]]}; ; CHECK: st.param.b32 [func_retval0+16], [[R5]]; ; CHECK-NEXT: ret; %1 = load i32, ptr %in, align 4 @@ -315,7 +315,7 @@ define dso_local void @caller_St4x6(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St4x6_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[24]; - ; CHECK: st.param.v4.b32 [param0+0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; + ; CHECK: st.param.v4.b32 [param0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: st.param.v2.b32 [param0+16], {{{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: .param .align 16 .b8 retval0[24]; ; CHECK: call.uni (retval0), @@ -323,7 +323,7 @@ define dso_local void @caller_St4x6(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0]; ; CHECK: ld.param.v2.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+16]; %call = tail call fastcc [6 x i32] @callee_St4x6(ptr noundef nonnull byval(%struct.St4x6) align 4 %in) #2 %.fca.0.extract = extractvalue [6 x i32] %call, 0 @@ -352,7 +352,7 @@ define internal fastcc [6 x i32] @callee_St4x6(ptr nocapture noundef readonly by ; CHECK-NEXT: .param .align 16 .b8 callee_St4x6_param_0[24] ; CHECK: ld.param.v4.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x6_param_0]; ; CHECK: ld.param.v2.u32 {[[R5:%r[0-9]+]], [[R6:%r[0-9]+]]}, [callee_St4x6_param_0+16]; - ; CHECK: st.param.v4.b32 [func_retval0+0], {[[R1]], [[R2]], [[R3]], [[R4]]}; + ; CHECK: st.param.v4.b32 [func_retval0], {[[R1]], [[R2]], [[R3]], [[R4]]}; ; CHECK: st.param.v2.b32 [func_retval0+16], {[[R5]], [[R6]]}; ; CHECK-NEXT: ret; %1 = load i32, ptr %in, 
align 4 @@ -381,7 +381,7 @@ define dso_local void @caller_St4x7(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St4x7_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[28]; - ; CHECK: st.param.v4.b32 [param0+0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; + ; CHECK: st.param.v4.b32 [param0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: st.param.v2.b32 [param0+16], {{{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: st.param.b32 [param0+24], {{%r[0-9]+}}; ; CHECK: .param .align 16 .b8 retval0[28]; @@ -390,7 +390,7 @@ define dso_local void @caller_St4x7(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0]; ; CHECK: ld.param.v2.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+16]; ; CHECK: ld.param.b32 {{%r[0-9]+}}, [retval0+24]; %call = tail call fastcc [7 x i32] @callee_St4x7(ptr noundef nonnull byval(%struct.St4x7) align 4 %in) #2 @@ -424,7 +424,7 @@ define internal fastcc [7 x i32] @callee_St4x7(ptr nocapture noundef readonly by ; CHECK: ld.param.v4.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x7_param_0]; ; CHECK: ld.param.v2.u32 {[[R5:%r[0-9]+]], [[R6:%r[0-9]+]]}, [callee_St4x7_param_0+16]; ; CHECK: ld.param.u32 [[R7:%r[0-9]+]], [callee_St4x7_param_0+24]; - ; CHECK: st.param.v4.b32 [func_retval0+0], {[[R1]], [[R2]], [[R3]], [[R4]]}; + ; CHECK: st.param.v4.b32 [func_retval0], {[[R1]], [[R2]], [[R3]], [[R4]]}; ; CHECK: st.param.v2.b32 [func_retval0+16], {[[R5]], [[R6]]}; ; CHECK: st.param.b32 [func_retval0+24], [[R7]]; ; CHECK-NEXT: ret; @@ -457,7 +457,7 @@ define dso_local void @caller_St4x8(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St4x8_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[32]; - ; CHECK: st.param.v4.b32 [param0+0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; + ; CHECK: st.param.v4.b32 [param0], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: st.param.v4.b32 [param0+16], {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}; ; CHECK: .param .align 16 .b8 retval0[32]; ; CHECK: call.uni (retval0), @@ -465,7 +465,7 @@ define dso_local void @caller_St4x8(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0]; ; CHECK: ld.param.v4.b32 {{{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}, {{%r[0-9]+}}}, [retval0+16]; %call = tail call fastcc [8 x i32] @callee_St4x8(ptr noundef nonnull byval(%struct.St4x8) align 4 %in) #2 %.fca.0.extract = extractvalue [8 x i32] %call, 0 @@ -500,7 +500,7 @@ define internal fastcc [8 x i32] @callee_St4x8(ptr nocapture noundef readonly by ; CHECK-NEXT: .param .align 16 .b8 callee_St4x8_param_0[32] ; CHECK: ld.param.v4.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x8_param_0]; ; CHECK: ld.param.v4.u32 {[[R5:%r[0-9]+]], [[R6:%r[0-9]+]], [[R7:%r[0-9]+]], [[R8:%r[0-9]+]]}, [callee_St4x8_param_0+16]; - ; CHECK: st.param.v4.b32 [func_retval0+0], {[[R1]], [[R2]], [[R3]], [[R4]]}; + ; CHECK: st.param.v4.b32 [func_retval0], {[[R1]], [[R2]], [[R3]], [[R4]]}; ; CHECK: st.param.v4.b32 [func_retval0+16], 
{[[R5]], [[R6]], [[R7]], [[R8]]}; ; CHECK-NEXT: ret; %1 = load i32, ptr %in, align 4 @@ -535,14 +535,14 @@ define dso_local void @caller_St8x1(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St8x1_param_1 ; CHECK: ) ; CHECK: .param .b64 param0; - ; CHECK: st.param.b64 [param0+0], {{%rd[0-9]+}}; + ; CHECK: st.param.b64 [param0], {{%rd[0-9]+}}; ; CHECK: .param .align 16 .b8 retval0[8]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: callee_St8x1, ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0+0]; + ; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0]; %1 = load i64, ptr %in, align 8 %call = tail call fastcc [1 x i64] @callee_St8x1(i64 %1) #2 %.fca.0.extract = extractvalue [1 x i64] %call, 0 @@ -555,7 +555,7 @@ define internal fastcc [1 x i64] @callee_St8x1(i64 %in.0.val) { ; CHECK-LABEL: callee_St8x1( ; CHECK-NEXT: .param .b64 callee_St8x1_param_0 ; CHECK: ld.param.u64 [[RD1:%rd[0-9]+]], [callee_St8x1_param_0]; - ; CHECK: st.param.b64 [func_retval0+0], [[RD1]]; + ; CHECK: st.param.b64 [func_retval0], [[RD1]]; ; CHECK-NEXT: ret; %oldret = insertvalue [1 x i64] poison, i64 %in.0.val, 0 ret [1 x i64] %oldret @@ -567,14 +567,14 @@ define dso_local void @caller_St8x2(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St8x2_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[16]; - ; CHECK: st.param.v2.b64 [param0+0], {{{%rd[0-9]+}}, {{%rd[0-9]+}}}; + ; CHECK: st.param.v2.b64 [param0], {{{%rd[0-9]+}}, {{%rd[0-9]+}}}; ; CHECK: .param .align 16 .b8 retval0[16]; ; CHECK: call.uni (retval0), ; CHECK-NEXT: callee_St8x2, ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v2.b64 {{{%rd[0-9]+}}, {{%rd[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v2.b64 {{{%rd[0-9]+}}, {{%rd[0-9]+}}}, [retval0]; %call = tail call fastcc [2 x i64] @callee_St8x2(ptr noundef nonnull byval(%struct.St8x2) align 8 %in) #2 %.fca.0.extract = extractvalue [2 x i64] %call, 0 %.fca.1.extract = extractvalue [2 x i64] %call, 1 @@ -589,7 +589,7 @@ define internal fastcc [2 x i64] @callee_St8x2(ptr nocapture noundef readonly by ; CHECK-LABEL: callee_St8x2( ; CHECK-NEXT: .param .align 16 .b8 callee_St8x2_param_0[16] ; CHECK: ld.param.v2.u64 {[[RD1:%rd[0-9]+]], [[RD2:%rd[0-9]+]]}, [callee_St8x2_param_0]; - ; CHECK: st.param.v2.b64 [func_retval0+0], {[[RD1]], [[RD2]]}; + ; CHECK: st.param.v2.b64 [func_retval0], {[[RD1]], [[RD2]]}; ; CHECK-NEXT: ret; %1 = load i64, ptr %in, align 8 %arrayidx.1 = getelementptr inbounds [2 x i64], ptr %in, i64 0, i64 1 @@ -605,7 +605,7 @@ define dso_local void @caller_St8x3(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St8x3_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[24]; - ; CHECK: st.param.v2.b64 [param0+0], {{{%rd[0-9]+}}, {{%rd[0-9]+}}}; + ; CHECK: st.param.v2.b64 [param0], {{{%rd[0-9]+}}, {{%rd[0-9]+}}}; ; CHECK: st.param.b64 [param0+16], {{%rd[0-9]+}}; ; CHECK: .param .align 16 .b8 retval0[24]; ; CHECK: call.uni (retval0), @@ -613,7 +613,7 @@ define dso_local void @caller_St8x3(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v2.b64 {{{%rd[0-9]+}}, {{%rd[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v2.b64 {{{%rd[0-9]+}}, {{%rd[0-9]+}}}, [retval0]; ; CHECK: ld.param.b64 {{%rd[0-9]+}}, [retval0+16]; %call = tail call fastcc [3 x i64] @callee_St8x3(ptr noundef nonnull byval(%struct.St8x3) align 8 %in) #2 %.fca.0.extract = extractvalue [3 x i64] %call, 0 @@ -633,7 +633,7 @@ define 
internal fastcc [3 x i64] @callee_St8x3(ptr nocapture noundef readonly by ; CHECK-NEXT: .param .align 16 .b8 callee_St8x3_param_0[24] ; CHECK: ld.param.v2.u64 {[[RD1:%rd[0-9]+]], [[RD2:%rd[0-9]+]]}, [callee_St8x3_param_0]; ; CHECK: ld.param.u64 [[RD3:%rd[0-9]+]], [callee_St8x3_param_0+16]; - ; CHECK: st.param.v2.b64 [func_retval0+0], {[[RD1]], [[RD2]]}; + ; CHECK: st.param.v2.b64 [func_retval0], {[[RD1]], [[RD2]]}; ; CHECK: st.param.b64 [func_retval0+16], [[RD3]]; ; CHECK-NEXT: ret; %1 = load i64, ptr %in, align 8 @@ -653,7 +653,7 @@ define dso_local void @caller_St8x4(ptr nocapture noundef readonly byval(%struct ; CHECK: .param .b64 caller_St8x4_param_1 ; CHECK: ) ; CHECK: .param .align 16 .b8 param0[32]; - ; CHECK: st.param.v2.b64 [param0+0], {{{%rd[0-9]+}}, {{%rd[0-9]+}}}; + ; CHECK: st.param.v2.b64 [param0], {{{%rd[0-9]+}}, {{%rd[0-9]+}}}; ; CHECK: st.param.v2.b64 [param0+16], {{{%rd[0-9]+}}, {{%rd[0-9]+}}}; ; CHECK: .param .align 16 .b8 retval0[32]; ; CHECK: call.uni (retval0), @@ -661,7 +661,7 @@ define dso_local void @caller_St8x4(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); - ; CHECK: ld.param.v2.b64 {{{%rd[0-9]+}}, {{%rd[0-9]+}}}, [retval0+0]; + ; CHECK: ld.param.v2.b64 {{{%rd[0-9]+}}, {{%rd[0-9]+}}}, [retval0]; ; CHECK: ld.param.v2.b64 {{{%rd[0-9]+}}, {{%rd[0-9]+}}}, [retval0+16]; %call = tail call fastcc [4 x i64] @callee_St8x4(ptr noundef nonnull byval(%struct.St8x4) align 8 %in) #2 %.fca.0.extract = extractvalue [4 x i64] %call, 0 @@ -684,7 +684,7 @@ define internal fastcc [4 x i64] @callee_St8x4(ptr nocapture noundef readonly by ; CHECK-NEXT: .param .align 16 .b8 callee_St8x4_param_0[32] ; CHECK: ld.param.v2.u64 {[[RD1:%rd[0-9]+]], [[RD2:%rd[0-9]+]]}, [callee_St8x4_param_0]; ; CHECK: ld.param.v2.u64 {[[RD3:%rd[0-9]+]], [[RD4:%rd[0-9]+]]}, [callee_St8x4_param_0+16]; - ; CHECK: st.param.v2.b64 [func_retval0+0], {[[RD1]], [[RD2]]}; + ; CHECK: st.param.v2.b64 [func_retval0], {[[RD1]], [[RD2]]}; ; CHECK: st.param.v2.b64 [func_retval0+16], {[[RD3]], [[RD4]]}; ; CHECK-NEXT: ret; %1 = load i64, ptr %in, align 8 @@ -708,7 +708,7 @@ define private fastcc [4 x i32] @callee_St4x4_private(ptr nocapture noundef read ; CHECK-LABEL: callee_St4x4_private( ; CHECK-NEXT: .param .align 16 .b8 callee_St4x4_private_param_0[16] ; CHECK: ld.param.v4.u32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x4_private_param_0]; - ; CHECK: st.param.v4.b32 [func_retval0+0], {[[R1]], [[R2]], [[R3]], [[R4]]}; + ; CHECK: st.param.v4.b32 [func_retval0], {[[R1]], [[R2]], [[R3]], [[R4]]}; ; CHECK-NEXT: ret; %1 = load i32, ptr %in, align 4 %arrayidx.1 = getelementptr inbounds [4 x i32], ptr %in, i64 0, i64 1 @@ -735,7 +735,7 @@ define external fastcc [4 x i32] @callee_St4x4_external(ptr nocapture noundef re ; CHECK: ld.param.u32 [[R2:%r[0-9]+]], [callee_St4x4_external_param_0+4]; ; CHECK: ld.param.u32 [[R3:%r[0-9]+]], [callee_St4x4_external_param_0+8]; ; CHECK: ld.param.u32 [[R4:%r[0-9]+]], [callee_St4x4_external_param_0+12]; - ; CHECK: st.param.b32 [func_retval0+0], [[R1]]; + ; CHECK: st.param.b32 [func_retval0], [[R1]]; ; CHECK: st.param.b32 [func_retval0+4], [[R2]]; ; CHECK: st.param.b32 [func_retval0+8], [[R3]]; ; CHECK: st.param.b32 [func_retval0+12], [[R4]]; diff --git a/llvm/test/CodeGen/NVPTX/proxy-reg-erasure-ptx.ll b/llvm/test/CodeGen/NVPTX/proxy-reg-erasure-ptx.ll index fa138f3d0936e9..4c9a2ee80c251f 100644 --- a/llvm/test/CodeGen/NVPTX/proxy-reg-erasure-ptx.ll +++ b/llvm/test/CodeGen/NVPTX/proxy-reg-erasure-ptx.ll @@ 
-12,14 +12,14 @@ declare i1 @callee_i1() define i1 @check_i1() { ; PTX-LABEL: check_i1 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0+0]; + ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.b32 [[PROXY:%r[0-9]+]], [[LD]]; ; PTX-WITHOUT-DAG: and.b32 [[RES:%r[0-9]+]], [[PROXY]], 1; ; PTX-WITH-DAG: and.b32 [[RES:%r[0-9]+]], [[LD]], 1; - ; PTX-DAG: st.param.b32 [func_retval0+0], [[RES]]; + ; PTX-DAG: st.param.b32 [func_retval0], [[RES]]; %ret = call i1 @callee_i1() ret i1 %ret @@ -29,14 +29,14 @@ declare i16 @callee_i16() define i16 @check_i16() { ; PTX-LABEL: check_i16 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0+0]; + ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.b32 [[PROXY:%r[0-9]+]], [[LD]]; ; PTX-WITHOUT-DAG: and.b32 [[RES:%r[0-9]+]], [[PROXY]], 65535; ; PTX-WITH-DAG: and.b32 [[RES:%r[0-9]+]], [[LD]], 65535; - ; PTX-DAG: st.param.b32 [func_retval0+0], [[RES]]; + ; PTX-DAG: st.param.b32 [func_retval0], [[RES]]; %ret = call i16 @callee_i16() ret i16 %ret @@ -46,12 +46,12 @@ declare i32 @callee_i32() define i32 @check_i32() { ; PTX-LABEL: check_i32 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0+0]; + ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.b32 [[PROXY:%r[0-9]+]], [[LD]]; - ; PTX-WITHOUT-DAG: st.param.b32 [func_retval0+0], [[PROXY]]; - ; PTX-WITH-DAG: st.param.b32 [func_retval0+0], [[LD]]; + ; PTX-WITHOUT-DAG: st.param.b32 [func_retval0], [[PROXY]]; + ; PTX-WITH-DAG: st.param.b32 [func_retval0], [[LD]]; %ret = call i32 @callee_i32() ret i32 %ret @@ -61,12 +61,12 @@ declare i64 @callee_i64() define i64 @check_i64() { ; PTX-LABEL: check_i64 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.b64 [[LD:%rd[0-9]+]], [retval0+0]; + ; PTX-DAG: ld.param.b64 [[LD:%rd[0-9]+]], [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.b64 [[PROXY:%rd[0-9]+]], [[LD]]; - ; PTX-WITHOUT-DAG: st.param.b64 [func_retval0+0], [[PROXY]]; - ; PTX-WITH-DAG: st.param.b64 [func_retval0+0], [[LD]]; + ; PTX-WITHOUT-DAG: st.param.b64 [func_retval0], [[PROXY]]; + ; PTX-WITH-DAG: st.param.b64 [func_retval0], [[LD]]; %ret = call i64 @callee_i64() ret i64 %ret @@ -76,13 +76,13 @@ declare i128 @callee_i128() define i128 @check_i128() { ; PTX-LABEL: check_i128 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.v2.b64 {[[LD0:%rd[0-9]+]], [[LD1:%rd[0-9]+]]}, [retval0+0]; + ; PTX-DAG: ld.param.v2.b64 {[[LD0:%rd[0-9]+]], [[LD1:%rd[0-9]+]]}, [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.b64 [[PROXY0:%rd[0-9]+]], [[LD0]]; ; PTX-WITHOUT-DAG: mov.b64 [[PROXY1:%rd[0-9]+]], [[LD1]]; - ; PTX-WITHOUT-DAG: st.param.v2.b64 [func_retval0+0], {[[PROXY0]], [[PROXY1]]}; - ; PTX-WITH-DAG: st.param.v2.b64 [func_retval0+0], {[[LD0]], [[LD1]]}; + ; PTX-WITHOUT-DAG: st.param.v2.b64 [func_retval0], {[[PROXY0]], [[PROXY1]]}; + ; PTX-WITH-DAG: st.param.v2.b64 [func_retval0], {[[LD0]], [[LD1]]}; %ret = call i128 @callee_i128() ret i128 %ret @@ -92,12 +92,12 @@ declare half @callee_f16() define half @check_f16() { ; PTX-LABEL: check_f16 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.b16 [[LD:%rs[0-9]+]], [retval0+0]; + ; PTX-DAG: ld.param.b16 [[LD:%rs[0-9]+]], [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; 
PTX-WITHOUT-DAG: mov.b16 [[PROXY:%rs[0-9]+]], [[LD]]; - ; PTX-WITHOUT-DAG: st.param.b16 [func_retval0+0], [[PROXY]]; - ; PTX-WITH-DAG: st.param.b16 [func_retval0+0], [[LD]]; + ; PTX-WITHOUT-DAG: st.param.b16 [func_retval0], [[PROXY]]; + ; PTX-WITH-DAG: st.param.b16 [func_retval0], [[LD]]; %ret = call half @callee_f16() ret half %ret @@ -107,12 +107,12 @@ declare float @callee_f32() define float @check_f32() { ; PTX-LABEL: check_f32 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.f32 [[LD:%f[0-9]+]], [retval0+0]; + ; PTX-DAG: ld.param.f32 [[LD:%f[0-9]+]], [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.f32 [[PROXY:%f[0-9]+]], [[LD]]; - ; PTX-WITHOUT-DAG: st.param.f32 [func_retval0+0], [[PROXY]]; - ; PTX-WITH-DAG: st.param.f32 [func_retval0+0], [[LD]]; + ; PTX-WITHOUT-DAG: st.param.f32 [func_retval0], [[PROXY]]; + ; PTX-WITH-DAG: st.param.f32 [func_retval0], [[LD]]; %ret = call float @callee_f32() ret float %ret @@ -122,12 +122,12 @@ declare double @callee_f64() define double @check_f64() { ; PTX-LABEL: check_f64 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.f64 [[LD:%fd[0-9]+]], [retval0+0]; + ; PTX-DAG: ld.param.f64 [[LD:%fd[0-9]+]], [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.f64 [[PROXY:%fd[0-9]+]], [[LD]]; - ; PTX-WITHOUT-DAG: st.param.f64 [func_retval0+0], [[PROXY]]; - ; PTX-WITH-DAG: st.param.f64 [func_retval0+0], [[LD]]; + ; PTX-WITHOUT-DAG: st.param.f64 [func_retval0], [[PROXY]]; + ; PTX-WITH-DAG: st.param.f64 [func_retval0], [[LD]]; %ret = call double @callee_f64() ret double %ret @@ -137,15 +137,15 @@ declare <4 x i32> @callee_vec_i32() define <4 x i32> @check_vec_i32() { ; PTX-LABEL: check_vec_i32 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.v4.b32 {[[LD0:%r[0-9]+]], [[LD1:%r[0-9]+]], [[LD2:%r[0-9]+]], [[LD3:%r[0-9]+]]}, [retval0+0]; + ; PTX-DAG: ld.param.v4.b32 {[[LD0:%r[0-9]+]], [[LD1:%r[0-9]+]], [[LD2:%r[0-9]+]], [[LD3:%r[0-9]+]]}, [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.b32 [[PROXY0:%r[0-9]+]], [[LD0]]; ; PTX-WITHOUT-DAG: mov.b32 [[PROXY1:%r[0-9]+]], [[LD1]]; ; PTX-WITHOUT-DAG: mov.b32 [[PROXY2:%r[0-9]+]], [[LD2]]; ; PTX-WITHOUT-DAG: mov.b32 [[PROXY3:%r[0-9]+]], [[LD3]]; - ; PTX-WITHOUT-DAG: st.param.v4.b32 [func_retval0+0], {[[PROXY0]], [[PROXY1]], [[PROXY2]], [[PROXY3]]}; - ; PTX-WITH-DAG: st.param.v4.b32 [func_retval0+0], {[[LD0]], [[LD1]], [[LD2]], [[LD3]]}; + ; PTX-WITHOUT-DAG: st.param.v4.b32 [func_retval0], {[[PROXY0]], [[PROXY1]], [[PROXY2]], [[PROXY3]]}; + ; PTX-WITH-DAG: st.param.v4.b32 [func_retval0], {[[LD0]], [[LD1]], [[LD2]], [[LD3]]}; %ret = call <4 x i32> @callee_vec_i32() ret <4 x i32> %ret @@ -155,12 +155,12 @@ declare <2 x half> @callee_vec_f16() define <2 x half> @check_vec_f16() { ; PTX-LABEL: check_vec_f16 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0+0]; + ; PTX-DAG: ld.param.b32 [[LD:%r[0-9]+]], [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.b32 [[PROXY:%r[0-9]+]], [[LD]]; - ; PTX-WITHOUT-DAG: st.param.b32 [func_retval0+0], [[PROXY]]; - ; PTX-WITH-DAG: st.param.b32 [func_retval0+0], [[LD]]; + ; PTX-WITHOUT-DAG: st.param.b32 [func_retval0], [[PROXY]]; + ; PTX-WITH-DAG: st.param.b32 [func_retval0], [[LD]]; %ret = call <2 x half> @callee_vec_f16() ret <2 x half> %ret @@ -170,13 +170,13 @@ declare <2 x double> @callee_vec_f64() define <2 x double> @check_vec_f64() { ; PTX-LABEL: check_vec_f64 ; PTX-DAG: { // callseq {{[0-9]+}}, {{[0-9]+}} - ; 
PTX-DAG: ld.param.v2.f64 {[[LD0:%fd[0-9]+]], [[LD1:%fd[0-9]+]]}, [retval0+0]; + ; PTX-DAG: ld.param.v2.f64 {[[LD0:%fd[0-9]+]], [[LD1:%fd[0-9]+]]}, [retval0]; ; PTX-DAG: } // callseq {{[0-9]+}} ; PTX-WITHOUT-DAG: mov.f64 [[PROXY0:%fd[0-9]+]], [[LD0]]; ; PTX-WITHOUT-DAG: mov.f64 [[PROXY1:%fd[0-9]+]], [[LD1]]; - ; PTX-WITHOUT-DAG: st.param.v2.f64 [func_retval0+0], {[[PROXY0]], [[PROXY1]]}; - ; PTX-WITH-DAG: st.param.v2.f64 [func_retval0+0], {[[LD0]], [[LD1]]}; + ; PTX-WITHOUT-DAG: st.param.v2.f64 [func_retval0], {[[PROXY0]], [[PROXY1]]}; + ; PTX-WITH-DAG: st.param.v2.f64 [func_retval0], {[[LD0]], [[LD1]]}; %ret = call <2 x double> @callee_vec_f64() ret <2 x double> %ret diff --git a/llvm/test/CodeGen/NVPTX/rcp-opt.ll b/llvm/test/CodeGen/NVPTX/rcp-opt.ll index e2443c27e8490a..ccc3db54009785 100644 --- a/llvm/test/CodeGen/NVPTX/rcp-opt.ll +++ b/llvm/test/CodeGen/NVPTX/rcp-opt.ll @@ -15,7 +15,7 @@ define double @test1(double %in) { ; CHECK-NEXT: ld.param.f64 %fd1, [test1_param_0]; ; CHECK-NEXT: rcp.rn.f64 %fd2, %fd1; ; CHECK-NEXT: neg.f64 %fd3, %fd2; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd3; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd3; ; CHECK-NEXT: ret; %div = fdiv double 1.000000e+00, %in %neg = fsub double -0.000000e+00, %div @@ -33,7 +33,7 @@ define double @test2(double %in) { ; CHECK-NEXT: ld.param.f64 %fd1, [test2_param_0]; ; CHECK-NEXT: rcp.rn.f64 %fd2, %fd1; ; CHECK-NEXT: neg.f64 %fd3, %fd2; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd3; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd3; ; CHECK-NEXT: ret; %div = fdiv double -1.000000e+00, %in ret double %div @@ -50,7 +50,7 @@ define double @test3(double %in) { ; CHECK-NEXT: ld.param.f64 %fd1, [test3_param_0]; ; CHECK-NEXT: rcp.rn.f64 %fd2, %fd1; ; CHECK-NEXT: neg.f64 %fd3, %fd2; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd3; +; CHECK-NEXT: st.param.f64 [func_retval0], %fd3; ; CHECK-NEXT: ret; %neg = fsub double -0.000000e+00, %in %div = fdiv double 1.000000e+00, %neg diff --git a/llvm/test/CodeGen/NVPTX/rotate.ll b/llvm/test/CodeGen/NVPTX/rotate.ll index 6586393f83d440..4174fd2f3ec2cc 100644 --- a/llvm/test/CodeGen/NVPTX/rotate.ll +++ b/llvm/test/CodeGen/NVPTX/rotate.ll @@ -31,7 +31,7 @@ define i32 @rotate32(i32 %a, i32 %b) { ; SM20-NEXT: and.b32 %r6, %r5, 31; ; SM20-NEXT: shr.u32 %r7, %r1, %r6; ; SM20-NEXT: or.b32 %r8, %r4, %r7; -; SM20-NEXT: st.param.b32 [func_retval0+0], %r8; +; SM20-NEXT: st.param.b32 [func_retval0], %r8; ; SM20-NEXT: ret; ; ; SM35-LABEL: rotate32( @@ -42,7 +42,7 @@ define i32 @rotate32(i32 %a, i32 %b) { ; SM35-NEXT: ld.param.u32 %r1, [rotate32_param_0]; ; SM35-NEXT: ld.param.u32 %r2, [rotate32_param_1]; ; SM35-NEXT: shf.l.wrap.b32 %r3, %r1, %r1, %r2; -; SM35-NEXT: st.param.b32 [func_retval0+0], %r3; +; SM35-NEXT: st.param.b32 [func_retval0], %r3; ; SM35-NEXT: ret; %val = tail call i32 @llvm.nvvm.rotate.b32(i32 %a, i32 %b) ret i32 %val @@ -65,7 +65,7 @@ define i64 @rotate64(i64 %a, i32 %b) { ; SM20-NEXT: and.b32 %r4, %r3, 63; ; SM20-NEXT: shr.u64 %rd3, %rd1, %r4; ; SM20-NEXT: or.b64 %rd4, %rd2, %rd3; -; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM20-NEXT: st.param.b64 [func_retval0], %rd4; ; SM20-NEXT: ret; ; ; SM35-LABEL: rotate64( @@ -82,7 +82,7 @@ define i64 @rotate64(i64 %a, i32 %b) { ; SM35-NEXT: and.b32 %r4, %r3, 63; ; SM35-NEXT: shr.u64 %rd3, %rd1, %r4; ; SM35-NEXT: or.b64 %rd4, %rd2, %rd3; -; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM35-NEXT: st.param.b64 [func_retval0], %rd4; ; SM35-NEXT: ret; %val = tail call i64 @llvm.nvvm.rotate.b64(i64 %a, i32 %b) ret i64 %val 
@@ -105,7 +105,7 @@ define i64 @rotateright64(i64 %a, i32 %b) { ; SM20-NEXT: and.b32 %r4, %r3, 63; ; SM20-NEXT: shl.b64 %rd3, %rd1, %r4; ; SM20-NEXT: or.b64 %rd4, %rd2, %rd3; -; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM20-NEXT: st.param.b64 [func_retval0], %rd4; ; SM20-NEXT: ret; ; ; SM35-LABEL: rotateright64( @@ -122,7 +122,7 @@ define i64 @rotateright64(i64 %a, i32 %b) { ; SM35-NEXT: and.b32 %r4, %r3, 63; ; SM35-NEXT: shl.b64 %rd3, %rd1, %r4; ; SM35-NEXT: or.b64 %rd4, %rd2, %rd3; -; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM35-NEXT: st.param.b64 [func_retval0], %rd4; ; SM35-NEXT: ret; %val = tail call i64 @llvm.nvvm.rotate.right.b64(i64 %a, i32 %b) ret i64 %val @@ -140,7 +140,7 @@ define i32 @rotl0(i32 %x) { ; SM20-NEXT: shr.u32 %r2, %r1, 24; ; SM20-NEXT: shl.b32 %r3, %r1, 8; ; SM20-NEXT: or.b32 %r4, %r3, %r2; -; SM20-NEXT: st.param.b32 [func_retval0+0], %r4; +; SM20-NEXT: st.param.b32 [func_retval0], %r4; ; SM20-NEXT: ret; ; ; SM35-LABEL: rotl0( @@ -150,7 +150,7 @@ define i32 @rotl0(i32 %x) { ; SM35-NEXT: // %bb.0: ; SM35-NEXT: ld.param.u32 %r1, [rotl0_param_0]; ; SM35-NEXT: shf.l.wrap.b32 %r2, %r1, %r1, 8; -; SM35-NEXT: st.param.b32 [func_retval0+0], %r2; +; SM35-NEXT: st.param.b32 [func_retval0], %r2; ; SM35-NEXT: ret; %t0 = shl i32 %x, 8 %t1 = lshr i32 %x, 24 @@ -174,7 +174,7 @@ define i64 @rotl64(i64 %a, i64 %n) { ; SM20-NEXT: and.b32 %r4, %r3, 63; ; SM20-NEXT: shr.u64 %rd3, %rd1, %r4; ; SM20-NEXT: or.b64 %rd4, %rd2, %rd3; -; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM20-NEXT: st.param.b64 [func_retval0], %rd4; ; SM20-NEXT: ret; ; ; SM35-LABEL: rotl64( @@ -191,7 +191,7 @@ define i64 @rotl64(i64 %a, i64 %n) { ; SM35-NEXT: and.b32 %r4, %r3, 63; ; SM35-NEXT: shr.u64 %rd3, %rd1, %r4; ; SM35-NEXT: or.b64 %rd4, %rd2, %rd3; -; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM35-NEXT: st.param.b64 [func_retval0], %rd4; ; SM35-NEXT: ret; %val = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %n) ret i64 %val @@ -208,7 +208,7 @@ define i64 @rotl64_imm(i64 %a) { ; SM20-NEXT: shr.u64 %rd2, %rd1, 62; ; SM20-NEXT: shl.b64 %rd3, %rd1, 2; ; SM20-NEXT: or.b64 %rd4, %rd3, %rd2; -; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM20-NEXT: st.param.b64 [func_retval0], %rd4; ; SM20-NEXT: ret; ; ; SM35-LABEL: rotl64_imm( @@ -220,7 +220,7 @@ define i64 @rotl64_imm(i64 %a) { ; SM35-NEXT: shr.u64 %rd2, %rd1, 62; ; SM35-NEXT: shl.b64 %rd3, %rd1, 2; ; SM35-NEXT: or.b64 %rd4, %rd3, %rd2; -; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM35-NEXT: st.param.b64 [func_retval0], %rd4; ; SM35-NEXT: ret; %val = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 66) ret i64 %val @@ -242,7 +242,7 @@ define i64 @rotr64(i64 %a, i64 %n) { ; SM20-NEXT: and.b32 %r4, %r3, 63; ; SM20-NEXT: shl.b64 %rd3, %rd1, %r4; ; SM20-NEXT: or.b64 %rd4, %rd2, %rd3; -; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM20-NEXT: st.param.b64 [func_retval0], %rd4; ; SM20-NEXT: ret; ; ; SM35-LABEL: rotr64( @@ -259,7 +259,7 @@ define i64 @rotr64(i64 %a, i64 %n) { ; SM35-NEXT: and.b32 %r4, %r3, 63; ; SM35-NEXT: shl.b64 %rd3, %rd1, %r4; ; SM35-NEXT: or.b64 %rd4, %rd2, %rd3; -; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM35-NEXT: st.param.b64 [func_retval0], %rd4; ; SM35-NEXT: ret; %val = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %n) ret i64 %val @@ -276,7 +276,7 @@ define i64 @rotr64_imm(i64 %a) { ; SM20-NEXT: shl.b64 %rd2, %rd1, 62; ; SM20-NEXT: shr.u64 %rd3, %rd1, 2; ; SM20-NEXT: or.b64 %rd4, %rd3, %rd2; -; SM20-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM20-NEXT: st.param.b64 
[func_retval0], %rd4; ; SM20-NEXT: ret; ; ; SM35-LABEL: rotr64_imm( @@ -288,7 +288,7 @@ define i64 @rotr64_imm(i64 %a) { ; SM35-NEXT: shl.b64 %rd2, %rd1, 62; ; SM35-NEXT: shr.u64 %rd3, %rd1, 2; ; SM35-NEXT: or.b64 %rd4, %rd3, %rd2; -; SM35-NEXT: st.param.b64 [func_retval0+0], %rd4; +; SM35-NEXT: st.param.b64 [func_retval0], %rd4; ; SM35-NEXT: ret; %val = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 66) ret i64 %val @@ -310,7 +310,7 @@ define i32 @funnel_shift_right_32(i32 %a, i32 %b, i32 %c) { ; SM20-NEXT: and.b32 %r8, %r7, 31; ; SM20-NEXT: shl.b32 %r9, %r6, %r8; ; SM20-NEXT: or.b32 %r10, %r9, %r5; -; SM20-NEXT: st.param.b32 [func_retval0+0], %r10; +; SM20-NEXT: st.param.b32 [func_retval0], %r10; ; SM20-NEXT: ret; ; ; SM35-LABEL: funnel_shift_right_32( @@ -322,7 +322,7 @@ define i32 @funnel_shift_right_32(i32 %a, i32 %b, i32 %c) { ; SM35-NEXT: ld.param.u32 %r2, [funnel_shift_right_32_param_1]; ; SM35-NEXT: ld.param.u32 %r3, [funnel_shift_right_32_param_2]; ; SM35-NEXT: shf.r.wrap.b32 %r4, %r2, %r1, %r3; -; SM35-NEXT: st.param.b32 [func_retval0+0], %r4; +; SM35-NEXT: st.param.b32 [func_retval0], %r4; ; SM35-NEXT: ret; %val = call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c) ret i32 %val @@ -344,7 +344,7 @@ define i32 @funnel_shift_left_32(i32 %a, i32 %b, i32 %c) { ; SM20-NEXT: and.b32 %r8, %r7, 31; ; SM20-NEXT: shr.u32 %r9, %r6, %r8; ; SM20-NEXT: or.b32 %r10, %r4, %r9; -; SM20-NEXT: st.param.b32 [func_retval0+0], %r10; +; SM20-NEXT: st.param.b32 [func_retval0], %r10; ; SM20-NEXT: ret; ; ; SM35-LABEL: funnel_shift_left_32( @@ -356,7 +356,7 @@ define i32 @funnel_shift_left_32(i32 %a, i32 %b, i32 %c) { ; SM35-NEXT: ld.param.u32 %r2, [funnel_shift_left_32_param_1]; ; SM35-NEXT: ld.param.u32 %r3, [funnel_shift_left_32_param_2]; ; SM35-NEXT: shf.l.wrap.b32 %r4, %r2, %r1, %r3; -; SM35-NEXT: st.param.b32 [func_retval0+0], %r4; +; SM35-NEXT: st.param.b32 [func_retval0], %r4; ; SM35-NEXT: ret; %val = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c) ret i32 %val @@ -379,7 +379,7 @@ define i64 @funnel_shift_right_64(i64 %a, i64 %b, i64 %c) { ; SM20-NEXT: and.b32 %r4, %r3, 63; ; SM20-NEXT: shl.b64 %rd5, %rd4, %r4; ; SM20-NEXT: or.b64 %rd6, %rd5, %rd3; -; SM20-NEXT: st.param.b64 [func_retval0+0], %rd6; +; SM20-NEXT: st.param.b64 [func_retval0], %rd6; ; SM20-NEXT: ret; ; ; SM35-LABEL: funnel_shift_right_64( @@ -398,7 +398,7 @@ define i64 @funnel_shift_right_64(i64 %a, i64 %b, i64 %c) { ; SM35-NEXT: and.b32 %r4, %r3, 63; ; SM35-NEXT: shl.b64 %rd5, %rd4, %r4; ; SM35-NEXT: or.b64 %rd6, %rd5, %rd3; -; SM35-NEXT: st.param.b64 [func_retval0+0], %rd6; +; SM35-NEXT: st.param.b64 [func_retval0], %rd6; ; SM35-NEXT: ret; %val = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %c) ret i64 %val @@ -421,7 +421,7 @@ define i64 @funnel_shift_left_64(i64 %a, i64 %b, i64 %c) { ; SM20-NEXT: and.b32 %r4, %r3, 63; ; SM20-NEXT: shr.u64 %rd5, %rd4, %r4; ; SM20-NEXT: or.b64 %rd6, %rd2, %rd5; -; SM20-NEXT: st.param.b64 [func_retval0+0], %rd6; +; SM20-NEXT: st.param.b64 [func_retval0], %rd6; ; SM20-NEXT: ret; ; ; SM35-LABEL: funnel_shift_left_64( @@ -440,7 +440,7 @@ define i64 @funnel_shift_left_64(i64 %a, i64 %b, i64 %c) { ; SM35-NEXT: and.b32 %r4, %r3, 63; ; SM35-NEXT: shr.u64 %rd5, %rd4, %r4; ; SM35-NEXT: or.b64 %rd6, %rd2, %rd5; -; SM35-NEXT: st.param.b64 [func_retval0+0], %rd6; +; SM35-NEXT: st.param.b64 [func_retval0], %rd6; ; SM35-NEXT: ret; %val = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %c) ret i64 %val diff --git a/llvm/test/CodeGen/NVPTX/rotate_64.ll b/llvm/test/CodeGen/NVPTX/rotate_64.ll index 
05fdb02ac74794..d4851f55d93c99 100644 --- a/llvm/test/CodeGen/NVPTX/rotate_64.ll +++ b/llvm/test/CodeGen/NVPTX/rotate_64.ll @@ -15,7 +15,7 @@ define i64 @rotate64(i64 %a, i32 %b) { ; CHECK-NEXT: shr.u64 %rd2, %rd1, 61; ; CHECK-NEXT: shl.b64 %rd3, %rd1, 3; ; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd4; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd4; ; CHECK-NEXT: ret; %val = tail call i64 @llvm.nvvm.rotate.b64(i64 %a, i32 3) ret i64 %val @@ -31,7 +31,7 @@ define i64 @rotateright64(i64 %a, i32 %b) { ; CHECK-NEXT: shl.b64 %rd2, %rd1, 61; ; CHECK-NEXT: shr.u64 %rd3, %rd1, 3; ; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd4; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd4; ; CHECK-NEXT: ret; %val = tail call i64 @llvm.nvvm.rotate.right.b64(i64 %a, i32 3) ret i64 %val diff --git a/llvm/test/CodeGen/NVPTX/sad-intrins.ll b/llvm/test/CodeGen/NVPTX/sad-intrins.ll index a09413bc4e5242..8258dca605e9ef 100644 --- a/llvm/test/CodeGen/NVPTX/sad-intrins.ll +++ b/llvm/test/CodeGen/NVPTX/sad-intrins.ll @@ -14,7 +14,7 @@ define i16 @test_sad_i16(i16 %x, i16 %y, i16 %z) { ; CHECK-NEXT: ld.param.u16 %rs3, [test_sad_i16_param_2]; ; CHECK-NEXT: sad.s16 %rs4, %rs1, %rs2, %rs3; ; CHECK-NEXT: cvt.u32.u16 %r1, %rs4; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %1 = call i16 @llvm.nvvm.sad.s(i16 %x, i16 %y, i16 %z) ret i16 %1 @@ -32,7 +32,7 @@ define i16 @test_sad_u16(i16 %x, i16 %y, i16 %z) { ; CHECK-NEXT: ld.param.u16 %rs3, [test_sad_u16_param_2]; ; CHECK-NEXT: sad.u16 %rs4, %rs1, %rs2, %rs3; ; CHECK-NEXT: cvt.u32.u16 %r1, %rs4; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; %1 = call i16 @llvm.nvvm.sad.us(i16 %x, i16 %y, i16 %z) ret i16 %1 @@ -48,7 +48,7 @@ define i32 @test_sad_i32(i32 %x, i32 %y, i32 %z) { ; CHECK-NEXT: ld.param.u32 %r2, [test_sad_i32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_sad_i32_param_2]; ; CHECK-NEXT: sad.s32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %1 = call i32 @llvm.nvvm.sad.i(i32 %x, i32 %y, i32 %z) ret i32 %1 @@ -64,7 +64,7 @@ define i32 @test_sad_u32(i32 %x, i32 %y, i32 %z) { ; CHECK-NEXT: ld.param.u32 %r2, [test_sad_u32_param_1]; ; CHECK-NEXT: ld.param.u32 %r3, [test_sad_u32_param_2]; ; CHECK-NEXT: sad.u32 %r4, %r1, %r2, %r3; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r4; +; CHECK-NEXT: st.param.b32 [func_retval0], %r4; ; CHECK-NEXT: ret; %1 = call i32 @llvm.nvvm.sad.ui(i32 %x, i32 %y, i32 %z) ret i32 %1 @@ -80,7 +80,7 @@ define i64 @test_sad_i64(i64 %x, i64 %y, i64 %z) { ; CHECK-NEXT: ld.param.u64 %rd2, [test_sad_i64_param_1]; ; CHECK-NEXT: ld.param.u64 %rd3, [test_sad_i64_param_2]; ; CHECK-NEXT: sad.s64 %rd4, %rd1, %rd2, %rd3; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd4; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd4; ; CHECK-NEXT: ret; %1 = call i64 @llvm.nvvm.sad.ll(i64 %x, i64 %y, i64 %z) ret i64 %1 @@ -96,7 +96,7 @@ define i64 @test_sad_u64(i64 %x, i64 %y, i64 %z) { ; CHECK-NEXT: ld.param.u64 %rd2, [test_sad_u64_param_1]; ; CHECK-NEXT: ld.param.u64 %rd3, [test_sad_u64_param_2]; ; CHECK-NEXT: sad.u64 %rd4, %rd1, %rd2, %rd3; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd4; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd4; ; CHECK-NEXT: ret; %1 = call i64 @llvm.nvvm.sad.ull(i64 %x, i64 %y, i64 %z) ret i64 %1 diff --git 
a/llvm/test/CodeGen/NVPTX/sext-setcc.ll b/llvm/test/CodeGen/NVPTX/sext-setcc.ll index f471d47077cf0d..0cb0c1ba8c6bd0 100644 --- a/llvm/test/CodeGen/NVPTX/sext-setcc.ll +++ b/llvm/test/CodeGen/NVPTX/sext-setcc.ll @@ -19,7 +19,7 @@ define <2 x i16> @sext_setcc_v2i1_to_v2i16(ptr %p) { ; CHECK-NEXT: selp.s16 %rs3, -1, 0, %p2; ; CHECK-NEXT: selp.s16 %rs4, -1, 0, %p1; ; CHECK-NEXT: mov.b32 %r2, {%rs4, %rs3}; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-NEXT: ret; entry: %v = load <2 x i16>, ptr %p, align 4 @@ -62,7 +62,7 @@ define <4 x i8> @sext_setcc_v4i1_to_v4i8(ptr %p) { ; CHECK-NEXT: bfi.b32 %r10, %r9, %r8, 16, 8; ; CHECK-NEXT: selp.s32 %r11, -1, 0, %p1; ; CHECK-NEXT: bfi.b32 %r12, %r11, %r10, 24, 8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r12; +; CHECK-NEXT: st.param.b32 [func_retval0], %r12; ; CHECK-NEXT: ret; entry: %v = load <4 x i8>, ptr %p, align 4 diff --git a/llvm/test/CodeGen/NVPTX/st-param-imm.ll b/llvm/test/CodeGen/NVPTX/st-param-imm.ll index 29f27c1ba6cdcf..b178f5e05296cf 100644 --- a/llvm/test/CodeGen/NVPTX/st-param-imm.ll +++ b/llvm/test/CodeGen/NVPTX/st-param-imm.ll @@ -26,7 +26,7 @@ define void @st_param_i8_i16() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 0, 0 ; CHECK-NEXT: .param .align 2 .b8 param0[4]; -; CHECK-NEXT: st.param.b8 [param0+0], 1; +; CHECK-NEXT: st.param.b8 [param0], 1; ; CHECK-NEXT: st.param.b16 [param0+2], 2; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_i8_i16, @@ -47,7 +47,7 @@ define void @st_param_i32() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 1, 0 ; CHECK-NEXT: .param .b32 param0; -; CHECK-NEXT: st.param.b32 [param0+0], 3; +; CHECK-NEXT: st.param.b32 [param0], 3; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_i32, ; CHECK-NEXT: ( @@ -67,7 +67,7 @@ define void @st_param_i64() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 2, 0 ; CHECK-NEXT: .param .b64 param0; -; CHECK-NEXT: st.param.b64 [param0+0], 4; +; CHECK-NEXT: st.param.b64 [param0], 4; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_i64, ; CHECK-NEXT: ( @@ -87,7 +87,7 @@ define void @st_param_f32() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 3, 0 ; CHECK-NEXT: .param .b32 param0; -; CHECK-NEXT: st.param.f32 [param0+0], 0f40A00000; +; CHECK-NEXT: st.param.f32 [param0], 0f40A00000; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_f32, ; CHECK-NEXT: ( @@ -107,7 +107,7 @@ define void @st_param_f64() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 4, 0 ; CHECK-NEXT: .param .b64 param0; -; CHECK-NEXT: st.param.f64 [param0+0], 0d4018000000000000; +; CHECK-NEXT: st.param.f64 [param0], 0d4018000000000000; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_f64, ; CHECK-NEXT: ( @@ -133,7 +133,7 @@ define void @st_param_v2_i8_ii() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 5, 0 ; CHECK-NEXT: .param .align 2 .b8 param0[2]; -; CHECK-NEXT: st.param.v2.b8 [param0+0], {1, 2}; +; CHECK-NEXT: st.param.v2.b8 [param0], {1, 2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i8, ; CHECK-NEXT: ( @@ -153,7 +153,7 @@ define void @st_param_v2_i8_ir(i8 %val) { ; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v2_i8_ir_param_0]; ; CHECK-NEXT: { // callseq 6, 0 ; CHECK-NEXT: .param .align 2 .b8 param0[2]; -; CHECK-NEXT: st.param.v2.b8 [param0+0], {1, %rs1}; +; CHECK-NEXT: st.param.v2.b8 [param0], {1, %rs1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i8, ; CHECK-NEXT: ( @@ -175,7 +175,7 @@ define void @st_param_v2_i8_ri(i8 %val) { ; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v2_i8_ri_param_0]; ; CHECK-NEXT: { // callseq 7, 0 ; CHECK-NEXT: .param 
.align 2 .b8 param0[2]; -; CHECK-NEXT: st.param.v2.b8 [param0+0], {%rs1, 2}; +; CHECK-NEXT: st.param.v2.b8 [param0], {%rs1, 2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i8, ; CHECK-NEXT: ( @@ -197,7 +197,7 @@ define void @st_param_v2_i16_ii() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 8, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v2.b16 [param0+0], {1, 2}; +; CHECK-NEXT: st.param.v2.b16 [param0], {1, 2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i16, ; CHECK-NEXT: ( @@ -217,7 +217,7 @@ define void @st_param_v2_i16_ir(i16 %val) { ; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v2_i16_ir_param_0]; ; CHECK-NEXT: { // callseq 9, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v2.b16 [param0+0], {1, %rs1}; +; CHECK-NEXT: st.param.v2.b16 [param0], {1, %rs1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i16, ; CHECK-NEXT: ( @@ -239,7 +239,7 @@ define void @st_param_v2_i16_ri(i16 %val) { ; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v2_i16_ri_param_0]; ; CHECK-NEXT: { // callseq 10, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v2.b16 [param0+0], {%rs1, 2}; +; CHECK-NEXT: st.param.v2.b16 [param0], {%rs1, 2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i16, ; CHECK-NEXT: ( @@ -261,7 +261,7 @@ define void @st_param_v2_i32_ii() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 11, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v2.b32 [param0+0], {1, 2}; +; CHECK-NEXT: st.param.v2.b32 [param0], {1, 2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i32, ; CHECK-NEXT: ( @@ -281,7 +281,7 @@ define void @st_param_v2_i32_ir(i32 %val) { ; CHECK-NEXT: ld.param.u32 %r1, [st_param_v2_i32_ir_param_0]; ; CHECK-NEXT: { // callseq 12, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v2.b32 [param0+0], {1, %r1}; +; CHECK-NEXT: st.param.v2.b32 [param0], {1, %r1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i32, ; CHECK-NEXT: ( @@ -303,7 +303,7 @@ define void @st_param_v2_i32_ri(i32 %val) { ; CHECK-NEXT: ld.param.u32 %r1, [st_param_v2_i32_ri_param_0]; ; CHECK-NEXT: { // callseq 13, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v2.b32 [param0+0], {%r1, 2}; +; CHECK-NEXT: st.param.v2.b32 [param0], {%r1, 2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i32, ; CHECK-NEXT: ( @@ -325,7 +325,7 @@ define void @st_param_v2_i64_ii() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 14, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v2.b64 [param0+0], {1, 2}; +; CHECK-NEXT: st.param.v2.b64 [param0], {1, 2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i64, ; CHECK-NEXT: ( @@ -345,7 +345,7 @@ define void @st_param_v2_i64_ir(i64 %val) { ; CHECK-NEXT: ld.param.u64 %rd1, [st_param_v2_i64_ir_param_0]; ; CHECK-NEXT: { // callseq 15, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v2.b64 [param0+0], {1, %rd1}; +; CHECK-NEXT: st.param.v2.b64 [param0], {1, %rd1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i64, ; CHECK-NEXT: ( @@ -367,7 +367,7 @@ define void @st_param_v2_i64_ri(i64 %val) { ; CHECK-NEXT: ld.param.u64 %rd1, [st_param_v2_i64_ri_param_0]; ; CHECK-NEXT: { // callseq 16, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v2.b64 [param0+0], {%rd1, 2}; +; CHECK-NEXT: st.param.v2.b64 [param0], {%rd1, 2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_i64, ; CHECK-NEXT: ( @@ -389,7 +389,7 @@ define void @st_param_v2_f32_ii(float %val) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 17, 0 ; 
CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v2.f32 [param0+0], {0f3F800000, 0f40000000}; +; CHECK-NEXT: st.param.v2.f32 [param0], {0f3F800000, 0f40000000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_f32, ; CHECK-NEXT: ( @@ -409,7 +409,7 @@ define void @st_param_v2_f32_ir(float %val) { ; CHECK-NEXT: ld.param.f32 %f1, [st_param_v2_f32_ir_param_0]; ; CHECK-NEXT: { // callseq 18, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v2.f32 [param0+0], {0f3F800000, %f1}; +; CHECK-NEXT: st.param.v2.f32 [param0], {0f3F800000, %f1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_f32, ; CHECK-NEXT: ( @@ -431,7 +431,7 @@ define void @st_param_v2_f32_ri(float %val) { ; CHECK-NEXT: ld.param.f32 %f1, [st_param_v2_f32_ri_param_0]; ; CHECK-NEXT: { // callseq 19, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v2.f32 [param0+0], {%f1, 0f40000000}; +; CHECK-NEXT: st.param.v2.f32 [param0], {%f1, 0f40000000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_f32, ; CHECK-NEXT: ( @@ -453,7 +453,7 @@ define void @st_param_v2_f64_ii(double %val) { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 20, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v2.f64 [param0+0], {0d3FF0000000000000, 0d4000000000000000}; +; CHECK-NEXT: st.param.v2.f64 [param0], {0d3FF0000000000000, 0d4000000000000000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_f64, ; CHECK-NEXT: ( @@ -473,7 +473,7 @@ define void @st_param_v2_f64_ir(double %val) { ; CHECK-NEXT: ld.param.f64 %fd1, [st_param_v2_f64_ir_param_0]; ; CHECK-NEXT: { // callseq 21, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v2.f64 [param0+0], {0d3FF0000000000000, %fd1}; +; CHECK-NEXT: st.param.v2.f64 [param0], {0d3FF0000000000000, %fd1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_f64, ; CHECK-NEXT: ( @@ -495,7 +495,7 @@ define void @st_param_v2_f64_ri(double %val) { ; CHECK-NEXT: ld.param.f64 %fd1, [st_param_v2_f64_ri_param_0]; ; CHECK-NEXT: { // callseq 22, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v2.f64 [param0+0], {%fd1, 0d4000000000000000}; +; CHECK-NEXT: st.param.v2.f64 [param0], {%fd1, 0d4000000000000000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v2_f64, ; CHECK-NEXT: ( @@ -524,7 +524,7 @@ define void @st_param_v4_i8_iiii() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 23, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, 2, 3, 4}; +; CHECK-NEXT: st.param.v4.b8 [param0], {1, 2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -546,7 +546,7 @@ define void @st_param_v4_i8_irrr(i8 %b, i8 %c, i8 %d) { ; CHECK-NEXT: ld.param.u8 %rs3, [st_param_v4_i8_irrr_param_2]; ; CHECK-NEXT: { // callseq 24, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, %rs1, %rs2, %rs3}; +; CHECK-NEXT: st.param.v4.b8 [param0], {1, %rs1, %rs2, %rs3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -572,7 +572,7 @@ define void @st_param_v4_i8_rirr(i8 %a, i8 %c, i8 %d) { ; CHECK-NEXT: ld.param.u8 %rs3, [st_param_v4_i8_rirr_param_2]; ; CHECK-NEXT: { // callseq 25, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, 2, %rs2, %rs3}; +; CHECK-NEXT: st.param.v4.b8 [param0], {%rs1, 2, %rs2, %rs3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -598,7 +598,7 @@ define void @st_param_v4_i8_rrir(i8 %a, i8 %b, i8 %d) { ; CHECK-NEXT: ld.param.u8 %rs3, 
[st_param_v4_i8_rrir_param_2]; ; CHECK-NEXT: { // callseq 26, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, %rs2, 3, %rs3}; +; CHECK-NEXT: st.param.v4.b8 [param0], {%rs1, %rs2, 3, %rs3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -624,7 +624,7 @@ define void @st_param_v4_i8_rrri(i8 %a, i8 %b, i8 %c) { ; CHECK-NEXT: ld.param.u8 %rs3, [st_param_v4_i8_rrri_param_2]; ; CHECK-NEXT: { // callseq 27, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, %rs2, %rs3, 4}; +; CHECK-NEXT: st.param.v4.b8 [param0], {%rs1, %rs2, %rs3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -649,7 +649,7 @@ define void @st_param_v4_i8_iirr(i8 %c, i8 %d) { ; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_iirr_param_1]; ; CHECK-NEXT: { // callseq 28, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, 2, %rs1, %rs2}; +; CHECK-NEXT: st.param.v4.b8 [param0], {1, 2, %rs1, %rs2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -674,7 +674,7 @@ define void @st_param_v4_i8_irir(i8 %b, i8 %d) { ; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_irir_param_1]; ; CHECK-NEXT: { // callseq 29, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, %rs1, 3, %rs2}; +; CHECK-NEXT: st.param.v4.b8 [param0], {1, %rs1, 3, %rs2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -699,7 +699,7 @@ define void @st_param_v4_i8_irri(i8 %b, i8 %c) { ; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_irri_param_1]; ; CHECK-NEXT: { // callseq 30, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, %rs1, %rs2, 4}; +; CHECK-NEXT: st.param.v4.b8 [param0], {1, %rs1, %rs2, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -724,7 +724,7 @@ define void @st_param_v4_i8_riir(i8 %a, i8 %d) { ; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_riir_param_1]; ; CHECK-NEXT: { // callseq 31, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, 2, 3, %rs2}; +; CHECK-NEXT: st.param.v4.b8 [param0], {%rs1, 2, 3, %rs2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -749,7 +749,7 @@ define void @st_param_v4_i8_riri(i8 %a, i8 %c) { ; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_riri_param_1]; ; CHECK-NEXT: { // callseq 32, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, 2, %rs2, 4}; +; CHECK-NEXT: st.param.v4.b8 [param0], {%rs1, 2, %rs2, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -774,7 +774,7 @@ define void @st_param_v4_i8_rrii(i8 %a, i8 %b) { ; CHECK-NEXT: ld.param.u8 %rs2, [st_param_v4_i8_rrii_param_1]; ; CHECK-NEXT: { // callseq 33, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, %rs2, 3, 4}; +; CHECK-NEXT: st.param.v4.b8 [param0], {%rs1, %rs2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -798,7 +798,7 @@ define void @st_param_v4_i8_iiir(i8 %d) { ; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_iiir_param_0]; ; CHECK-NEXT: { // callseq 34, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, 2, 3, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [param0], {1, 2, 3, %rs1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -822,7 +822,7 @@ define void @st_param_v4_i8_iiri(i8 %c) { ; CHECK-NEXT: 
ld.param.u8 %rs1, [st_param_v4_i8_iiri_param_0]; ; CHECK-NEXT: { // callseq 35, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, 2, %rs1, 4}; +; CHECK-NEXT: st.param.v4.b8 [param0], {1, 2, %rs1, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -846,7 +846,7 @@ define void @st_param_v4_i8_irii(i8 %b) { ; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_irii_param_0]; ; CHECK-NEXT: { // callseq 36, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {1, %rs1, 3, 4}; +; CHECK-NEXT: st.param.v4.b8 [param0], {1, %rs1, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -870,7 +870,7 @@ define void @st_param_v4_i8_riii(i8 %a) { ; CHECK-NEXT: ld.param.u8 %rs1, [st_param_v4_i8_riii_param_0]; ; CHECK-NEXT: { // callseq 37, 0 ; CHECK-NEXT: .param .align 4 .b8 param0[4]; -; CHECK-NEXT: st.param.v4.b8 [param0+0], {%rs1, 2, 3, 4}; +; CHECK-NEXT: st.param.v4.b8 [param0], {%rs1, 2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i8, ; CHECK-NEXT: ( @@ -894,7 +894,7 @@ define void @st_param_v4_i16_iiii() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 38, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, 2, 3, 4}; +; CHECK-NEXT: st.param.v4.b16 [param0], {1, 2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -916,7 +916,7 @@ define void @st_param_v4_i16_irrr(i16 %b, i16 %c, i16 %d) { ; CHECK-NEXT: ld.param.u16 %rs3, [st_param_v4_i16_irrr_param_2]; ; CHECK-NEXT: { // callseq 39, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, %rs1, %rs2, %rs3}; +; CHECK-NEXT: st.param.v4.b16 [param0], {1, %rs1, %rs2, %rs3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -942,7 +942,7 @@ define void @st_param_v4_i16_rirr(i16 %a, i16 %c, i16 %d) { ; CHECK-NEXT: ld.param.u16 %rs3, [st_param_v4_i16_rirr_param_2]; ; CHECK-NEXT: { // callseq 40, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, 2, %rs2, %rs3}; +; CHECK-NEXT: st.param.v4.b16 [param0], {%rs1, 2, %rs2, %rs3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -968,7 +968,7 @@ define void @st_param_v4_i16_rrir(i16 %a, i16 %b, i16 %d) { ; CHECK-NEXT: ld.param.u16 %rs3, [st_param_v4_i16_rrir_param_2]; ; CHECK-NEXT: { // callseq 41, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, %rs2, 3, %rs3}; +; CHECK-NEXT: st.param.v4.b16 [param0], {%rs1, %rs2, 3, %rs3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -994,7 +994,7 @@ define void @st_param_v4_i16_rrri(i16 %a, i16 %b, i16 %c) { ; CHECK-NEXT: ld.param.u16 %rs3, [st_param_v4_i16_rrri_param_2]; ; CHECK-NEXT: { // callseq 42, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, %rs2, %rs3, 4}; +; CHECK-NEXT: st.param.v4.b16 [param0], {%rs1, %rs2, %rs3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1019,7 +1019,7 @@ define void @st_param_v4_i16_iirr(i16 %c, i16 %d) { ; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_iirr_param_1]; ; CHECK-NEXT: { // callseq 43, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, 2, %rs1, %rs2}; +; CHECK-NEXT: st.param.v4.b16 [param0], {1, 2, %rs1, %rs2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1044,7 +1044,7 @@ define void 
@st_param_v4_i16_irir(i16 %b, i16 %d) { ; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_irir_param_1]; ; CHECK-NEXT: { // callseq 44, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, %rs1, 3, %rs2}; +; CHECK-NEXT: st.param.v4.b16 [param0], {1, %rs1, 3, %rs2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1069,7 +1069,7 @@ define void @st_param_v4_i16_irri(i16 %b, i16 %c) { ; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_irri_param_1]; ; CHECK-NEXT: { // callseq 45, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, %rs1, %rs2, 4}; +; CHECK-NEXT: st.param.v4.b16 [param0], {1, %rs1, %rs2, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1094,7 +1094,7 @@ define void @st_param_v4_i16_riir(i16 %a, i16 %d) { ; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_riir_param_1]; ; CHECK-NEXT: { // callseq 46, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, 2, 3, %rs2}; +; CHECK-NEXT: st.param.v4.b16 [param0], {%rs1, 2, 3, %rs2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1119,7 +1119,7 @@ define void @st_param_v4_i16_riri(i16 %a, i16 %c) { ; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_riri_param_1]; ; CHECK-NEXT: { // callseq 47, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, 2, %rs2, 4}; +; CHECK-NEXT: st.param.v4.b16 [param0], {%rs1, 2, %rs2, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1144,7 +1144,7 @@ define void @st_param_v4_i16_rrii(i16 %a, i16 %b) { ; CHECK-NEXT: ld.param.u16 %rs2, [st_param_v4_i16_rrii_param_1]; ; CHECK-NEXT: { // callseq 48, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, %rs2, 3, 4}; +; CHECK-NEXT: st.param.v4.b16 [param0], {%rs1, %rs2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1168,7 +1168,7 @@ define void @st_param_v4_i16_iiir(i16 %d) { ; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_iiir_param_0]; ; CHECK-NEXT: { // callseq 49, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, 2, 3, %rs1}; +; CHECK-NEXT: st.param.v4.b16 [param0], {1, 2, 3, %rs1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1192,7 +1192,7 @@ define void @st_param_v4_i16_iiri(i16 %c) { ; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_iiri_param_0]; ; CHECK-NEXT: { // callseq 50, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, 2, %rs1, 4}; +; CHECK-NEXT: st.param.v4.b16 [param0], {1, 2, %rs1, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1216,7 +1216,7 @@ define void @st_param_v4_i16_irii(i16 %b) { ; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_irii_param_0]; ; CHECK-NEXT: { // callseq 51, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {1, %rs1, 3, 4}; +; CHECK-NEXT: st.param.v4.b16 [param0], {1, %rs1, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i16, ; CHECK-NEXT: ( @@ -1240,7 +1240,7 @@ define void @st_param_v4_i16_riii(i16 %a) { ; CHECK-NEXT: ld.param.u16 %rs1, [st_param_v4_i16_riii_param_0]; ; CHECK-NEXT: { // callseq 52, 0 ; CHECK-NEXT: .param .align 8 .b8 param0[8]; -; CHECK-NEXT: st.param.v4.b16 [param0+0], {%rs1, 2, 3, 4}; +; CHECK-NEXT: st.param.v4.b16 [param0], {%rs1, 2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: 
call_v4_i16, ; CHECK-NEXT: ( @@ -1264,7 +1264,7 @@ define void @st_param_v4_i32_iiii() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 53, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, 2, 3, 4}; +; CHECK-NEXT: st.param.v4.b32 [param0], {1, 2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1286,7 +1286,7 @@ define void @st_param_v4_i32_irrr(i32 %b, i32 %c, i32 %d) { ; CHECK-NEXT: ld.param.u32 %r3, [st_param_v4_i32_irrr_param_2]; ; CHECK-NEXT: { // callseq 54, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, %r1, %r2, %r3}; +; CHECK-NEXT: st.param.v4.b32 [param0], {1, %r1, %r2, %r3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1312,7 +1312,7 @@ define void @st_param_v4_i32_rirr(i32 %a, i32 %c, i32 %d) { ; CHECK-NEXT: ld.param.u32 %r3, [st_param_v4_i32_rirr_param_2]; ; CHECK-NEXT: { // callseq 55, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, 2, %r2, %r3}; +; CHECK-NEXT: st.param.v4.b32 [param0], {%r1, 2, %r2, %r3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1338,7 +1338,7 @@ define void @st_param_v4_i32_rrir(i32 %a, i32 %b, i32 %d) { ; CHECK-NEXT: ld.param.u32 %r3, [st_param_v4_i32_rrir_param_2]; ; CHECK-NEXT: { // callseq 56, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, %r2, 3, %r3}; +; CHECK-NEXT: st.param.v4.b32 [param0], {%r1, %r2, 3, %r3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1364,7 +1364,7 @@ define void @st_param_v4_i32_rrri(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r3, [st_param_v4_i32_rrri_param_2]; ; CHECK-NEXT: { // callseq 57, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, %r2, %r3, 4}; +; CHECK-NEXT: st.param.v4.b32 [param0], {%r1, %r2, %r3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1389,7 +1389,7 @@ define void @st_param_v4_i32_iirr(i32 %c, i32 %d) { ; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_iirr_param_1]; ; CHECK-NEXT: { // callseq 58, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, 2, %r1, %r2}; +; CHECK-NEXT: st.param.v4.b32 [param0], {1, 2, %r1, %r2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1414,7 +1414,7 @@ define void @st_param_v4_i32_irir(i32 %b, i32 %d) { ; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_irir_param_1]; ; CHECK-NEXT: { // callseq 59, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, %r1, 3, %r2}; +; CHECK-NEXT: st.param.v4.b32 [param0], {1, %r1, 3, %r2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1439,7 +1439,7 @@ define void @st_param_v4_i32_irri(i32 %b, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_irri_param_1]; ; CHECK-NEXT: { // callseq 60, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, %r1, %r2, 4}; +; CHECK-NEXT: st.param.v4.b32 [param0], {1, %r1, %r2, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1464,7 +1464,7 @@ define void @st_param_v4_i32_riir(i32 %a, i32 %d) { ; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_riir_param_1]; ; CHECK-NEXT: { // callseq 61, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, 2, 3, %r2}; +; CHECK-NEXT: 
st.param.v4.b32 [param0], {%r1, 2, 3, %r2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1489,7 +1489,7 @@ define void @st_param_v4_i32_riri(i32 %a, i32 %c) { ; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_riri_param_1]; ; CHECK-NEXT: { // callseq 62, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, 2, %r2, 4}; +; CHECK-NEXT: st.param.v4.b32 [param0], {%r1, 2, %r2, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1514,7 +1514,7 @@ define void @st_param_v4_i32_rrii(i32 %a, i32 %b) { ; CHECK-NEXT: ld.param.u32 %r2, [st_param_v4_i32_rrii_param_1]; ; CHECK-NEXT: { // callseq 63, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, %r2, 3, 4}; +; CHECK-NEXT: st.param.v4.b32 [param0], {%r1, %r2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1538,7 +1538,7 @@ define void @st_param_v4_i32_iiir(i32 %d) { ; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_iiir_param_0]; ; CHECK-NEXT: { // callseq 64, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, 2, 3, %r1}; +; CHECK-NEXT: st.param.v4.b32 [param0], {1, 2, 3, %r1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1562,7 +1562,7 @@ define void @st_param_v4_i32_iiri(i32 %c) { ; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_iiri_param_0]; ; CHECK-NEXT: { // callseq 65, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, 2, %r1, 4}; +; CHECK-NEXT: st.param.v4.b32 [param0], {1, 2, %r1, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1586,7 +1586,7 @@ define void @st_param_v4_i32_irii(i32 %b) { ; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_irii_param_0]; ; CHECK-NEXT: { // callseq 66, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {1, %r1, 3, 4}; +; CHECK-NEXT: st.param.v4.b32 [param0], {1, %r1, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1610,7 +1610,7 @@ define void @st_param_v4_i32_riii(i32 %a) { ; CHECK-NEXT: ld.param.u32 %r1, [st_param_v4_i32_riii_param_0]; ; CHECK-NEXT: { // callseq 67, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.b32 [param0+0], {%r1, 2, 3, 4}; +; CHECK-NEXT: st.param.v4.b32 [param0], {%r1, 2, 3, 4}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_i32, ; CHECK-NEXT: ( @@ -1634,7 +1634,7 @@ define void @st_param_v4_f32_iiii() { ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: { // callseq 68, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, 0f40000000, 0f40400000, 0f40800000}; +; CHECK-NEXT: st.param.v4.f32 [param0], {0f3F800000, 0f40000000, 0f40400000, 0f40800000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1656,7 +1656,7 @@ define void @st_param_v4_f32_irrr(float %b, float %c, float %d) { ; CHECK-NEXT: ld.param.f32 %f3, [st_param_v4_f32_irrr_param_2]; ; CHECK-NEXT: { // callseq 69, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, %f1, %f2, %f3}; +; CHECK-NEXT: st.param.v4.f32 [param0], {0f3F800000, %f1, %f2, %f3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1682,7 +1682,7 @@ define void @st_param_v4_f32_rirr(float %a, float %c, float %d) { ; CHECK-NEXT: ld.param.f32 %f3, [st_param_v4_f32_rirr_param_2]; ; CHECK-NEXT: { // callseq 70, 0 ; CHECK-NEXT: .param .align 
16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, 0f40000000, %f2, %f3}; +; CHECK-NEXT: st.param.v4.f32 [param0], {%f1, 0f40000000, %f2, %f3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1708,7 +1708,7 @@ define void @st_param_v4_f32_rrir(float %a, float %b, float %d) { ; CHECK-NEXT: ld.param.f32 %f3, [st_param_v4_f32_rrir_param_2]; ; CHECK-NEXT: { // callseq 71, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, %f2, 0f40400000, %f3}; +; CHECK-NEXT: st.param.v4.f32 [param0], {%f1, %f2, 0f40400000, %f3}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1734,7 +1734,7 @@ define void @st_param_v4_f32_rrri(float %a, float %b, float %c) { ; CHECK-NEXT: ld.param.f32 %f3, [st_param_v4_f32_rrri_param_2]; ; CHECK-NEXT: { // callseq 72, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, %f2, %f3, 0f40800000}; +; CHECK-NEXT: st.param.v4.f32 [param0], {%f1, %f2, %f3, 0f40800000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1759,7 +1759,7 @@ define void @st_param_v4_f32_iirr(float %c, float %d) { ; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_iirr_param_1]; ; CHECK-NEXT: { // callseq 73, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, 0f40000000, %f1, %f2}; +; CHECK-NEXT: st.param.v4.f32 [param0], {0f3F800000, 0f40000000, %f1, %f2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1784,7 +1784,7 @@ define void @st_param_v4_f32_irir(float %b, float %d) { ; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_irir_param_1]; ; CHECK-NEXT: { // callseq 74, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, %f1, 0f40400000, %f2}; +; CHECK-NEXT: st.param.v4.f32 [param0], {0f3F800000, %f1, 0f40400000, %f2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1809,7 +1809,7 @@ define void @st_param_v4_f32_irri(float %b, float %c) { ; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_irri_param_1]; ; CHECK-NEXT: { // callseq 75, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, %f1, %f2, 0f40800000}; +; CHECK-NEXT: st.param.v4.f32 [param0], {0f3F800000, %f1, %f2, 0f40800000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1834,7 +1834,7 @@ define void @st_param_v4_f32_riir(float %a, float %d) { ; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_riir_param_1]; ; CHECK-NEXT: { // callseq 76, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, 0f40000000, 0f40400000, %f2}; +; CHECK-NEXT: st.param.v4.f32 [param0], {%f1, 0f40000000, 0f40400000, %f2}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1859,7 +1859,7 @@ define void @st_param_v4_f32_riri(float %a, float %c) { ; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_riri_param_1]; ; CHECK-NEXT: { // callseq 77, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, 0f40000000, %f2, 0f40800000}; +; CHECK-NEXT: st.param.v4.f32 [param0], {%f1, 0f40000000, %f2, 0f40800000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1884,7 +1884,7 @@ define void @st_param_v4_f32_rrii(float %a, float %b) { ; CHECK-NEXT: ld.param.f32 %f2, [st_param_v4_f32_rrii_param_1]; ; CHECK-NEXT: { // callseq 78, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; 
CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, %f2, 0f40400000, 0f40800000}; +; CHECK-NEXT: st.param.v4.f32 [param0], {%f1, %f2, 0f40400000, 0f40800000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1908,7 +1908,7 @@ define void @st_param_v4_f32_iiir(float %d) { ; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_iiir_param_0]; ; CHECK-NEXT: { // callseq 79, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, 0f40000000, 0f40400000, %f1}; +; CHECK-NEXT: st.param.v4.f32 [param0], {0f3F800000, 0f40000000, 0f40400000, %f1}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1932,7 +1932,7 @@ define void @st_param_v4_f32_iiri(float %c) { ; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_iiri_param_0]; ; CHECK-NEXT: { // callseq 80, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, 0f40000000, %f1, 0f40800000}; +; CHECK-NEXT: st.param.v4.f32 [param0], {0f3F800000, 0f40000000, %f1, 0f40800000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1956,7 +1956,7 @@ define void @st_param_v4_f32_irii(float %b) { ; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_irii_param_0]; ; CHECK-NEXT: { // callseq 81, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {0f3F800000, %f1, 0f40400000, 0f40800000}; +; CHECK-NEXT: st.param.v4.f32 [param0], {0f3F800000, %f1, 0f40400000, 0f40800000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( @@ -1980,7 +1980,7 @@ define void @st_param_v4_f32_riii(float %a) { ; CHECK-NEXT: ld.param.f32 %f1, [st_param_v4_f32_riii_param_0]; ; CHECK-NEXT: { // callseq 82, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[16]; -; CHECK-NEXT: st.param.v4.f32 [param0+0], {%f1, 0f40000000, 0f40400000, 0f40800000}; +; CHECK-NEXT: st.param.v4.f32 [param0], {%f1, 0f40000000, 0f40400000, 0f40800000}; ; CHECK-NEXT: call.uni ; CHECK-NEXT: call_v4_f32, ; CHECK-NEXT: ( diff --git a/llvm/test/CodeGen/NVPTX/store-undef.ll b/llvm/test/CodeGen/NVPTX/store-undef.ll index 109d28a3e3c597..1b991ab82db8f4 100644 --- a/llvm/test/CodeGen/NVPTX/store-undef.ll +++ b/llvm/test/CodeGen/NVPTX/store-undef.ll @@ -38,7 +38,7 @@ define void @test_store_param_def(i64 %param0, i32 %param1) { ; CHECK-NEXT: ld.param.u32 %r1, [test_store_param_def_param_1]; ; CHECK-NEXT: { // callseq 1, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[32]; -; CHECK-NEXT: st.param.b64 [param0+0], %rd1; +; CHECK-NEXT: st.param.b64 [param0], %rd1; ; CHECK-NEXT: st.param.v2.b32 [param0+8], {%r2, %r1}; ; CHECK-NEXT: st.param.v4.b32 [param0+16], {%r3, %r1, %r4, %r5}; ; CHECK-NEXT: call.uni diff --git a/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll b/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll index 107671d1d1f399..473bc28ed4ee7c 100644 --- a/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll +++ b/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll @@ -58,8 +58,8 @@ define void @baz(ptr %red, i32 %idx) { ; SM20: texfunc, ; SM30: texfunc, %texcall = tail call float @texfunc(i64 %texHandle) -; SM20: ld.param.f32 %f[[TEXCALL:[0-9]+]], [[[RETVAL]]+0] -; SM30: ld.param.f32 %f[[TEXCALL:[0-9]+]], [[[RETVAL]]+0] +; SM20: ld.param.f32 %f[[TEXCALL:[0-9]+]], [[[RETVAL]]] +; SM30: ld.param.f32 %f[[TEXCALL:[0-9]+]], [[[RETVAL]]] ; SM20: add.rn.f32 %f[[RET2:[0-9]+]], %f[[RED]], %f[[TEXCALL]] ; SM30: add.rn.f32 %f[[RET2:[0-9]+]], %f[[RED]], %f[[TEXCALL]] %ret2 = fadd float %ret, %texcall diff --git a/llvm/test/CodeGen/NVPTX/tid-range.ll b/llvm/test/CodeGen/NVPTX/tid-range.ll 
index c4dd33960d44ac..4af4cc3845353f 100644 --- a/llvm/test/CodeGen/NVPTX/tid-range.ll +++ b/llvm/test/CodeGen/NVPTX/tid-range.ll @@ -13,7 +13,7 @@ entry: ; CHECK-LABEL: test1( ; CHECK: setp.eq.s32 %p1, %r1, 1; ; CHECK: selp.u32 %[[R:.+]], 1, 0, %p1; -; CHECK: st.param.b32 [func_retval0+0], %[[R]]; +; CHECK: st.param.b32 [func_retval0], %[[R]]; declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() diff --git a/llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll b/llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll index 40a3e9e945a23e..7dd751cab630b0 100644 --- a/llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll +++ b/llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll @@ -29,7 +29,7 @@ ; CHECK-DAG: or.b16 [[P2_1_or:%rs[0-9]+]], [[P2_1_shl]], [[P2_0]]; ; CHECK: { // callseq ; CHECK: .param .align 8 .b8 param0[16]; -; CHECK-DAG: st.param.b16 [param0+0], [[P0]]; +; CHECK-DAG: st.param.b16 [param0], [[P0]]; ; CHECK-DAG: st.param.b8 [param0+3], [[P2_1_or]]; ; CHECK-DAG: st.param.b8 [param0+4], [[P2_1]]; ; CHECK: .param .align 8 .b8 retval0[16]; @@ -38,11 +38,11 @@ ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-DAG: ld.param.b16 [[R0:%rs[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.b16 [[R0:%rs[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+3]; ; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+4]; ; CHECK: } // callseq -; CHECK-DAG: st.param.b16 [func_retval0+0], [[R0]]; +; CHECK-DAG: st.param.b16 [func_retval0], [[R0]]; ; CHECK-DAG: shl.b16 [[R2_1_shl:%rs[0-9]+]], [[R2_1]], 8; ; CHECK-DAG: and.b16 [[R2_0_and:%rs[0-9]+]], [[R2_0]], 255; ; CHECK-DAG: or.b16 [[R2:%rs[0-9]+]], [[R2_0_and]], [[R2_1_shl]]; @@ -74,7 +74,7 @@ define %s_i8i16p @test_s_i8i16p(%s_i8i16p %a) { ; CHECK-DAG: shr.u32 [[P2_2_shr:%r[0-9]+]], [[P2_or_1]], 16; ; CHECK: { // callseq ; CHECK-DAG: .param .align 8 .b8 param0[24]; -; CHECK-DAG: st.param.b32 [param0+0], [[P0]]; +; CHECK-DAG: st.param.b32 [param0], [[P0]]; ; CHECK-DAG: st.param.b8 [param0+5], [[P2]]; ; CHECK-DAG: st.param.b8 [param0+6], [[P2_1_shr]]; ; CHECK-DAG: st.param.b8 [param0+7], [[P2_2_shr]]; @@ -85,13 +85,13 @@ define %s_i8i16p @test_s_i8i16p(%s_i8i16p %a) { ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-DAG: ld.param.b32 [[R0:%r[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.b32 [[R0:%r[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+5]; ; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+6]; ; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+7]; ; CHECK-DAG: ld.param.b8 [[R2_3:%rs[0-9]+]], [retval0+8]; ; CHECK: } // callseq -; CHECK-DAG: st.param.b32 [func_retval0+0], [[R0]]; +; CHECK-DAG: st.param.b32 [func_retval0], [[R0]]; ; CHECK-DAG: st.param.b8 [func_retval0+5], ; CHECK-DAG: st.param.b8 [func_retval0+6], ; CHECK-DAG: st.param.b8 [func_retval0+7], @@ -137,7 +137,7 @@ define %s_i8i32p @test_s_i8i32p(%s_i8i32p %a) { ; CHECK-DAG: bfe.u64 [[P2_bfe_6:%rd[0-9]+]], [[P2_or_5]], 24, 8; ; CHECK: { // callseq ; CHECK: .param .align 8 .b8 param0[32]; -; CHECK-DAG: st.param.b64 [param0+0], [[P0]]; +; CHECK-DAG: st.param.b64 [param0], [[P0]]; ; CHECK-DAG: st.param.b8 [param0+9], [[P2]]; ; CHECK-DAG: st.param.b8 [param0+10], [[P2_shr_1]]; ; CHECK-DAG: st.param.b8 [param0+11], [[P2_shr_2]]; @@ -152,7 +152,7 @@ define %s_i8i32p @test_s_i8i32p(%s_i8i32p %a) { ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-DAG: ld.param.b64 [[R0:%rd[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.b64 [[R0:%rd[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], 
[retval0+9]; ; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+10]; ; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+11]; @@ -162,7 +162,7 @@ define %s_i8i32p @test_s_i8i32p(%s_i8i32p %a) { ; CHECK-DAG: ld.param.b8 [[R2_6:%rs[0-9]+]], [retval0+15]; ; CHECK-DAG: ld.param.b8 [[R2_7:%rs[0-9]+]], [retval0+16]; ; CHECK: } // callseq -; CHECK-DAG: st.param.b64 [func_retval0+0], [[R0]]; +; CHECK-DAG: st.param.b64 [func_retval0], [[R0]]; ; CHECK-DAG: st.param.b8 [func_retval0+9], ; CHECK-DAG: st.param.b8 [func_retval0+10], ; CHECK-DAG: st.param.b8 [func_retval0+11], @@ -188,7 +188,7 @@ define %s_i8i64p @test_s_i8i64p(%s_i8i64p %a) { ; CHECK-DAG: or.b16 [[P2_1_or:%rs[0-9]+]], [[P2_1_shl]], [[P2_0]]; ; CHECK: { // callseq ; CHECK: .param .align 8 .b8 param0[16]; -; CHECK-DAG: st.param.b16 [param0+0], [[P0]]; +; CHECK-DAG: st.param.b16 [param0], [[P0]]; ; CHECK-DAG: st.param.b8 [param0+3], [[P2_1_or]]; ; CHECK-DAG: st.param.b8 [param0+4], [[P2_1]]; ; CHECK: .param .align 8 .b8 retval0[16]; @@ -197,11 +197,11 @@ define %s_i8i64p @test_s_i8i64p(%s_i8i64p %a) { ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-DAG: ld.param.b16 [[R0:%rs[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.b16 [[R0:%rs[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.b8 [[R2I_0:%rs[0-9]+]], [retval0+3]; ; CHECK-DAG: ld.param.b8 [[R2I_1:%rs[0-9]+]], [retval0+4]; ; CHECK: } // callseq -; CHECK-DAG: st.param.b16 [func_retval0+0], [[R0]]; +; CHECK-DAG: st.param.b16 [func_retval0], [[R0]]; ; CHECK-DAG: shl.b16 [[R2I_1_shl:%rs[0-9]+]], [[R2I_1]], 8; ; CHECK-DAG: and.b16 [[R2I_0_and:%rs[0-9]+]], [[R2I_0]], 255; ; CHECK-DAG: or.b16 [[R2I:%rs[0-9]+]], [[R2I_0_and]], [[R2I_1_shl]]; @@ -233,7 +233,7 @@ define %s_i8f16p @test_s_i8f16p(%s_i8f16p %a) { ; CHECK-DAG: shr.u32 [[P2_2_shr:%r[0-9]+]], [[P2_or_1]], 16; ; CHECK: { // callseq ; CHECK-DAG: .param .align 8 .b8 param0[24]; -; CHECK-DAG: st.param.b32 [param0+0], [[P0]]; +; CHECK-DAG: st.param.b32 [param0], [[P0]]; ; CHECK-DAG: st.param.b8 [param0+5], [[P2]]; ; CHECK-DAG: st.param.b8 [param0+6], [[P2_1_shr]]; ; CHECK-DAG: st.param.b8 [param0+7], [[P2_2_shr]]; @@ -244,13 +244,13 @@ define %s_i8f16p @test_s_i8f16p(%s_i8f16p %a) { ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-DAG: ld.param.b32 [[R0:%r[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.b32 [[R0:%r[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+5]; ; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+6]; ; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+7]; ; CHECK-DAG: ld.param.b8 [[R2_3:%rs[0-9]+]], [retval0+8]; ; CHECK: } // callseq -; CHECK-DAG: st.param.b32 [func_retval0+0], [[R0]]; +; CHECK-DAG: st.param.b32 [func_retval0], [[R0]]; ; CHECK-DAG: st.param.b8 [func_retval0+5], ; CHECK-DAG: st.param.b8 [func_retval0+6], ; CHECK-DAG: st.param.b8 [func_retval0+7], @@ -280,7 +280,7 @@ define %s_i8f16x2p @test_s_i8f16x2p(%s_i8f16x2p %a) { ; CHECK-DAG: shr.u32 [[P2_2_shr:%r[0-9]+]], [[P2_or_1]], 16; ; CHECK: { // callseq ; CHECK-DAG: .param .align 8 .b8 param0[24]; -; CHECK-DAG: st.param.f32 [param0+0], [[P0]]; +; CHECK-DAG: st.param.f32 [param0], [[P0]]; ; CHECK-DAG: st.param.b8 [param0+5], [[P2]]; ; CHECK-DAG: st.param.b8 [param0+6], [[P2_1_shr]]; ; CHECK-DAG: st.param.b8 [param0+7], [[P2_2_shr]]; @@ -291,13 +291,13 @@ define %s_i8f16x2p @test_s_i8f16x2p(%s_i8f16x2p %a) { ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-DAG: ld.param.f32 [[R0:%f[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.f32 [[R0:%f[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.b8 
[[R2_0:%rs[0-9]+]], [retval0+5]; ; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+6]; ; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+7]; ; CHECK-DAG: ld.param.b8 [[R2_3:%rs[0-9]+]], [retval0+8]; ; CHECK: } // callseq -; CHECK-DAG: st.param.f32 [func_retval0+0], [[R0]]; +; CHECK-DAG: st.param.f32 [func_retval0], [[R0]]; ; CHECK-DAG: st.param.b8 [func_retval0+5], ; CHECK-DAG: st.param.b8 [func_retval0+6], ; CHECK-DAG: st.param.b8 [func_retval0+7], @@ -343,7 +343,7 @@ define %s_i8f32p @test_s_i8f32p(%s_i8f32p %a) { ; CHECK-DAG: bfe.u64 [[P2_bfe_6:%rd[0-9]+]], [[P2_or_5]], 24, 8; ; CHECK: { // callseq ; CHECK: .param .align 8 .b8 param0[32]; -; CHECK-DAG: st.param.f64 [param0+0], [[P0]]; +; CHECK-DAG: st.param.f64 [param0], [[P0]]; ; CHECK-DAG: st.param.b8 [param0+9], [[P2]]; ; CHECK-DAG: st.param.b8 [param0+10], [[P2_shr_1]]; ; CHECK-DAG: st.param.b8 [param0+11], [[P2_shr_2]]; @@ -358,7 +358,7 @@ define %s_i8f32p @test_s_i8f32p(%s_i8f32p %a) { ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-DAG: ld.param.f64 [[R0:%fd[0-9]+]], [retval0+0]; +; CHECK-DAG: ld.param.f64 [[R0:%fd[0-9]+]], [retval0]; ; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+9]; ; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+10]; ; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+11]; @@ -368,7 +368,7 @@ define %s_i8f32p @test_s_i8f32p(%s_i8f32p %a) { ; CHECK-DAG: ld.param.b8 [[R2_6:%rs[0-9]+]], [retval0+15]; ; CHECK-DAG: ld.param.b8 [[R2_7:%rs[0-9]+]], [retval0+16]; ; CHECK: } // callseq -; CHECK-DAG: st.param.f64 [func_retval0+0], [[R0]]; +; CHECK-DAG: st.param.f64 [func_retval0], [[R0]]; ; CHECK-DAG: st.param.b8 [func_retval0+9], ; CHECK-DAG: st.param.b8 [func_retval0+10], ; CHECK-DAG: st.param.b8 [func_retval0+11], diff --git a/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll b/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll index 8633b09af04873..044d21643ed9d0 100644 --- a/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll +++ b/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll @@ -18,7 +18,7 @@ define <1 x i8> @out_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind { ; CHECK-NEXT: not.b16 %rs5, %rs2; ; CHECK-NEXT: and.b16 %rs6, %rs4, %rs5; ; CHECK-NEXT: or.b16 %rs7, %rs3, %rs6; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs7; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs7; ; CHECK-NEXT: ret; %mx = and <1 x i8> %x, %mask %notmask = xor <1 x i8> %mask, <i8 -1> @@ -44,7 +44,7 @@ define <1 x i16> @out_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwin ; CHECK-NEXT: not.b16 %rs5, %rs2; ; CHECK-NEXT: and.b16 %rs6, %rs4, %rs5; ; CHECK-NEXT: or.b16 %rs7, %rs3, %rs6; -; CHECK-NEXT: st.param.b16 [func_retval0+0], %rs7; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs7; ; CHECK-NEXT: ret; %mx = and <1 x i16> %x, %mask %notmask = xor <1 x i16> %mask, <i16 -1> @@ -70,7 +70,7 @@ define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind { ; CHECK-NEXT: xor.b32 %r7, %r1, -1; ; CHECK-NEXT: and.b32 %r8, %r3, %r7; ; CHECK-NEXT: or.b32 %r9, %r5, %r8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r9; +; CHECK-NEXT: st.param.b32 [func_retval0], %r9; ; CHECK-NEXT: ret; %mx = and <4 x i8> %x, %mask %notmask = xor <4 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1> @@ -92,7 +92,7 @@ define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwi ; CHECK-NEXT: xor.b32 %r7, %r1, -16711681; ; CHECK-NEXT: and.b32 %r8, %r3, %r7; ; CHECK-NEXT: or.b32 %r9, %r5, %r8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r9; +; CHECK-NEXT:
st.param.b32 [func_retval0], %r9; ; CHECK-NEXT: ret; %mx = and <4 x i8> %x, %mask %notmask = xor <4 x i8> %mask, <i8 -1, i8 -1, i8 undef, i8 -1> @@ -114,7 +114,7 @@ define <2 x i16> @out_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwin ; CHECK-NEXT: xor.b32 %r7, %r1, -1; ; CHECK-NEXT: and.b32 %r8, %r3, %r7; ; CHECK-NEXT: or.b32 %r9, %r5, %r8; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r9; +; CHECK-NEXT: st.param.b32 [func_retval0], %r9; ; CHECK-NEXT: ret; %mx = and <2 x i16> %x, %mask %notmask = xor <2 x i16> %mask, <i16 -1, i16 -1> @@ -136,7 +136,7 @@ define <1 x i32> @out_v1i32(<1 x i32> %x, <1 x i32> %y, <1 x i32> %mask) nounwin ; CHECK-NEXT: not.b32 %r5, %r2; ; CHECK-NEXT: and.b32 %r6, %r4, %r5; ; CHECK-NEXT: or.b32 %r7, %r3, %r6; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r7; +; CHECK-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-NEXT: ret; %mx = and <1 x i32> %x, %mask %notmask = xor <1 x i32> %mask, <i32 -1> @@ -166,7 +166,7 @@ define <8 x i8> @out_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind { ; CHECK-NEXT: and.b32 %r18, %r2, %r15; ; CHECK-NEXT: or.b32 %r19, %r13, %r18; ; CHECK-NEXT: or.b32 %r20, %r11, %r17; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r20, %r19}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r20, %r19}; ; CHECK-NEXT: ret; %mx = and <8 x i8> %x, %mask %notmask = xor <8 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> @@ -192,7 +192,7 @@ define <4 x i16> @out_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwin ; CHECK-NEXT: and.b32 %r18, %r2, %r15; ; CHECK-NEXT: or.b32 %r19, %r13, %r18; ; CHECK-NEXT: or.b32 %r20, %r11, %r17; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r20, %r19}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r20, %r19}; ; CHECK-NEXT: ret; %mx = and <4 x i16> %x, %mask %notmask = xor <4 x i16> %mask, <i16 -1, i16 -1, i16 undef, i16 -1> @@ -218,7 +218,7 @@ define <4 x i16> @out_v4i16_undef(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) n ; CHECK-NEXT: and.b32 %r18, %r2, %r15; ; CHECK-NEXT: or.b32 %r19, %r13, %r18; ; CHECK-NEXT: or.b32 %r20, %r11, %r17; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r20, %r19}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r20, %r19}; ; CHECK-NEXT: ret; %mx = and <4 x i16> %x, %mask %notmask = xor <4 x i16> %mask, <i16 -1, i16 -1, i16 undef, i16 -1> @@ -244,7 +244,7 @@ define <2 x i32> @out_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %mask) nounwin ; CHECK-NEXT: and.b32 %r12, %r8, %r9; ; CHECK-NEXT: or.b32 %r13, %r6, %r12; ; CHECK-NEXT: or.b32 %r14, %r5, %r11; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r14, %r13}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r13}; ; CHECK-NEXT: ret; %mx = and <2 x i32> %x, %mask %notmask = xor <2 x i32> %mask, <i32 -1, i32 -1> @@ -266,7 +266,7 @@ define <1 x i64> @out_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %mask) nounwin ; CHECK-NEXT: not.b64 %rd5, %rd2; ; CHECK-NEXT: and.b64 %rd6, %rd4, %rd5; ; CHECK-NEXT: or.b64 %rd7, %rd3, %rd6; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd7; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd7; ; CHECK-NEXT: ret; %mx = and <1 x i64> %x, %mask %notmask = xor <1 x i64> %mask, <i64 -1> @@ -304,7 +304,7 @@ define <16 x i8> @out_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) nounwin ; CHECK-NEXT: or.b32 %r38, %r25, %r35; ; CHECK-NEXT: or.b32 %r39, %r23, %r34; ; CHECK-NEXT: or.b32 %r40, %r21, %r33; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r40, %r39, %r38, %r37}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r40, %r39, %r38, %r37}; ; CHECK-NEXT: ret; %mx = and <16 x i8> %x, %mask %notmask = xor <16 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> @@ -338,7 +338,7 @@ define <8 x i16> @out_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) nounwin ; CHECK-NEXT: or.b32 %r38, %r25,
%r35; ; CHECK-NEXT: or.b32 %r39, %r23, %r34; ; CHECK-NEXT: or.b32 %r40, %r21, %r33; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r40, %r39, %r38, %r37}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r40, %r39, %r38, %r37}; ; CHECK-NEXT: ret; %mx = and <8 x i16> %x, %mask %notmask = xor <8 x i16> %mask, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1> @@ -372,7 +372,7 @@ define <4 x i32> @out_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwin ; CHECK-NEXT: or.b32 %r26, %r11, %r23; ; CHECK-NEXT: or.b32 %r27, %r10, %r22; ; CHECK-NEXT: or.b32 %r28, %r9, %r21; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r28, %r27, %r26, %r25}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r28, %r27, %r26, %r25}; ; CHECK-NEXT: ret; %mx = and <4 x i32> %x, %mask %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 -1, i32 -1> @@ -403,7 +403,7 @@ define <4 x i32> @out_v4i32_undef(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) n ; CHECK-NEXT: or.b32 %r23, %r12, %r22; ; CHECK-NEXT: or.b32 %r24, %r11, %r21; ; CHECK-NEXT: or.b32 %r25, %r10, %r20; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r25, %r24, %r9, %r23}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r25, %r24, %r9, %r23}; ; CHECK-NEXT: ret; %mx = and <4 x i32> %x, %mask %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 undef, i32 -1> @@ -429,7 +429,7 @@ define <2 x i64> @out_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) nounwin ; CHECK-NEXT: and.b64 %rd12, %rd8, %rd9; ; CHECK-NEXT: or.b64 %rd13, %rd6, %rd12; ; CHECK-NEXT: or.b64 %rd14, %rd5, %rd11; -; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd14, %rd13}; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd14, %rd13}; ; CHECK-NEXT: ret; %mx = and <2 x i64> %x, %mask %notmask = xor <2 x i64> %mask, <i64 -1, i64 -1> @@ -458,7 +458,7 @@ define <1 x i8> @in_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind { ; CHECK-NEXT: ld.param.u8 %rs4, [in_v1i8_param_2]; ; CHECK-NEXT: and.b16 %rs5, %rs3, %rs4; ; CHECK-NEXT: xor.b16 %rs6, %rs5, %rs2; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs6; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs6; ; CHECK-NEXT: ret; %n0 = xor <1 x i8> %x, %y %n1 = and <1 x i8> %n0, %mask @@ -482,7 +482,7 @@ define <1 x i16> @in_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwind ; CHECK-NEXT: ld.param.u16 %rs4, [in_v1i16_param_2]; ; CHECK-NEXT: and.b16 %rs5, %rs3, %rs4; ; CHECK-NEXT: xor.b16 %rs6, %rs5, %rs2; -; CHECK-NEXT: st.param.b16 [func_retval0+0], %rs6; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs6; ; CHECK-NEXT: ret; %n0 = xor <1 x i16> %x, %y %n1 = and <1 x i16> %n0, %mask @@ -506,7 +506,7 @@ define <4 x i8> @in_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind { ; CHECK-NEXT: ld.param.u32 %r4, [in_v4i8_param_2]; ; CHECK-NEXT: and.b32 %r5, %r3, %r4; ; CHECK-NEXT: xor.b32 %r6, %r5, %r2; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r6; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; ; CHECK-NEXT: ret; %n0 = xor <4 x i8> %x, %y %n1 = and <4 x i8> %n0, %mask @@ -526,7 +526,7 @@ define <2 x i16> @in_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind ; CHECK-NEXT: ld.param.u32 %r4, [in_v2i16_param_2]; ; CHECK-NEXT: and.b32 %r5, %r3, %r4; ; CHECK-NEXT: xor.b32 %r6, %r5, %r2; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r6; +; CHECK-NEXT: st.param.b32 [func_retval0], %r6; ; CHECK-NEXT: ret; %n0 = xor <2 x i16> %x, %y %n1 = and <2 x i16> %n0, %mask @@ -546,7 +546,7 @@ define <1 x i32> @in_v1i32(<1 x i32> %x, <1 x i32> %y, <1 x i32> %mask) nounwind ; CHECK-NEXT: ld.param.u32 %r4, [in_v1i32_param_2]; ; CHECK-NEXT: and.b32 %r5, %r3, %r4; ; CHECK-NEXT: xor.b32 %r6, %r5, %r2; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r6; +; CHECK-NEXT:
st.param.b32 [func_retval0], %r6; ; CHECK-NEXT: ret; %n0 = xor <1 x i32> %x, %y %n1 = and <1 x i32> %n0, %mask @@ -573,7 +573,7 @@ define <8 x i8> @in_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind { ; CHECK-NEXT: xor.b32 %r11, %r1, %r3; ; CHECK-NEXT: and.b32 %r12, %r11, %r5; ; CHECK-NEXT: xor.b32 %r13, %r12, %r3; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r13, %r9}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r13, %r9}; ; CHECK-NEXT: ret; %n0 = xor <8 x i8> %x, %y %n1 = and <8 x i8> %n0, %mask @@ -596,7 +596,7 @@ define <4 x i16> @in_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind ; CHECK-NEXT: xor.b32 %r11, %r1, %r3; ; CHECK-NEXT: and.b32 %r12, %r11, %r5; ; CHECK-NEXT: xor.b32 %r13, %r12, %r3; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r13, %r9}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r13, %r9}; ; CHECK-NEXT: ret; %n0 = xor <4 x i16> %x, %y %n1 = and <4 x i16> %n0, %mask @@ -619,7 +619,7 @@ define <2 x i32> @in_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %mask) nounwind ; CHECK-NEXT: and.b32 %r10, %r5, %r8; ; CHECK-NEXT: xor.b32 %r11, %r10, %r4; ; CHECK-NEXT: xor.b32 %r12, %r9, %r3; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r12, %r11}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r12, %r11}; ; CHECK-NEXT: ret; %n0 = xor <2 x i32> %x, %y %n1 = and <2 x i32> %n0, %mask @@ -639,7 +639,7 @@ define <1 x i64> @in_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %mask) nounwind ; CHECK-NEXT: ld.param.u64 %rd4, [in_v1i64_param_2]; ; CHECK-NEXT: and.b64 %rd5, %rd3, %rd4; ; CHECK-NEXT: xor.b64 %rd6, %rd5, %rd2; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd6; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd6; ; CHECK-NEXT: ret; %n0 = xor <1 x i64> %x, %y %n1 = and <1 x i64> %n0, %mask @@ -672,7 +672,7 @@ define <16 x i8> @in_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) nounwind ; CHECK-NEXT: xor.b32 %r23, %r19, %r7; ; CHECK-NEXT: xor.b32 %r25, %r18, %r6; ; CHECK-NEXT: xor.b32 %r27, %r17, %r5; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r27, %r25, %r23, %r21}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r27, %r25, %r23, %r21}; ; CHECK-NEXT: ret; %n0 = xor <16 x i8> %x, %y %n1 = and <16 x i8> %n0, %mask @@ -701,7 +701,7 @@ define <8 x i16> @in_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) nounwind ; CHECK-NEXT: xor.b32 %r23, %r19, %r7; ; CHECK-NEXT: xor.b32 %r25, %r18, %r6; ; CHECK-NEXT: xor.b32 %r27, %r17, %r5; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r27, %r25, %r23, %r21}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r27, %r25, %r23, %r21}; ; CHECK-NEXT: ret; %n0 = xor <8 x i16> %x, %y %n1 = and <8 x i16> %n0, %mask @@ -730,7 +730,7 @@ define <4 x i32> @in_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind ; CHECK-NEXT: xor.b32 %r22, %r19, %r7; ; CHECK-NEXT: xor.b32 %r23, %r18, %r6; ; CHECK-NEXT: xor.b32 %r24, %r17, %r5; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r24, %r23, %r22, %r21}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r24, %r23, %r22, %r21}; ; CHECK-NEXT: ret; %n0 = xor <4 x i32> %x, %y %n1 = and <4 x i32> %n0, %mask @@ -753,7 +753,7 @@ define <2 x i64> @in_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) nounwind ; CHECK-NEXT: and.b64 %rd10, %rd5, %rd8; ; CHECK-NEXT: xor.b64 %rd11, %rd10, %rd4; ; CHECK-NEXT: xor.b64 %rd12, %rd9, %rd3; -; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd12, %rd11}; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd12, %rd11}; ; CHECK-NEXT: ret; %n0 = xor <2 x i64> %x, %y %n1 = and <2 x i64> %n0, %mask diff --git 
a/llvm/test/CodeGen/NVPTX/vaargs.ll b/llvm/test/CodeGen/NVPTX/vaargs.ll index b8c213de04f8db..8ecdff9d65ac17 100644 --- a/llvm/test/CodeGen/NVPTX/vaargs.ll +++ b/llvm/test/CodeGen/NVPTX/vaargs.ll @@ -17,55 +17,55 @@ entry: ; Test va_start ; CHECK: .param .align 8 .b8 foo_vararg[] ; CHECK: mov.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], foo_vararg; -; CHECK-NEXT: st.u[[BITS]] [%SP+0], [[VA_PTR]]; +; CHECK-NEXT: st.u[[BITS]] [%SP], [[VA_PTR]]; call void @llvm.va_start(ptr %al) ; Test va_copy() -; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP+0]; +; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP]; ; CHECK-NEXT: st.u[[BITS]] [%SP+{{[0-9]+}}], [[VA_PTR]]; call void @llvm.va_copy(ptr %al2, ptr %al) ; Test va_arg(ap, int32_t) -; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP+0]; +; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP]; ; CHECK-NEXT: add.s[[BITS]] [[VA_PTR_TMP:%(r|rd)[0-9]+]], [[VA_PTR]], 3; ; CHECK-NEXT: and.b[[BITS]] [[VA_PTR_ALIGN:%(r|rd)[0-9]+]], [[VA_PTR_TMP]], -4; ; CHECK-NEXT: add.s[[BITS]] [[VA_PTR_NEXT:%(r|rd)[0-9]+]], [[VA_PTR_ALIGN]], 4; -; CHECK-NEXT: st.u[[BITS]] [%SP+0], [[VA_PTR_NEXT]]; +; CHECK-NEXT: st.u[[BITS]] [%SP], [[VA_PTR_NEXT]]; ; CHECK-NEXT: ld.local.u32 %r{{[0-9]+}}, [[[VA_PTR_ALIGN]]]; %0 = va_arg ptr %al, i32 ; Test va_arg(ap, int64_t) -; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP+0]; +; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP]; ; CHECK-NEXT: add.s[[BITS]] [[VA_PTR_TMP:%(r|rd)[0-9]+]], [[VA_PTR]], 7; ; CHECK-NEXT: and.b[[BITS]] [[VA_PTR_ALIGN:%(r|rd)[0-9]+]], [[VA_PTR_TMP]], -8; ; CHECK-NEXT: add.s[[BITS]] [[VA_PTR_NEXT:%(r|rd)[0-9]+]], [[VA_PTR_ALIGN]], 8; -; CHECK-NEXT: st.u[[BITS]] [%SP+0], [[VA_PTR_NEXT]]; +; CHECK-NEXT: st.u[[BITS]] [%SP], [[VA_PTR_NEXT]]; ; CHECK-NEXT: ld.local.u64 %rd{{[0-9]+}}, [[[VA_PTR_ALIGN]]]; %1 = va_arg ptr %al, i64 ; Test va_arg(ap, double) -; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP+0]; +; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP]; ; CHECK-NEXT: add.s[[BITS]] [[VA_PTR_TMP:%(r|rd)[0-9]+]], [[VA_PTR]], 7; ; CHECK-NEXT: and.b[[BITS]] [[VA_PTR_ALIGN:%(r|rd)[0-9]+]], [[VA_PTR_TMP]], -8; ; CHECK-NEXT: add.s[[BITS]] [[VA_PTR_NEXT:%(r|rd)[0-9]+]], [[VA_PTR_ALIGN]], 8; -; CHECK-NEXT: st.u[[BITS]] [%SP+0], [[VA_PTR_NEXT]]; +; CHECK-NEXT: st.u[[BITS]] [%SP], [[VA_PTR_NEXT]]; ; CHECK-NEXT: ld.local.f64 %fd{{[0-9]+}}, [[[VA_PTR_ALIGN]]]; %2 = va_arg ptr %al, double ; Test va_arg(ap, ptr) -; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP+0]; +; CHECK-NEXT: ld.u[[BITS]] [[VA_PTR:%(r|rd)[0-9]+]], [%SP]; ; CHECK32-NEXT: add.s32 [[VA_PTR_TMP:%r[0-9]+]], [[VA_PTR]], 3; ; CHECK64-NEXT: add.s64 [[VA_PTR_TMP:%rd[0-9]+]], [[VA_PTR]], 7; ; CHECK32-NEXT: and.b32 [[VA_PTR_ALIGN:%r[0-9]+]], [[VA_PTR_TMP]], -4; ; CHECK64-NEXT: and.b64 [[VA_PTR_ALIGN:%rd[0-9]+]], [[VA_PTR_TMP]], -8; ; CHECK32-NEXT: add.s32 [[VA_PTR_NEXT:%r[0-9]+]], [[VA_PTR_ALIGN]], 4; ; CHECK64-NEXT: add.s64 [[VA_PTR_NEXT:%rd[0-9]+]], [[VA_PTR_ALIGN]], 8; -; CHECK-NEXT: st.u[[BITS]] [%SP+0], [[VA_PTR_NEXT]]; +; CHECK-NEXT: st.u[[BITS]] [%SP], [[VA_PTR_NEXT]]; ; CHECK-NEXT: ld.local.u[[BITS]] %{{(r|rd)[0-9]+}}, [[[VA_PTR_ALIGN]]]; %3 = va_arg ptr %al, ptr @@ -91,7 +91,7 @@ define i32 @test_foo(i32 %i, i64 %l, double %d, ptr %p) { ; Store arguments to an array ; CHECK32: .param .align 8 .b8 param1[24]; ; CHECK64: .param .align 8 .b8 param1[28]; -; CHECK-NEXT: st.param.b32 [param1+0], [[ARG_I32]]; +; CHECK-NEXT: st.param.b32 [param1], [[ARG_I32]]; ; CHECK-NEXT: st.param.b64 [param1+4], 
[[ARG_I64]]; ; CHECK-NEXT: st.param.f64 [param1+12], [[ARG_DOUBLE]]; ; CHECK-NEXT: st.param.b[[BITS]] [param1+20], [[ARG_VOID_PTR]]; diff --git a/llvm/test/CodeGen/NVPTX/variadics-backend.ll b/llvm/test/CodeGen/NVPTX/variadics-backend.ll index 0e0c89d3e0214f..6d14986b7ff319 100644 --- a/llvm/test/CodeGen/NVPTX/variadics-backend.ll +++ b/llvm/test/CodeGen/NVPTX/variadics-backend.ll @@ -42,7 +42,7 @@ define dso_local i32 @variadics1(i32 noundef %first, ...) { ; CHECK-PTX-NEXT: cvt.rn.f64.s32 %fd5, %r9; ; CHECK-PTX-NEXT: add.rn.f64 %fd6, %fd5, %fd4; ; CHECK-PTX-NEXT: cvt.rzi.s32.f64 %r10, %fd6; -; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r10; +; CHECK-PTX-NEXT: st.param.b32 [func_retval0], %r10; ; CHECK-PTX-NEXT: ret; entry: %vlist = alloca ptr, align 8 @@ -112,7 +112,7 @@ define dso_local i32 @foo() { ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot1; ; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL; ; CHECK-PTX-NEXT: mov.u64 %rd1, 4294967297; -; CHECK-PTX-NEXT: st.u64 [%SP+0], %rd1; +; CHECK-PTX-NEXT: st.u64 [%SP], %rd1; ; CHECK-PTX-NEXT: mov.b32 %r1, 1; ; CHECK-PTX-NEXT: st.u32 [%SP+8], %r1; ; CHECK-PTX-NEXT: mov.u64 %rd2, 1; @@ -123,9 +123,9 @@ define dso_local i32 @foo() { ; CHECK-PTX-NEXT: add.u64 %rd4, %SP, 0; ; CHECK-PTX-NEXT: { // callseq 0, 0 ; CHECK-PTX-NEXT: .param .b32 param0; -; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1; +; CHECK-PTX-NEXT: st.param.b32 [param0], 1; ; CHECK-PTX-NEXT: .param .b64 param1; -; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd4; +; CHECK-PTX-NEXT: st.param.b64 [param1], %rd4; ; CHECK-PTX-NEXT: .param .b32 retval0; ; CHECK-PTX-NEXT: call.uni (retval0), ; CHECK-PTX-NEXT: variadics1, @@ -133,9 +133,9 @@ define dso_local i32 @foo() { ; CHECK-PTX-NEXT: param0, ; CHECK-PTX-NEXT: param1 ; CHECK-PTX-NEXT: ); -; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0]; +; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0]; ; CHECK-PTX-NEXT: } // callseq 0 -; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-PTX-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-PTX-NEXT: ret; entry: %conv = sext i8 1 to i32 @@ -174,14 +174,14 @@ define dso_local i32 @variadics2(i32 noundef %first, ...) 
{ ; CHECK-PTX-NEXT: ld.u8 %rs3, [%rd7]; ; CHECK-PTX-NEXT: shl.b16 %rs4, %rs3, 8; ; CHECK-PTX-NEXT: or.b16 %rs5, %rs4, %rs2; -; CHECK-PTX-NEXT: st.u16 [%SP+0], %rs5; +; CHECK-PTX-NEXT: st.u16 [%SP], %rs5; ; CHECK-PTX-NEXT: ld.u64 %rd8, [%rd3+8]; ; CHECK-PTX-NEXT: add.s32 %r4, %r1, %r2; ; CHECK-PTX-NEXT: add.s32 %r5, %r4, %r3; ; CHECK-PTX-NEXT: cvt.u64.u32 %rd9, %r5; ; CHECK-PTX-NEXT: add.s64 %rd10, %rd9, %rd8; ; CHECK-PTX-NEXT: cvt.u32.u64 %r6, %rd10; -; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r6; +; CHECK-PTX-NEXT: st.param.b32 [func_retval0], %r6; ; CHECK-PTX-NEXT: ret; entry: %vlist = alloca ptr, align 8 @@ -237,7 +237,7 @@ define dso_local i32 @bar() { ; CHECK-PTX-NEXT: cvt.u16.u8 %rs6, %rs5; ; CHECK-PTX-NEXT: shl.b16 %rs7, %rs6, 8; ; CHECK-PTX-NEXT: or.b16 %rs8, %rs7, %rs4; -; CHECK-PTX-NEXT: st.u16 [%SP+0], %rs8; +; CHECK-PTX-NEXT: st.u16 [%SP], %rs8; ; CHECK-PTX-NEXT: mov.b32 %r1, 1; ; CHECK-PTX-NEXT: st.u32 [%SP+8], %r1; ; CHECK-PTX-NEXT: add.u64 %rd5, %SP, 8; @@ -248,9 +248,9 @@ define dso_local i32 @bar() { ; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd7; ; CHECK-PTX-NEXT: { // callseq 1, 0 ; CHECK-PTX-NEXT: .param .b32 param0; -; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1; +; CHECK-PTX-NEXT: st.param.b32 [param0], 1; ; CHECK-PTX-NEXT: .param .b64 param1; -; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd5; +; CHECK-PTX-NEXT: st.param.b64 [param1], %rd5; ; CHECK-PTX-NEXT: .param .b32 retval0; ; CHECK-PTX-NEXT: call.uni (retval0), ; CHECK-PTX-NEXT: variadics2, @@ -258,9 +258,9 @@ define dso_local i32 @bar() { ; CHECK-PTX-NEXT: param0, ; CHECK-PTX-NEXT: param1 ; CHECK-PTX-NEXT: ); -; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0]; +; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0]; ; CHECK-PTX-NEXT: } // callseq 1 -; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-PTX-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-PTX-NEXT: ret; entry: %s1.sroa.3 = alloca [3 x i8], align 1 @@ -286,7 +286,7 @@ define dso_local i32 @variadics3(i32 noundef %first, ...) { ; CHECK-PTX-NEXT: add.s32 %r5, %r1, %r2; ; CHECK-PTX-NEXT: add.s32 %r6, %r5, %r3; ; CHECK-PTX-NEXT: add.s32 %r7, %r6, %r4; -; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r7; +; CHECK-PTX-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-PTX-NEXT: ret; entry: %vlist = alloca ptr, align 8 @@ -321,13 +321,13 @@ define dso_local i32 @baz() { ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot5; ; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL; ; CHECK-PTX-NEXT: mov.b32 %r1, 1; -; CHECK-PTX-NEXT: st.v4.u32 [%SP+0], {%r1, %r1, %r1, %r1}; +; CHECK-PTX-NEXT: st.v4.u32 [%SP], {%r1, %r1, %r1, %r1}; ; CHECK-PTX-NEXT: add.u64 %rd1, %SP, 0; ; CHECK-PTX-NEXT: { // callseq 2, 0 ; CHECK-PTX-NEXT: .param .b32 param0; -; CHECK-PTX-NEXT: st.param.b32 [param0+0], 1; +; CHECK-PTX-NEXT: st.param.b32 [param0], 1; ; CHECK-PTX-NEXT: .param .b64 param1; -; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd1; +; CHECK-PTX-NEXT: st.param.b64 [param1], %rd1; ; CHECK-PTX-NEXT: .param .b32 retval0; ; CHECK-PTX-NEXT: call.uni (retval0), ; CHECK-PTX-NEXT: variadics3, @@ -335,9 +335,9 @@ define dso_local i32 @baz() { ; CHECK-PTX-NEXT: param0, ; CHECK-PTX-NEXT: param1 ; CHECK-PTX-NEXT: ); -; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0+0]; +; CHECK-PTX-NEXT: ld.param.b32 %r2, [retval0]; ; CHECK-PTX-NEXT: } // callseq 2 -; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r2; +; CHECK-PTX-NEXT: st.param.b32 [func_retval0], %r2; ; CHECK-PTX-NEXT: ret; entry: %call = call i32 (i32, ...) 
@variadics3(i32 noundef 1, <4 x i32> noundef <i32 1, i32 1, i32 1, i32 1>) @@ -360,7 +360,7 @@ define dso_local i32 @variadics4(ptr noundef byval(%struct.S2) align 8 %first, . ; CHECK-PTX-NEXT: add.s64 %rd7, %rd5, %rd6; ; CHECK-PTX-NEXT: add.s64 %rd8, %rd7, %rd4; ; CHECK-PTX-NEXT: cvt.u32.u64 %r1, %rd8; -; CHECK-PTX-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-PTX-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-PTX-NEXT: ret; entry: %vlist = alloca ptr, align 8 @@ -395,7 +395,7 @@ define dso_local void @qux() { ; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot7; ; CHECK-PTX-NEXT: cvta.local.u64 %SP, %SPL; ; CHECK-PTX-NEXT: ld.global.nc.u64 %rd1, [__const_$_qux_$_s]; -; CHECK-PTX-NEXT: st.u64 [%SP+0], %rd1; +; CHECK-PTX-NEXT: st.u64 [%SP], %rd1; ; CHECK-PTX-NEXT: mov.u64 %rd2, __const_$_qux_$_s; ; CHECK-PTX-NEXT: add.s64 %rd3, %rd2, 8; ; CHECK-PTX-NEXT: ld.global.nc.u64 %rd4, [%rd3]; @@ -405,10 +405,10 @@ define dso_local void @qux() { ; CHECK-PTX-NEXT: add.u64 %rd6, %SP, 16; ; CHECK-PTX-NEXT: { // callseq 3, 0 ; CHECK-PTX-NEXT: .param .align 8 .b8 param0[16]; -; CHECK-PTX-NEXT: st.param.b64 [param0+0], %rd1; +; CHECK-PTX-NEXT: st.param.b64 [param0], %rd1; ; CHECK-PTX-NEXT: st.param.b64 [param0+8], %rd4; ; CHECK-PTX-NEXT: .param .b64 param1; -; CHECK-PTX-NEXT: st.param.b64 [param1+0], %rd6; +; CHECK-PTX-NEXT: st.param.b64 [param1], %rd6; ; CHECK-PTX-NEXT: .param .b32 retval0; ; CHECK-PTX-NEXT: call.uni (retval0), ; CHECK-PTX-NEXT: variadics4, @@ -416,7 +416,7 @@ define dso_local void @qux() { ; CHECK-PTX-NEXT: param0, ; CHECK-PTX-NEXT: param1 ; CHECK-PTX-NEXT: ); -; CHECK-PTX-NEXT: ld.param.b32 %r1, [retval0+0]; +; CHECK-PTX-NEXT: ld.param.b32 %r1, [retval0]; ; CHECK-PTX-NEXT: } // callseq 3 ; CHECK-PTX-NEXT: ret; entry: diff --git a/llvm/test/CodeGen/NVPTX/vec-param-load.ll b/llvm/test/CodeGen/NVPTX/vec-param-load.ll index f4f5c26be3474b..9a190a0892e576 100644 --- a/llvm/test/CodeGen/NVPTX/vec-param-load.ll +++ b/llvm/test/CodeGen/NVPTX/vec-param-load.ll @@ -9,7 +9,7 @@ define <16 x float> @test_v16f32(<16 x float> %a) { ; CHECK-DAG: ld.param.v4.f32 {[[V_8_11:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+32]; ; CHECK-DAG: ld.param.v4.f32 {[[V_4_7:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0+16]; ; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v16f32_param_0]; -; CHECK-DAG: st.param.v4.f32 [func_retval0+0], {[[V_0_3]]} +; CHECK-DAG: st.param.v4.f32 [func_retval0], {[[V_0_3]]} ; CHECK-DAG: st.param.v4.f32 [func_retval0+16], {[[V_4_7]]} ; CHECK-DAG: st.param.v4.f32 [func_retval0+32], {[[V_8_11]]} ; CHECK-DAG: st.param.v4.f32 [func_retval0+48], {[[V_12_15]]} @@ -21,7 +21,7 @@ define <8 x float> @test_v8f32(<8 x float> %a) { ; CHECK-LABEL: test_v8f32( ; CHECK-DAG: ld.param.v4.f32 {[[V_4_7:(%f[0-9]+[, ]*){4}]]}, [test_v8f32_param_0+16]; ; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v8f32_param_0]; -; CHECK-DAG: st.param.v4.f32 [func_retval0+0], {[[V_0_3]]} +; CHECK-DAG: st.param.v4.f32 [func_retval0], {[[V_0_3]]} ; CHECK-DAG: st.param.v4.f32 [func_retval0+16], {[[V_4_7]]} ; CHECK: ret; ret <8 x float> %a @@ -30,7 +30,7 @@ define <8 x float> @test_v8f32(<8 x float> %a) { define <4 x float> @test_v4f32(<4 x float> %a) { ; CHECK-LABEL: test_v4f32( ; CHECK-DAG: ld.param.v4.f32 {[[V_0_3:(%f[0-9]+[, ]*){4}]]}, [test_v4f32_param_0]; -; CHECK-DAG: st.param.v4.f32 [func_retval0+0], {[[V_0_3]]} +; CHECK-DAG: st.param.v4.f32 [func_retval0], {[[V_0_3]]} ; CHECK: ret; ret <4 x float> %a } @@ -38,7 +38,7 @@ define <4 x float> @test_v4f32(<4 x float> %a) { define <2 x float> @test_v2f32(<2 x float>
%a) { ; CHECK-LABEL: test_v2f32( ; CHECK-DAG: ld.param.v2.f32 {[[V_0_3:(%f[0-9]+[, ]*){2}]]}, [test_v2f32_param_0]; -; CHECK-DAG: st.param.v2.f32 [func_retval0+0], {[[V_0_3]]} +; CHECK-DAG: st.param.v2.f32 [func_retval0], {[[V_0_3]]} ; CHECK: ret; ret <2 x float> %a } @@ -48,7 +48,7 @@ define <3 x float> @test_v3f32(<3 x float> %a) { ; CHECK-LABEL: test_v3f32( ; CHECK-DAG: ld.param.f32 [[V_2:%f[0-9]+]], [test_v3f32_param_0+8]; ; CHECK-DAG: ld.param.v2.f32 {[[V_0_1:(%f[0-9]+[, ]*){2}]]}, [test_v3f32_param_0]; -; CHECK-DAG: st.param.v2.f32 [func_retval0+0], {[[V_0_1]]} +; CHECK-DAG: st.param.v2.f32 [func_retval0], {[[V_0_1]]} ; CHECK-DAG: st.param.f32 [func_retval0+8], [[V_2]] ; CHECK: ret; ret <3 x float> %a @@ -60,7 +60,7 @@ define <8 x i64> @test_v8i64(<8 x i64> %a) { ; CHECK-DAG: ld.param.v2.u64 {[[V_4_5:(%rd[0-9]+[, ]*){2}]]}, [test_v8i64_param_0+32]; ; CHECK-DAG: ld.param.v2.u64 {[[V_2_3:(%rd[0-9]+[, ]*){2}]]}, [test_v8i64_param_0+16]; ; CHECK-DAG: ld.param.v2.u64 {[[V_0_1:(%rd[0-9]+[, ]*){2}]]}, [test_v8i64_param_0]; -; CHECK-DAG: st.param.v2.b64 [func_retval0+0], {[[V_0_1]]} +; CHECK-DAG: st.param.v2.b64 [func_retval0], {[[V_0_1]]} ; CHECK-DAG: st.param.v2.b64 [func_retval0+16], {[[V_2_3]]} ; CHECK-DAG: st.param.v2.b64 [func_retval0+32], {[[V_4_5]]} ; CHECK-DAG: st.param.v2.b64 [func_retval0+48], {[[V_6_7]]} @@ -72,7 +72,7 @@ define <16 x i16> @test_v16i16(<16 x i16> %a) { ; CHECK-LABEL: test_v16i16( ; CHECK-DAG: ld.param.v4.u32 {[[V_8_15:(%r[0-9]+[, ]*){4}]]}, [test_v16i16_param_0+16]; ; CHECK-DAG: ld.param.v4.u32 {[[V_0_7:(%r[0-9]+[, ]*){4}]]}, [test_v16i16_param_0]; -; CHECK-DAG: st.param.v4.b32 [func_retval0+0], {[[V_0_7]]} +; CHECK-DAG: st.param.v4.b32 [func_retval0], {[[V_0_7]]} ; CHECK-DAG: st.param.v4.b32 [func_retval0+16], {[[V_8_15]]} ; CHECK: ret; ret <16 x i16> %a diff --git a/llvm/test/CodeGen/NVPTX/vector-args.ll b/llvm/test/CodeGen/NVPTX/vector-args.ll index 162061ff34ba1e..2a45c8271e9b8f 100644 --- a/llvm/test/CodeGen/NVPTX/vector-args.ll +++ b/llvm/test/CodeGen/NVPTX/vector-args.ll @@ -29,7 +29,7 @@ define <4 x float> @baz(<4 x float> %a) { ; CHECK: .func (.param .align 16 .b8 func_retval0[16]) baz ; CHECK: .param .align 16 .b8 baz_param_0[16] ; CHECK: ld.param.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}} -; CHECK: st.param.v4.f32 [func_retval0+0], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}} +; CHECK: st.param.v4.f32 [func_retval0], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}} %t1 = fmul <4 x float> %a, %a ret <4 x float> %t1 } diff --git a/llvm/test/CodeGen/NVPTX/vector-call.ll b/llvm/test/CodeGen/NVPTX/vector-call.ll index 15e4697333cb4e..e91d4e20a44ac8 100644 --- a/llvm/test/CodeGen/NVPTX/vector-call.ll +++ b/llvm/test/CodeGen/NVPTX/vector-call.ll @@ -8,7 +8,7 @@ declare void @bar(<4 x i32>) ; CHECK-LABEL: .func foo( ; CHECK-DAG: ld.param.v4.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [foo_param_0]; ; CHECK: .param .align 16 .b8 param0[16]; -; CHECK-DAG: st.param.v4.b32 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]}; +; CHECK-DAG: st.param.v4.b32 [param0], {[[E0]], [[E1]], [[E2]], [[E3]]}; ; CHECK: call.uni ; CHECK: ret; define void @foo(<4 x i32> %a) { @@ -20,7 +20,7 @@ define void @foo(<4 x i32> %a) { ; CHECK-DAG: ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [foo3_param_0]; ; CHECK-DAG: ld.param.u32 [[E2:%r[0-9]+]], [foo3_param_0+8]; ; CHECK: .param .align 16 .b8 param0[16]; -; CHECK-DAG: st.param.v2.b32 [param0+0], {[[E0]], [[E1]]}; +; CHECK-DAG: st.param.v2.b32 [param0], {[[E0]], 
[[E1]]}; ; CHECK-DAG: st.param.b32 [param0+8], [[E2]]; ; CHECK: call.uni ; CHECK: ret; diff --git a/llvm/test/CodeGen/NVPTX/vector-returns.ll b/llvm/test/CodeGen/NVPTX/vector-returns.ll index 956f74392ae130..520736c4cec507 100644 --- a/llvm/test/CodeGen/NVPTX/vector-returns.ll +++ b/llvm/test/CodeGen/NVPTX/vector-returns.ll @@ -10,7 +10,7 @@ define <3 x i64> @long3() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u64 %rd1, 0; -; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd1, %rd1}; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd1}; ; CHECK-NEXT: st.param.b64 [func_retval0+16], %rd1; ; CHECK-NEXT: ret; ret <3 x i64> zeroinitializer @@ -23,7 +23,7 @@ define <2 x i64> @long2() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u64 %rd1, 0; -; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd1, %rd1}; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd1}; ; CHECK-NEXT: ret; ret <2 x i64> zeroinitializer } @@ -35,7 +35,7 @@ define <1 x i64> @long1() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u64 %rd1, 0; -; CHECK-NEXT: st.param.b64 [func_retval0+0], %rd1; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; ; CHECK-NEXT: ret; ret <1 x i64> zeroinitializer } @@ -47,7 +47,7 @@ define <5 x i32> @int5() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r1, %r1, %r1, %r1}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r1, %r1, %r1, %r1}; ; CHECK-NEXT: st.param.b32 [func_retval0+16], %r1; ; CHECK-NEXT: ret; ret <5 x i32> zeroinitializer @@ -60,7 +60,7 @@ define <4 x i32> @int4() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r1, %r1, %r1, %r1}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r1, %r1, %r1, %r1}; ; CHECK-NEXT: ret; ret <4 x i32> zeroinitializer } @@ -72,7 +72,7 @@ define <3 x i32> @int3() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r1, %r1}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r1}; ; CHECK-NEXT: st.param.b32 [func_retval0+8], %r1; ; CHECK-NEXT: ret; ret <3 x i32> zeroinitializer @@ -85,7 +85,7 @@ define <2 x i32> @int2() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r1, %r1}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r1}; ; CHECK-NEXT: ret; ret <2 x i32> zeroinitializer } @@ -97,7 +97,7 @@ define <1 x i32> @int1() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; ret <1 x i32> zeroinitializer } @@ -109,7 +109,7 @@ define <9 x i16> @short9() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b16 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b16 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b16 [func_retval0+8], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.b16 [func_retval0+16], %rs1; ; CHECK-NEXT: ret; @@ -123,7 +123,7 @@ define <8 x i16> @short8() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r1, %r1, %r1, %r1}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r1, %r1, %r1, %r1}; ; CHECK-NEXT: ret; ret <8 x i16> zeroinitializer } @@ -135,7 +135,7 @@ define <7 x i16> @short7() { ; CHECK-EMPTY: 
; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b16 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b16 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b16 [func_retval0+8], {%rs1, %rs1}; ; CHECK-NEXT: st.param.b16 [func_retval0+12], %rs1; ; CHECK-NEXT: ret; @@ -149,7 +149,7 @@ define <5 x i16> @short5() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b16 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b16 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.b16 [func_retval0+8], %rs1; ; CHECK-NEXT: ret; ret <5 x i16> zeroinitializer @@ -162,7 +162,7 @@ define <4 x i16> @short4() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r1, %r1}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r1}; ; CHECK-NEXT: ret; ret <4 x i16> zeroinitializer } @@ -174,7 +174,7 @@ define <3 x i16> @short3() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v2.b16 [func_retval0+0], {%rs1, %rs1}; +; CHECK-NEXT: st.param.v2.b16 [func_retval0], {%rs1, %rs1}; ; CHECK-NEXT: st.param.b16 [func_retval0+4], %rs1; ; CHECK-NEXT: ret; ret <3 x i16> zeroinitializer @@ -187,7 +187,7 @@ define <2 x i16> @short2() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; ret <2 x i16> zeroinitializer } @@ -199,7 +199,7 @@ define <1 x i16> @short1() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b16 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b16 [func_retval0], %rs1; ; CHECK-NEXT: ret; ret <1 x i16> zeroinitializer } @@ -211,7 +211,7 @@ define <17 x i8> @byte17() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b8 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+4], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+8], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+12], {%rs1, %rs1, %rs1, %rs1}; @@ -227,7 +227,7 @@ define <16 x i8> @byte16() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.v4.b32 [func_retval0+0], {%r1, %r1, %r1, %r1}; +; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r1, %r1, %r1, %r1}; ; CHECK-NEXT: ret; ret <16 x i8> zeroinitializer } @@ -239,7 +239,7 @@ define <15 x i8> @byte15() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b8 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+4], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+8], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+12], {%rs1, %rs1}; @@ -255,7 +255,7 @@ define <9 x i8> @byte9() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b8 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+4], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.b8 [func_retval0+8], %rs1; ; CHECK-NEXT: ret; @@ 
-269,7 +269,7 @@ define <8 x i8> @byte8() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {%r1, %r1}; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r1}; ; CHECK-NEXT: ret; ret <8 x i8> zeroinitializer } @@ -281,7 +281,7 @@ define <7 x i8> @byte7() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b8 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+4], {%rs1, %rs1}; ; CHECK-NEXT: st.param.b8 [func_retval0+6], %rs1; ; CHECK-NEXT: ret; @@ -295,7 +295,7 @@ define <5 x i8> @byte5() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b8 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.b8 [func_retval0+4], %rs1; ; CHECK-NEXT: ret; ret <5 x i8> zeroinitializer @@ -308,7 +308,7 @@ define <4 x i8> @byte4() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; ret <4 x i8> zeroinitializer } @@ -320,7 +320,7 @@ define <3 x i8> @byte3() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; ret <3 x i8> zeroinitializer } @@ -332,7 +332,7 @@ define <2 x i8> @byte2() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.b32 %r1, 0; -; CHECK-NEXT: st.param.b32 [func_retval0+0], %r1; +; CHECK-NEXT: st.param.b32 [func_retval0], %r1; ; CHECK-NEXT: ret; ret <2 x i8> zeroinitializer } @@ -344,7 +344,7 @@ define <1 x i8> @byte1() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; ; CHECK-NEXT: ret; ret <1 x i8> zeroinitializer } @@ -356,7 +356,7 @@ define <17 x i1> @bit17() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v4.b8 [func_retval0+0], {%rs1, %rs1, %rs1, %rs1}; +; CHECK-NEXT: st.param.v4.b8 [func_retval0], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+4], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+8], {%rs1, %rs1, %rs1, %rs1}; ; CHECK-NEXT: st.param.v4.b8 [func_retval0+12], {%rs1, %rs1, %rs1, %rs1}; @@ -372,7 +372,7 @@ define <16 x i1> @bit16() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {%rs1, %rs1}; +; CHECK-NEXT: st.param.v2.b8 [func_retval0], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+2], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+4], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+6], {%rs1, %rs1}; @@ -391,7 +391,7 @@ define <15 x i1> @bit15() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {%rs1, %rs1}; +; CHECK-NEXT: st.param.v2.b8 [func_retval0], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+2], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+4], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+6], {%rs1, %rs1}; @@ -410,7 +410,7 @@ define <9 x i1> @bit9() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; 
CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {%rs1, %rs1}; +; CHECK-NEXT: st.param.v2.b8 [func_retval0], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+2], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+4], {%rs1, %rs1}; ; CHECK-NEXT: st.param.v2.b8 [func_retval0+6], {%rs1, %rs1}; @@ -426,7 +426,7 @@ define <8 x i1> @bit8() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+2], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+3], %rs1; @@ -445,7 +445,7 @@ define <7 x i1> @bit7() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+2], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+3], %rs1; @@ -463,7 +463,7 @@ define <5 x i1> @bit5() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+2], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+3], %rs1; @@ -479,7 +479,7 @@ define <4 x i1> @bit4() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+2], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+3], %rs1; @@ -494,7 +494,7 @@ define <3 x i1> @bit3() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+2], %rs1; ; CHECK-NEXT: ret; @@ -508,7 +508,7 @@ define <2 x i1> @bit2() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; ; CHECK-NEXT: st.param.b8 [func_retval0+1], %rs1; ; CHECK-NEXT: ret; ret <2 x i1> zeroinitializer @@ -521,7 +521,7 @@ define <1 x i1> @bit1() { ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov.u16 %rs1, 0; -; CHECK-NEXT: st.param.b8 [func_retval0+0], %rs1; +; CHECK-NEXT: st.param.b8 [func_retval0], %rs1; ; CHECK-NEXT: ret; ret <1 x i1> zeroinitializer } diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll index 15729ee2bc61e9..57be0e5e4199ac 100644 --- a/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll +++ b/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll @@ -90,3 +90,51 @@ define double @constraint_double_abi_name(double %a) nounwind { %2 = tail call double asm "fadd.d $0, $1, $2", "={t1},{a0},{s0}"(double %a, double %1) ret double %2 } + +define double @constraint_f_double(double %a) nounwind { +; RV32FINX-LABEL: constraint_f_double: +; RV32FINX: # %bb.0: +; RV32FINX-NEXT: lui a2, %hi(gd) +; RV32FINX-NEXT: lw a3, %lo(gd+4)(a2) +; RV32FINX-NEXT: lw a2, %lo(gd)(a2) +; RV32FINX-NEXT: #APP +; RV32FINX-NEXT: fadd.d a0, a0, a2 +; RV32FINX-NEXT: #NO_APP +; RV32FINX-NEXT: ret +; +; RV64FINX-LABEL: constraint_f_double: +; 
RV64FINX: # %bb.0: +; RV64FINX-NEXT: lui a1, %hi(gd) +; RV64FINX-NEXT: ld a1, %lo(gd)(a1) +; RV64FINX-NEXT: #APP +; RV64FINX-NEXT: fadd.d a0, a0, a1 +; RV64FINX-NEXT: #NO_APP +; RV64FINX-NEXT: ret + %1 = load double, ptr @gd + %2 = tail call double asm "fadd.d $0, $1, $2", "=f,f,f"(double %a, double %1) + ret double %2 +} + +define double @constraint_cf_double(double %a) nounwind { +; RV32FINX-LABEL: constraint_cf_double: +; RV32FINX: # %bb.0: +; RV32FINX-NEXT: lui a2, %hi(gd) +; RV32FINX-NEXT: lw a3, %lo(gd+4)(a2) +; RV32FINX-NEXT: lw a2, %lo(gd)(a2) +; RV32FINX-NEXT: #APP +; RV32FINX-NEXT: fadd.d a0, a0, a2 +; RV32FINX-NEXT: #NO_APP +; RV32FINX-NEXT: ret +; +; RV64FINX-LABEL: constraint_cf_double: +; RV64FINX: # %bb.0: +; RV64FINX-NEXT: lui a1, %hi(gd) +; RV64FINX-NEXT: ld a1, %lo(gd)(a1) +; RV64FINX-NEXT: #APP +; RV64FINX-NEXT: fadd.d a0, a0, a1 +; RV64FINX-NEXT: #NO_APP +; RV64FINX-NEXT: ret + %1 = load double, ptr @gd + %2 = tail call double asm "fadd.d $0, $1, $2", "=^cf,^cf,^cf"(double %a, double %1) + ret double %2 +} diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll index a8d3515fe1890e..1c0de6c3f16121 100644 --- a/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll +++ b/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll @@ -87,3 +87,48 @@ define float @constraint_float_abi_name(float %a) nounwind { ret float %2 } +define float @constraint_f_float(float %a) nounwind { +; RV32FINX-LABEL: constraint_f_float: +; RV32FINX: # %bb.0: +; RV32FINX-NEXT: lui a1, %hi(gf) +; RV32FINX-NEXT: lw a1, %lo(gf)(a1) +; RV32FINX-NEXT: #APP +; RV32FINX-NEXT: fadd.s a0, a0, a1 +; RV32FINX-NEXT: #NO_APP +; RV32FINX-NEXT: ret +; +; RV64FINX-LABEL: constraint_f_float: +; RV64FINX: # %bb.0: +; RV64FINX-NEXT: lui a1, %hi(gf) +; RV64FINX-NEXT: lw a1, %lo(gf)(a1) +; RV64FINX-NEXT: #APP +; RV64FINX-NEXT: fadd.s a0, a0, a1 +; RV64FINX-NEXT: #NO_APP +; RV64FINX-NEXT: ret + %1 = load float, ptr @gf + %2 = tail call float asm "fadd.s $0, $1, $2", "=f,f,f"(float %a, float %1) + ret float %2 +} + +define float @constraint_cf_float(float %a) nounwind { +; RV32FINX-LABEL: constraint_cf_float: +; RV32FINX: # %bb.0: +; RV32FINX-NEXT: lui a1, %hi(gf) +; RV32FINX-NEXT: lw a1, %lo(gf)(a1) +; RV32FINX-NEXT: #APP +; RV32FINX-NEXT: fadd.s a0, a0, a1 +; RV32FINX-NEXT: #NO_APP +; RV32FINX-NEXT: ret +; +; RV64FINX-LABEL: constraint_cf_float: +; RV64FINX: # %bb.0: +; RV64FINX-NEXT: lui a1, %hi(gf) +; RV64FINX-NEXT: lw a1, %lo(gf)(a1) +; RV64FINX-NEXT: #APP +; RV64FINX-NEXT: fadd.s a0, a0, a1 +; RV64FINX-NEXT: #NO_APP +; RV64FINX-NEXT: ret + %1 = load float, ptr @gf + %2 = tail call float asm "fadd.s $0, $1, $2", "=^cf,cf,cf"(float %a, float %1) + ret float %2 +} diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll index f9707c6c8995dc..086d2a1d6f3b2f 100644 --- a/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll +++ b/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll @@ -156,3 +156,85 @@ define half @constraint_half_abi_name(half %a) nounwind { %2 = tail call half asm "fadd.s $0, $1, $2", "={t0},{a0},{s0}"(half %a, half %1) ret half %2 } + +define half @constraint_f_half(half %a) nounwind { +; RV32ZHINX-LABEL: constraint_f_half: +; RV32ZHINX: # %bb.0: +; RV32ZHINX-NEXT: lui a1, %hi(gh) +; RV32ZHINX-NEXT: lh a1, %lo(gh)(a1) +; RV32ZHINX-NEXT: #APP +; RV32ZHINX-NEXT: fadd.h a0, a0, a1 +; RV32ZHINX-NEXT: #NO_APP +; RV32ZHINX-NEXT: ret +; +; 
RV64ZHINX-LABEL: constraint_f_half: +; RV64ZHINX: # %bb.0: +; RV64ZHINX-NEXT: lui a1, %hi(gh) +; RV64ZHINX-NEXT: lh a1, %lo(gh)(a1) +; RV64ZHINX-NEXT: #APP +; RV64ZHINX-NEXT: fadd.h a0, a0, a1 +; RV64ZHINX-NEXT: #NO_APP +; RV64ZHINX-NEXT: ret +; +; RV32DINXZHINX-LABEL: constraint_f_half: +; RV32DINXZHINX: # %bb.0: +; RV32DINXZHINX-NEXT: lui a1, %hi(gh) +; RV32DINXZHINX-NEXT: lh a1, %lo(gh)(a1) +; RV32DINXZHINX-NEXT: #APP +; RV32DINXZHINX-NEXT: fadd.h a0, a0, a1 +; RV32DINXZHINX-NEXT: #NO_APP +; RV32DINXZHINX-NEXT: ret +; +; RV64DINXZHINX-LABEL: constraint_f_half: +; RV64DINXZHINX: # %bb.0: +; RV64DINXZHINX-NEXT: lui a1, %hi(gh) +; RV64DINXZHINX-NEXT: lh a1, %lo(gh)(a1) +; RV64DINXZHINX-NEXT: #APP +; RV64DINXZHINX-NEXT: fadd.h a0, a0, a1 +; RV64DINXZHINX-NEXT: #NO_APP +; RV64DINXZHINX-NEXT: ret + %1 = load half, ptr @gh + %2 = tail call half asm "fadd.h $0, $1, $2", "=f,f,f"(half %a, half %1) + ret half %2 +} + +define half @constraint_cf_half(half %a) nounwind { +; RV32ZHINX-LABEL: constraint_cf_half: +; RV32ZHINX: # %bb.0: +; RV32ZHINX-NEXT: lui a1, %hi(gh) +; RV32ZHINX-NEXT: lh a1, %lo(gh)(a1) +; RV32ZHINX-NEXT: #APP +; RV32ZHINX-NEXT: fadd.h a0, a0, a1 +; RV32ZHINX-NEXT: #NO_APP +; RV32ZHINX-NEXT: ret +; +; RV64ZHINX-LABEL: constraint_cf_half: +; RV64ZHINX: # %bb.0: +; RV64ZHINX-NEXT: lui a1, %hi(gh) +; RV64ZHINX-NEXT: lh a1, %lo(gh)(a1) +; RV64ZHINX-NEXT: #APP +; RV64ZHINX-NEXT: fadd.h a0, a0, a1 +; RV64ZHINX-NEXT: #NO_APP +; RV64ZHINX-NEXT: ret +; +; RV32DINXZHINX-LABEL: constraint_cf_half: +; RV32DINXZHINX: # %bb.0: +; RV32DINXZHINX-NEXT: lui a1, %hi(gh) +; RV32DINXZHINX-NEXT: lh a1, %lo(gh)(a1) +; RV32DINXZHINX-NEXT: #APP +; RV32DINXZHINX-NEXT: fadd.h a0, a0, a1 +; RV32DINXZHINX-NEXT: #NO_APP +; RV32DINXZHINX-NEXT: ret +; +; RV64DINXZHINX-LABEL: constraint_cf_half: +; RV64DINXZHINX: # %bb.0: +; RV64DINXZHINX-NEXT: lui a1, %hi(gh) +; RV64DINXZHINX-NEXT: lh a1, %lo(gh)(a1) +; RV64DINXZHINX-NEXT: #APP +; RV64DINXZHINX-NEXT: fadd.h a0, a0, a1 +; RV64DINXZHINX-NEXT: #NO_APP +; RV64DINXZHINX-NEXT: ret + %1 = load half, ptr @gh + %2 = tail call half asm "fadd.h $0, $1, $2", "=^cf,^cf,^cf"(half %a, half %1) + ret half %2 +} diff --git a/llvm/test/CodeGen/SPARC/fmuladd-soft-float.ll b/llvm/test/CodeGen/SPARC/fmuladd-soft-float.ll new file mode 100644 index 00000000000000..a9e666e3c9b4db --- /dev/null +++ b/llvm/test/CodeGen/SPARC/fmuladd-soft-float.ll @@ -0,0 +1,385 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=sparc < %s | FileCheck %s -check-prefix=SOFT-FLOAT-32 +; RUN: llc -mtriple=sparc64 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-64 + +define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-32: .cfi_startproc +; SOFT-FLOAT-32-NEXT: ! %bb.0: +; SOFT-FLOAT-32-NEXT: save %sp, -96, %sp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-32-NEXT: .cfi_window_save +; SOFT-FLOAT-32-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-32-NEXT: mov %i0, %o0 +; SOFT-FLOAT-32-NEXT: call __mulsf3 +; SOFT-FLOAT-32-NEXT: mov %i1, %o1 +; SOFT-FLOAT-32-NEXT: call __addsf3 +; SOFT-FLOAT-32-NEXT: mov %i2, %o1 +; SOFT-FLOAT-32-NEXT: ret +; SOFT-FLOAT-32-NEXT: restore %g0, %o0, %o0 +; +; SOFT-FLOAT-64-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-64: .cfi_startproc +; SOFT-FLOAT-64-NEXT: ! 
%bb.0: +; SOFT-FLOAT-64-NEXT: save %sp, -176, %sp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-64-NEXT: .cfi_window_save +; SOFT-FLOAT-64-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-64-NEXT: srl %i0, 0, %o0 +; SOFT-FLOAT-64-NEXT: call __mulsf3 +; SOFT-FLOAT-64-NEXT: srl %i1, 0, %o1 +; SOFT-FLOAT-64-NEXT: call __addsf3 +; SOFT-FLOAT-64-NEXT: srl %i2, 0, %o1 +; SOFT-FLOAT-64-NEXT: ret +; SOFT-FLOAT-64-NEXT: restore %g0, %o0, %o0 + %result = call float @llvm.fmuladd.f32(float %a, float %b, float %c) + ret float %result +} + +define double @fmuladd_intrinsic_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-32: .cfi_startproc +; SOFT-FLOAT-32-NEXT: ! %bb.0: +; SOFT-FLOAT-32-NEXT: save %sp, -96, %sp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-32-NEXT: .cfi_window_save +; SOFT-FLOAT-32-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-32-NEXT: mov %i0, %o0 +; SOFT-FLOAT-32-NEXT: mov %i1, %o1 +; SOFT-FLOAT-32-NEXT: mov %i2, %o2 +; SOFT-FLOAT-32-NEXT: call __muldf3 +; SOFT-FLOAT-32-NEXT: mov %i3, %o3 +; SOFT-FLOAT-32-NEXT: mov %i4, %o2 +; SOFT-FLOAT-32-NEXT: call __adddf3 +; SOFT-FLOAT-32-NEXT: mov %i5, %o3 +; SOFT-FLOAT-32-NEXT: mov %o0, %i0 +; SOFT-FLOAT-32-NEXT: ret +; SOFT-FLOAT-32-NEXT: restore %g0, %o1, %o1 +; +; SOFT-FLOAT-64-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-64: .cfi_startproc +; SOFT-FLOAT-64-NEXT: ! %bb.0: +; SOFT-FLOAT-64-NEXT: save %sp, -176, %sp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-64-NEXT: .cfi_window_save +; SOFT-FLOAT-64-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-64-NEXT: mov %i0, %o0 +; SOFT-FLOAT-64-NEXT: call __muldf3 +; SOFT-FLOAT-64-NEXT: mov %i1, %o1 +; SOFT-FLOAT-64-NEXT: call __adddf3 +; SOFT-FLOAT-64-NEXT: mov %i2, %o1 +; SOFT-FLOAT-64-NEXT: ret +; SOFT-FLOAT-64-NEXT: restore %g0, %o0, %o0 + %result = call double @llvm.fmuladd.f64(double %a, double %b, double %c) + ret double %result +} + +define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-32: .cfi_startproc +; SOFT-FLOAT-32-NEXT: ! %bb.0: +; SOFT-FLOAT-32-NEXT: save %sp, -96, %sp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-32-NEXT: .cfi_window_save +; SOFT-FLOAT-32-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-32-NEXT: mov %i0, %o0 +; SOFT-FLOAT-32-NEXT: call __mulsf3 +; SOFT-FLOAT-32-NEXT: mov %i1, %o1 +; SOFT-FLOAT-32-NEXT: call __addsf3 +; SOFT-FLOAT-32-NEXT: mov %i2, %o1 +; SOFT-FLOAT-32-NEXT: ret +; SOFT-FLOAT-32-NEXT: restore %g0, %o0, %o0 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-64: .cfi_startproc +; SOFT-FLOAT-64-NEXT: ! %bb.0: +; SOFT-FLOAT-64-NEXT: save %sp, -176, %sp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-64-NEXT: .cfi_window_save +; SOFT-FLOAT-64-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-64-NEXT: srl %i0, 0, %o0 +; SOFT-FLOAT-64-NEXT: call __mulsf3 +; SOFT-FLOAT-64-NEXT: srl %i1, 0, %o1 +; SOFT-FLOAT-64-NEXT: call __addsf3 +; SOFT-FLOAT-64-NEXT: srl %i2, 0, %o1 +; SOFT-FLOAT-64-NEXT: ret +; SOFT-FLOAT-64-NEXT: restore %g0, %o0, %o0 + %product = fmul contract float %a, %b + %result = fadd contract float %product, %c + ret float %result +} + +define double @fmuladd_contract_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-32: .cfi_startproc +; SOFT-FLOAT-32-NEXT: ! 
%bb.0: +; SOFT-FLOAT-32-NEXT: save %sp, -96, %sp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-32-NEXT: .cfi_window_save +; SOFT-FLOAT-32-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-32-NEXT: mov %i0, %o0 +; SOFT-FLOAT-32-NEXT: mov %i1, %o1 +; SOFT-FLOAT-32-NEXT: mov %i2, %o2 +; SOFT-FLOAT-32-NEXT: call __muldf3 +; SOFT-FLOAT-32-NEXT: mov %i3, %o3 +; SOFT-FLOAT-32-NEXT: mov %i4, %o2 +; SOFT-FLOAT-32-NEXT: call __adddf3 +; SOFT-FLOAT-32-NEXT: mov %i5, %o3 +; SOFT-FLOAT-32-NEXT: mov %o0, %i0 +; SOFT-FLOAT-32-NEXT: ret +; SOFT-FLOAT-32-NEXT: restore %g0, %o1, %o1 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-64: .cfi_startproc +; SOFT-FLOAT-64-NEXT: ! %bb.0: +; SOFT-FLOAT-64-NEXT: save %sp, -176, %sp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-64-NEXT: .cfi_window_save +; SOFT-FLOAT-64-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-64-NEXT: mov %i0, %o0 +; SOFT-FLOAT-64-NEXT: call __muldf3 +; SOFT-FLOAT-64-NEXT: mov %i1, %o1 +; SOFT-FLOAT-64-NEXT: call __adddf3 +; SOFT-FLOAT-64-NEXT: mov %i2, %o1 +; SOFT-FLOAT-64-NEXT: ret +; SOFT-FLOAT-64-NEXT: restore %g0, %o0, %o0 + %product = fmul contract double %a, %b + %result = fadd contract double %product, %c + ret double %result +} + +define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-32: .cfi_startproc +; SOFT-FLOAT-32-NEXT: ! %bb.0: +; SOFT-FLOAT-32-NEXT: save %sp, -96, %sp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-32-NEXT: .cfi_window_save +; SOFT-FLOAT-32-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-32-NEXT: ld [%fp+100], %l0 +; SOFT-FLOAT-32-NEXT: ld [%fp+104], %l1 +; SOFT-FLOAT-32-NEXT: ld [%fp+108], %l2 +; SOFT-FLOAT-32-NEXT: ld [%fp+112], %l3 +; SOFT-FLOAT-32-NEXT: ld [%fp+96], %l4 +; SOFT-FLOAT-32-NEXT: ld [%fp+92], %l5 +; SOFT-FLOAT-32-NEXT: mov %i0, %o0 +; SOFT-FLOAT-32-NEXT: call __mulsf3 +; SOFT-FLOAT-32-NEXT: mov %i4, %o1 +; SOFT-FLOAT-32-NEXT: mov %o0, %l6 +; SOFT-FLOAT-32-NEXT: mov %i1, %o0 +; SOFT-FLOAT-32-NEXT: call __mulsf3 +; SOFT-FLOAT-32-NEXT: mov %i5, %o1 +; SOFT-FLOAT-32-NEXT: mov %o0, %i1 +; SOFT-FLOAT-32-NEXT: mov %i2, %o0 +; SOFT-FLOAT-32-NEXT: call __mulsf3 +; SOFT-FLOAT-32-NEXT: mov %l5, %o1 +; SOFT-FLOAT-32-NEXT: mov %o0, %i4 +; SOFT-FLOAT-32-NEXT: mov %i3, %o0 +; SOFT-FLOAT-32-NEXT: call __mulsf3 +; SOFT-FLOAT-32-NEXT: mov %l4, %o1 +; SOFT-FLOAT-32-NEXT: call __addsf3 +; SOFT-FLOAT-32-NEXT: mov %l3, %o1 +; SOFT-FLOAT-32-NEXT: mov %o0, %i3 +; SOFT-FLOAT-32-NEXT: mov %i4, %o0 +; SOFT-FLOAT-32-NEXT: call __addsf3 +; SOFT-FLOAT-32-NEXT: mov %l2, %o1 +; SOFT-FLOAT-32-NEXT: mov %o0, %i2 +; SOFT-FLOAT-32-NEXT: mov %i1, %o0 +; SOFT-FLOAT-32-NEXT: call __addsf3 +; SOFT-FLOAT-32-NEXT: mov %l1, %o1 +; SOFT-FLOAT-32-NEXT: mov %o0, %i1 +; SOFT-FLOAT-32-NEXT: mov %l6, %o0 +; SOFT-FLOAT-32-NEXT: call __addsf3 +; SOFT-FLOAT-32-NEXT: mov %l0, %o1 +; SOFT-FLOAT-32-NEXT: ret +; SOFT-FLOAT-32-NEXT: restore %g0, %o0, %o0 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-64: .cfi_startproc +; SOFT-FLOAT-64-NEXT: ! 
%bb.0: +; SOFT-FLOAT-64-NEXT: save %sp, -176, %sp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-64-NEXT: .cfi_window_save +; SOFT-FLOAT-64-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-64-NEXT: ld [%fp+2267], %l0 +; SOFT-FLOAT-64-NEXT: ld [%fp+2259], %l1 +; SOFT-FLOAT-64-NEXT: ld [%fp+2251], %l2 +; SOFT-FLOAT-64-NEXT: ld [%fp+2243], %l3 +; SOFT-FLOAT-64-NEXT: ld [%fp+2227], %l4 +; SOFT-FLOAT-64-NEXT: ld [%fp+2235], %o1 +; SOFT-FLOAT-64-NEXT: call __mulsf3 +; SOFT-FLOAT-64-NEXT: srl %i3, 0, %o0 +; SOFT-FLOAT-64-NEXT: mov %o0, %i3 +; SOFT-FLOAT-64-NEXT: srl %i2, 0, %o0 +; SOFT-FLOAT-64-NEXT: call __mulsf3 +; SOFT-FLOAT-64-NEXT: mov %l4, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i2 +; SOFT-FLOAT-64-NEXT: srl %i1, 0, %o0 +; SOFT-FLOAT-64-NEXT: call __mulsf3 +; SOFT-FLOAT-64-NEXT: srl %i5, 0, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i1 +; SOFT-FLOAT-64-NEXT: srl %i0, 0, %o0 +; SOFT-FLOAT-64-NEXT: call __mulsf3 +; SOFT-FLOAT-64-NEXT: srl %i4, 0, %o1 +; SOFT-FLOAT-64-NEXT: call __addsf3 +; SOFT-FLOAT-64-NEXT: mov %l3, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i0 +; SOFT-FLOAT-64-NEXT: mov %i1, %o0 +; SOFT-FLOAT-64-NEXT: call __addsf3 +; SOFT-FLOAT-64-NEXT: mov %l2, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i1 +; SOFT-FLOAT-64-NEXT: mov %i2, %o0 +; SOFT-FLOAT-64-NEXT: call __addsf3 +; SOFT-FLOAT-64-NEXT: mov %l1, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i2 +; SOFT-FLOAT-64-NEXT: mov %i3, %o0 +; SOFT-FLOAT-64-NEXT: call __addsf3 +; SOFT-FLOAT-64-NEXT: mov %l0, %o1 +; SOFT-FLOAT-64-NEXT: ret +; SOFT-FLOAT-64-NEXT: restore %g0, %o0, %o3 + %product = fmul contract <4 x float> %a, %b + %result = fadd contract <4 x float> %product, %c + ret <4 x float> %result +} + +define <4 x double> @fmuladd_contract_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-32: .cfi_startproc +; SOFT-FLOAT-32-NEXT: ! %bb.0: +; SOFT-FLOAT-32-NEXT: save %sp, -128, %sp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-32-NEXT: .cfi_window_save +; SOFT-FLOAT-32-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-32-NEXT: ld [%fp+64], %l6 +; SOFT-FLOAT-32-NEXT: ld [%fp+156], %g2 +; SOFT-FLOAT-32-NEXT: st %g2, [%fp+-4] ! 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: ld [%fp+160], %g2 +; SOFT-FLOAT-32-NEXT: st %g2, [%fp+-8] ! 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: ld [%fp+148], %g2 +; SOFT-FLOAT-32-NEXT: st %g2, [%fp+-12] ! 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: ld [%fp+152], %g2 +; SOFT-FLOAT-32-NEXT: st %g2, [%fp+-16] ! 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: ld [%fp+140], %g2 +; SOFT-FLOAT-32-NEXT: st %g2, [%fp+-20] ! 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: ld [%fp+144], %g2 +; SOFT-FLOAT-32-NEXT: st %g2, [%fp+-24] ! 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: ld [%fp+132], %g2 +; SOFT-FLOAT-32-NEXT: st %g2, [%fp+-28] ! 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: ld [%fp+136], %l7 +; SOFT-FLOAT-32-NEXT: ld [%fp+100], %l0 +; SOFT-FLOAT-32-NEXT: ld [%fp+104], %l1 +; SOFT-FLOAT-32-NEXT: ld [%fp+108], %l2 +; SOFT-FLOAT-32-NEXT: ld [%fp+112], %l3 +; SOFT-FLOAT-32-NEXT: ld [%fp+116], %l4 +; SOFT-FLOAT-32-NEXT: ld [%fp+120], %l5 +; SOFT-FLOAT-32-NEXT: ld [%fp+92], %o0 +; SOFT-FLOAT-32-NEXT: ld [%fp+96], %o1 +; SOFT-FLOAT-32-NEXT: ld [%fp+124], %o2 +; SOFT-FLOAT-32-NEXT: call __muldf3 +; SOFT-FLOAT-32-NEXT: ld [%fp+128], %o3 +; SOFT-FLOAT-32-NEXT: st %o0, [%fp+-32] ! 4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: st %o1, [%fp+-36] ! 
4-byte Folded Spill +; SOFT-FLOAT-32-NEXT: mov %i4, %o0 +; SOFT-FLOAT-32-NEXT: mov %i5, %o1 +; SOFT-FLOAT-32-NEXT: mov %l4, %o2 +; SOFT-FLOAT-32-NEXT: call __muldf3 +; SOFT-FLOAT-32-NEXT: mov %l5, %o3 +; SOFT-FLOAT-32-NEXT: mov %o0, %l4 +; SOFT-FLOAT-32-NEXT: mov %o1, %l5 +; SOFT-FLOAT-32-NEXT: mov %i2, %o0 +; SOFT-FLOAT-32-NEXT: mov %i3, %o1 +; SOFT-FLOAT-32-NEXT: mov %l2, %o2 +; SOFT-FLOAT-32-NEXT: call __muldf3 +; SOFT-FLOAT-32-NEXT: mov %l3, %o3 +; SOFT-FLOAT-32-NEXT: mov %o0, %i4 +; SOFT-FLOAT-32-NEXT: mov %o1, %i5 +; SOFT-FLOAT-32-NEXT: mov %i0, %o0 +; SOFT-FLOAT-32-NEXT: mov %i1, %o1 +; SOFT-FLOAT-32-NEXT: mov %l0, %o2 +; SOFT-FLOAT-32-NEXT: call __muldf3 +; SOFT-FLOAT-32-NEXT: mov %l1, %o3 +; SOFT-FLOAT-32-NEXT: ld [%fp+-28], %o2 ! 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: call __adddf3 +; SOFT-FLOAT-32-NEXT: mov %l7, %o3 +; SOFT-FLOAT-32-NEXT: mov %o0, %i2 +; SOFT-FLOAT-32-NEXT: mov %o1, %i3 +; SOFT-FLOAT-32-NEXT: mov %i4, %o0 +; SOFT-FLOAT-32-NEXT: mov %i5, %o1 +; SOFT-FLOAT-32-NEXT: ld [%fp+-20], %o2 ! 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: call __adddf3 +; SOFT-FLOAT-32-NEXT: ld [%fp+-24], %o3 +; SOFT-FLOAT-32-NEXT: mov %o0, %i4 +; SOFT-FLOAT-32-NEXT: mov %o1, %i5 +; SOFT-FLOAT-32-NEXT: mov %l4, %o0 +; SOFT-FLOAT-32-NEXT: mov %l5, %o1 +; SOFT-FLOAT-32-NEXT: ld [%fp+-12], %o2 ! 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: call __adddf3 +; SOFT-FLOAT-32-NEXT: ld [%fp+-16], %o3 +; SOFT-FLOAT-32-NEXT: mov %o0, %i0 +; SOFT-FLOAT-32-NEXT: mov %o1, %i1 +; SOFT-FLOAT-32-NEXT: ld [%fp+-32], %o0 ! 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: ld [%fp+-36], %o1 ! 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: ld [%fp+-4], %o2 ! 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: call __adddf3 +; SOFT-FLOAT-32-NEXT: ld [%fp+-8], %o3 +; SOFT-FLOAT-32-NEXT: ! kill: def $o0 killed $o0 killed $o0_o1 def $o0_o1 +; SOFT-FLOAT-32-NEXT: ! kill: def $o1 killed $o1 killed $o0_o1 def $o0_o1 +; SOFT-FLOAT-32-NEXT: std %o0, [%l6+24] +; SOFT-FLOAT-32-NEXT: std %i0, [%l6+16] +; SOFT-FLOAT-32-NEXT: std %i4, [%l6+8] +; SOFT-FLOAT-32-NEXT: std %i2, [%l6] +; SOFT-FLOAT-32-NEXT: ret +; SOFT-FLOAT-32-NEXT: restore +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-64: .cfi_startproc +; SOFT-FLOAT-64-NEXT: ! 
%bb.0: +; SOFT-FLOAT-64-NEXT: save %sp, -176, %sp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_register %fp +; SOFT-FLOAT-64-NEXT: .cfi_window_save +; SOFT-FLOAT-64-NEXT: .cfi_register %o7, %i7 +; SOFT-FLOAT-64-NEXT: ldx [%fp+2263], %l0 +; SOFT-FLOAT-64-NEXT: ldx [%fp+2255], %l1 +; SOFT-FLOAT-64-NEXT: ldx [%fp+2247], %l2 +; SOFT-FLOAT-64-NEXT: ldx [%fp+2239], %l3 +; SOFT-FLOAT-64-NEXT: ldx [%fp+2223], %l4 +; SOFT-FLOAT-64-NEXT: ldx [%fp+2231], %o1 +; SOFT-FLOAT-64-NEXT: call __muldf3 +; SOFT-FLOAT-64-NEXT: mov %i3, %o0 +; SOFT-FLOAT-64-NEXT: mov %o0, %i3 +; SOFT-FLOAT-64-NEXT: mov %i2, %o0 +; SOFT-FLOAT-64-NEXT: call __muldf3 +; SOFT-FLOAT-64-NEXT: mov %l4, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i2 +; SOFT-FLOAT-64-NEXT: mov %i1, %o0 +; SOFT-FLOAT-64-NEXT: call __muldf3 +; SOFT-FLOAT-64-NEXT: mov %i5, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i1 +; SOFT-FLOAT-64-NEXT: mov %i0, %o0 +; SOFT-FLOAT-64-NEXT: call __muldf3 +; SOFT-FLOAT-64-NEXT: mov %i4, %o1 +; SOFT-FLOAT-64-NEXT: call __adddf3 +; SOFT-FLOAT-64-NEXT: mov %l3, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i0 +; SOFT-FLOAT-64-NEXT: mov %i1, %o0 +; SOFT-FLOAT-64-NEXT: call __adddf3 +; SOFT-FLOAT-64-NEXT: mov %l2, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i1 +; SOFT-FLOAT-64-NEXT: mov %i2, %o0 +; SOFT-FLOAT-64-NEXT: call __adddf3 +; SOFT-FLOAT-64-NEXT: mov %l1, %o1 +; SOFT-FLOAT-64-NEXT: mov %o0, %i2 +; SOFT-FLOAT-64-NEXT: mov %i3, %o0 +; SOFT-FLOAT-64-NEXT: call __adddf3 +; SOFT-FLOAT-64-NEXT: mov %l0, %o1 +; SOFT-FLOAT-64-NEXT: ret +; SOFT-FLOAT-64-NEXT: restore %g0, %o0, %o3 + %product = fmul contract <4 x double> %a, %b + %result = fadd contract <4 x double> %product, %c + ret <4 x double> %result +} + +attributes #0 = { "use-soft-float"="true" } + +declare float @llvm.fmuladd.f32(float %a, float %b, float %c) +declare double @llvm.fmuladd.f64(double %a, double %b, double %c) diff --git a/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll b/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll new file mode 100644 index 00000000000000..b01c348b631b88 --- /dev/null +++ b/llvm/test/CodeGen/SystemZ/fmuladd-soft-float.ll @@ -0,0 +1,230 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=s390x < %s | FileCheck %s -check-prefix=SOFT-FLOAT + +define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT: # %bb.0: +; SOFT-FLOAT-NEXT: stmg %r13, %r15, 104(%r15) +; SOFT-FLOAT-NEXT: .cfi_offset %r13, -56 +; SOFT-FLOAT-NEXT: .cfi_offset %r14, -48 +; SOFT-FLOAT-NEXT: .cfi_offset %r15, -40 +; SOFT-FLOAT-NEXT: aghi %r15, -160 +; SOFT-FLOAT-NEXT: .cfi_def_cfa_offset 320 +; SOFT-FLOAT-NEXT: llgfr %r2, %r2 +; SOFT-FLOAT-NEXT: llgfr %r3, %r3 +; SOFT-FLOAT-NEXT: lr %r13, %r4 +; SOFT-FLOAT-NEXT: brasl %r14, __mulsf3@PLT +; SOFT-FLOAT-NEXT: llgfr %r3, %r13 +; SOFT-FLOAT-NEXT: brasl %r14, __addsf3@PLT +; SOFT-FLOAT-NEXT: # kill: def $r2l killed $r2l killed $r2d +; SOFT-FLOAT-NEXT: lmg %r13, %r15, 264(%r15) +; SOFT-FLOAT-NEXT: br %r14 + %result = call float @llvm.fmuladd.f32(float %a, float %b, float %c) + ret float %result +} + +define double @fmuladd_intrinsic_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT: # %bb.0: +; SOFT-FLOAT-NEXT: stmg %r13, %r15, 104(%r15) +; SOFT-FLOAT-NEXT: .cfi_offset %r13, -56 +; SOFT-FLOAT-NEXT: .cfi_offset %r14, -48 +; SOFT-FLOAT-NEXT: .cfi_offset %r15, -40 +; SOFT-FLOAT-NEXT: aghi %r15, -160 +; SOFT-FLOAT-NEXT: .cfi_def_cfa_offset 320 +; SOFT-FLOAT-NEXT: lgr %r13, %r4 
+; SOFT-FLOAT-NEXT: brasl %r14, __muldf3@PLT +; SOFT-FLOAT-NEXT: lgr %r3, %r13 +; SOFT-FLOAT-NEXT: brasl %r14, __adddf3@PLT +; SOFT-FLOAT-NEXT: lmg %r13, %r15, 264(%r15) +; SOFT-FLOAT-NEXT: br %r14 + %result = call double @llvm.fmuladd.f64(double %a, double %b, double %c) + ret double %result +} + +define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT: # %bb.0: +; SOFT-FLOAT-NEXT: stmg %r13, %r15, 104(%r15) +; SOFT-FLOAT-NEXT: .cfi_offset %r13, -56 +; SOFT-FLOAT-NEXT: .cfi_offset %r14, -48 +; SOFT-FLOAT-NEXT: .cfi_offset %r15, -40 +; SOFT-FLOAT-NEXT: aghi %r15, -160 +; SOFT-FLOAT-NEXT: .cfi_def_cfa_offset 320 +; SOFT-FLOAT-NEXT: llgfr %r2, %r2 +; SOFT-FLOAT-NEXT: llgfr %r3, %r3 +; SOFT-FLOAT-NEXT: lr %r13, %r4 +; SOFT-FLOAT-NEXT: brasl %r14, __mulsf3@PLT +; SOFT-FLOAT-NEXT: llgfr %r3, %r13 +; SOFT-FLOAT-NEXT: brasl %r14, __addsf3@PLT +; SOFT-FLOAT-NEXT: # kill: def $r2l killed $r2l killed $r2d +; SOFT-FLOAT-NEXT: lmg %r13, %r15, 264(%r15) +; SOFT-FLOAT-NEXT: br %r14 + %product = fmul contract float %a, %b + %result = fadd contract float %product, %c + ret float %result +} + +define double @fmuladd_contract_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT: # %bb.0: +; SOFT-FLOAT-NEXT: stmg %r13, %r15, 104(%r15) +; SOFT-FLOAT-NEXT: .cfi_offset %r13, -56 +; SOFT-FLOAT-NEXT: .cfi_offset %r14, -48 +; SOFT-FLOAT-NEXT: .cfi_offset %r15, -40 +; SOFT-FLOAT-NEXT: aghi %r15, -160 +; SOFT-FLOAT-NEXT: .cfi_def_cfa_offset 320 +; SOFT-FLOAT-NEXT: lgr %r13, %r4 +; SOFT-FLOAT-NEXT: brasl %r14, __muldf3@PLT +; SOFT-FLOAT-NEXT: lgr %r3, %r13 +; SOFT-FLOAT-NEXT: brasl %r14, __adddf3@PLT +; SOFT-FLOAT-NEXT: lmg %r13, %r15, 264(%r15) +; SOFT-FLOAT-NEXT: br %r14 + %product = fmul contract double %a, %b + %result = fadd contract double %product, %c + ret double %result +} + +define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT: # %bb.0: +; SOFT-FLOAT-NEXT: stmg %r7, %r15, 56(%r15) +; SOFT-FLOAT-NEXT: .cfi_offset %r7, -104 +; SOFT-FLOAT-NEXT: .cfi_offset %r8, -96 +; SOFT-FLOAT-NEXT: .cfi_offset %r9, -88 +; SOFT-FLOAT-NEXT: .cfi_offset %r10, -80 +; SOFT-FLOAT-NEXT: .cfi_offset %r11, -72 +; SOFT-FLOAT-NEXT: .cfi_offset %r12, -64 +; SOFT-FLOAT-NEXT: .cfi_offset %r13, -56 +; SOFT-FLOAT-NEXT: .cfi_offset %r14, -48 +; SOFT-FLOAT-NEXT: .cfi_offset %r15, -40 +; SOFT-FLOAT-NEXT: aghi %r15, -176 +; SOFT-FLOAT-NEXT: .cfi_def_cfa_offset 336 +; SOFT-FLOAT-NEXT: llgf %r0, 388(%r15) +; SOFT-FLOAT-NEXT: stg %r0, 168(%r15) # 8-byte Folded Spill +; SOFT-FLOAT-NEXT: llgf %r0, 380(%r15) +; SOFT-FLOAT-NEXT: stg %r0, 160(%r15) # 8-byte Folded Spill +; SOFT-FLOAT-NEXT: llgf %r11, 372(%r15) +; SOFT-FLOAT-NEXT: llgf %r10, 364(%r15) +; SOFT-FLOAT-NEXT: llgf %r8, 340(%r15) +; SOFT-FLOAT-NEXT: llgf %r0, 356(%r15) +; SOFT-FLOAT-NEXT: llgf %r7, 348(%r15) +; SOFT-FLOAT-NEXT: llgfr %r1, %r5 +; SOFT-FLOAT-NEXT: lr %r9, %r4 +; SOFT-FLOAT-NEXT: lr %r13, %r3 +; SOFT-FLOAT-NEXT: lr %r12, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r1 +; SOFT-FLOAT-NEXT: lgr %r3, %r0 +; SOFT-FLOAT-NEXT: brasl %r14, __mulsf3@PLT +; SOFT-FLOAT-NEXT: llgfr %r0, %r9 +; SOFT-FLOAT-NEXT: lgr %r9, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r0 +; SOFT-FLOAT-NEXT: lgr %r3, %r7 +; SOFT-FLOAT-NEXT: brasl %r14, __mulsf3@PLT +; SOFT-FLOAT-NEXT: llgfr %r0, %r13 +; SOFT-FLOAT-NEXT: lgr %r13, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r0 +; SOFT-FLOAT-NEXT: lgr %r3, %r8 +; SOFT-FLOAT-NEXT: 
brasl %r14, __mulsf3@PLT +; SOFT-FLOAT-NEXT: llgfr %r0, %r12 +; SOFT-FLOAT-NEXT: llgfr %r3, %r6 +; SOFT-FLOAT-NEXT: lgr %r12, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r0 +; SOFT-FLOAT-NEXT: brasl %r14, __mulsf3@PLT +; SOFT-FLOAT-NEXT: lgr %r3, %r10 +; SOFT-FLOAT-NEXT: brasl %r14, __addsf3@PLT +; SOFT-FLOAT-NEXT: lgr %r10, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r12 +; SOFT-FLOAT-NEXT: lgr %r3, %r11 +; SOFT-FLOAT-NEXT: brasl %r14, __addsf3@PLT +; SOFT-FLOAT-NEXT: lgr %r12, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r13 +; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Folded Reload +; SOFT-FLOAT-NEXT: brasl %r14, __addsf3@PLT +; SOFT-FLOAT-NEXT: lgr %r13, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r9 +; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Folded Reload +; SOFT-FLOAT-NEXT: brasl %r14, __addsf3@PLT +; SOFT-FLOAT-NEXT: lgr %r5, %r2 +; SOFT-FLOAT-NEXT: lr %r2, %r10 +; SOFT-FLOAT-NEXT: lr %r3, %r12 +; SOFT-FLOAT-NEXT: lr %r4, %r13 +; SOFT-FLOAT-NEXT: # kill: def $r5l killed $r5l killed $r5d +; SOFT-FLOAT-NEXT: lmg %r7, %r15, 232(%r15) +; SOFT-FLOAT-NEXT: br %r14 + %product = fmul contract <4 x float> %a, %b + %result = fadd contract <4 x float> %product, %c + ret <4 x float> %result +} + +define <4 x double> @fmuladd_contract_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 { +; SOFT-FLOAT-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT: # %bb.0: +; SOFT-FLOAT-NEXT: stmg %r6, %r15, 48(%r15) +; SOFT-FLOAT-NEXT: .cfi_offset %r6, -112 +; SOFT-FLOAT-NEXT: .cfi_offset %r7, -104 +; SOFT-FLOAT-NEXT: .cfi_offset %r8, -96 +; SOFT-FLOAT-NEXT: .cfi_offset %r9, -88 +; SOFT-FLOAT-NEXT: .cfi_offset %r10, -80 +; SOFT-FLOAT-NEXT: .cfi_offset %r11, -72 +; SOFT-FLOAT-NEXT: .cfi_offset %r12, -64 +; SOFT-FLOAT-NEXT: .cfi_offset %r13, -56 +; SOFT-FLOAT-NEXT: .cfi_offset %r14, -48 +; SOFT-FLOAT-NEXT: .cfi_offset %r15, -40 +; SOFT-FLOAT-NEXT: aghi %r15, -184 +; SOFT-FLOAT-NEXT: .cfi_def_cfa_offset 344 +; SOFT-FLOAT-NEXT: mvc 176(8,%r15), 24(%r4) # 8-byte Folded Spill +; SOFT-FLOAT-NEXT: mvc 168(8,%r15), 16(%r4) # 8-byte Folded Spill +; SOFT-FLOAT-NEXT: mvc 160(8,%r15), 8(%r4) # 8-byte Folded Spill +; SOFT-FLOAT-NEXT: lg %r10, 0(%r4) +; SOFT-FLOAT-NEXT: lg %r9, 0(%r2) +; SOFT-FLOAT-NEXT: lg %r8, 0(%r3) +; SOFT-FLOAT-NEXT: lg %r7, 8(%r2) +; SOFT-FLOAT-NEXT: lg %r6, 8(%r3) +; SOFT-FLOAT-NEXT: lg %r13, 16(%r2) +; SOFT-FLOAT-NEXT: lg %r2, 24(%r2) +; SOFT-FLOAT-NEXT: lg %r0, 24(%r3) +; SOFT-FLOAT-NEXT: lg %r12, 16(%r3) +; SOFT-FLOAT-NEXT: lgr %r3, %r0 +; SOFT-FLOAT-NEXT: brasl %r14, __muldf3@PLT +; SOFT-FLOAT-NEXT: lgr %r11, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r13 +; SOFT-FLOAT-NEXT: lgr %r3, %r12 +; SOFT-FLOAT-NEXT: brasl %r14, __muldf3@PLT +; SOFT-FLOAT-NEXT: lgr %r13, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r7 +; SOFT-FLOAT-NEXT: lgr %r3, %r6 +; SOFT-FLOAT-NEXT: brasl %r14, __muldf3@PLT +; SOFT-FLOAT-NEXT: lgr %r12, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r9 +; SOFT-FLOAT-NEXT: lgr %r3, %r8 +; SOFT-FLOAT-NEXT: brasl %r14, __muldf3@PLT +; SOFT-FLOAT-NEXT: lgr %r3, %r10 +; SOFT-FLOAT-NEXT: brasl %r14, __adddf3@PLT +; SOFT-FLOAT-NEXT: lgr %r10, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r12 +; SOFT-FLOAT-NEXT: lg %r3, 160(%r15) # 8-byte Folded Reload +; SOFT-FLOAT-NEXT: brasl %r14, __adddf3@PLT +; SOFT-FLOAT-NEXT: lgr %r12, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r13 +; SOFT-FLOAT-NEXT: lg %r3, 168(%r15) # 8-byte Folded Reload +; SOFT-FLOAT-NEXT: brasl %r14, __adddf3@PLT +; SOFT-FLOAT-NEXT: lgr %r13, %r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r11 +; SOFT-FLOAT-NEXT: lg %r3, 176(%r15) # 8-byte Folded Reload +; SOFT-FLOAT-NEXT: brasl %r14, __adddf3@PLT +; SOFT-FLOAT-NEXT: lgr %r5, 
%r2 +; SOFT-FLOAT-NEXT: lgr %r2, %r10 +; SOFT-FLOAT-NEXT: lgr %r3, %r12 +; SOFT-FLOAT-NEXT: lgr %r4, %r13 +; SOFT-FLOAT-NEXT: lmg %r6, %r15, 232(%r15) +; SOFT-FLOAT-NEXT: br %r14 + %product = fmul contract <4 x double> %a, %b + %result = fadd contract <4 x double> %product, %c + ret <4 x double> %result +} + +attributes #0 = { "use-soft-float"="true" } + +declare float @llvm.fmuladd.f32(float %a, float %b, float %c) +declare double @llvm.fmuladd.f64(double %a, double %b, double %c) diff --git a/llvm/test/CodeGen/X86/fmuladd-soft-float.ll b/llvm/test/CodeGen/X86/fmuladd-soft-float.ll new file mode 100644 index 00000000000000..ccb2f37590b0ad --- /dev/null +++ b/llvm/test/CodeGen/X86/fmuladd-soft-float.ll @@ -0,0 +1,1777 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=i386 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-32 +; RUN: llc -mtriple=i386 -mattr +fma < %s | FileCheck %s -check-prefix=SOFT-FLOAT-32-FMA +; RUN: llc -mtriple=i386 -mattr +fma4 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-32-FMA4 +; RUN: llc -mtriple=x86_64 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-64 +; RUN: llc -mtriple=x86_64 -mattr +fma < %s | FileCheck %s -check-prefix=SOFT-FLOAT-64-FMA +; RUN: llc -mtriple=x86_64 -mattr +fma4 < %s | FileCheck %s -check-prefix=SOFT-FLOAT-64-FMA4 + +define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: .cfi_offset %esi, -8 +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %eax +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __addsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: popl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: retl +; +; SOFT-FLOAT-32-FMA-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-32-FMA: # %bb.0: +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %esi, -8 +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: popl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: retl +; +; SOFT-FLOAT-32-FMA4-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-32-FMA4: # %bb.0: +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi 
+; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %esi, -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: popl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: retl +; +; SOFT-FLOAT-64-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: pushq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-NEXT: movl %edx, %ebx +; SOFT-FLOAT-64-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-NEXT: movl %ebx, %esi +; SOFT-FLOAT-64-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-NEXT: popq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-NEXT: retq +; +; SOFT-FLOAT-64-FMA-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-64-FMA: # %bb.0: +; SOFT-FLOAT-64-FMA-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-FMA-NEXT: movl %edx, %ebx +; SOFT-FLOAT-64-FMA-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl %ebx, %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA-NEXT: retq +; +; SOFT-FLOAT-64-FMA4-LABEL: fmuladd_intrinsic_f32: +; SOFT-FLOAT-64-FMA4: # %bb.0: +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-FMA4-NEXT: movl %edx, %ebx +; SOFT-FLOAT-64-FMA4-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl %ebx, %esi +; SOFT-FLOAT-64-FMA4-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA4-NEXT: retq + %result = call float @llvm.fmuladd.f32(float %a, float %b, float %c) + ret float %result +} + +define double @fmuladd_intrinsic_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-NEXT: .cfi_offset %esi, -12 +; SOFT-FLOAT-32-NEXT: .cfi_offset %edi, -8 +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: 
.cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __muldf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %edx +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %eax +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __adddf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: popl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: popl %edi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: retl +; +; SOFT-FLOAT-32-FMA-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-32-FMA: # %bb.0: +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %esi, -12 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %edi, -8 +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: popl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: popl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: retl +; +; SOFT-FLOAT-32-FMA4-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-32-FMA4: # %bb.0: +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %esi, -12 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %edi, -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; 
SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: popl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: popl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: retl +; +; SOFT-FLOAT-64-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: pushq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-NEXT: movq %rdx, %rbx +; SOFT-FLOAT-64-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, %rdi +; SOFT-FLOAT-64-NEXT: movq %rbx, %rsi +; SOFT-FLOAT-64-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-NEXT: popq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-NEXT: retq +; +; SOFT-FLOAT-64-FMA-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-64-FMA: # %bb.0: +; SOFT-FLOAT-64-FMA-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-FMA-NEXT: movq %rdx, %rbx +; SOFT-FLOAT-64-FMA-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq %rbx, %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA-NEXT: retq +; +; SOFT-FLOAT-64-FMA4-LABEL: fmuladd_intrinsic_f64: +; SOFT-FLOAT-64-FMA4: # %bb.0: +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-FMA4-NEXT: movq %rdx, %rbx +; SOFT-FLOAT-64-FMA4-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: movq %rbx, %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA4-NEXT: retq + %result = call double @llvm.fmuladd.f64(double %a, double %b, double %c) + ret double %result +} + +define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: .cfi_offset %esi, -8 +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %eax +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __addsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: popl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: retl +; +; SOFT-FLOAT-32-FMA-LABEL: fmuladd_contract_f32: +; 
SOFT-FLOAT-32-FMA: # %bb.0: +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %esi, -8 +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: popl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: retl +; +; SOFT-FLOAT-32-FMA4-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-32-FMA4: # %bb.0: +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %esi, -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: popl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: retl +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: pushq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-NEXT: movl %edx, %ebx +; SOFT-FLOAT-64-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-NEXT: movl %ebx, %esi +; SOFT-FLOAT-64-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-NEXT: popq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-NEXT: retq +; +; SOFT-FLOAT-64-FMA-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-64-FMA: # %bb.0: +; SOFT-FLOAT-64-FMA-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-FMA-NEXT: movl %edx, %ebx +; SOFT-FLOAT-64-FMA-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl %ebx, %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA-NEXT: retq +; +; SOFT-FLOAT-64-FMA4-LABEL: fmuladd_contract_f32: +; SOFT-FLOAT-64-FMA4: # %bb.0: +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-FMA4-NEXT: movl %edx, %ebx +; SOFT-FLOAT-64-FMA4-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl %ebx, %esi +; 
SOFT-FLOAT-64-FMA4-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA4-NEXT: retq + %product = fmul contract float %a, %b + %result = fadd contract float %product, %c + ret float %result +} + +define double @fmuladd_contract_f64(double %a, double %b, double %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-NEXT: .cfi_offset %esi, -12 +; SOFT-FLOAT-32-NEXT: .cfi_offset %edi, -8 +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __muldf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %edx +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %eax +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __adddf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: popl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: popl %edi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: retl +; +; SOFT-FLOAT-32-FMA-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-32-FMA: # %bb.0: +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %esi, -12 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %edi, -8 +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: popl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: popl %edi +; 
SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: retl +; +; SOFT-FLOAT-32-FMA4-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-32-FMA4: # %bb.0: +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %esi, -12 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %edi, -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: popl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: popl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: retl +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: pushq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-NEXT: movq %rdx, %rbx +; SOFT-FLOAT-64-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, %rdi +; SOFT-FLOAT-64-NEXT: movq %rbx, %rsi +; SOFT-FLOAT-64-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-NEXT: popq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-NEXT: retq +; +; SOFT-FLOAT-64-FMA-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-64-FMA: # %bb.0: +; SOFT-FLOAT-64-FMA-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-FMA-NEXT: movq %rdx, %rbx +; SOFT-FLOAT-64-FMA-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq %rbx, %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA-NEXT: retq +; +; SOFT-FLOAT-64-FMA4-LABEL: fmuladd_contract_f64: +; SOFT-FLOAT-64-FMA4: # %bb.0: +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %rbx, -16 +; SOFT-FLOAT-64-FMA4-NEXT: movq %rdx, %rbx +; SOFT-FLOAT-64-FMA4-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: movq %rbx, %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA4-NEXT: retq + %product = fmul contract double %a, %b + %result = fadd contract double %product, 
%c + ret double %result +} + +define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: pushl %ebp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: pushl %ebx +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-NEXT: pushl %eax +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32-NEXT: .cfi_offset %esi, -20 +; SOFT-FLOAT-32-NEXT: .cfi_offset %edi, -16 +; SOFT-FLOAT-32-NEXT: .cfi_offset %ebx, -12 +; SOFT-FLOAT-32-NEXT: .cfi_offset %ebp, -8 +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %ebp +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: movl %eax, (%esp) # 4-byte Spill +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %ebx +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: movl %eax, %ebx +; SOFT-FLOAT-32-NEXT: pushl %ebp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %eax +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __addsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: movl %eax, %ebp +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __addsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %ebx +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __addsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: movl %eax, %ebx +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: 
.cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __addsf3 +; SOFT-FLOAT-32-NEXT: addl $8, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-NEXT: movl %eax, 12(%esi) +; SOFT-FLOAT-32-NEXT: movl %ebx, 8(%esi) +; SOFT-FLOAT-32-NEXT: movl %edi, 4(%esi) +; SOFT-FLOAT-32-NEXT: movl %ebp, (%esi) +; SOFT-FLOAT-32-NEXT: movl %esi, %eax +; SOFT-FLOAT-32-NEXT: addl $4, %esp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-NEXT: popl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-NEXT: popl %edi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-NEXT: popl %ebx +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: popl %ebp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: retl $4 +; +; SOFT-FLOAT-32-FMA-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-32-FMA: # %bb.0: +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-FMA-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %esi, -20 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %edi, -16 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %ebx, -12 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %ebp, -8 +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %ebp +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %ebx +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, (%esp) # 4-byte Spill +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, %ebx +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: movl 
%eax, %ebp +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, %ebx +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, 12(%esi) +; SOFT-FLOAT-32-FMA-NEXT: movl %ebx, 8(%esi) +; SOFT-FLOAT-32-FMA-NEXT: movl %edi, 4(%esi) +; SOFT-FLOAT-32-FMA-NEXT: movl %ebp, (%esi) +; SOFT-FLOAT-32-FMA-NEXT: movl %esi, %eax +; SOFT-FLOAT-32-FMA-NEXT: addl $4, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-FMA-NEXT: popl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-FMA-NEXT: popl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA-NEXT: popl %ebx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: popl %ebp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: retl $4 +; +; SOFT-FLOAT-32-FMA4-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-32-FMA4: # %bb.0: +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %esi, -20 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %edi, -16 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %ebx, -12 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %ebp, -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %ebp +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %ebx +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, (%esp) # 4-byte Spill +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, 
%ebx +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __mulsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, %ebp +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, %ebx +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __addsf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $8, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, 12(%esi) +; SOFT-FLOAT-32-FMA4-NEXT: movl %ebx, 8(%esi) +; SOFT-FLOAT-32-FMA4-NEXT: movl %edi, 4(%esi) +; SOFT-FLOAT-32-FMA4-NEXT: movl %ebp, (%esi) +; SOFT-FLOAT-32-FMA4-NEXT: movl %esi, %eax +; SOFT-FLOAT-32-FMA4-NEXT: addl $4, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-FMA4-NEXT: popl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-FMA4-NEXT: popl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA4-NEXT: popl %ebx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: popl %ebp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: retl $4 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: pushq %rbp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: pushq %r15 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-NEXT: pushq %r14 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-NEXT: pushq %r13 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-NEXT: pushq %r12 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-NEXT: pushq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-NEXT: pushq %rax 
+; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64-NEXT: .cfi_offset %rbx, -56 +; SOFT-FLOAT-64-NEXT: .cfi_offset %r12, -48 +; SOFT-FLOAT-64-NEXT: .cfi_offset %r13, -40 +; SOFT-FLOAT-64-NEXT: .cfi_offset %r14, -32 +; SOFT-FLOAT-64-NEXT: .cfi_offset %r15, -24 +; SOFT-FLOAT-64-NEXT: .cfi_offset %rbp, -16 +; SOFT-FLOAT-64-NEXT: movl %r9d, %r13d +; SOFT-FLOAT-64-NEXT: movl %ecx, %ebp +; SOFT-FLOAT-64-NEXT: movl %edx, %r14d +; SOFT-FLOAT-64-NEXT: movl %esi, %r12d +; SOFT-FLOAT-64-NEXT: movq %rdi, %rbx +; SOFT-FLOAT-64-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-NEXT: movl %r8d, %edi +; SOFT-FLOAT-64-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %r15d +; SOFT-FLOAT-64-NEXT: movl %ebp, %edi +; SOFT-FLOAT-64-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %ebp +; SOFT-FLOAT-64-NEXT: movl %r14d, %edi +; SOFT-FLOAT-64-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %r14d +; SOFT-FLOAT-64-NEXT: movl %r12d, %edi +; SOFT-FLOAT-64-NEXT: movl %r13d, %esi +; SOFT-FLOAT-64-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %r12d +; SOFT-FLOAT-64-NEXT: movl %r14d, %edi +; SOFT-FLOAT-64-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %r14d +; SOFT-FLOAT-64-NEXT: movl %ebp, %edi +; SOFT-FLOAT-64-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, %ebp +; SOFT-FLOAT-64-NEXT: movl %r15d, %edi +; SOFT-FLOAT-64-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-NEXT: movl %eax, 12(%rbx) +; SOFT-FLOAT-64-NEXT: movl %ebp, 8(%rbx) +; SOFT-FLOAT-64-NEXT: movl %r14d, 4(%rbx) +; SOFT-FLOAT-64-NEXT: movl %r12d, (%rbx) +; SOFT-FLOAT-64-NEXT: movq %rbx, %rax +; SOFT-FLOAT-64-NEXT: addq $8, %rsp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-NEXT: popq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-NEXT: popq %r12 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-NEXT: popq %r13 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-NEXT: popq %r14 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-NEXT: popq %r15 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: popq %rbp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-NEXT: retq +; +; SOFT-FLOAT-64-FMA-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-64-FMA: # %bb.0: +; SOFT-FLOAT-64-FMA-NEXT: pushq %rbp +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA-NEXT: pushq %r15 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-FMA-NEXT: pushq %r14 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-FMA-NEXT: pushq %r13 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-FMA-NEXT: pushq %r12 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-FMA-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-FMA-NEXT: pushq %rax +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %rbx, -56 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %r12, -48 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %r13, -40 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %r14, -32 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %r15, -24 +; 
SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %rbp, -16 +; SOFT-FLOAT-64-FMA-NEXT: movl %r9d, %r13d +; SOFT-FLOAT-64-FMA-NEXT: movl %ecx, %ebp +; SOFT-FLOAT-64-FMA-NEXT: movl %edx, %r14d +; SOFT-FLOAT-64-FMA-NEXT: movl %esi, %r12d +; SOFT-FLOAT-64-FMA-NEXT: movq %rdi, %rbx +; SOFT-FLOAT-64-FMA-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA-NEXT: movl %r8d, %edi +; SOFT-FLOAT-64-FMA-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %r15d +; SOFT-FLOAT-64-FMA-NEXT: movl %ebp, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %ebp +; SOFT-FLOAT-64-FMA-NEXT: movl %r14d, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %r14d +; SOFT-FLOAT-64-FMA-NEXT: movl %r12d, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl %r13d, %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %r12d +; SOFT-FLOAT-64-FMA-NEXT: movl %r14d, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %r14d +; SOFT-FLOAT-64-FMA-NEXT: movl %ebp, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, %ebp +; SOFT-FLOAT-64-FMA-NEXT: movl %r15d, %edi +; SOFT-FLOAT-64-FMA-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movl %eax, 12(%rbx) +; SOFT-FLOAT-64-FMA-NEXT: movl %ebp, 8(%rbx) +; SOFT-FLOAT-64-FMA-NEXT: movl %r14d, 4(%rbx) +; SOFT-FLOAT-64-FMA-NEXT: movl %r12d, (%rbx) +; SOFT-FLOAT-64-FMA-NEXT: movq %rbx, %rax +; SOFT-FLOAT-64-FMA-NEXT: addq $8, %rsp +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-FMA-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-FMA-NEXT: popq %r12 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-FMA-NEXT: popq %r13 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-FMA-NEXT: popq %r14 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-FMA-NEXT: popq %r15 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA-NEXT: popq %rbp +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA-NEXT: retq +; +; SOFT-FLOAT-64-FMA4-LABEL: fmuladd_contract_v4f32: +; SOFT-FLOAT-64-FMA4: # %bb.0: +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rbp +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %r15 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %r14 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %r13 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %r12 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rax +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %rbx, -56 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %r12, -48 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %r13, -40 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %r14, -32 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %r15, -24 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset 
%rbp, -16 +; SOFT-FLOAT-64-FMA4-NEXT: movl %r9d, %r13d +; SOFT-FLOAT-64-FMA4-NEXT: movl %ecx, %ebp +; SOFT-FLOAT-64-FMA4-NEXT: movl %edx, %r14d +; SOFT-FLOAT-64-FMA4-NEXT: movl %esi, %r12d +; SOFT-FLOAT-64-FMA4-NEXT: movq %rdi, %rbx +; SOFT-FLOAT-64-FMA4-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA4-NEXT: movl %r8d, %edi +; SOFT-FLOAT-64-FMA4-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %r15d +; SOFT-FLOAT-64-FMA4-NEXT: movl %ebp, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA4-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %ebp +; SOFT-FLOAT-64-FMA4-NEXT: movl %r14d, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA4-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %r14d +; SOFT-FLOAT-64-FMA4-NEXT: movl %r12d, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl %r13d, %esi +; SOFT-FLOAT-64-FMA4-NEXT: callq __mulsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA4-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %r12d +; SOFT-FLOAT-64-FMA4-NEXT: movl %r14d, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA4-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %r14d +; SOFT-FLOAT-64-FMA4-NEXT: movl %ebp, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA4-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, %ebp +; SOFT-FLOAT-64-FMA4-NEXT: movl %r15d, %edi +; SOFT-FLOAT-64-FMA4-NEXT: movl {{[0-9]+}}(%rsp), %esi +; SOFT-FLOAT-64-FMA4-NEXT: callq __addsf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movl %eax, 12(%rbx) +; SOFT-FLOAT-64-FMA4-NEXT: movl %ebp, 8(%rbx) +; SOFT-FLOAT-64-FMA4-NEXT: movl %r14d, 4(%rbx) +; SOFT-FLOAT-64-FMA4-NEXT: movl %r12d, (%rbx) +; SOFT-FLOAT-64-FMA4-NEXT: movq %rbx, %rax +; SOFT-FLOAT-64-FMA4-NEXT: addq $8, %rsp +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-FMA4-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-FMA4-NEXT: popq %r12 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-FMA4-NEXT: popq %r13 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-FMA4-NEXT: popq %r14 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-FMA4-NEXT: popq %r15 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA4-NEXT: popq %rbp +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA4-NEXT: retq + %product = fmul contract <4 x float> %a, %b + %result = fadd contract <4 x float> %product, %c + ret <4 x float> %result +} + +define <4 x double> @fmuladd_contract_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #0 { +; SOFT-FLOAT-32-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-32: # %bb.0: +; SOFT-FLOAT-32-NEXT: pushl %ebp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: pushl %ebx +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-NEXT: subl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 36 +; SOFT-FLOAT-32-NEXT: .cfi_offset %esi, -20 +; SOFT-FLOAT-32-NEXT: .cfi_offset %edi, -16 +; SOFT-FLOAT-32-NEXT: .cfi_offset %ebx, -12 +; SOFT-FLOAT-32-NEXT: .cfi_offset %ebp, -8 +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %ebx +; 
SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %ebp +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __muldf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; SOFT-FLOAT-32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; SOFT-FLOAT-32-NEXT: pushl %ebp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %ebx +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __muldf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: movl %eax, %esi +; SOFT-FLOAT-32-NEXT: movl %edx, %ebp +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __muldf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-NEXT: movl %edx, %ebx +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __muldf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %edx +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %eax +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __adddf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; SOFT-FLOAT-32-NEXT: movl %edx, (%esp) # 4-byte Spill +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %ebx +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %edi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __adddf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-NEXT: movl %edx, %ebx +; SOFT-FLOAT-32-NEXT: 
pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %ebp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl %esi +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __adddf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: movl %eax, %ebp +; SOFT-FLOAT-32-NEXT: movl %edx, %esi +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: calll __adddf3 +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; SOFT-FLOAT-32-NEXT: movl %edx, 28(%ecx) +; SOFT-FLOAT-32-NEXT: movl %eax, 24(%ecx) +; SOFT-FLOAT-32-NEXT: movl %esi, 20(%ecx) +; SOFT-FLOAT-32-NEXT: movl %ebp, 16(%ecx) +; SOFT-FLOAT-32-NEXT: movl %ebx, 12(%ecx) +; SOFT-FLOAT-32-NEXT: movl %edi, 8(%ecx) +; SOFT-FLOAT-32-NEXT: movl (%esp), %eax # 4-byte Reload +; SOFT-FLOAT-32-NEXT: movl %eax, 4(%ecx) +; SOFT-FLOAT-32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; SOFT-FLOAT-32-NEXT: movl %eax, (%ecx) +; SOFT-FLOAT-32-NEXT: movl %ecx, %eax +; SOFT-FLOAT-32-NEXT: addl $16, %esp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-NEXT: popl %esi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-NEXT: popl %edi +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-NEXT: popl %ebx +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-NEXT: popl %ebp +; SOFT-FLOAT-32-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-NEXT: retl $4 +; +; SOFT-FLOAT-32-FMA-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-32-FMA: # %bb.0: +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-FMA-NEXT: subl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 36 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %esi, -20 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %edi, -16 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %ebx, -12 +; SOFT-FLOAT-32-FMA-NEXT: .cfi_offset %ebp, -8 +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %ebx +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %ebp +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: 
.cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; SOFT-FLOAT-32-FMA-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, %esi +; SOFT-FLOAT-32-FMA-NEXT: movl %edx, %ebp +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-FMA-NEXT: movl %edx, %ebx +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; SOFT-FLOAT-32-FMA-NEXT: movl %edx, (%esp) # 4-byte Spill +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-FMA-NEXT: movl %edx, %ebx +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl %esi +; 
SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, %ebp +; SOFT-FLOAT-32-FMA-NEXT: movl %edx, %esi +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA-NEXT: movl {{[0-9]+}}(%esp), %ecx +; SOFT-FLOAT-32-FMA-NEXT: movl %edx, 28(%ecx) +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, 24(%ecx) +; SOFT-FLOAT-32-FMA-NEXT: movl %esi, 20(%ecx) +; SOFT-FLOAT-32-FMA-NEXT: movl %ebp, 16(%ecx) +; SOFT-FLOAT-32-FMA-NEXT: movl %ebx, 12(%ecx) +; SOFT-FLOAT-32-FMA-NEXT: movl %edi, 8(%ecx) +; SOFT-FLOAT-32-FMA-NEXT: movl (%esp), %eax # 4-byte Reload +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, 4(%ecx) +; SOFT-FLOAT-32-FMA-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; SOFT-FLOAT-32-FMA-NEXT: movl %eax, (%ecx) +; SOFT-FLOAT-32-FMA-NEXT: movl %ecx, %eax +; SOFT-FLOAT-32-FMA-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-FMA-NEXT: popl %esi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-FMA-NEXT: popl %edi +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA-NEXT: popl %ebx +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA-NEXT: popl %ebp +; SOFT-FLOAT-32-FMA-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA-NEXT: retl $4 +; +; SOFT-FLOAT-32-FMA4-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-32-FMA4: # %bb.0: +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-FMA4-NEXT: subl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 36 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %esi, -20 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %edi, -16 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %ebx, -12 +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_offset %ebp, -8 +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %edi +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %ebx +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %esi +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %ebp +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 
4-byte Spill +; SOFT-FLOAT-32-FMA4-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, %esi +; SOFT-FLOAT-32-FMA4-NEXT: movl %edx, %ebp +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-FMA4-NEXT: movl %edx, %ebx +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __muldf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %eax +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; SOFT-FLOAT-32-FMA4-NEXT: movl %edx, (%esp) # 4-byte Spill +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, %edi +; SOFT-FLOAT-32-FMA4-NEXT: movl %edx, %ebx +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %ebp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl %esi +; SOFT-FLOAT-32-FMA4-NEXT: 
.cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, %ebp +; SOFT-FLOAT-32-FMA4-NEXT: movl %edx, %esi +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[0-9]+}}(%esp) +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: calll __adddf3 +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_adjust_cfa_offset -16 +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[0-9]+}}(%esp), %ecx +; SOFT-FLOAT-32-FMA4-NEXT: movl %edx, 28(%ecx) +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, 24(%ecx) +; SOFT-FLOAT-32-FMA4-NEXT: movl %esi, 20(%ecx) +; SOFT-FLOAT-32-FMA4-NEXT: movl %ebp, 16(%ecx) +; SOFT-FLOAT-32-FMA4-NEXT: movl %ebx, 12(%ecx) +; SOFT-FLOAT-32-FMA4-NEXT: movl %edi, 8(%ecx) +; SOFT-FLOAT-32-FMA4-NEXT: movl (%esp), %eax # 4-byte Reload +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, 4(%ecx) +; SOFT-FLOAT-32-FMA4-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; SOFT-FLOAT-32-FMA4-NEXT: movl %eax, (%ecx) +; SOFT-FLOAT-32-FMA4-NEXT: movl %ecx, %eax +; SOFT-FLOAT-32-FMA4-NEXT: addl $16, %esp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 20 +; SOFT-FLOAT-32-FMA4-NEXT: popl %esi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-32-FMA4-NEXT: popl %edi +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 12 +; SOFT-FLOAT-32-FMA4-NEXT: popl %ebx +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-32-FMA4-NEXT: popl %ebp +; SOFT-FLOAT-32-FMA4-NEXT: .cfi_def_cfa_offset 4 +; SOFT-FLOAT-32-FMA4-NEXT: retl $4 +; +; SOFT-FLOAT-64-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-64: # %bb.0: +; SOFT-FLOAT-64-NEXT: pushq %rbp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: pushq %r15 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-NEXT: pushq %r14 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-NEXT: pushq %r13 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-NEXT: pushq %r12 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-NEXT: pushq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-NEXT: pushq %rax +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64-NEXT: .cfi_offset %rbx, -56 +; SOFT-FLOAT-64-NEXT: .cfi_offset %r12, -48 +; SOFT-FLOAT-64-NEXT: .cfi_offset %r13, -40 +; SOFT-FLOAT-64-NEXT: .cfi_offset %r14, -32 +; SOFT-FLOAT-64-NEXT: .cfi_offset %r15, -24 +; SOFT-FLOAT-64-NEXT: .cfi_offset %rbp, -16 +; SOFT-FLOAT-64-NEXT: movq %rcx, %r14 +; SOFT-FLOAT-64-NEXT: movq %rdx, %r15 +; SOFT-FLOAT-64-NEXT: movq %rsi, %r12 +; SOFT-FLOAT-64-NEXT: movq %rdi, %rbx +; SOFT-FLOAT-64-NEXT: movq {{[0-9]+}}(%rsp), %rbp +; SOFT-FLOAT-64-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-NEXT: movq %r8, %rdi +; SOFT-FLOAT-64-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, %r13 +; SOFT-FLOAT-64-NEXT: movq %r14, %rdi +; SOFT-FLOAT-64-NEXT: movq %rbp, %rsi +; SOFT-FLOAT-64-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, %r14 +; SOFT-FLOAT-64-NEXT: movq %r15, %rdi +; SOFT-FLOAT-64-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-NEXT: callq __muldf3@PLT 
+; SOFT-FLOAT-64-NEXT: movq %rax, %r15 +; SOFT-FLOAT-64-NEXT: movq %r12, %rdi +; SOFT-FLOAT-64-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, %rdi +; SOFT-FLOAT-64-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, %r12 +; SOFT-FLOAT-64-NEXT: movq %r15, %rdi +; SOFT-FLOAT-64-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, %r15 +; SOFT-FLOAT-64-NEXT: movq %r14, %rdi +; SOFT-FLOAT-64-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, %r14 +; SOFT-FLOAT-64-NEXT: movq %r13, %rdi +; SOFT-FLOAT-64-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-NEXT: movq %rax, 24(%rbx) +; SOFT-FLOAT-64-NEXT: movq %r14, 16(%rbx) +; SOFT-FLOAT-64-NEXT: movq %r15, 8(%rbx) +; SOFT-FLOAT-64-NEXT: movq %r12, (%rbx) +; SOFT-FLOAT-64-NEXT: movq %rbx, %rax +; SOFT-FLOAT-64-NEXT: addq $8, %rsp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-NEXT: popq %rbx +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-NEXT: popq %r12 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-NEXT: popq %r13 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-NEXT: popq %r14 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-NEXT: popq %r15 +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-NEXT: popq %rbp +; SOFT-FLOAT-64-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-NEXT: retq +; +; SOFT-FLOAT-64-FMA-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-64-FMA: # %bb.0: +; SOFT-FLOAT-64-FMA-NEXT: pushq %rbp +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA-NEXT: pushq %r15 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-FMA-NEXT: pushq %r14 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-FMA-NEXT: pushq %r13 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-FMA-NEXT: pushq %r12 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-FMA-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-FMA-NEXT: pushq %rax +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %rbx, -56 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %r12, -48 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %r13, -40 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %r14, -32 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %r15, -24 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_offset %rbp, -16 +; SOFT-FLOAT-64-FMA-NEXT: movq %rcx, %r14 +; SOFT-FLOAT-64-FMA-NEXT: movq %rdx, %r15 +; SOFT-FLOAT-64-FMA-NEXT: movq %rsi, %r12 +; SOFT-FLOAT-64-FMA-NEXT: movq %rdi, %rbx +; SOFT-FLOAT-64-FMA-NEXT: movq {{[0-9]+}}(%rsp), %rbp +; SOFT-FLOAT-64-FMA-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA-NEXT: movq %r8, %rdi +; SOFT-FLOAT-64-FMA-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, %r13 +; SOFT-FLOAT-64-FMA-NEXT: movq %r14, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq %rbp, %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, %r14 +; SOFT-FLOAT-64-FMA-NEXT: movq %r15, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, %r15 +; SOFT-FLOAT-64-FMA-NEXT: movq %r12, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq 
%rax, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, %r12 +; SOFT-FLOAT-64-FMA-NEXT: movq %r15, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, %r15 +; SOFT-FLOAT-64-FMA-NEXT: movq %r14, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, %r14 +; SOFT-FLOAT-64-FMA-NEXT: movq %r13, %rdi +; SOFT-FLOAT-64-FMA-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA-NEXT: movq %rax, 24(%rbx) +; SOFT-FLOAT-64-FMA-NEXT: movq %r14, 16(%rbx) +; SOFT-FLOAT-64-FMA-NEXT: movq %r15, 8(%rbx) +; SOFT-FLOAT-64-FMA-NEXT: movq %r12, (%rbx) +; SOFT-FLOAT-64-FMA-NEXT: movq %rbx, %rax +; SOFT-FLOAT-64-FMA-NEXT: addq $8, %rsp +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-FMA-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-FMA-NEXT: popq %r12 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-FMA-NEXT: popq %r13 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-FMA-NEXT: popq %r14 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-FMA-NEXT: popq %r15 +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA-NEXT: popq %rbp +; SOFT-FLOAT-64-FMA-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA-NEXT: retq +; +; SOFT-FLOAT-64-FMA4-LABEL: fmuladd_contract_v4f64: +; SOFT-FLOAT-64-FMA4: # %bb.0: +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rbp +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %r15 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %r14 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %r13 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %r12 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-FMA4-NEXT: pushq %rax +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 64 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %rbx, -56 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %r12, -48 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %r13, -40 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %r14, -32 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %r15, -24 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_offset %rbp, -16 +; SOFT-FLOAT-64-FMA4-NEXT: movq %rcx, %r14 +; SOFT-FLOAT-64-FMA4-NEXT: movq %rdx, %r15 +; SOFT-FLOAT-64-FMA4-NEXT: movq %rsi, %r12 +; SOFT-FLOAT-64-FMA4-NEXT: movq %rdi, %rbx +; SOFT-FLOAT-64-FMA4-NEXT: movq {{[0-9]+}}(%rsp), %rbp +; SOFT-FLOAT-64-FMA4-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA4-NEXT: movq %r8, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %r13 +; SOFT-FLOAT-64-FMA4-NEXT: movq %r14, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: movq %rbp, %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %r14 +; SOFT-FLOAT-64-FMA4-NEXT: movq %r15, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %r15 +; SOFT-FLOAT-64-FMA4-NEXT: movq %r12, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __muldf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %rdi +; 
SOFT-FLOAT-64-FMA4-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %r12 +; SOFT-FLOAT-64-FMA4-NEXT: movq %r15, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %r15 +; SOFT-FLOAT-64-FMA4-NEXT: movq %r14, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, %r14 +; SOFT-FLOAT-64-FMA4-NEXT: movq %r13, %rdi +; SOFT-FLOAT-64-FMA4-NEXT: movq {{[0-9]+}}(%rsp), %rsi +; SOFT-FLOAT-64-FMA4-NEXT: callq __adddf3@PLT +; SOFT-FLOAT-64-FMA4-NEXT: movq %rax, 24(%rbx) +; SOFT-FLOAT-64-FMA4-NEXT: movq %r14, 16(%rbx) +; SOFT-FLOAT-64-FMA4-NEXT: movq %r15, 8(%rbx) +; SOFT-FLOAT-64-FMA4-NEXT: movq %r12, (%rbx) +; SOFT-FLOAT-64-FMA4-NEXT: movq %rbx, %rax +; SOFT-FLOAT-64-FMA4-NEXT: addq $8, %rsp +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 56 +; SOFT-FLOAT-64-FMA4-NEXT: popq %rbx +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 48 +; SOFT-FLOAT-64-FMA4-NEXT: popq %r12 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 40 +; SOFT-FLOAT-64-FMA4-NEXT: popq %r13 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 32 +; SOFT-FLOAT-64-FMA4-NEXT: popq %r14 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 24 +; SOFT-FLOAT-64-FMA4-NEXT: popq %r15 +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 16 +; SOFT-FLOAT-64-FMA4-NEXT: popq %rbp +; SOFT-FLOAT-64-FMA4-NEXT: .cfi_def_cfa_offset 8 +; SOFT-FLOAT-64-FMA4-NEXT: retq + %product = fmul contract <4 x double> %a, %b + %result = fadd contract <4 x double> %product, %c + ret <4 x double> %result +} + +attributes #0 = { "use-soft-float"="true" } + +declare float @llvm.fmuladd.f32(float %a, float %b, float %c) +declare double @llvm.fmuladd.f64(double %a, double %b, double %c) diff --git a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll index 116ab7e3978cf7..31517939a4b75c 100644 --- a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll +++ b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll @@ -10,7 +10,7 @@ ; CHECK: .loc 1 5 3 // t.c:5:3 ; CHECK: { // callseq 0, 0 ; CHECK: .param .b64 param0; -; CHECK: st.param.b64 [param0+0], %rd1; +; CHECK: st.param.b64 [param0], %rd1; ; CHECK: call.uni ; CHECK: escape_foo, ; CHECK: ( diff --git a/llvm/test/Transforms/InstCombine/ilogb.ll b/llvm/test/Transforms/InstCombine/ilogb.ll new file mode 100644 index 00000000000000..e30791fe68e7b2 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/ilogb.ll @@ -0,0 +1,203 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=instcombine -S | FileCheck %s + +define i32 @ilogbf_const1() { +; CHECK-LABEL: define i32 @ilogbf_const1() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float 7.000000e+00) +; CHECK-NEXT: ret i32 2 +; + %r = call i32 @ilogbf(float 7.000000e+00) + ret i32 %r +} + +define i32 @ilogb_const1() { +; CHECK-LABEL: define i32 @ilogb_const1() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double -7.000000e+00) +; CHECK-NEXT: ret i32 2 +; + %r = call i32 @ilogb(double -7.000000e+00) + ret i32 %r +} + +define i32 @ilogbf_const2() { +; CHECK-LABEL: define i32 @ilogbf_const2() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float 5.000000e-01) +; CHECK-NEXT: ret i32 -1 +; + %r = call i32 @ilogbf(float 5.000000e-01) + ret i32 %r +} + +define i32 @ilogb_const2() { +; CHECK-LABEL: define i32 @ilogb_const2() { +; CHECK-NEXT: 
[[R:%.*]] = call i32 @ilogb(double -5.000000e-01) +; CHECK-NEXT: ret i32 -1 +; + %r = call i32 @ilogb(double -5.000000e-01) + ret i32 %r +} + +define i32 @ilogbf_zero() { +; CHECK-LABEL: define i32 @ilogbf_zero() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float 0.000000e+00) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float 0.000000e+00) + ret i32 %r +} + +define i32 @ilogb_zero() { +; CHECK-LABEL: define i32 @ilogb_zero() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double 0.000000e+00) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double 0.000000e+00) + ret i32 %r +} + +define i32 @ilogbf_neg_zero() { +; CHECK-LABEL: define i32 @ilogbf_neg_zero() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float -0.000000e+00) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float -0.000000e+00) + ret i32 %r +} + +define i32 @ilogb_neg_zero() { +; CHECK-LABEL: define i32 @ilogb_neg_zero() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double -0.000000e+00) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double -0.000000e+00) + ret i32 %r +} + +define i32 @ilogbf_inf() { +; CHECK-LABEL: define i32 @ilogbf_inf() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float 0x7FF0000000000000) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float 0x7FF0000000000000) + ret i32 %r +} + +define i32 @ilogb_inf() { +; CHECK-LABEL: define i32 @ilogb_inf() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double 0x7FF0000000000000) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double 0x7FF0000000000000) + ret i32 %r +} + +define i32 @ilogbf_nan() { +; CHECK-LABEL: define i32 @ilogbf_nan() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float 0x7FF8000000000000) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float 0x7FF8000000000000) + ret i32 %r +} + +define i32 @ilogb_nan() { +; CHECK-LABEL: define i32 @ilogb_nan() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double 0x7FF8000000000000) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double 0x7FF8000000000000) + ret i32 %r +} + +define i32 @ilogbf_zero_readnone() { +; CHECK-LABEL: define i32 @ilogbf_zero_readnone() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float 0.000000e+00) #[[ATTR0:[0-9]+]] +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float 0.000000e+00) readnone + ret i32 %r +} + +define i32 @ilogb_zero_readnone() { +; CHECK-LABEL: define i32 @ilogb_zero_readnone() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double 0.000000e+00) #[[ATTR0]] +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double 0.000000e+00) readnone + ret i32 %r +} + +define i32 @ilogbf_neg_zero_readnone() { +; CHECK-LABEL: define i32 @ilogbf_neg_zero_readnone() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float -0.000000e+00) #[[ATTR0]] +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float -0.000000e+00) readnone + ret i32 %r +} + +define i32 @ilogb_neg_zero_readnone() { +; CHECK-LABEL: define i32 @ilogb_neg_zero_readnone() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double -0.000000e+00) #[[ATTR0]] +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double -0.000000e+00) readnone + ret i32 %r +} + +define i32 @ilogbf_inf_readnone() { +; CHECK-LABEL: define i32 @ilogbf_inf_readnone() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float 0x7FF0000000000000) #[[ATTR0]] +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float 0x7FF0000000000000) readnone + ret i32 %r +} + +define i32 @ilogb_inf_readnone() { +; CHECK-LABEL: define i32 @ilogb_inf_readnone() { +; CHECK-NEXT: [[R:%.*]] = call i32 
@ilogb(double 0x7FF0000000000000) #[[ATTR0]] +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double 0x7FF0000000000000) readnone + ret i32 %r +} + +define i32 @ilogbf_nan_readnone() { +; CHECK-LABEL: define i32 @ilogbf_nan_readnone() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float 0x7FF8000000000000) #[[ATTR0]] +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float 0x7FF8000000000000) readnone + ret i32 %r +} + +define i32 @ilogb_nan_readnone() { +; CHECK-LABEL: define i32 @ilogb_nan_readnone() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double 0x7FF8000000000000) #[[ATTR0]] +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double 0x7FF8000000000000) readnone + ret i32 %r +} + +define i32 @ilogbf_poison() { +; CHECK-LABEL: define i32 @ilogbf_poison() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogbf(float poison) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogbf(float poison) + ret i32 %r +} + +define i32 @ilogb_poison() { +; CHECK-LABEL: define i32 @ilogb_poison() { +; CHECK-NEXT: [[R:%.*]] = call i32 @ilogb(double poison) +; CHECK-NEXT: ret i32 [[R]] +; + %r = call i32 @ilogb(double poison) + ret i32 %r +} + +declare i32 @ilogbf(float) +declare i32 @ilogb(double) diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll index 7d1d326641e124..ad0068dc3f6be7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll @@ -586,6 +586,184 @@ exit: ret void } +; Test case for https://github.com/llvm/llvm-project/issues/112922. +define void @interleave_store_double_i64(ptr %dst) { +; CHECK-LABEL: define void @interleave_store_double_i64( +; CHECK-SAME: ptr [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[VEC_IND]] to <2 x double> +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> zeroinitializer, <2 x double> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3> +; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2> +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]], i32 1 +; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_1]], align 8 +; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]] +; CHECK-NEXT: store
double 0.000000e+00, ptr [[GEP_0]], align 8 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.1 = getelementptr { double, i64 }, ptr %dst, i64 %iv, i32 1 + store i64 %iv, ptr %gep.1, align 8 + %gep.0 = getelementptr { double, i64 }, ptr %dst, i64 %iv + store double 0.000000e+00, ptr %gep.0, align 8 + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, 1 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} + +define void @interleave_store_i64_double(ptr %dst) { +; CHECK-LABEL: define void @interleave_store_i64_double( +; CHECK-SAME: ptr [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]] +; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_0]], align 8 +; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]], i32 1 +; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_1]], align 8 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.0 = getelementptr { double, i64 }, ptr %dst, i64 %iv + store double 0.000000e+00, ptr %gep.0, align 8 + %gep.1 = getelementptr { double, i64 }, ptr %dst, i64 %iv, i32 1 + store i64 %iv, ptr %gep.1, align 8 + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, 1 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} + +; TODO: The interleave group should likely have the same cost as @interleave_store_double_i64. 
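The shuffle sequences in the vectorized body above follow the usual factor-2 interleaved-store lowering: the two member vectors are first concatenated, then a strided mask picks lane 0 of each member followed by lane 1 of each member. A minimal sketch of how those masks can be derived with LLVM's mask helpers from llvm/Analysis/VectorUtils.h; this is illustrative only, not the LoopVectorize code path itself:

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Concatenation mask for two VF=2 member vectors: <i32 0, i32 1, i32 2, i32 3>.
  llvm::SmallVector<int, 16> Concat =
      llvm::createSequentialMask(/*Start=*/0, /*NumInts=*/4, /*NumUndefs=*/0);
  // Interleave mask for VF=2 and 2 members: <i32 0, i32 2, i32 1, i32 3>,
  // i.e. lane 0 of each member, then lane 1 of each member.
  llvm::SmallVector<int, 16> Interleave =
      llvm::createInterleaveMask(/*VF=*/2, /*NumVecs=*/2);
  for (int Idx : Interleave)
    llvm::outs() << Idx << ' '; // prints: 0 2 1 3
  llvm::outs() << '\n';
  return 0;
}
```

Applied to the concatenated pair [A0, A1, B0, B1], the interleave mask yields [A0, B0, A1, B1], which is exactly the in-memory order of a { double, i64 }-style struct array.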
+define void @interleave_store_double_i64_2(ptr %dst) { +; CHECK-LABEL: define void @interleave_store_double_i64_2( +; CHECK-SAME: ptr [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]], i32 1 +; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8 +; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]] +; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_0]], align 8 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.1 = getelementptr { i64, double }, ptr %dst, i64 %iv, i32 1 + store double 0.000000e+00, ptr %gep.1, align 8 + %gep.0 = getelementptr { i64, double }, ptr %dst, i64 %iv + store i64 %iv, ptr %gep.0, align 8 + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, 1 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} + +define void @interleave_store_i64_double_2(ptr %dst) { +; CHECK-LABEL: define void @interleave_store_i64_double_2( +; CHECK-SAME: ptr [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[VEC_IND]] to <2 x double> +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3> +; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2> +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]] +; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_0]], align 8 +; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]], i32 1 +; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.0
= getelementptr { i64, double }, ptr %dst, i64 %iv + store i64 %iv, ptr %gep.0, align 8 + %gep.1 = getelementptr { i64, double }, ptr %dst, i64 %iv, i32 1 + store double 0.000000e+00, ptr %gep.1, align 8 + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, 1 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} + + + attributes #0 = { "target-features"="+sse4.2" } attributes #1 = { "min-legal-vector-width"="0" "target-cpu"="cascadelake" } @@ -601,4 +779,8 @@ attributes #1 = { "min-legal-vector-width"="0" "target-cpu"="cascadelake" } ; CHECK: [[META8]] = distinct !{[[META8]], !"LVerDomain"} ; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} ; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]} +; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]} +; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META2]], [[META1]]} +; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]} +; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META2]], [[META1]]} ;. diff --git a/llvm/test/Transforms/NaryReassociate/NVPTX/nary-slsr.ll b/llvm/test/Transforms/NaryReassociate/NVPTX/nary-slsr.ll index c9c1406a0fa8ad..face96f85975ac 100644 --- a/llvm/test/Transforms/NaryReassociate/NVPTX/nary-slsr.ll +++ b/llvm/test/Transforms/NaryReassociate/NVPTX/nary-slsr.ll @@ -22,7 +22,7 @@ define void @nary_reassociate_after_slsr(i32 %a, i32 %b, i32 %c) { %abc = add i32 %ab, %c call void @foo(i32 %abc) ; CHECK: call void @foo(i32 %abc) -; PTX: st.param.b32 [param0+0], [[abc:%r[0-9]+]]; +; PTX: st.param.b32 [param0], [[abc:%r[0-9]+]]; %b2 = shl i32 %b, 1 %ab2 = add i32 %a, %b2 @@ -31,7 +31,7 @@ define void @nary_reassociate_after_slsr(i32 %a, i32 %b, i32 %c) { ; PTX: add.s32 [[ab2c:%r[0-9]+]], [[abc]], [[b]] call void @foo(i32 %ab2c) ; CHECK-NEXT: call void @foo(i32 %ab2c) -; PTX: st.param.b32 [param0+0], [[ab2c]]; +; PTX: st.param.b32 [param0], [[ab2c]]; %b3 = mul i32 %b, 3 %ab3 = add i32 %a, %b3 @@ -40,7 +40,7 @@ define void @nary_reassociate_after_slsr(i32 %a, i32 %b, i32 %c) { ; PTX: add.s32 [[ab3c:%r[0-9]+]], [[ab2c]], [[b]] call void @foo(i32 %ab3c) ; CHECK-NEXT: call void @foo(i32 %ab3c) -; PTX: st.param.b32 [param0+0], [[ab3c]]; +; PTX: st.param.b32 [param0], [[ab3c]]; ret void } diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/nvptx-basic.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/nvptx-basic.ll.expected index 5c9af3bb44da2a..a64364019de15e 100644 --- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/nvptx-basic.ll.expected +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/nvptx-basic.ll.expected @@ -23,10 +23,10 @@ define dso_local void @caller_St8x4(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ld.param.u64 %rd3, [caller_St8x4_param_0+8]; ; CHECK-NEXT: st.u64 [%SP+8], %rd3; ; CHECK-NEXT: ld.param.u64 %rd4, [caller_St8x4_param_0]; -; CHECK-NEXT: st.u64 [%SP+0], %rd4; +; CHECK-NEXT: st.u64 [%SP], %rd4; ; CHECK-NEXT: { // callseq 0, 0 ; CHECK-NEXT: .param .align 16 .b8 param0[32]; -; CHECK-NEXT: st.param.v2.b64 [param0+0], {%rd4, %rd3}; +; CHECK-NEXT: st.param.v2.b64 [param0], {%rd4, %rd3}; ; CHECK-NEXT: st.param.v2.b64 [param0+16], {%rd2, %rd1}; ; CHECK-NEXT: .param .align 16 .b8 retval0[32]; ; CHECK-NEXT: call.uni (retval0), @@ -34,7 +34,7 @@ define dso_local void @caller_St8x4(ptr nocapture noundef readonly byval(%struct ; CHECK-NEXT: ( ; CHECK-NEXT: param0 ; CHECK-NEXT: ); -; CHECK-NEXT: ld.param.v2.b64 {%rd5, %rd6}, [retval0+0]; +; CHECK-NEXT: ld.param.v2.b64 {%rd5, 
%rd6}, [retval0]; ; CHECK-NEXT: ld.param.v2.b64 {%rd7, %rd8}, [retval0+16]; ; CHECK-NEXT: } // callseq 0 ; CHECK-NEXT: st.u64 [%r1], %rd5; @@ -66,7 +66,7 @@ define internal fastcc [4 x i64] @callee_St8x4(ptr nocapture noundef readonly by ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.v2.u64 {%rd1, %rd2}, [callee_St8x4_param_0]; ; CHECK-NEXT: ld.param.v2.u64 {%rd3, %rd4}, [callee_St8x4_param_0+16]; -; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {%rd1, %rd2}; +; CHECK-NEXT: st.param.v2.b64 [func_retval0], {%rd1, %rd2}; ; CHECK-NEXT: st.param.v2.b64 [func_retval0+16], {%rd3, %rd4}; ; CHECK-NEXT: ret; %1 = load i64, ptr %in, align 8 diff --git a/llvm/test/tools/llvm-cov/branch-c-general.test b/llvm/test/tools/llvm-cov/branch-c-general.test index 9b5889babde366..2fa99dfe61532e 100644 --- a/llvm/test/tools/llvm-cov/branch-c-general.test +++ b/llvm/test/tools/llvm-cov/branch-c-general.test @@ -47,7 +47,7 @@ // CHECK: Branch (103:9): [True: 9, False: 1] // CHECK: switches() -// CHECK: Branch (113:3): [True: 1, False: 0] +// CHECK: Branch (113:3): [True: 1, Folded] // CHECK: Branch (117:63): [True: 15, False: 0] // CHECK: Branch (119:5): [True: 1, False: 14] // CHECK: Branch (120:11): [True: 0, False: 1] @@ -57,7 +57,7 @@ // CHECK: Branch (126:11): [True: 3, False: 0] // CHECK: Branch (128:5): [True: 4, False: 11] // CHECK: Branch (129:11): [True: 4, False: 0] -// CHECK: Branch (131:7): [True: 4, False: 0] +// CHECK: Branch (131:7): [True: 4, Folded] // CHECK: Branch (132:13): [True: 4, False: 0] // CHECK: Branch (136:5): [True: 5, False: 10] // CHECK: Branch (137:11): [True: 1, False: 4] @@ -114,13 +114,13 @@ -// REPORT: Name Regions Miss Cover Lines Miss Cover Branches Miss Cover +// REPORT: Name Regions Miss Cover Lines Miss Cover Branches Miss Cover // REPORT-NEXT: --- // REPORT-NEXT: simple_loops 8 0 100.00% 9 0 100.00% 6 0 100.00% // REPORT-NEXT: conditionals 24 0 100.00% 15 0 100.00% 16 2 87.50% // REPORT-NEXT: early_exits 20 4 80.00% 25 2 92.00% 16 6 62.50% // REPORT-NEXT: jumps 39 12 69.23% 48 2 95.83% 26 9 65.38% -// REPORT-NEXT: switches 28 5 82.14% 38 4 89.47% 30 9 70.00% +// REPORT-NEXT: switches 28 5 82.14% 38 4 89.47% 28 7 75.00% // REPORT-NEXT: big_switch 25 1 96.00% 32 0 100.00% 30 6 80.00% // REPORT-NEXT: boolean_operators 16 0 100.00% 13 0 100.00% 22 2 90.91% // REPORT-NEXT: boolop_loops 19 0 100.00% 14 0 100.00% 16 2 87.50% @@ -129,12 +129,12 @@ // REPORT-NEXT: main 1 0 100.00% 16 0 100.00% 0 0 0.00% // REPORT-NEXT: c-general.c:static_func 4 0 100.00% 4 0 100.00% 2 0 100.00% // REPORT-NEXT: --- -// REPORT-NEXT: TOTAL 197 24 87.82% 234 8 96.58% 174 38 78.16% +// REPORT-NEXT: TOTAL 197 24 87.82% 234 8 96.58% 172 36 79.07% // Test file-level report. // RUN: llvm-profdata merge %S/Inputs/branch-c-general.proftext -o %t.profdata // RUN: llvm-cov report %S/Inputs/branch-c-general.o32l -instr-profile %t.profdata -path-equivalence=/tmp,%S/Inputs %S/Inputs/branch-c-general.c | FileCheck %s -check-prefix=FILEREPORT -// FILEREPORT: TOTAL{{.*}}174 38 78.16% +// FILEREPORT: TOTAL{{.*}}172 36 79.07% // Test color True/False output. 
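The REPORT and FILEREPORT deltas above fall out of the per-side folded-branch accounting introduced further down in CoverageSummaryInfo.cpp: a branch with one folded arm now contributes a single countable outcome instead of two. A small sketch of the rule, using a simplified stand-in for llvm-cov's CountedRegion with abridged field names (an illustration, not the shipped code):

```cpp
#include <cstddef>
#include <cstdint>

// Simplified stand-in for llvm-cov's CountedRegion branch fields.
struct Branch {
  bool TrueFolded, FalseFolded;
  uint64_t TrueCount, FalseCount;
};

// Count each non-folded side as one branch outcome.
void sumOne(const Branch &BR, size_t &NumBranches, size_t &Covered) {
  if (!BR.TrueFolded) {
    ++NumBranches;
    if (BR.TrueCount > 0)
      ++Covered;
  }
  if (!BR.FalseFolded) {
    ++NumBranches;
    if (BR.FalseCount > 0)
      ++Covered;
  }
}
```

In switches(), two branches have a folded False side (113:3 and 131:7 in the CHECK lines above), so the totals drop from 30 to 28 outcomes and from 9 to 7 misses, raising the branch rate from 21/30 = 70.00% to 21/28 = 75.00%; the file total moves from 136/174 = 78.16% to 136/172 = 79.07% the same way.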
// RUN: llvm-cov show --use-color --show-branches=count %S/Inputs/branch-c-general.o32l -instr-profile %t.profdata -path-equivalence=/tmp,%S/Inputs %S/Inputs/branch-c-general.c | FileCheck %s -check-prefix=USECOLOR @@ -161,6 +161,6 @@ // HTML-INDEX: // HTML-INDEX: 87.82% (173/197) // HTML-INDEX: -// HTML-INDEX: 78.16% (136/174) +// HTML-INDEX: 79.07% (136/172) // HTML-INDEX: // HTML-INDEX: Totals diff --git a/llvm/tools/llvm-cov/CoverageExporterJson.cpp b/llvm/tools/llvm-cov/CoverageExporterJson.cpp index 9a8c7c94f06124..4088c1b053aa8d 100644 --- a/llvm/tools/llvm-cov/CoverageExporterJson.cpp +++ b/llvm/tools/llvm-cov/CoverageExporterJson.cpp @@ -125,7 +125,7 @@ json::Array renderRegions(ArrayRef<CountedRegion> Regions) { json::Array renderBranchRegions(ArrayRef<CountedRegion> Regions) { json::Array RegionArray; for (const auto &Region : Regions) - if (!Region.Folded) + if (!Region.TrueFolded || !Region.FalseFolded) RegionArray.push_back(renderBranch(Region)); return RegionArray; } diff --git a/llvm/tools/llvm-cov/CoverageExporterLcov.cpp b/llvm/tools/llvm-cov/CoverageExporterLcov.cpp index ae8f556edb313b..d6b9367ae4c514 100644 --- a/llvm/tools/llvm-cov/CoverageExporterLcov.cpp +++ b/llvm/tools/llvm-cov/CoverageExporterLcov.cpp @@ -139,7 +139,7 @@ void renderBranchExecutionCounts(raw_ostream &OS, unsigned BranchIndex = 0; while (NextBranch != EndBranch && CurrentLine == NextBranch->LineStart) { - if (!NextBranch->Folded) { + if (!NextBranch->TrueFolded || !NextBranch->FalseFolded) { unsigned BC1 = NextBranch->ExecutionCount; unsigned BC2 = NextBranch->FalseExecutionCount; bool BranchNotExecuted = (BC1 == 0 && BC2 == 0); diff --git a/llvm/tools/llvm-cov/CoverageSummaryInfo.cpp b/llvm/tools/llvm-cov/CoverageSummaryInfo.cpp index 4f150020ee3815..58e7918d392709 100644 --- a/llvm/tools/llvm-cov/CoverageSummaryInfo.cpp +++ b/llvm/tools/llvm-cov/CoverageSummaryInfo.cpp @@ -19,18 +19,18 @@ using namespace coverage; static void sumBranches(size_t &NumBranches, size_t &CoveredBranches, const ArrayRef<CountedRegion> &Branches) { for (const auto &BR : Branches) { - // Skip folded branches. - if (BR.Folded) - continue; - - // "True" Condition Branches. - ++NumBranches; - if (BR.ExecutionCount > 0) - ++CoveredBranches; - // "False" Condition Branches. - ++NumBranches; - if (BR.FalseExecutionCount > 0) - ++CoveredBranches; + if (!BR.TrueFolded) { + // "True" Condition Branches. + ++NumBranches; + if (BR.ExecutionCount > 0) + ++CoveredBranches; + } + if (!BR.FalseFolded) { + // "False" Condition Branches. + ++NumBranches; + if (BR.FalseExecutionCount > 0) + ++CoveredBranches; + } } } diff --git a/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp b/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp index 6f4d327679d6b6..7421763dd7a427 100644 --- a/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp +++ b/llvm/tools/llvm-cov/SourceCoverageViewHTML.cpp @@ -1128,36 +1128,45 @@ void SourceCoverageViewHTML::renderBranchView(raw_ostream &OS, BranchView &BRV, "line-number") + "): ["; - if (R.Folded) { + if (R.TrueFolded && R.FalseFolded) { OS << "Folded - Ignored]\n"; continue; } // Display TrueCount or TruePercent. - std::string TrueColor = R.ExecutionCount ? "None" : "red branch"; + std::string TrueColor = + (R.TrueFolded || R.ExecutionCount ? "None" : "red branch"); std::string TrueCovClass = - (R.ExecutionCount > 0) ?
"covered-line" : "uncovered-line"; - - OS << tag("span", "True", TrueColor); - OS << ": "; - if (getOptions().ShowBranchCounts) - OS << tag("span", formatCount(R.ExecutionCount), TrueCovClass) << ", "; - else - OS << format("%0.2f", TruePercent) << "%, "; + (R.TrueFolded || R.ExecutionCount > 0 ? "covered-line" + : "uncovered-line"); + + if (R.TrueFolded) + OS << "Folded, "; + else { + OS << tag("span", "True", TrueColor) << ": "; + if (getOptions().ShowBranchCounts) + OS << tag("span", formatCount(R.ExecutionCount), TrueCovClass) << ", "; + else + OS << format("%0.2f", TruePercent) << "%, "; + } // Display FalseCount or FalsePercent. - std::string FalseColor = R.FalseExecutionCount ? "None" : "red branch"; + std::string FalseColor = + (R.FalseFolded || R.FalseExecutionCount ? "None" : "red branch"); std::string FalseCovClass = - (R.FalseExecutionCount > 0) ? "covered-line" : "uncovered-line"; - - OS << tag("span", "False", FalseColor); - OS << ": "; - if (getOptions().ShowBranchCounts) - OS << tag("span", formatCount(R.FalseExecutionCount), FalseCovClass); - else - OS << format("%0.2f", FalsePercent) << "%"; - - OS << "]\n"; + (R.FalseFolded || R.FalseExecutionCount > 0 ? "covered-line" + : "uncovered-line"); + + if (R.FalseFolded) + OS << "Folded]\n"; + else { + OS << tag("span", "False", FalseColor) << ": "; + if (getOptions().ShowBranchCounts) + OS << tag("span", formatCount(R.FalseExecutionCount), FalseCovClass) + << "]\n"; + else + OS << format("%0.2f", FalsePercent) << "%]\n"; + } } OS << EndPre; OS << EndExpansionDiv; diff --git a/llvm/tools/llvm-cov/SourceCoverageViewText.cpp b/llvm/tools/llvm-cov/SourceCoverageViewText.cpp index 8b93b592910b3d..444f33dac10837 100644 --- a/llvm/tools/llvm-cov/SourceCoverageViewText.cpp +++ b/llvm/tools/llvm-cov/SourceCoverageViewText.cpp @@ -309,31 +309,38 @@ void SourceCoverageViewText::renderBranchView(raw_ostream &OS, BranchView &BRV, renderLinePrefix(OS, ViewDepth); OS << " Branch (" << R.LineStart << ":" << R.ColumnStart << "): ["; - if (R.Folded) { + if (R.TrueFolded && R.FalseFolded) { OS << "Folded - Ignored]\n"; continue; } - colored_ostream(OS, raw_ostream::RED, - getOptions().Colors && !R.ExecutionCount, - /*Bold=*/false, /*BG=*/true) - << "True"; - - if (getOptions().ShowBranchCounts) - OS << ": " << formatCount(R.ExecutionCount) << ", "; - else - OS << ": " << format("%0.2f", TruePercent) << "%, "; - - colored_ostream(OS, raw_ostream::RED, - getOptions().Colors && !R.FalseExecutionCount, - /*Bold=*/false, /*BG=*/true) - << "False"; + if (R.TrueFolded) + OS << "Folded, "; + else { + colored_ostream(OS, raw_ostream::RED, + getOptions().Colors && !R.ExecutionCount, + /*Bold=*/false, /*BG=*/true) + << "True"; + + if (getOptions().ShowBranchCounts) + OS << ": " << formatCount(R.ExecutionCount) << ", "; + else + OS << ": " << format("%0.2f", TruePercent) << "%, "; + } - if (getOptions().ShowBranchCounts) - OS << ": " << formatCount(R.FalseExecutionCount); - else - OS << ": " << format("%0.2f", FalsePercent) << "%"; - OS << "]\n"; + if (R.FalseFolded) + OS << "Folded]\n"; + else { + colored_ostream(OS, raw_ostream::RED, + getOptions().Colors && !R.FalseExecutionCount, + /*Bold=*/false, /*BG=*/true) + << "False"; + + if (getOptions().ShowBranchCounts) + OS << ": " << formatCount(R.FalseExecutionCount) << "]\n"; + else + OS << ": " << format("%0.2f", FalsePercent) << "%]\n"; + } } } diff --git a/llvm/tools/llvm-diff/lib/DifferenceEngine.cpp b/llvm/tools/llvm-diff/lib/DifferenceEngine.cpp index 05cae4b67d7e52..9be0eec7b73f3e 100644 --- 
a/llvm/tools/llvm-diff/lib/DifferenceEngine.cpp +++ b/llvm/tools/llvm-diff/lib/DifferenceEngine.cpp @@ -189,11 +189,11 @@ class FunctionDifferenceEngine { // The returned reference is not permanently valid and should not be stored. BlockDiffCandidate &getOrCreateBlockDiffCandidate(const BasicBlock *LBB, const BasicBlock *RBB) { - auto It = BlockDiffCandidateIndices.find(LBB); + auto [It, Inserted] = + BlockDiffCandidateIndices.try_emplace(LBB, BlockDiffCandidates.size()); // Check if LBB already has a diff candidate - if (It == BlockDiffCandidateIndices.end()) { + if (Inserted) { // Add new one - BlockDiffCandidateIndices[LBB] = BlockDiffCandidates.size(); BlockDiffCandidates.push_back( {LBB, RBB, SmallDenseMap(), false}); return BlockDiffCandidates.back(); diff --git a/llvm/utils/gn/secondary/compiler-rt/lib/hwasan/BUILD.gn b/llvm/utils/gn/secondary/compiler-rt/lib/hwasan/BUILD.gn index a30c291e156723..e39d8114d1f473 100644 --- a/llvm/utils/gn/secondary/compiler-rt/lib/hwasan/BUILD.gn +++ b/llvm/utils/gn/secondary/compiler-rt/lib/hwasan/BUILD.gn @@ -57,14 +57,27 @@ source_set("sources") { "hwasan_poisoning.h", "hwasan_report.cpp", "hwasan_report.h", - "hwasan_setjmp_aarch64.S", - "hwasan_setjmp_riscv64.S", "hwasan_thread.cpp", "hwasan_thread.h", "hwasan_thread_list.cpp", "hwasan_thread_list.h", "hwasan_type_test.cpp", ] + if (current_cpu == "arm64") { + sources += [ + "hwasan_setjmp_aarch64.S", + "hwasan_tag_mismatch_aarch64.S", + ] + } + if (current_cpu == "riscv64") { + sources += [ + "hwasan_setjmp_riscv64.S", + "hwasan_tag_mismatch_riscv64.S", + ] + } + if (current_cpu == "x64") { + sources += [ "hwasan_setjmp_x86_64.S" ] + } } source_set("cxx_sources") { diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn index e5628a1d860419..3b452939839b28 100644 --- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn +++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn @@ -1039,7 +1039,6 @@ if (current_toolchain == default_toolchain) { "limits", "list", "locale", - "locale.h", "map", "math.h", "mdspan", @@ -1070,7 +1069,6 @@ if (current_toolchain == default_toolchain) { "stdbool.h", "stddef.h", "stdexcept", - "stdint.h", "stdio.h", "stdlib.h", "stop_token", diff --git a/mlir/docs/Dialects/Linalg/_index.md b/mlir/docs/Dialects/Linalg/_index.md index fbd1a451dc094e..976f0fd3c7e911 100644 --- a/mlir/docs/Dialects/Linalg/_index.md +++ b/mlir/docs/Dialects/Linalg/_index.md @@ -667,7 +667,7 @@ directly. This facility is currently in flight and is intended to subsume the above when ready. See the C++ class to YAML mapping traits in -`mlir-mlinalg-ods-yaml-gen.cpp` as the source of truth for the schema. +`mlir-linalg-ods-yaml-gen.cpp` as the source of truth for the schema. Most of the above documentation roughly applies to this path and will be ported as migration continues. diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml index 8cb698096ef5b7..bf2f26de26e9ed 100644 --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgNamedStructuredOps.yaml @@ -3114,6 +3114,143 @@ structured_op: !LinalgStructuredOpConfig - !ScalarExpression scalar_arg: KZp --- !LinalgOpConfig +metadata: !LinalgOpMetadata + name: conv_2d_nchw_fchw_q + cpp_class_name: Conv2DNchwFchwQOp + doc: |- + Performs 2-D convolution with zero point offsets. + + Layout: + * Input: NCHW. + * Kernel: FCHW. 
+ + Numeric casting is performed on the operands to the inner multiply, promoting + them to the same data type as the accumulator/output. This includes the zero + point offsets common to quantized operations. + implements: + - LinalgConvolutionOpInterface +structured_op: !LinalgStructuredOpConfig + args: + - !LinalgOperandDefConfig + name: I + kind: input_tensor + type_var: T1 + shape_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] -> (s0, + s1, s2 * s3 + s4 * s5, s6 * s7 + s8 * s9)> + - !LinalgOperandDefConfig + name: K + kind: input_tensor + type_var: T2 + shape_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] -> (s10, + s1, s4, s8)> + - !LinalgOperandDefConfig + name: IZp + kind: scalar + type_var: I32 + - !LinalgOperandDefConfig + name: KZp + kind: scalar + type_var: I32 + - !LinalgOperandDefConfig + name: O + kind: output_tensor + type_var: U + shape_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] -> (s0, + s10, s2, s6)> + - !LinalgOperandDefConfig + name: strides + kind: index_attr + index_attr_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] -> + (s3, s7)> + default_indices: + - 1 + - 1 + - !LinalgOperandDefConfig + name: dilations + kind: index_attr + index_attr_map: affine_map<()[s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10] -> + (s5, s9)> + default_indices: + - 1 + - 1 + indexing_maps: !LinalgIndexingMapsConfig + static_indexing_maps: + - affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8, + s9, s10] -> (d0, d4, d2 * s3 + d5 * s5, d3 * s7 + d6 * s9)> + - affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8, + s9, s10] -> (d1, d4, d5, d6)> + - affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8, + s9, s10] -> ()> + - affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8, + s9, s10] -> ()> + - affine_map<(d0, d1, d2, d3, d4, d5, d6)[s0, s1, s2, s3, s4, s5, s6, s7, s8, + s9, s10] -> (d0, d1, d2, d3)> + iterator_types: + - parallel + - parallel + - parallel + - parallel + - reduction + - reduction + - reduction + assignments: + - !ScalarAssign + arg: O + value: !ScalarExpression + scalar_fn: + kind: binary + fn_name: add + operands: + - !ScalarExpression + scalar_arg: O + - !ScalarExpression + scalar_fn: + kind: binary + fn_name: mul + operands: + - !ScalarExpression + scalar_fn: + kind: binary + fn_name: sub + operands: + - !ScalarExpression + scalar_fn: + kind: type + fn_name: cast_signed + type_var: U + operands: + - !ScalarExpression + scalar_arg: I + - !ScalarExpression + scalar_fn: + kind: type + fn_name: cast_signed + type_var: U + operands: + - !ScalarExpression + scalar_arg: IZp + - !ScalarExpression + scalar_fn: + kind: binary + fn_name: sub + operands: + - !ScalarExpression + scalar_fn: + kind: type + fn_name: cast_signed + type_var: U + operands: + - !ScalarExpression + scalar_arg: K + - !ScalarExpression + scalar_fn: + kind: type + fn_name: cast_signed + type_var: U + operands: + - !ScalarExpression + scalar_arg: KZp +--- !LinalgOpConfig metadata: !LinalgOpMetadata name: conv_2d_nchw_fchw cpp_class_name: Conv2DNchwFchwOp diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td index 45313200d4f0b9..626539cb7bde42 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td @@ -948,7 +948,7 @@ def MapInfoOp : OpenMP_Op<"map.info", [AttrSizedOperandSegments]> { objects (e.g. 
derived types or classes), indicates the bounds to be copied of the variable. When it's an array slice it is in rank order where rank 0 is the inner-most dimension. - - 'map_clauses': OpenMP map type for this map capture, for example: from, to and + - 'map_type': OpenMP map type for this map capture, for example: from, to and always. It's a bitfield composed of the OpenMP runtime flags stored in OpenMPOffloadMappingFlags. - 'map_capture_type': Capture type for the variable e.g. this, byref, byvalue, byvla diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td index 1ebea94fced0a3..14593305490661 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td @@ -54,7 +54,7 @@ def SPIRV_ControlBarrierOp : SPIRV_Op<"ControlBarrier", []> { #### Example: ```mlir - spirv.ControlBarrier "Workgroup", "Device", "Acquire|UniformMemory" + spirv.ControlBarrier <Workgroup>, <Device>, <Acquire|UniformMemory> ``` }]; diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td index 71ecabfb444bd0..022cbbbb6720fb 100644 --- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td +++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td @@ -1,4 +1,4 @@ -//===-- SPIRVBarrierOps.td - MLIR SPIR-V Barrier Ops -------*- tablegen -*-===// +//===-- SPIRVMiscOps.td - MLIR SPIR-V Misc Ops -------------*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp index 74c169c9a7e76a..f28473a108e1b5 100644 --- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp +++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp @@ -1024,6 +1024,71 @@ class ReturnValuePattern : public SPIRVToLLVMConversion<spirv::ReturnValueOp> { } }; +static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable, + StringRef name, + ArrayRef<Type> paramTypes, + Type resultType) { + auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>( + SymbolTable::lookupSymbolIn(symbolTable, name)); + if (func) + return func; + + OpBuilder b(symbolTable->getRegion(0)); + func = b.create<LLVM::LLVMFuncOp>( + symbolTable->getLoc(), name, + LLVM::LLVMFunctionType::get(resultType, paramTypes)); + func.setCConv(LLVM::cconv::CConv::SPIR_FUNC); + func.setConvergent(true); + func.setNoUnwind(true); + func.setWillReturn(true); + return func; +} + +static LLVM::CallOp createSPIRVBuiltinCall(Location loc, OpBuilder &builder, + LLVM::LLVMFuncOp func, + ValueRange args) { + auto call = builder.create<LLVM::CallOp>(loc, func, args); + call.setCConv(func.getCConv()); + call.setConvergentAttr(func.getConvergentAttr()); + call.setNoUnwindAttr(func.getNoUnwindAttr()); + call.setWillReturnAttr(func.getWillReturnAttr()); + return call; +} + +class ControlBarrierPattern + : public SPIRVToLLVMConversion<spirv::ControlBarrierOp> { +public: + using SPIRVToLLVMConversion<spirv::ControlBarrierOp>::SPIRVToLLVMConversion; + + LogicalResult + matchAndRewrite(spirv::ControlBarrierOp controlBarrierOp, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + constexpr StringLiteral funcName = "_Z22__spirv_ControlBarrieriii"; + Operation *symbolTable = + controlBarrierOp->getParentWithTrait<OpTrait::SymbolTable>(); + + Type i32 = rewriter.getI32Type(); + + Type voidTy = rewriter.getType<LLVM::LLVMVoidType>(); + LLVM::LLVMFuncOp func = + lookupOrCreateSPIRVFn(symbolTable, funcName, {i32, i32, i32}, voidTy); + + Location loc = controlBarrierOp->getLoc(); + Value execution = rewriter.create<LLVM::ConstantOp>(
loc, i32, static_cast<int32_t>(adaptor.getExecutionScope())); + Value memory = rewriter.create<LLVM::ConstantOp>( + loc, i32, static_cast<int32_t>(adaptor.getMemoryScope())); + Value semantics = rewriter.create<LLVM::ConstantOp>( + loc, i32, static_cast<int32_t>(adaptor.getMemorySemantics())); + + auto call = createSPIRVBuiltinCall(loc, rewriter, func, + {execution, memory, semantics}); + + rewriter.replaceOp(controlBarrierOp, call); + return success(); + } +}; + /// Converts `spirv.mlir.loop` to LLVM dialect. All blocks within selection /// should be reachable for conversion to succeed. The structure of the loop in /// LLVM dialect will be the following: @@ -1648,7 +1713,10 @@ void mlir::populateSPIRVToLLVMConversionPatterns( ShiftPattern, // Return ops - ReturnPattern, ReturnValuePattern>(patterns.getContext(), typeConverter); + ReturnPattern, ReturnValuePattern, + + // Barrier ops + ControlBarrierPattern>(patterns.getContext(), typeConverter); patterns.add<GlobalVariablePattern>(clientAPI, patterns.getContext(), typeConverter); diff --git a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py index e4a6ec7487bb2f..b45fecd0ee1457 100644 --- a/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py +++ b/mlir/python/mlir/dialects/linalg/opdsl/ops/core_named_ops.py @@ -876,6 +876,35 @@ def conv_2d_nhwc_fhwc_q( ) * (TypeFn.cast_signed(U, K[D.f, D.kh, D.kw, D.c]) - TypeFn.cast_signed(U, KZp)) +@linalg_structured_op +def conv_2d_nchw_fchw_q( + I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW), + K=TensorDef(T2, S.F, S.C, S.KH, S.KW), + IZp=ScalarDef(I32), + KZp=ScalarDef(I32), + O=TensorDef(U, S.N, S.F, S.OH, S.OW, output=True), + strides=IndexAttrDef(S.SH, S.SW, default=[1, 1]), + dilations=IndexAttrDef(S.DH, S.DW, default=[1, 1]), +): + """Performs 2-D convolution with zero point offsets. + + Layout: + * Input: NCHW. + * Kernel: FCHW. + + Numeric casting is performed on the operands to the inner multiply, promoting + them to the same data type as the accumulator/output. This includes the zero + point offsets common to quantized operations.
+ """ + implements(ConvolutionOpInterface) + domain(D.n, D.f, D.oh, D.ow, D.c, D.kh, D.kw) + O[D.n, D.f, D.oh, D.ow] += ( + TypeFn.cast_signed( + U, I[D.n, D.c, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW] + ) + - TypeFn.cast_signed(U, IZp) + ) * (TypeFn.cast_signed(U, K[D.f, D.c, D.kh, D.kw]) - TypeFn.cast_signed(U, KZp)) + @linalg_structured_op def conv_2d_nchw_fchw( I=TensorDef(T1, S.N, S.C, S.OH * S.SH + S.KH * S.DH, S.OW * S.SW + S.KW * S.DW), diff --git a/mlir/test/Conversion/SPIRVToLLVM/barrier-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/barrier-ops-to-llvm.mlir new file mode 100644 index 00000000000000..d53afeeea15d10 --- /dev/null +++ b/mlir/test/Conversion/SPIRVToLLVM/barrier-ops-to-llvm.mlir @@ -0,0 +1,23 @@ +// RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s + +//===----------------------------------------------------------------------===// +// spirv.ControlBarrierOp +//===----------------------------------------------------------------------===// + +// CHECK: llvm.func spir_funccc @_Z22__spirv_ControlBarrieriii(i32, i32, i32) attributes {convergent, no_unwind, will_return} + +// CHECK-LABEL: @control_barrier +spirv.func @control_barrier() "None" { + // CHECK: [[EXECUTION:%.*]] = llvm.mlir.constant(2 : i32) : i32 + // CHECK: [[MEMORY:%.*]] = llvm.mlir.constant(2 : i32) : i32 + // CHECK: [[SEMANTICS:%.*]] = llvm.mlir.constant(768 : i32) : i32 + // CHECK: llvm.call spir_funccc @_Z22__spirv_ControlBarrieriii([[EXECUTION]], [[MEMORY]], [[SEMANTICS]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> () + spirv.ControlBarrier <Workgroup>, <Workgroup>, <CrossWorkgroupMemory|WorkgroupMemory> + + // CHECK: [[EXECUTION:%.*]] = llvm.mlir.constant(2 : i32) : i32 + // CHECK: [[MEMORY:%.*]] = llvm.mlir.constant(2 : i32) : i32 + // CHECK: [[SEMANTICS:%.*]] = llvm.mlir.constant(256 : i32) : i32 + // CHECK: llvm.call spir_funccc @_Z22__spirv_ControlBarrieriii([[EXECUTION]], [[MEMORY]], [[SEMANTICS]]) {convergent, no_unwind, will_return} : (i32, i32, i32) -> () + spirv.ControlBarrier <Workgroup>, <Workgroup>, <WorkgroupMemory> + spirv.Return +} diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir index 146e9780b8ebbe..1b8969bd115595 100644 --- a/mlir/test/Dialect/Linalg/roundtrip.mlir +++ b/mlir/test/Dialect/Linalg/roundtrip.mlir @@ -664,3 +664,33 @@ func.func @winograd_output_dyn(%arg0: tensor<6x6x?x?x?x?xf32>, %arg1: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> { %0 = linalg.winograd_output_transform m(4) r(3) ins(%arg0 : tensor<6x6x?x?x?x?xf32>) outs(%arg1 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> return %0 : tensor<?x?x?x?xf32> } + +// ----- + +func.func @conv2d_channel_first_q(%img: tensor<100x3x224x224xi32>, %filt: tensor<64x3x5x5xi32>, %a: i32, %b: i32) -> tensor<100x64x220x220xi32> { + %init = arith.constant dense<0> : tensor<100x64x220x220xi32> + %1 = linalg.conv_2d_nchw_fchw_q {dilations = dense<1> : tensor<2xi64>, + strides = dense<1> : tensor<2xi64>} + ins(%img, %filt, %a, %b : tensor<100x3x224x224xi32>, tensor<64x3x5x5xi32>, i32, i32) + outs(%init : tensor<100x64x220x220xi32>) -> tensor<100x64x220x220xi32> + return %1 : tensor<100x64x220x220xi32> +} + +// CHECK-LABEL: func @conv2d_channel_first_q( +// CHECK: %[[arg0:[a-zA-z0-9]*]]: tensor<100x3x224x224xi32>, %[[arg1:[a-zA-z0-9]*]]: tensor<64x3x5x5xi32>, %[[arg2:[a-zA-z0-9]*]]: i32, %[[arg3:[a-zA-z0-9]*]]: i32) +// CHECK: linalg.conv_2d_nchw_fchw_q {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%[[arg0]], %[[arg1]], %[[arg2]], %[[arg3]] : tensor<100x3x224x224xi32>, tensor<64x3x5x5xi32>, i32, i32) outs(%{{.*}} : tensor<100x64x220x220xi32>) -> tensor<100x64x220x220xi32> + +// ----- + +func.func @conv2d_channel_first_q_promote(%img: tensor<100x3x224x224xi8>, %filt: tensor<64x3x5x5xi8>, %a: i8, %b: i8) ->
tensor<100x64x220x220xi32> { + %init = arith.constant dense<0> : tensor<100x64x220x220xi32> + %1 = linalg.conv_2d_nchw_fchw_q {dilations = dense<1> : tensor<2xi64>, + strides = dense<1> : tensor<2xi64>} + ins(%img, %filt, %a, %b : tensor<100x3x224x224xi8>, tensor<64x3x5x5xi8>, i8, i8) + outs(%init : tensor<100x64x220x220xi32>) -> tensor<100x64x220x220xi32> + return %1 : tensor<100x64x220x220xi32> +} + +// CHECK-LABEL: func @conv2d_channel_first_q_promote( +// CHECK: %[[arg0:[a-zA-z0-9]*]]: tensor<100x3x224x224xi8>, %[[arg1:[a-zA-z0-9]*]]: tensor<64x3x5x5xi8>, %[[arg2:[a-zA-z0-9]*]]: i8, %[[arg3:[a-zA-z0-9]*]]: i8) +// CHECK: linalg.conv_2d_nchw_fchw_q {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%[[arg0]], %[[arg1]], %[[arg2]], %[[arg3]] : tensor<100x3x224x224xi8>, tensor<64x3x5x5xi8>, i8, i8) outs(%{{.*}} : tensor<100x64x220x220xi32>) -> tensor<100x64x220x220xi32> diff --git a/mlir/tools/mlir-tblgen/OpDocGen.cpp b/mlir/tools/mlir-tblgen/OpDocGen.cpp index ff3c6b16bb6ebc..d499c78a5cf44d 100644 --- a/mlir/tools/mlir-tblgen/OpDocGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDocGen.cpp @@ -70,7 +70,10 @@ void mlir::tblgen::emitSummary(StringRef summary, raw_ostream &os) { // nested in the op definition. void mlir::tblgen::emitDescription(StringRef description, raw_ostream &os) { raw_indented_ostream ros(os); - ros.printReindented(description.rtrim(" \t")); + StringRef trimmed = description.rtrim(" \t"); + ros.printReindented(trimmed); + if (!trimmed.ends_with("\n")) + ros << "\n"; } void mlir::tblgen::emitDescriptionComment(StringRef description,