Skip to content

[AArch64] Skip storing of stack arguments when lowering tail calls #126735

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jun 6, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions llvm/include/llvm/CodeGen/GlobalISel/Utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -617,6 +617,10 @@ LLVM_ABI bool isGuaranteedNotToBeUndef(Register Reg,
/// estimate of the type.
LLVM_ABI Type *getTypeForLLT(LLT Ty, LLVMContext &C);

/// Returns true if the instruction \p MI is one of the assert
/// instructions.
LLVM_ABI bool isAssertMI(const MachineInstr &MI);

/// An integer-like constant.
///
/// It abstracts over scalar, fixed-length vectors, and scalable vectors.
Expand Down
13 changes: 13 additions & 0 deletions llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Original file line number Diff line number Diff line change
Expand Up @@ -724,6 +724,19 @@ END_TWO_BYTE_PACK()
}
}

/// Test if this node is an assert operation.
///
/// Assert nodes (AssertAlign/AssertNoFPClass/AssertSext/AssertZext) attach
/// extra information to a value without altering its bits, so they can be
/// looked through when only the bit pattern matters.
bool isAssert() const {
  const unsigned Opc = NodeType;
  return Opc == ISD::AssertAlign || Opc == ISD::AssertNoFPClass ||
         Opc == ISD::AssertSext || Opc == ISD::AssertZext;
}

/// Test if this node is a vector predication operation.
bool isVPOpcode() const { return ISD::isVPOpcode(getOpcode()); }

Expand Down
11 changes: 11 additions & 0 deletions llvm/lib/CodeGen/GlobalISel/Utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2003,6 +2003,17 @@ Type *llvm::getTypeForLLT(LLT Ty, LLVMContext &C) {
return IntegerType::get(C, Ty.getSizeInBits());
}

/// Returns true if \p MI is one of the G_ASSERT_* instructions
/// (G_ASSERT_ALIGN / G_ASSERT_SEXT / G_ASSERT_ZEXT), which carry extra
/// information about a value without changing its bits.
bool llvm::isAssertMI(const MachineInstr &MI) {
  const unsigned Opc = MI.getOpcode();
  return Opc == TargetOpcode::G_ASSERT_ALIGN ||
         Opc == TargetOpcode::G_ASSERT_SEXT ||
         Opc == TargetOpcode::G_ASSERT_ZEXT;
}

APInt llvm::GIConstant::getScalarValue() const {
assert(Kind == GIConstantKind::Scalar && "Expected scalar constant");

Expand Down
46 changes: 44 additions & 2 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8761,6 +8761,45 @@ getSMToggleCondition(const SMECallAttrs &CallAttrs) {
llvm_unreachable("Unsupported attributes");
}

/// Check whether a stack argument requires lowering in a tail call.
static bool shouldLowerTailCallStackArg(const MachineFunction &MF,
const CCValAssign &VA, SDValue Arg,
ISD::ArgFlagsTy Flags, int CallOffset) {
// FIXME: We should be able to handle this case, but it's not clear how to.
if (Flags.isZExt() || Flags.isSExt())
return true;

for (;;) {
// Look through nodes that don't alter the bits of the incoming value.
unsigned Op = Arg.getOpcode();
if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST ||
Arg->isAssert() || Op == AArch64ISD::ASSERT_ZEXT_BOOL) {
Arg = Arg.getOperand(0);
continue;
}
break;
}

// If the argument is a load from the same immutable stack slot, we can reuse
// it.
if (auto *LoadNode = dyn_cast<LoadSDNode>(Arg)) {
if (auto *FINode = dyn_cast<FrameIndexSDNode>(LoadNode->getBasePtr())) {
const MachineFrameInfo &MFI = MF.getFrameInfo();
int FI = FINode->getIndex();
if (!MFI.isImmutableObjectIndex(FI))
return true;
if (CallOffset != MFI.getObjectOffset(FI))
return true;
uint64_t SizeInBits = LoadNode->getMemoryVT().getFixedSizeInBits();
if (SizeInBits / 8 != MFI.getObjectSize(FI))
return true;
return false;
}
}

return true;
}

/// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
/// and add input and output parameter nodes.
SDValue
Expand Down Expand Up @@ -9183,10 +9222,13 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
}
unsigned LocMemOffset = VA.getLocMemOffset();
int32_t Offset = LocMemOffset + BEAlign;
SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);

if (IsTailCall) {
// When the frame pointer is perfectly aligned for the tail call and the
// same stack argument is passed down intact, we can reuse it.
if (!FPDiff && !shouldLowerTailCallStackArg(MF, VA, Arg, Flags, Offset))
continue;

Offset = Offset + FPDiff;
int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);

Expand Down
49 changes: 49 additions & 0 deletions llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
Expand All @@ -35,6 +36,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
Expand Down Expand Up @@ -296,10 +298,57 @@ struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
MIRBuilder.buildCopy(PhysReg, ExtReg);
}

/// Check whether a stack argument requires lowering in a tail call.
///
/// Returns false only when the value being stored is the caller's own
/// incoming stack argument, loaded intact (through bit-preserving
/// extends/asserts) from the same immutable fixed stack slot the store
/// targets — in which case the store is redundant and can be skipped.
static bool shouldLowerTailCallStackArg(const MachineFunction &MF,
                                        const CCValAssign &VA,
                                        Register ValVReg,
                                        Register StoreAddr) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  // Walk back to the instruction that defines the value being stored.
  auto *DefMI = MRI.getVRegDef(ValVReg);
  assert(DefMI && "No defining instruction");
  for (;;) {
    // Look through nodes that don't alter the bits of the incoming value.
    unsigned Op = DefMI->getOpcode();
    if (Op == TargetOpcode::G_ZEXT || Op == TargetOpcode::G_ANYEXT ||
        Op == TargetOpcode::G_BITCAST || isAssertMI(*DefMI)) {
      DefMI = MRI.getVRegDef(DefMI->getOperand(1).getReg());
      continue;
    }
    break;
  }

  // The value must come from a load whose address is a frame index.
  auto *Load = dyn_cast<GLoad>(DefMI);
  if (!Load)
    return true;
  Register LoadReg = Load->getPointerReg();
  auto *LoadAddrDef = MRI.getVRegDef(LoadReg);
  if (LoadAddrDef->getOpcode() != TargetOpcode::G_FRAME_INDEX)
    return true;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  int LoadFI = LoadAddrDef->getOperand(1).getIndex();

  // The store must also target a frame index.
  auto *StoreAddrDef = MRI.getVRegDef(StoreAddr);
  if (StoreAddrDef->getOpcode() != TargetOpcode::G_FRAME_INDEX)
    return true;
  int StoreFI = StoreAddrDef->getOperand(1).getIndex();

  // The loaded slot must be immutable (so its contents are still the
  // incoming argument), at the same offset as the store's slot, and the
  // load must cover the entire object being stored.
  if (!MFI.isImmutableObjectIndex(LoadFI))
    return true;
  if (MFI.getObjectOffset(LoadFI) != MFI.getObjectOffset(StoreFI))
    return true;
  if (Load->getMemSize() != MFI.getObjectSize(StoreFI))
    return true;

  return false;
}

void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
const MachinePointerInfo &MPO,
const CCValAssign &VA) override {
MachineFunction &MF = MIRBuilder.getMF();
if (!FPDiff && !shouldLowerTailCallStackArg(MF, VA, ValVReg, Addr))
return;
auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
inferAlignFromPtrInfo(MF, MPO));
MIRBuilder.buildStore(ValVReg, Addr, *MMO);
Expand Down
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/darwinpcs-tail.ll
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,11 @@

; CHECK-LABEL: __ZThn16_N1C3addEPKcz:
; CHECK: b __ZN1C3addEPKcz

; CHECK-LABEL: _tailTest:
; CHECK: b __ZN1C3addEPKcz

; CHECK-LABEL: __ZThn8_N1C1fEiiiiiiiiiz:
; CHECK: ldr w9, [sp, #4]
; CHECK: str w9, [sp, #4]
; CHECK: b __ZN1C1fEiiiiiiiiiz

%class.C = type { %class.A.base, [4 x i8], %class.B.base, [4 x i8] }
Expand Down
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/scavenge-large-call.ll
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
; CHECK: add {{x[0-9]+}}, sp,

define void @caller(ptr %0, i16 %1, i16 %2, i8 %3, double %4, i16 %5, i8 %6, ptr %7, double %8, i32 %9, ptr %10, double %11, double %12, [2 x i64] %13, [2 x i64] %14, [2 x i64] %15, double %16, double %17, [2 x i64] %18, [2 x i64] %19, i16 %20, i32 %21, double %22, i8 %23, [2 x i64] %24, [2 x i64] %25, [2 x i64] %26, i8 %27, i16 %28, i16 %29, i16 %30, i32 %31, [2 x i64] %32, [2 x i64] %33, [2 x i64] %34, [2 x i64] %35, [2 x i64] %36, i32 %37, i32 %38) {
tail call void @callee(ptr %0, i16 %1, i16 %2, i8 %3, double 0.000000e+00, i16 %5, i8 %6, ptr %7, double 0.000000e+00, i32 %9, ptr %10, double 0.000000e+00, double 0.000000e+00, [2 x i64] %13, [2 x i64] %14, [2 x i64] %15, double 0.000000e+00, double 0.000000e+00, [2 x i64] %18, [2 x i64] %19, i16 %20, i32 %21, double 0.000000e+00, i8 %23, [2 x i64] %24, [2 x i64] %25, [2 x i64] zeroinitializer, i8 %27, i16 0, i16 0, i16 %28, i32 0, [2 x i64] zeroinitializer, [2 x i64] zeroinitializer, [2 x i64] zeroinitializer, [2 x i64] %35, [2 x i64] %36, i32 0, i32 0)
call void @callee(ptr %0, i16 %1, i16 %2, i8 %3, double 0.000000e+00, i16 %5, i8 %6, ptr %7, double 0.000000e+00, i32 %9, ptr %10, double 0.000000e+00, double 0.000000e+00, [2 x i64] %13, [2 x i64] %14, [2 x i64] %15, double 0.000000e+00, double 0.000000e+00, [2 x i64] %18, [2 x i64] %19, i16 %20, i32 %21, double 0.000000e+00, i8 %23, [2 x i64] %24, [2 x i64] %25, [2 x i64] zeroinitializer, i8 %27, i16 0, i16 0, i16 %28, i32 0, [2 x i64] zeroinitializer, [2 x i64] zeroinitializer, [2 x i64] zeroinitializer, [2 x i64] %35, [2 x i64] %36, i32 0, i32 0)
ret void
}

Expand Down
102 changes: 50 additions & 52 deletions llvm/test/CodeGen/AArch64/sve-fixed-length-frame-offests-crash.ll
Original file line number Diff line number Diff line change
Expand Up @@ -11,66 +11,64 @@ target triple = "aarch64-unknown-linux-gnu"
define dso_local void @func1(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v6, ptr %v7, ptr %v8,
; CHECK-LABEL: func1:
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-48]! // 8-byte Folded Spill
; CHECK-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w20, -16
; CHECK-NEXT: .cfi_offset w21, -24
; CHECK-NEXT: .cfi_offset w22, -32
; CHECK-NEXT: .cfi_offset w29, -48
; CHECK-NEXT: add x10, sp, #176
; CHECK-NEXT: add x8, sp, #48
; CHECK-NEXT: add x9, sp, #144
; CHECK-NEXT: ldr z3, [x10]
; CHECK-NEXT: sub sp, sp, #368
; CHECK-NEXT: stp x29, x30, [sp, #336] // 16-byte Folded Spill
; CHECK-NEXT: str x28, [sp, #352] // 8-byte Folded Spill
; CHECK-NEXT: add x29, sp, #336
; CHECK-NEXT: .cfi_def_cfa w29, 32
; CHECK-NEXT: .cfi_offset w28, -16
; CHECK-NEXT: .cfi_offset w30, -24
; CHECK-NEXT: .cfi_offset w29, -32
; CHECK-NEXT: add x8, x29, #32
; CHECK-NEXT: add x9, x29, #72
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ldr z0, [x8]
; CHECK-NEXT: add x8, sp, #112
; CHECK-NEXT: ldr z2, [x9]
; CHECK-NEXT: add x8, x29, #256
; CHECK-NEXT: ldr z3, [x9]
; CHECK-NEXT: ldr z1, [x8]
; CHECK-NEXT: add x20, sp, #176
; CHECK-NEXT: ldp x9, x8, [sp, #328]
; CHECK-NEXT: ldr x15, [sp, #104]
; CHECK-NEXT: ldp x11, x10, [sp, #312]
; CHECK-NEXT: ldur q4, [sp, #88]
; CHECK-NEXT: ldp x13, x12, [sp, #296]
; CHECK-NEXT: ldr x19, [sp, #272]
; CHECK-NEXT: ldp x18, x14, [sp, #280]
; CHECK-NEXT: ldp x16, x17, [sp, #208]
; CHECK-NEXT: ldp x21, x22, [sp, #352]
; CHECK-NEXT: str z3, [x20]
; CHECK-NEXT: add x20, sp, #144
; CHECK-NEXT: str z2, [x20]
; CHECK-NEXT: add x20, sp, #112
; CHECK-NEXT: str z1, [x20]
; CHECK-NEXT: add x20, sp, #48
; CHECK-NEXT: str z0, [x20]
; CHECK-NEXT: stp x21, x22, [sp, #352]
; CHECK-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: stp x19, x18, [sp, #272]
; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT: stp x16, x17, [sp, #208]
; CHECK-NEXT: stur q4, [sp, #88]
; CHECK-NEXT: str x15, [sp, #104]
; CHECK-NEXT: stp x14, x13, [sp, #288]
; CHECK-NEXT: stp x12, x11, [sp, #304]
; CHECK-NEXT: stp x10, x9, [sp, #320]
; CHECK-NEXT: str x8, [sp, #336]
; CHECK-NEXT: ldr x29, [sp], #48 // 8-byte Folded Reload
; CHECK-NEXT: b func2
; CHECK-NEXT: add x8, x29, #288
; CHECK-NEXT: add x9, x29, #168
; CHECK-NEXT: ldr z2, [x8]
; CHECK-NEXT: add x8, x29, #104
; CHECK-NEXT: ldr z6, [x9]
; CHECK-NEXT: ldr z4, [x8]
; CHECK-NEXT: add x8, x29, #136
; CHECK-NEXT: mov x12, #17 // =0x11
; CHECK-NEXT: ldr z5, [x8]
; CHECK-NEXT: ldp x10, x11, [x29, #336]
; CHECK-NEXT: st1d { z6.d }, p0, [sp, x12, lsl #3]
; CHECK-NEXT: mov x12, #13 // =0xd
; CHECK-NEXT: ldr x8, [x29, #200]
; CHECK-NEXT: ldr x9, [x29, #320]
; CHECK-NEXT: st1d { z5.d }, p0, [sp, x12, lsl #3]
; CHECK-NEXT: mov x12, #9 // =0x9
; CHECK-NEXT: st1d { z4.d }, p0, [sp, x12, lsl #3]
; CHECK-NEXT: mov x12, #5 // =0x5
; CHECK-NEXT: st1d { z3.d }, p0, [sp, x12, lsl #3]
; CHECK-NEXT: stp x10, x11, [sp, #304]
; CHECK-NEXT: str x9, [sp, #288]
; CHECK-NEXT: str z2, [sp, #8, mul vl]
; CHECK-NEXT: str z1, [sp, #7, mul vl]
; CHECK-NEXT: str x8, [sp, #168]
; CHECK-NEXT: str z0, [sp]
; CHECK-NEXT: bl func2
; CHECK-NEXT: ldp x29, x30, [sp, #336] // 16-byte Folded Reload
; CHECK-NEXT: ldr x28, [sp, #352] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #368
; CHECK-NEXT: ret
ptr %v9, ptr %v10, ptr %v11, ptr %v12, ptr %v13, ptr %v14, ptr %v15, ptr %v16,
ptr %v17, ptr %v18, ptr %v19, ptr %v20, ptr %v21, ptr %v22, ptr %v23, ptr %v24,
ptr %v25, ptr %v26, ptr %v27, ptr %v28, ptr %v29, ptr %v30, ptr %v31, ptr %v32,
ptr %v33, ptr %v34, ptr %v35, ptr %v36, ptr %v37, ptr %v38, ptr %v39, ptr %v40,
ptr %v41, ptr %v42, ptr %v43, ptr %v44, ptr %v45, ptr %v46, ptr %v47, ptr %v48,
i64 %v49) #0 {
tail call void @func2(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v6, ptr %v7, ptr %v8,
ptr %v9, ptr %v10, ptr %v11, ptr %v12, ptr undef, ptr %v14, ptr %v15, ptr %v16,
ptr %v17, ptr %v18, ptr %v19, ptr %v20, ptr %v21, ptr %v22, ptr %v23, ptr %v24,
ptr %v25, ptr %v26, ptr %v27, ptr %v28, ptr %v29, ptr %v30, ptr undef, ptr undef,
ptr undef, ptr undef, ptr undef, ptr undef, ptr %v37, ptr %v38, ptr %v39, ptr %v40,
ptr %v41, ptr %v42, ptr %v43, ptr %v44, ptr %v45, ptr undef, ptr %v47, ptr %v48,
i64 undef)
call void @func2(ptr %v1, ptr %v2, ptr %v3, ptr %v4, ptr %v5, ptr %v6, ptr %v7, ptr %v8,
ptr %v9, ptr %v10, ptr %v11, ptr %v12, ptr undef, ptr %v14, ptr %v15, ptr %v16,
ptr %v17, ptr %v18, ptr %v19, ptr %v20, ptr %v21, ptr %v22, ptr %v23, ptr %v24,
ptr %v25, ptr %v26, ptr %v27, ptr %v28, ptr %v29, ptr %v30, ptr undef, ptr undef,
ptr undef, ptr undef, ptr undef, ptr undef, ptr %v37, ptr %v38, ptr %v39, ptr %v40,
ptr %v41, ptr %v42, ptr %v43, ptr %v44, ptr %v45, ptr undef, ptr %v47, ptr %v48,
i64 undef)
ret void
}

Expand Down
67 changes: 67 additions & 0 deletions llvm/test/CodeGen/AArch64/tail-call-stack-args.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc %s -mtriple=aarch64 -o - | FileCheck %s --check-prefixes=CHECK,SDAG
; RUN: llc %s -mtriple=aarch64 -global-isel -o - | FileCheck %s --check-prefixes=CHECK,GI

; Tail calls whose stack arguments sit at the same offsets as in the caller do
; not need those arguments reloaded from and stored back to the stack.

declare void @func(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j)

define void @wrapper_func(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j) {
; CHECK-LABEL: wrapper_func:
; CHECK: // %bb.0:
; CHECK-NEXT: b func

tail call void @func(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j)
ret void
}

define void @wrapper_func_zero_arg(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j) {
; CHECK-LABEL: wrapper_func_zero_arg:
; CHECK: // %bb.0:
; CHECK-NEXT: str wzr, [sp, #8]
; CHECK-NEXT: b func
tail call void @func(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 0)
ret void
}

define void @wrapper_func_overriden_arg(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j) {
; CHECK-LABEL: wrapper_func_overriden_arg:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w8, [sp]
; CHECK-NEXT: str wzr, [sp]
; CHECK-NEXT: str w8, [sp, #8]
; CHECK-NEXT: b func
tail call void @func(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 0, i32 %i)
ret void
}

declare void @func_i1(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 %j)

; FIXME: Support i1 passthrough stack arguments in GlobalISel.
define void @wrapper_func_i1(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 %j) {
; SDAG-LABEL: wrapper_func_i1:
; SDAG: // %bb.0:
; SDAG-NEXT: b func_i1
;
; GI-LABEL: wrapper_func_i1:
; GI: // %bb.0:
; GI-NEXT: ldrb w8, [sp, #8]
; GI-NEXT: strb w8, [sp, #8]
; GI-NEXT: b func_i1
tail call void @func_i1(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 %j)
ret void
}

declare void @func_signext_i1(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 signext %j)

; FIXME: Support zero/sign-extended stack arguments.
define void @wrapper_func_i8(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 signext %j) {
; CHECK-LABEL: wrapper_func_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldrsb w8, [sp, #8]
; CHECK-NEXT: strb w8, [sp, #8]
; CHECK-NEXT: b func_signext_i1
tail call void @func_signext_i1(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 signext %j)
ret void
}
Loading