-
Notifications
You must be signed in to change notification settings - Fork 14k
[SDAG] Add missing SoftenFloatRes legalization for FMODF #129264
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
This is needed on some ARM platforms.
@llvm/pr-subscribers-backend-arm Author: Benjamin Maxwell (MacDue) Changes: This is needed on some ARM platforms. Patch is 20.92 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/129264.diff 3 Files Affected:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 9fbcb5bc31537..c2107a73301bc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -132,6 +132,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
case ISD::STRICT_FLDEXP: R = SoftenFloatRes_ExpOp(N); break;
case ISD::FFREXP: R = SoftenFloatRes_FFREXP(N); break;
case ISD::FSINCOS: R = SoftenFloatRes_FSINCOS(N); break;
+ case ISD::FMODF: R = SoftenFloatRes_FMODF(N); break;
case ISD::STRICT_FREM:
case ISD::FREM: R = SoftenFloatRes_FREM(N); break;
case ISD::STRICT_FRINT:
@@ -791,27 +792,35 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FFREXP(SDNode *N) {
return ReturnVal;
}
-SDValue
-DAGTypeLegalizer::SoftenFloatRes_UnaryWithTwoFPResults(SDNode *N,
- RTLIB::Libcall LC) {
+SDValue DAGTypeLegalizer::SoftenFloatRes_UnaryWithTwoFPResults(
+ SDNode *N, RTLIB::Libcall LC, std::optional<unsigned> CallRetResNo) {
assert(!N->isStrictFPOpcode() && "strictfp not implemented");
EVT VT = N->getValueType(0);
+ assert(VT == N->getValueType(1) &&
+ "expected both return values to have the same type");
+
if (!TLI.getLibcallName(LC))
return SDValue();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
- SDValue FirstResultSlot = DAG.CreateStackTemporary(NVT);
- SDValue SecondResultSlot = DAG.CreateStackTemporary(NVT);
SDLoc DL(N);
- TargetLowering::MakeLibCallOptions CallOptions;
- std::array Ops{GetSoftenedFloat(N->getOperand(0)), FirstResultSlot,
- SecondResultSlot};
- std::array OpsVT{VT, FirstResultSlot.getValueType(),
- SecondResultSlot.getValueType()};
+ SmallVector<SDValue, 3> Ops = {GetSoftenedFloat(N->getOperand(0))};
+ SmallVector<EVT, 3> OpsVT = {VT};
+
+ std::array<SDValue, 2> StackSlots;
+ for (unsigned ResNum = 0; ResNum < N->getNumValues(); ++ResNum) {
+ if (ResNum == CallRetResNo)
+ continue;
+ SDValue StackSlot = DAG.CreateStackTemporary(NVT);
+ Ops.push_back(StackSlot);
+ OpsVT.push_back(StackSlot.getValueType());
+ StackSlots[ResNum] = StackSlot;
+ }
+ TargetLowering::MakeLibCallOptions CallOptions;
// TODO: setTypeListBeforeSoften can't properly express multiple return types,
// but since both returns have the same type it should be okay.
CallOptions.setTypeListBeforeSoften({OpsVT}, VT, true);
@@ -825,8 +834,14 @@ DAGTypeLegalizer::SoftenFloatRes_UnaryWithTwoFPResults(SDNode *N,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
return DAG.getLoad(NVT, DL, Chain, StackSlot, PtrInfo);
};
- SetSoftenedFloat(SDValue(N, 0), CreateStackLoad(FirstResultSlot));
- SetSoftenedFloat(SDValue(N, 1), CreateStackLoad(SecondResultSlot));
+
+ for (auto [ResNum, StackSlot] : enumerate(StackSlots)) {
+ if (CallRetResNo == ResNum) {
+ SetSoftenedFloat(SDValue(N, ResNum), ReturnVal);
+ continue;
+ }
+ SetSoftenedFloat(SDValue(N, ResNum), CreateStackLoad(StackSlot));
+ }
return SDValue();
}
@@ -836,6 +851,11 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FSINCOS(SDNode *N) {
N, RTLIB::getSINCOS(N->getValueType(0)));
}
+SDValue DAGTypeLegalizer::SoftenFloatRes_FMODF(SDNode *N) {
+ return SoftenFloatRes_UnaryWithTwoFPResults(
+ N, RTLIB::getMODF(N->getValueType(0)), /*CallRetResNo=*/0);
+}
+
SDValue DAGTypeLegalizer::SoftenFloatRes_FREM(SDNode *N) {
return SoftenFloatRes_Binary(N, GetFPLibCall(N->getValueType(0),
RTLIB::REM_F32,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 74d7210743372..50247cebb91b1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -562,7 +562,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
// Convert Float Results to Integer.
void SoftenFloatResult(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_Unary(SDNode *N, RTLIB::Libcall LC);
- SDValue SoftenFloatRes_UnaryWithTwoFPResults(SDNode *N, RTLIB::Libcall LC);
+ SDValue SoftenFloatRes_UnaryWithTwoFPResults(
+ SDNode *N, RTLIB::Libcall LC, std::optional<unsigned> CallRetResNo = {});
SDValue SoftenFloatRes_Binary(SDNode *N, RTLIB::Libcall LC);
SDValue SoftenFloatRes_MERGE_VALUES(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_ARITH_FENCE(SDNode *N);
@@ -608,6 +609,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue SoftenFloatRes_ExpOp(SDNode *N);
SDValue SoftenFloatRes_FFREXP(SDNode *N);
SDValue SoftenFloatRes_FSINCOS(SDNode *N);
+ SDValue SoftenFloatRes_FMODF(SDNode *N);
SDValue SoftenFloatRes_FREEZE(SDNode *N);
SDValue SoftenFloatRes_FREM(SDNode *N);
SDValue SoftenFloatRes_FRINT(SDNode *N);
diff --git a/llvm/test/CodeGen/ARM/llvm.modf.ll b/llvm/test/CodeGen/ARM/llvm.modf.ll
new file mode 100644
index 0000000000000..66f6c9b9383a7
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/llvm.modf.ll
@@ -0,0 +1,504 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=thumbv7-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -mtriple=armv6m < %s | FileCheck %s --check-prefix=THUMB
+
+define { half, half } @test_modf_f16(half %a) {
+; CHECK-LABEL: test_modf_f16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: mov r1, r0
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r4, pc}
+;
+; THUMB-LABEL: test_modf_f16:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: uxth r0, r0
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r4, r0
+; THUMB-NEXT: ldr r0, [sp, #4]
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r1, r0
+; THUMB-NEXT: mov r0, r4
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r4, pc}
+ %result = call { half, half } @llvm.modf.f16(half %a)
+ ret { half, half } %result
+}
+
+define half @test_modf_f16_only_use_fractional_part(half %a) {
+; CHECK-LABEL: test_modf_f16_only_use_fractional_part:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f16_only_use_fractional_part:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: uxth r0, r0
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { half, half } @llvm.modf.f16(half %a)
+ %result.0 = extractvalue { half, half } %result, 0
+ ret half %result.0
+}
+
+define half @test_modf_f16_only_use_integral_part(half %a) {
+; CHECK-LABEL: test_modf_f16_only_use_integral_part:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f16_only_use_integral_part:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: uxth r0, r0
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: ldr r0, [sp, #4]
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { half, half } @llvm.modf.f16(half %a)
+ %result.1 = extractvalue { half, half } %result, 1
+ ret half %result.1
+}
+
+define { <2 x half>, <2 x half> } @test_modf_v2f16(<2 x half> %a) {
+; CHECK-LABEL: test_modf_v2f16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: strh.w r0, [sp, #14]
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: mov r1, sp
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: strh.w r0, [sp, #12]
+; CHECK-NEXT: add r0, sp, #12
+; CHECK-NEXT: vld1.32 {d8[0]}, [r0:32]
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: ldr r1, [sp]
+; CHECK-NEXT: strh.w r0, [sp, #10]
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: strh.w r0, [sp, #8]
+; CHECK-NEXT: add r0, sp, #8
+; CHECK-NEXT: vmovl.u16 q9, d8
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vmov.32 r0, d18[0]
+; CHECK-NEXT: vmov.32 r1, d18[1]
+; CHECK-NEXT: vmov.32 r2, d16[0]
+; CHECK-NEXT: vmov.32 r3, d16[1]
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: pop {r4, pc}
+;
+; THUMB-LABEL: test_modf_v2f16:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, r5, r6, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r5, r1
+; THUMB-NEXT: uxth r0, r0
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: mov r1, sp
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r4, r0
+; THUMB-NEXT: uxth r0, r5
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r5, r0
+; THUMB-NEXT: ldr r0, [sp]
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r6, r0
+; THUMB-NEXT: ldr r0, [sp, #4]
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r3, r0
+; THUMB-NEXT: mov r0, r4
+; THUMB-NEXT: mov r1, r5
+; THUMB-NEXT: mov r2, r6
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r4, r5, r6, pc}
+ %result = call { <2 x half>, <2 x half> } @llvm.modf.v2f16(<2 x half> %a)
+ ret { <2 x half>, <2 x half> } %result
+}
+
+define { float, float } @test_modf_f32(float %a) {
+; CHECK-LABEL: test_modf_f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: ldr r1, [sp, #4]
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f32:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: ldr r1, [sp, #4]
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { float, float } @llvm.modf.f32(float %a)
+ ret { float, float } %result
+}
+
+define { <3 x float>, <3 x float> } @test_modf_v3f32(<3 x float> %a) {
+; CHECK-LABEL: test_modf_v3f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: vldr d9, [sp, #40]
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r1, sp
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: vmov d8, r2, r3
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: vmov r0, s18
+; CHECK-NEXT: vldmia sp, {s0, s1}
+; CHECK-NEXT: add.w r1, r4, #16
+; CHECK-NEXT: vst1.32 {d0}, [r1:64]!
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: vmov s1, r5
+; CHECK-NEXT: vmov s0, r6
+; CHECK-NEXT: vst1.32 {d0}, [r4:64]!
+; CHECK-NEXT: str r0, [r4]
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB-LABEL: test_modf_v3f32:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, r5, r6, r7, lr}
+; THUMB-NEXT: sub sp, #12
+; THUMB-NEXT: mov r7, r3
+; THUMB-NEXT: mov r5, r2
+; THUMB-NEXT: mov r4, r0
+; THUMB-NEXT: ldr r0, [sp, #32]
+; THUMB-NEXT: add r1, sp, #8
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: mov r6, r0
+; THUMB-NEXT: ldr r0, [sp, #8]
+; THUMB-NEXT: str r0, [r4, #24]
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: mov r0, r7
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: mov r7, r0
+; THUMB-NEXT: ldr r0, [sp, #4]
+; THUMB-NEXT: str r0, [r4, #20]
+; THUMB-NEXT: mov r1, sp
+; THUMB-NEXT: mov r0, r5
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: ldr r1, [sp]
+; THUMB-NEXT: str r1, [r4, #16]
+; THUMB-NEXT: stm r4!, {r0, r7}
+; THUMB-NEXT: str r6, [r4]
+; THUMB-NEXT: add sp, #12
+; THUMB-NEXT: pop {r4, r5, r6, r7, pc}
+ %result = call { <3 x float>, <3 x float> } @llvm.modf.v3f32(<3 x float> %a)
+ ret { <3 x float>, <3 x float> } %result
+}
+
+define { <2 x float>, <2 x float> } @test_modf_v2f32(<2 x float> %a) {
+; CHECK-LABEL: test_modf_v2f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: vmov d8, r0, r1
+; CHECK-NEXT: mov r1, sp
+; CHECK-NEXT: vmov r0, s17
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: vmov r0, s16
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: vldr s1, [sp]
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: vldr s0, [sp, #4]
+; CHECK-NEXT: vmov r2, r3, d0
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: pop {r4, pc}
+;
+; THUMB-LABEL: test_modf_v2f32:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, r5, r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r4, r1
+; THUMB-NEXT: mov r1, sp
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: mov r5, r0
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: mov r0, r4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: mov r1, r0
+; THUMB-NEXT: ldr r2, [sp]
+; THUMB-NEXT: ldr r3, [sp, #4]
+; THUMB-NEXT: mov r0, r5
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r4, r5, r7, pc}
+ %result = call { <2 x float>, <2 x float> } @llvm.modf.v2f32(<2 x float> %a)
+ ret { <2 x float>, <2 x float> } %result
+}
+
+define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) {
+; CHECK-LABEL: test_modf_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: mov r3, r2
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r3
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r12, r3, [sp, #40]
+; CHECK-NEXT: vmov d8, r0, r1
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: vmov d9, r0, r1
+; CHECK-NEXT: vldr d17, [sp]
+; CHECK-NEXT: vldr d16, [sp, #8]
+; CHECK-NEXT: vst1.64 {d8, d9}, [r4]!
+; CHECK-NEXT: vst1.64 {d16, d17}, [r4]
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: pop {r4, pc}
+;
+; THUMB-LABEL: test_modf_v2f64:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, r5, r6, r7, lr}
+; THUMB-NEXT: sub sp, #28
+; THUMB-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; THUMB-NEXT: mov r7, r2
+; THUMB-NEXT: mov r4, r0
+; THUMB-NEXT: ldr r0, [sp, #48]
+; THUMB-NEXT: ldr r1, [sp, #52]
+; THUMB-NEXT: add r2, sp, #16
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: mov r6, r0
+; THUMB-NEXT: mov r5, r1
+; THUMB-NEXT: ldr r0, [sp, #20]
+; THUMB-NEXT: str r0, [r4, #28]
+; THUMB-NEXT: ldr r0, [sp, #16]
+; THUMB-NEXT: str r0, [r4, #24]
+; THUMB-NEXT: add r2, sp, #8
+; THUMB-NEXT: mov r0, r7
+; THUMB-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r2, [sp, #12]
+; THUMB-NEXT: str r2, [r4, #20]
+; THUMB-NEXT: ldr r2, [sp, #8]
+; THUMB-NEXT: str r2, [r4, #16]
+; THUMB-NEXT: str r5, [r4, #12]
+; THUMB-NEXT: stm r4!, {r0, r1, r6}
+; THUMB-NEXT: add sp, #28
+; THUMB-NEXT: pop {r4, r5, r6, r7, pc}
+ %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+define { double, double } @test_modf_f64(double %a) {
+; CHECK-LABEL: test_modf_f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r2, r3, [sp], #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r2, [sp]
+; THUMB-NEXT: ldr r3, [sp, #4]
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { double, double } @llvm.modf.f64(double %a)
+ ret { double, double } %result
+}
+
+define double @test_modf_f64_only_use_integral(double %a) {
+; CHECK-LABEL: test_modf_f64_only_use_integral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r0, r1, [sp], #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64_only_use_integral:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r0, [sp]
+; THUMB-NEXT: ldr r1, [sp, #4]
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { double, double } @llvm.modf.f64(double %a)
+ %result.1 = extractvalue { double, double } %result, 1
+ ret double %result.1
+}
+
+define double @test_modf_f64_only_use_fractional(double %a) {
+; CHECK-LABEL: test_modf_f64_only_use_fractional:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64_only_use_fractional:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { double, double } @llvm.modf.f64(double %a)
+ %result.0 = extractvalue { double, double } %result, 0
+ ret double %result.0
+}
+
+define { double, double } @test_modf_f64_tail_call(double %a) {
+; CHECK-LABEL: test_modf_f64_tail_call:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r2, r3, [sp], #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64_tail_call:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r2, [sp]
+; THUMB-NEXT: ldr r3, [sp, #4]
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = tail call { double, double } @llvm.modf.f64(double %a)
+ ret { double, double } %result
+}
+
+define double @test_modf_f64_only_use_integral_tail_call(double %a) {
+; CHECK-LABEL: test_modf_f64_only_use_integral_tail_call:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r0, r1, [sp], #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64_only_use_integral_tail_call:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r0, [sp]
+; THUMB-NEXT: ldr r1, [sp, #4]
+; THUMB-NEXT: add sp, ...
[truncated]
|
@llvm/pr-subscribers-llvm-selectiondag Author: Benjamin Maxwell (MacDue) Changes: This is needed on some ARM platforms. Patch is 20.92 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/129264.diff 3 Files Affected:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 9fbcb5bc31537..c2107a73301bc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -132,6 +132,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
case ISD::STRICT_FLDEXP: R = SoftenFloatRes_ExpOp(N); break;
case ISD::FFREXP: R = SoftenFloatRes_FFREXP(N); break;
case ISD::FSINCOS: R = SoftenFloatRes_FSINCOS(N); break;
+ case ISD::FMODF: R = SoftenFloatRes_FMODF(N); break;
case ISD::STRICT_FREM:
case ISD::FREM: R = SoftenFloatRes_FREM(N); break;
case ISD::STRICT_FRINT:
@@ -791,27 +792,35 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FFREXP(SDNode *N) {
return ReturnVal;
}
-SDValue
-DAGTypeLegalizer::SoftenFloatRes_UnaryWithTwoFPResults(SDNode *N,
- RTLIB::Libcall LC) {
+SDValue DAGTypeLegalizer::SoftenFloatRes_UnaryWithTwoFPResults(
+ SDNode *N, RTLIB::Libcall LC, std::optional<unsigned> CallRetResNo) {
assert(!N->isStrictFPOpcode() && "strictfp not implemented");
EVT VT = N->getValueType(0);
+ assert(VT == N->getValueType(1) &&
+ "expected both return values to have the same type");
+
if (!TLI.getLibcallName(LC))
return SDValue();
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
- SDValue FirstResultSlot = DAG.CreateStackTemporary(NVT);
- SDValue SecondResultSlot = DAG.CreateStackTemporary(NVT);
SDLoc DL(N);
- TargetLowering::MakeLibCallOptions CallOptions;
- std::array Ops{GetSoftenedFloat(N->getOperand(0)), FirstResultSlot,
- SecondResultSlot};
- std::array OpsVT{VT, FirstResultSlot.getValueType(),
- SecondResultSlot.getValueType()};
+ SmallVector<SDValue, 3> Ops = {GetSoftenedFloat(N->getOperand(0))};
+ SmallVector<EVT, 3> OpsVT = {VT};
+
+ std::array<SDValue, 2> StackSlots;
+ for (unsigned ResNum = 0; ResNum < N->getNumValues(); ++ResNum) {
+ if (ResNum == CallRetResNo)
+ continue;
+ SDValue StackSlot = DAG.CreateStackTemporary(NVT);
+ Ops.push_back(StackSlot);
+ OpsVT.push_back(StackSlot.getValueType());
+ StackSlots[ResNum] = StackSlot;
+ }
+ TargetLowering::MakeLibCallOptions CallOptions;
// TODO: setTypeListBeforeSoften can't properly express multiple return types,
// but since both returns have the same type it should be okay.
CallOptions.setTypeListBeforeSoften({OpsVT}, VT, true);
@@ -825,8 +834,14 @@ DAGTypeLegalizer::SoftenFloatRes_UnaryWithTwoFPResults(SDNode *N,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
return DAG.getLoad(NVT, DL, Chain, StackSlot, PtrInfo);
};
- SetSoftenedFloat(SDValue(N, 0), CreateStackLoad(FirstResultSlot));
- SetSoftenedFloat(SDValue(N, 1), CreateStackLoad(SecondResultSlot));
+
+ for (auto [ResNum, StackSlot] : enumerate(StackSlots)) {
+ if (CallRetResNo == ResNum) {
+ SetSoftenedFloat(SDValue(N, ResNum), ReturnVal);
+ continue;
+ }
+ SetSoftenedFloat(SDValue(N, ResNum), CreateStackLoad(StackSlot));
+ }
return SDValue();
}
@@ -836,6 +851,11 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FSINCOS(SDNode *N) {
N, RTLIB::getSINCOS(N->getValueType(0)));
}
+SDValue DAGTypeLegalizer::SoftenFloatRes_FMODF(SDNode *N) {
+ return SoftenFloatRes_UnaryWithTwoFPResults(
+ N, RTLIB::getMODF(N->getValueType(0)), /*CallRetResNo=*/0);
+}
+
SDValue DAGTypeLegalizer::SoftenFloatRes_FREM(SDNode *N) {
return SoftenFloatRes_Binary(N, GetFPLibCall(N->getValueType(0),
RTLIB::REM_F32,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 74d7210743372..50247cebb91b1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -562,7 +562,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
// Convert Float Results to Integer.
void SoftenFloatResult(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_Unary(SDNode *N, RTLIB::Libcall LC);
- SDValue SoftenFloatRes_UnaryWithTwoFPResults(SDNode *N, RTLIB::Libcall LC);
+ SDValue SoftenFloatRes_UnaryWithTwoFPResults(
+ SDNode *N, RTLIB::Libcall LC, std::optional<unsigned> CallRetResNo = {});
SDValue SoftenFloatRes_Binary(SDNode *N, RTLIB::Libcall LC);
SDValue SoftenFloatRes_MERGE_VALUES(SDNode *N, unsigned ResNo);
SDValue SoftenFloatRes_ARITH_FENCE(SDNode *N);
@@ -608,6 +609,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
SDValue SoftenFloatRes_ExpOp(SDNode *N);
SDValue SoftenFloatRes_FFREXP(SDNode *N);
SDValue SoftenFloatRes_FSINCOS(SDNode *N);
+ SDValue SoftenFloatRes_FMODF(SDNode *N);
SDValue SoftenFloatRes_FREEZE(SDNode *N);
SDValue SoftenFloatRes_FREM(SDNode *N);
SDValue SoftenFloatRes_FRINT(SDNode *N);
diff --git a/llvm/test/CodeGen/ARM/llvm.modf.ll b/llvm/test/CodeGen/ARM/llvm.modf.ll
new file mode 100644
index 0000000000000..66f6c9b9383a7
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/llvm.modf.ll
@@ -0,0 +1,504 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=thumbv7-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -mtriple=armv6m < %s | FileCheck %s --check-prefix=THUMB
+
+define { half, half } @test_modf_f16(half %a) {
+; CHECK-LABEL: test_modf_f16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: mov r1, r0
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r4, pc}
+;
+; THUMB-LABEL: test_modf_f16:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: uxth r0, r0
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r4, r0
+; THUMB-NEXT: ldr r0, [sp, #4]
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r1, r0
+; THUMB-NEXT: mov r0, r4
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r4, pc}
+ %result = call { half, half } @llvm.modf.f16(half %a)
+ ret { half, half } %result
+}
+
+define half @test_modf_f16_only_use_fractional_part(half %a) {
+; CHECK-LABEL: test_modf_f16_only_use_fractional_part:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f16_only_use_fractional_part:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: uxth r0, r0
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { half, half } @llvm.modf.f16(half %a)
+ %result.0 = extractvalue { half, half } %result, 0
+ ret half %result.0
+}
+
+define half @test_modf_f16_only_use_integral_part(half %a) {
+; CHECK-LABEL: test_modf_f16_only_use_integral_part:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f16_only_use_integral_part:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: uxth r0, r0
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: ldr r0, [sp, #4]
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { half, half } @llvm.modf.f16(half %a)
+ %result.1 = extractvalue { half, half } %result, 1
+ ret half %result.1
+}
+
+define { <2 x half>, <2 x half> } @test_modf_v2f16(<2 x half> %a) {
+; CHECK-LABEL: test_modf_v2f16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: strh.w r0, [sp, #14]
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: bl __gnu_h2f_ieee
+; CHECK-NEXT: mov r1, sp
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: strh.w r0, [sp, #12]
+; CHECK-NEXT: add r0, sp, #12
+; CHECK-NEXT: vld1.32 {d8[0]}, [r0:32]
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: ldr r1, [sp]
+; CHECK-NEXT: strh.w r0, [sp, #10]
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: strh.w r0, [sp, #8]
+; CHECK-NEXT: add r0, sp, #8
+; CHECK-NEXT: vmovl.u16 q9, d8
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vmov.32 r0, d18[0]
+; CHECK-NEXT: vmov.32 r1, d18[1]
+; CHECK-NEXT: vmov.32 r2, d16[0]
+; CHECK-NEXT: vmov.32 r3, d16[1]
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: pop {r4, pc}
+;
+; THUMB-LABEL: test_modf_v2f16:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, r5, r6, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r5, r1
+; THUMB-NEXT: uxth r0, r0
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: mov r1, sp
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r4, r0
+; THUMB-NEXT: uxth r0, r5
+; THUMB-NEXT: bl __gnu_h2f_ieee
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r5, r0
+; THUMB-NEXT: ldr r0, [sp]
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r6, r0
+; THUMB-NEXT: ldr r0, [sp, #4]
+; THUMB-NEXT: bl __gnu_f2h_ieee
+; THUMB-NEXT: mov r3, r0
+; THUMB-NEXT: mov r0, r4
+; THUMB-NEXT: mov r1, r5
+; THUMB-NEXT: mov r2, r6
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r4, r5, r6, pc}
+ %result = call { <2 x half>, <2 x half> } @llvm.modf.v2f16(<2 x half> %a)
+ ret { <2 x half>, <2 x half> } %result
+}
+
+define { float, float } @test_modf_f32(float %a) {
+; CHECK-LABEL: test_modf_f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: ldr r1, [sp, #4]
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f32:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: ldr r1, [sp, #4]
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { float, float } @llvm.modf.f32(float %a)
+ ret { float, float } %result
+}
+
+define { <3 x float>, <3 x float> } @test_modf_v3f32(<3 x float> %a) {
+; CHECK-LABEL: test_modf_v3f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: vldr d9, [sp, #40]
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r1, sp
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: vmov d8, r2, r3
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: vmov r0, s18
+; CHECK-NEXT: vldmia sp, {s0, s1}
+; CHECK-NEXT: add.w r1, r4, #16
+; CHECK-NEXT: vst1.32 {d0}, [r1:64]!
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: vmov s1, r5
+; CHECK-NEXT: vmov s0, r6
+; CHECK-NEXT: vst1.32 {d0}, [r4:64]!
+; CHECK-NEXT: str r0, [r4]
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB-LABEL: test_modf_v3f32:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, r5, r6, r7, lr}
+; THUMB-NEXT: sub sp, #12
+; THUMB-NEXT: mov r7, r3
+; THUMB-NEXT: mov r5, r2
+; THUMB-NEXT: mov r4, r0
+; THUMB-NEXT: ldr r0, [sp, #32]
+; THUMB-NEXT: add r1, sp, #8
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: mov r6, r0
+; THUMB-NEXT: ldr r0, [sp, #8]
+; THUMB-NEXT: str r0, [r4, #24]
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: mov r0, r7
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: mov r7, r0
+; THUMB-NEXT: ldr r0, [sp, #4]
+; THUMB-NEXT: str r0, [r4, #20]
+; THUMB-NEXT: mov r1, sp
+; THUMB-NEXT: mov r0, r5
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: ldr r1, [sp]
+; THUMB-NEXT: str r1, [r4, #16]
+; THUMB-NEXT: stm r4!, {r0, r7}
+; THUMB-NEXT: str r6, [r4]
+; THUMB-NEXT: add sp, #12
+; THUMB-NEXT: pop {r4, r5, r6, r7, pc}
+ %result = call { <3 x float>, <3 x float> } @llvm.modf.v3f32(<3 x float> %a)
+ ret { <3 x float>, <3 x float> } %result
+}
+
+define { <2 x float>, <2 x float> } @test_modf_v2f32(<2 x float> %a) {
+; CHECK-LABEL: test_modf_v2f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: vmov d8, r0, r1
+; CHECK-NEXT: mov r1, sp
+; CHECK-NEXT: vmov r0, s17
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: vmov r0, s16
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: bl modff
+; CHECK-NEXT: vldr s1, [sp]
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: vldr s0, [sp, #4]
+; CHECK-NEXT: vmov r2, r3, d0
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: pop {r4, pc}
+;
+; THUMB-LABEL: test_modf_v2f32:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, r5, r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r4, r1
+; THUMB-NEXT: mov r1, sp
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: mov r5, r0
+; THUMB-NEXT: add r1, sp, #4
+; THUMB-NEXT: mov r0, r4
+; THUMB-NEXT: bl modff
+; THUMB-NEXT: mov r1, r0
+; THUMB-NEXT: ldr r2, [sp]
+; THUMB-NEXT: ldr r3, [sp, #4]
+; THUMB-NEXT: mov r0, r5
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r4, r5, r7, pc}
+ %result = call { <2 x float>, <2 x float> } @llvm.modf.v2f32(<2 x float> %a)
+ ret { <2 x float>, <2 x float> } %result
+}
+
+define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) {
+; CHECK-LABEL: test_modf_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: mov r3, r2
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r3
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r12, r3, [sp, #40]
+; CHECK-NEXT: vmov d8, r0, r1
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: vmov d9, r0, r1
+; CHECK-NEXT: vldr d17, [sp]
+; CHECK-NEXT: vldr d16, [sp, #8]
+; CHECK-NEXT: vst1.64 {d8, d9}, [r4]!
+; CHECK-NEXT: vst1.64 {d16, d17}, [r4]
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: pop {r4, pc}
+;
+; THUMB-LABEL: test_modf_v2f64:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r4, r5, r6, r7, lr}
+; THUMB-NEXT: sub sp, #28
+; THUMB-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; THUMB-NEXT: mov r7, r2
+; THUMB-NEXT: mov r4, r0
+; THUMB-NEXT: ldr r0, [sp, #48]
+; THUMB-NEXT: ldr r1, [sp, #52]
+; THUMB-NEXT: add r2, sp, #16
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: mov r6, r0
+; THUMB-NEXT: mov r5, r1
+; THUMB-NEXT: ldr r0, [sp, #20]
+; THUMB-NEXT: str r0, [r4, #28]
+; THUMB-NEXT: ldr r0, [sp, #16]
+; THUMB-NEXT: str r0, [r4, #24]
+; THUMB-NEXT: add r2, sp, #8
+; THUMB-NEXT: mov r0, r7
+; THUMB-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r2, [sp, #12]
+; THUMB-NEXT: str r2, [r4, #20]
+; THUMB-NEXT: ldr r2, [sp, #8]
+; THUMB-NEXT: str r2, [r4, #16]
+; THUMB-NEXT: str r5, [r4, #12]
+; THUMB-NEXT: stm r4!, {r0, r1, r6}
+; THUMB-NEXT: add sp, #28
+; THUMB-NEXT: pop {r4, r5, r6, r7, pc}
+ %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+define { double, double } @test_modf_f64(double %a) {
+; CHECK-LABEL: test_modf_f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r2, r3, [sp], #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r2, [sp]
+; THUMB-NEXT: ldr r3, [sp, #4]
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { double, double } @llvm.modf.f64(double %a)
+ ret { double, double } %result
+}
+
+define double @test_modf_f64_only_use_intergral(double %a) {
+; CHECK-LABEL: test_modf_f64_only_use_intergral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r0, r1, [sp], #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64_only_use_intergral:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r0, [sp]
+; THUMB-NEXT: ldr r1, [sp, #4]
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { double, double } @llvm.modf.f64(double %a)
+ %result.1 = extractvalue { double, double } %result, 1
+ ret double %result.1
+}
+
+define double @test_modf_f64_only_use_fractional(double %a) {
+; CHECK-LABEL: test_modf_f64_only_use_fractional:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64_only_use_fractional:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = call { double, double } @llvm.modf.f64(double %a)
+ %result.1 = extractvalue { double, double } %result, 0
+ ret double %result.1
+}
+
+define { double, double } @test_modf_f64_tail_call(double %a) {
+; CHECK-LABEL: test_modf_f64_tail_call:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r2, r3, [sp], #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64_tail_call:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r2, [sp]
+; THUMB-NEXT: ldr r3, [sp, #4]
+; THUMB-NEXT: add sp, #8
+; THUMB-NEXT: pop {r7, pc}
+ %result = tail call { double, double } @llvm.modf.f64(double %a)
+ ret { double, double } %result
+}
+
+define double @test_modf_f64_only_use_intergral_tail_call(double %a) {
+; CHECK-LABEL: test_modf_f64_only_use_intergral_tail_call:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl modf
+; CHECK-NEXT: ldrd r0, r1, [sp], #8
+; CHECK-NEXT: pop {r7, pc}
+;
+; THUMB-LABEL: test_modf_f64_only_use_intergral_tail_call:
+; THUMB: @ %bb.0:
+; THUMB-NEXT: push {r7, lr}
+; THUMB-NEXT: sub sp, #8
+; THUMB-NEXT: mov r2, sp
+; THUMB-NEXT: bl modf
+; THUMB-NEXT: ldr r0, [sp]
+; THUMB-NEXT: ldr r1, [sp, #4]
+; THUMB-NEXT: add sp, ...
[truncated]
|
Kind ping :) |
This isn't fully working for our downstream target. Our target's calling convention uses different registers for pointers and integers, but the pointer argument ends up being treated as an integer argument. Now we get
while the correct thing would be
As far as I can tell, other libcalls involving pointers (e.g. looking at SelectionDAG::getMemset or SelectionDAG::expandMultipleResultFPLibCall) use LowerCallTo directly without going through the makeLibCall helper. The TargetLowering::makeLibCall helper does not support setting Entry.Ty to identify a PointerType when setting up the ArgListTy vector. |
I guess one either needs to "inline" makeLibCall in DAGTypeLegalizer::SoftenFloatRes_UnaryWithTwoFPResults, making it possible to set up the ArgListTy correctly. Or maybe we should add support in MakeLibCallOptions to denote that certain arguments are pointers (and maybe also the pointer address space). |
I think this would be the easiest fix for now. Does this patch https://gist.github.com/MacDue/d9766e2b6b738824b11941194156efac solve your issue? |
Thanks! I also noticed that there seem to be (at least) three functions in math.h that take pointer arguments (modf, sincos and frexp). So the bug is also present in DAGTypeLegalizer::SoftenFloatRes_FFREXP (which can be solved by adding similar calls to setOpsTypeOverrides to override the type of the second argument). |
…c" (#129885)" This broke modff calls on 32-bit x86 Windows. See comment on the PR. > This updates the existing modf[f|l] builtin to be lowered via the > llvm.modf.* intrinsic (rather than directly to a library call). > > The legalization issues exposed by the original PR (#126750) should have > been fixed in #128055 and #129264. This reverts commit cd1d9a8.
…k slots Solution for: llvm#129264 (comment)
…k slots Solution for: llvm#129264 (comment)
…k slots (#130647) Solution for: #129264 (comment)
…oatRes stack slots (#130647) Solution for: llvm/llvm-project#129264 (comment)
…k slots (llvm#130647) Solution for: llvm#129264 (comment)
This is needed on some ARM platforms.
…#129885) Reverts llvm#127987 Original description: This updates the existing modf[f|l] builtin to be lowered via the llvm.modf.* intrinsic (rather than directly to a library call). The legalization issues exposed by the original PR (llvm#126750) should have been fixed in llvm#128055 and llvm#129264.
This is needed on some ARM platforms.