[X86][FP16] Widen 128/256-bit CVTTP2xI to 512-bit when VLX not enabled #142763
Conversation
@llvm/pr-subscribers-backend-x86

Author: Phoebe Wang (phoebewang)

Changes

Patch is 58.45 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/142763.diff

4 Files Affected:
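In brief: AVX512FP16 provides the 128- and 256-bit forms of vcvttph2{w,uw,dq,udq,qq,uqq} only together with AVX512VL. When VLX is not enabled, the lowering now widens the f16 source to 512 bits, converts on the full zmm register, and takes the low subvector of the result. For strict FP the widened lanes are zero-filled so the wide convert cannot raise spurious exceptions; otherwise they are left undef. A condensed sketch of that shape for a signed v8f16 to v8i16 conversion (types and opcode vary with the element types; this is not the verbatim patch code):

// Sketch only: widen, convert at 512 bits, then extract the low lanes.
MVT TmpVT = MVT::v32f16;  // 512-bit f16 source type
MVT WideVT = MVT::v32i16; // 512-bit integer result type
SDValue Fill = IsStrict ? DAG.getConstantFP(0.0, dl, TmpVT) // zero-fill: no spurious exceptions
                        : DAG.getUNDEF(TmpVT);
Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, TmpVT, Fill, Src,
                  DAG.getVectorIdxConstant(0, dl));
SDValue Wide = DAG.getNode(X86ISD::CVTTP2SI, dl, WideVT, Src);
SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, Wide,
                          DAG.getVectorIdxConstant(0, dl));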
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b1a3e3c006bb3..fb76846297eb9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2371,6 +2371,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::LLRINT, MVT::v8f16, Legal);
}
+ setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom);
+
if (Subtarget.hasVLX()) {
setGroup(MVT::v8f16);
setGroup(MVT::v16f16);
@@ -2386,10 +2391,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Legal);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i16, Legal);
- setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom);
- setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
- setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom);
setOperationAction(ISD::FP_ROUND, MVT::v8f16, Legal);
setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f16, Legal);
setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Custom);
@@ -20010,10 +20011,12 @@ static SDValue promoteXINT_TO_FP(SDValue Op, const SDLoc &dl,
static bool isLegalConversion(MVT VT, MVT FloatVT, bool IsSigned,
const X86Subtarget &Subtarget) {
- if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
- return true;
- if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
- return true;
+ if (FloatVT.getScalarType() != MVT::f16 || Subtarget.hasVLX()) {
+ if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
+ return true;
+ if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
+ return true;
+ }
if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
return true;
if (Subtarget.useAVX512Regs()) {
@@ -21552,6 +21555,7 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
bool IsStrict = Op->isStrictFPOpcode();
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
+ bool HasVLX = Subtarget.hasVLX();
MVT VT = Op->getSimpleValueType(0);
SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
@@ -21582,7 +21586,7 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
else
Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
- if (!IsSigned && !Subtarget.hasVLX()) {
+ if (!IsSigned && !HasVLX) {
assert(Subtarget.useAVX512Regs() && "Unexpected features!");
// Widen to 512-bits.
ResVT = MVT::v8i32;
@@ -21612,7 +21616,8 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
}
if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
- if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
+ if ((HasVLX && (VT == MVT::v8i16 || VT == MVT::v16i16)) ||
+ VT == MVT::v32i16)
return Op;
MVT ResVT = VT;
@@ -21620,7 +21625,7 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
if (EleVT != MVT::i64)
ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
- if (SrcVT != MVT::v8f16) {
+ if (SrcVT == MVT::v2f16 || SrcVT == MVT::v4f16) {
SDValue Tmp =
IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
@@ -21628,6 +21633,22 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
}
+ if (!HasVLX) {
+ assert(Subtarget.useAVX512Regs() && "Unexpected features!");
+ // Widen to 512-bits.
+ unsigned IntSize = EleVT.getSizeInBits();
+ unsigned Num = IntSize > 16 ? 512 / IntSize : 32;
+ MVT TmpVT = MVT::getVectorVT(MVT::f16, Num);
+ ResVT = MVT::getVectorVT(EleVT, Num);
+ // Need to concat with zero vector for strict fp to avoid spurious
+ // exceptions.
+ // TODO: Should we just do this for non-strict as well?
+ SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, TmpVT)
+ : DAG.getUNDEF(TmpVT);
+ Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, TmpVT, Tmp, Src,
+ DAG.getVectorIdxConstant(0, dl));
+ }
+
if (IsStrict) {
Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
: X86ISD::STRICT_CVTTP2UI,
@@ -21640,7 +21661,8 @@ SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
// TODO: Need to add exception check code for strict FP.
if (EleVT.getSizeInBits() < 16) {
- ResVT = MVT::getVectorVT(EleVT, 8);
+ if (HasVLX)
+ ResVT = MVT::getVectorVT(EleVT, 8);
Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
}
@@ -34123,12 +34145,10 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
}
if (IsStrict) {
- Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
Res =
DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
Chain = Res.getValue(1);
} else {
- Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
Res = DAG.getNode(Opc, dl, ResVT, Src);
}
@@ -44126,7 +44146,12 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
// Conversions.
// TODO: Add more CVT opcodes when we have test coverage.
case X86ISD::CVTTP2SI:
- case X86ISD::CVTTP2UI:
+ case X86ISD::CVTTP2UI: {
+ if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f16 &&
+ !Subtarget.hasVLX())
+ break;
+ [[fallthrough]];
+ }
case X86ISD::CVTPH2PS: {
SDLoc DL(Op);
unsigned Scale = SizeInBits / ExtSizeInBits;
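(The SimplifyDemandedVectorEltsForTargetNode hunk above is the matching guard: without VLX, narrowing a CVTTP2SI/CVTTP2UI node with an f16 source would recreate exactly the 128/256-bit forms this patch widens away, so shrinking is skipped in that case.)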
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128-fp16.ll
index 0a9dd78afb8cc..0126685f2bb32 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128-fp16.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512fp16,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16 -O3 | FileCheck %s --check-prefixes=NOVL
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half>, metadata)
@@ -34,6 +35,16 @@ define <2 x i64> @strict_vector_fptosi_v2f16_to_v2i64(<2 x half> %a) #0 {
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: vcvttph2qq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i64:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vcvttsh2si %xmm0, %rax
+; NOVL-NEXT: vmovq %rax, %xmm1
+; NOVL-NEXT: vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT: vcvttsh2si %xmm0, %rax
+; NOVL-NEXT: vmovq %rax, %xmm0
+; NOVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; NOVL-NEXT: retq
%ret = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i64> %ret
@@ -46,6 +57,16 @@ define <2 x i64> @strict_vector_fptoui_v2f16_to_v2i64(<2 x half> %a) #0 {
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: vcvttph2uqq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i64:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vcvttsh2usi %xmm0, %rax
+; NOVL-NEXT: vmovq %rax, %xmm1
+; NOVL-NEXT: vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT: vcvttsh2usi %xmm0, %rax
+; NOVL-NEXT: vmovq %rax, %xmm0
+; NOVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; NOVL-NEXT: retq
%ret = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i64> %ret
@@ -58,6 +79,17 @@ define <2 x i32> @strict_vector_fptosi_v2f16_to_v2i32(<2 x half> %a) #0 {
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: vcvttph2dq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i32:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; NOVL-NEXT: vcvttph2dq %ymm0, %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i32> %ret
@@ -70,6 +102,17 @@ define <2 x i32> @strict_vector_fptoui_v2f16_to_v2i32(<2 x half> %a) #0 {
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: vcvttph2udq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i32:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; NOVL-NEXT: vcvttph2udq %ymm0, %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i32> %ret
@@ -82,6 +125,17 @@ define <2 x i16> @strict_vector_fptosi_v2f16_to_v2i16(<2 x half> %a) #0 {
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: vcvttph2w %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i16:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT: vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i16> %ret
@@ -94,6 +148,17 @@ define <2 x i16> @strict_vector_fptoui_v2f16_to_v2i16(<2 x half> %a) #0 {
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; CHECK-NEXT: vcvttph2uw %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i16:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT: vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i16> %ret
@@ -107,6 +172,17 @@ define <2 x i8> @strict_vector_fptosi_v2f16_to_v2i8(<2 x half> %a) #0 {
; CHECK-NEXT: vcvttph2w %xmm0, %xmm0
; CHECK-NEXT: vpmovwb %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i8:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT: vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i8> %ret
@@ -120,6 +196,17 @@ define <2 x i8> @strict_vector_fptoui_v2f16_to_v2i8(<2 x half> %a) #0 {
; CHECK-NEXT: vcvttph2uw %xmm0, %xmm0
; CHECK-NEXT: vpmovwb %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i8:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; NOVL-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vinsertf32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT: vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i8> %ret
@@ -136,6 +223,21 @@ define <2 x i1> @strict_vector_fptosi_v2f16_to_v2i1(<2 x half> %a) #0 {
; CHECK-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v2f16_to_v2i1:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vcvttsh2si %xmm0, %eax
+; NOVL-NEXT: andl $1, %eax
+; NOVL-NEXT: kmovw %eax, %k0
+; NOVL-NEXT: vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT: vcvttsh2si %xmm0, %eax
+; NOVL-NEXT: kmovd %eax, %k1
+; NOVL-NEXT: kshiftlw $1, %k1, %k1
+; NOVL-NEXT: korw %k1, %k0, %k1
+; NOVL-NEXT: vpternlogq {{.*#+}} zmm0 {%k1} {z} = -1
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i1> %ret
@@ -152,6 +254,21 @@ define <2 x i1> @strict_vector_fptoui_v2f16_to_v2i1(<2 x half> %a) #0 {
; CHECK-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v2f16_to_v2i1:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vcvttsh2si %xmm0, %eax
+; NOVL-NEXT: andl $1, %eax
+; NOVL-NEXT: kmovw %eax, %k0
+; NOVL-NEXT: vpsrld $16, %xmm0, %xmm0
+; NOVL-NEXT: vcvttsh2si %xmm0, %eax
+; NOVL-NEXT: kmovd %eax, %k1
+; NOVL-NEXT: kshiftlw $1, %k1, %k1
+; NOVL-NEXT: korw %k1, %k0, %k1
+; NOVL-NEXT: vpternlogq {{.*#+}} zmm0 {%k1} {z} = -1
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f16(<2 x half> %a,
metadata !"fpexcept.strict") #0
ret <2 x i1> %ret
@@ -163,6 +280,21 @@ define <4 x i32> @strict_vector_fptosi_v4f16_to_v4i32(<4 x half> %a) #0 {
; CHECK-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: vcvttph2dq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v4f16_to_v4i32:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vpsrld $16, %xmm0, %xmm1
+; NOVL-NEXT: vcvttsh2si %xmm1, %eax
+; NOVL-NEXT: vcvttsh2si %xmm0, %ecx
+; NOVL-NEXT: vmovd %ecx, %xmm1
+; NOVL-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; NOVL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; NOVL-NEXT: vcvttsh2si %xmm2, %eax
+; NOVL-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; NOVL-NEXT: vpsrlq $48, %xmm0, %xmm0
+; NOVL-NEXT: vcvttsh2si %xmm0, %eax
+; NOVL-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; NOVL-NEXT: retq
%ret = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f16(<4 x half> %a,
metadata !"fpexcept.strict") #0
ret <4 x i32> %ret
@@ -174,6 +306,21 @@ define <4 x i32> @strict_vector_fptoui_v4f16_to_v4i32(<4 x half> %a) #0 {
; CHECK-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: vcvttph2udq %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v4f16_to_v4i32:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vpsrld $16, %xmm0, %xmm1
+; NOVL-NEXT: vcvttsh2usi %xmm1, %eax
+; NOVL-NEXT: vcvttsh2usi %xmm0, %ecx
+; NOVL-NEXT: vmovd %ecx, %xmm1
+; NOVL-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; NOVL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; NOVL-NEXT: vcvttsh2usi %xmm2, %eax
+; NOVL-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; NOVL-NEXT: vpsrlq $48, %xmm0, %xmm0
+; NOVL-NEXT: vcvttsh2usi %xmm0, %eax
+; NOVL-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; NOVL-NEXT: retq
%ret = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f16(<4 x half> %a,
metadata !"fpexcept.strict") #0
ret <4 x i32> %ret
@@ -185,6 +332,16 @@ define <4 x i16> @strict_vector_fptosi_v4f16_to_v4i16(<4 x half> %a) #0 {
; CHECK-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: vcvttph2w %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v4f16_to_v4i16:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vinserti32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT: vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f16(<4 x half> %a,
metadata !"fpexcept.strict") #0
ret <4 x i16> %ret
@@ -196,6 +353,16 @@ define <4 x i16> @strict_vector_fptoui_v4f16_to_v4i16(<4 x half> %a) #0 {
; CHECK-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT: vcvttph2uw %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptoui_v4f16_to_v4i16:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vinserti32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT: vcvttph2uw %zmm0, %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f16(<4 x half> %a,
metadata !"fpexcept.strict") #0
ret <4 x i16> %ret
@@ -208,6 +375,16 @@ define <4 x i8> @strict_vector_fptosi_v4f16_to_v4i8(<4 x half> %a) #0 {
; CHECK-NEXT: vcvttph2w %xmm0, %xmm0
; CHECK-NEXT: vpmovwb %xmm0, %xmm0
; CHECK-NEXT: ret{{[l|q]}}
+;
+; NOVL-LABEL: strict_vector_fptosi_v4f16_to_v4i8:
+; NOVL: # %bb.0:
+; NOVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT: vinserti32x4 $0, %xmm0, %zmm1, %zmm0
+; NOVL-NEXT: vcvttph2w %zmm0, %zmm0
+; NOVL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; NOVL-NEXT: vzeroupper
+; NOVL-NEXT: retq
%ret = call <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f16(<4 x half> %a,
metadata !"fpexcept.strict") #0
ret <4 x i8> %ret
@@ -220,6 +397,16 @@ define <4 x i8> @strict_vector_fptoui_v4f16_to_v4i8(<4 x half> %a) #0 {
; CHECK-NEXT: vcvttph2uw %xmm0, %xmm0
; CHECK-NEXT: vpmovwb %xmm0, %xmm0
; CHECK-NEXT: ...
[truncated]
You can test this locally with the following command:

git-clang-format --diff HEAD~1 HEAD --extensions cpp -- llvm/lib/Target/X86/X86ISelLowering.cpp

View the diff from clang-format here:

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a3056296c..069f9a2d2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2354,10 +2354,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::LLRINT, MVT::v8f16, Legal);
}
- setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
- setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom);
- setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
- setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
+ setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom);
if (Subtarget.hasVLX()) {
setGroup(MVT::v8f16);
SDValue Tmp =
    IsStrict ? DAG.getConstantFP(0.0, dl, TmpVT) : DAG.getUNDEF(TmpVT);
Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, TmpVT, Tmp, Src,
                  DAG.getVectorIdxConstant(0, dl));
use widenSubVector ?
Good idea, done.
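For reference, the helper collapses the manual insert-into-zero/undef pattern quoted above into a single call. A minimal sketch, assuming the local widenSubVector overload in X86ISelLowering.cpp that takes a target width in bits (signature approximated, not verified against the final commit):

// Hypothetical condensed form of the quoted lines.
// ZeroNewElements = IsStrict preserves the strict-FP zero-fill.
Src = widenSubVector(Src, /*ZeroNewElements=*/IsStrict, Subtarget, DAG, dl,
                     /*WideSizeInBits=*/512);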
LGTM