llvm/test/CodeGen/X86/expand-vp-fp-intrinsics.ll (177 additions, 0 deletions)
@@ -1,10 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512

define void @vp_fadd_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_fadd_v4f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vaddps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_fadd_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: addps %xmm1, %xmm0
@@ -23,6 +31,13 @@ define void @vp_fadd_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
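X86 has no native vector-predication support, so the expansion pass drops the mask and EVL operands and the vp.fadd lowers to an ordinary vector add; the fsub, fmul, and fdiv cases below follow the same pattern. A minimal sketch of the IR shape such a test exercises (the actual body is collapsed in this diff; the value names and the all-true mask are assumptions):

define void @vp_fadd_v4f32_sketch(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
  ; Mask and EVL are discarded when expanding for a target without VP support.
  %res = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, i32 %vp)
  store <4 x float> %res, ptr %out
  ret void
}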

define void @vp_fsub_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_fsub_v4f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vsubps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_fsub_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: subps %xmm1, %xmm0
@@ -41,6 +56,13 @@ define void @vp_fsub_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define void @vp_fmul_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_fmul_v4f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmulps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_fmul_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: mulps %xmm1, %xmm0
@@ -59,6 +81,13 @@ define void @vp_fmul_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define void @vp_fdiv_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_fdiv_v4f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vdivps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_fdiv_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: divps %xmm1, %xmm0
@@ -77,6 +106,48 @@ define void @vp_frem_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define void @vp_frem_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_frem_v4f32:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $80, %esp
; X86-NEXT: vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: vextractps $2, %xmm1, {{[0-9]+}}(%esp)
; X86-NEXT: vextractps $2, %xmm0, (%esp)
; X86-NEXT: calll fmodf
; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vextractps $1, %xmm0, (%esp)
; X86-NEXT: calll fmodf
; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: calll fmodf
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vextractps $3, %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vextractps $3, %xmm0, (%esp)
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: calll fmodf
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X86-NEXT: vmovaps %xmm0, (%esi)
; X86-NEXT: addl $80, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; SSE-LABEL: vp_frem_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: pushq %rbx
@@ -157,6 +228,13 @@ define void @vp_frem_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
declare <4 x float> @llvm.vp.frem.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
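frem has no SSE/AVX instruction, so once the VP wrapper is stripped the operation is scalarized into one fmodf libcall per lane, which is exactly what the four calll fmodf sequences above check for. An illustrative per-lane equivalent (hypothetical value names, not the literal expansion output):

  ; assumes: declare float @fmodf(float, float)
  %x0 = extractelement <4 x float> %a0, i32 0
  %y0 = extractelement <4 x float> %a1, i32 0
  %r0 = call float @fmodf(float %x0, float %y0)
  ; ...lanes 1-3 likewise, then the results are rebuilt with insertelement.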

define void @vp_fabs_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_fabs_v4f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_fabs_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -188,6 +266,13 @@ define void @vp_fabs_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
declare <4 x float> @llvm.vp.fabs.v4f32(<4 x float>, <4 x i1>, i32)
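With no dedicated vector abs instruction, fabs becomes a bitwise AND that clears each lane's sign bit; the vandps/andps above load that mask from a constant-pool entry (the LCPI pattern). What the AND computes, expressed as IR (illustrative):

  %bits = bitcast <4 x float> %a0 to <4 x i32>
  %cleared = and <4 x i32> %bits, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647> ; 0x7fffffff per lane
  %abs = bitcast <4 x i32> %cleared to <4 x float>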

define void @vp_sqrt_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_sqrt_v4f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vsqrtps %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_sqrt_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: sqrtps %xmm0, %xmm0
@@ -206,6 +291,13 @@ define void @vp_sqrt_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
declare <4 x float> @llvm.vp.sqrt.v4f32(<4 x float>, <4 x i1>, i32)
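vp.sqrt is the simplest case here: after expansion it behaves as plain llvm.sqrt, which maps one-to-one onto sqrtps/vsqrtps. A sketch of the expected post-expansion form (assuming the all-true mask folds away):

  %r = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a0)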

define void @vp_fneg_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_fneg_v4f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_fneg_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
@@ -237,6 +329,55 @@ define void @vp_fneg_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i32 %vp)
declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32)
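fneg is the complementary sign-bit trick: an XOR with 0x80000000 flips each lane's sign, hence the vxorps/xorps against a pool constant. What that XOR computes, expressed as IR (illustrative; the IR itself uses the fneg instruction and the backend emits the XOR):

  %bits = bitcast <4 x float> %a0 to <4 x i32>
  %flipped = xor <4 x i32> %bits, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648> ; 0x80000000 per lane
  %neg = bitcast <4 x i32> %flipped to <4 x float>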

define void @vp_fma_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i4 %a5) nounwind {
; X86-LABEL: vp_fma_v4f32:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $84, %esp
; X86-NEXT: vmovupd %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: vextractps $2, %xmm0, (%esp)
; X86-NEXT: vshufpd {{.*#+}} xmm0 = xmm1[1,0]
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: calll fmaf
; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vextractps $1, %xmm0, (%esp)
; X86-NEXT: vmovshdup {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; X86-NEXT: # xmm0 = mem[1,1,3,3]
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: calll fmaf
; X86-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: calll fmaf
; X86-NEXT: vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT: vextractps $3, %xmm0, (%esp)
; X86-NEXT: vpermilps $255, {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; X86-NEXT: # xmm0 = mem[3,3,3,3]
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: calll fmaf
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X86-NEXT: vmovaps %xmm0, (%esi)
; X86-NEXT: addl $84, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; SSE-LABEL: vp_fma_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: pushq %rbx
@@ -372,6 +513,14 @@ define void @vp_fma_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i4 %a5) no
declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32)
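None of the RUN lines enable +fma, so after the VP wrapper is removed the fused multiply-add is scalarized into four fmaf libcalls, mirroring the frem lowering above. The general shape of the call under test (value names are hypothetical; which operands the collapsed body actually passes is not visible in this diff):

  %r = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x i1> %m, i32 %evl)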

define void @vp_fmuladd_v4f32(<4 x float> %a0, <4 x float> %a1, ptr %out, i4 %a5) nounwind {
; X86-LABEL: vp_fmuladd_v4f32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmulps %xmm1, %xmm0, %xmm0
; X86-NEXT: vaddps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_fmuladd_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: mulps %xmm1, %xmm0
@@ -406,6 +555,13 @@ declare <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>
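Unlike fma, fmuladd permits unfused evaluation, so without +fma it can legally decompose into the mulps/addps (vmulps/vaddps) pair checked above instead of libcalls. The equivalent unfused IR (illustrative):

  %mul = fmul <4 x float> %x, %y
  %add = fadd <4 x float> %mul, %z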

declare <4 x float> @llvm.vp.maxnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
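For maxnum and minnum, the checks that follow show the standard NaN-correct sequence: maxps/minps, whose NaN behavior alone would not match, followed by cmpunordps and a blend so that a NaN lane in %va selects the corresponding lane of %vb, as IEEE maxnum/minnum semantics require. After VP expansion the intrinsic presumably reduces to its plain counterpart (a sketch, assuming the mask folds away):

  %r = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %va, <4 x float> %vb)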
define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; X86-LABEL: vfmax_vv_v4f32:
; X86: # %bb.0:
; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm2
; X86-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
; X86-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; X86-NEXT: retl
;
; SSE-LABEL: vfmax_vv_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm2
@@ -443,6 +599,13 @@ define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m

declare <8 x float> @llvm.vp.maxnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; X86-LABEL: vfmax_vv_v8f32:
; X86: # %bb.0:
; X86-NEXT: vmaxps %ymm0, %ymm1, %ymm2
; X86-NEXT: vcmpunordps %ymm0, %ymm0, %ymm0
; X86-NEXT: vblendvps %ymm0, %ymm1, %ymm2, %ymm0
; X86-NEXT: retl
;
; SSE-LABEL: vfmax_vv_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm2, %xmm4
@@ -486,6 +649,13 @@ define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m

declare <4 x float> @llvm.vp.minnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; X86-LABEL: vfmin_vv_v4f32:
; X86: # %bb.0:
; X86-NEXT: vminps %xmm0, %xmm1, %xmm2
; X86-NEXT: vcmpunordps %xmm0, %xmm0, %xmm0
; X86-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; X86-NEXT: retl
;
; SSE-LABEL: vfmin_vv_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm2
@@ -523,6 +693,13 @@ define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m

declare <8 x float> @llvm.vp.minnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; X86-LABEL: vfmin_vv_v8f32:
; X86: # %bb.0:
; X86-NEXT: vminps %ymm0, %ymm1, %ymm2
; X86-NEXT: vcmpunordps %ymm0, %ymm0, %ymm0
; X86-NEXT: vblendvps %ymm0, %ymm1, %ymm2, %ymm0
; X86-NEXT: retl
;
; SSE-LABEL: vfmin_vv_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm2, %xmm4