Accelerate Vector128<long>::op_Multiply on x64 #103555

Merged (21 commits) on Jun 28, 2024
Changes from 1 commit
move to jit
EgorBo committed Jun 20, 2024
commit 573a37bcd2abec3af4c1b99ecd0af5309fde189e
46 changes: 44 additions & 2 deletions src/coreclr/jit/gentree.cpp
@@ -21592,11 +21592,53 @@ GenTree* Compiler::gtNewSimdBinOpNode(
{
intrinsic = NI_AVX10v1_MultiplyLow;
}
-else
+else if (compOpportunisticallyDependsOn(InstructionSet_AVX512DQ_VL))
{
-    assert(compIsaSupportedDebugOnly(InstructionSet_AVX512DQ_VL));
intrinsic = NI_AVX512DQ_VL_MultiplyLow;
}
+else
+{
+    assert(((simdSize == 16) && compOpportunisticallyDependsOn(InstructionSet_SSE41)) ||
+           ((simdSize == 32) && compOpportunisticallyDependsOn(InstructionSet_AVX2)));
+
+    // op1Dup = op1
+    GenTree* op1Dup = fgMakeMultiUse(&op1);
+
+    // op2Dup = op2
+    GenTree* op2Dup = fgMakeMultiUse(&op2);
+
+    // Vector256<ulong> tmp0 = Avx2.Multiply(left, right);
+    NamedIntrinsic ni = simdSize == 32 ? NI_AVX2_Multiply : NI_SSE2_Multiply;
+    GenTreeHWIntrinsic* tmp0 =
+        gtNewSimdHWIntrinsicNode(type, op1, op2, ni, CORINFO_TYPE_ULONG, simdSize);
+
+    // Vector256<uint> tmp1 = Avx2.Shuffle(right.AsUInt32(), ZWXY);
+    ni = simdSize == 32 ? NI_AVX2_Shuffle : NI_SSE2_Shuffle;
+    GenTreeHWIntrinsic* tmp1 =
+        gtNewSimdHWIntrinsicNode(type, op2Dup, gtNewIconNode(SHUFFLE_ZWXY, TYP_INT), ni,
+                                 CORINFO_TYPE_UINT, simdSize);
+
+    // Vector256<uint> tmp2 = Avx2.MultiplyLow(left.AsUInt32(), tmp1);
+    ni = simdSize == 32 ? NI_AVX2_MultiplyLow : NI_SSE41_MultiplyLow;
+    GenTreeHWIntrinsic* tmp2 =
+        gtNewSimdHWIntrinsicNode(type, op1Dup, tmp1, ni, CORINFO_TYPE_UINT, simdSize);
+
+    // Vector256<int> tmp3 = Avx2.HorizontalAdd(tmp2.AsInt32(), Vector256<int>.Zero);
+    ni = simdSize == 32 ? NI_AVX2_HorizontalAdd : NI_SSSE3_HorizontalAdd;
+    GenTreeHWIntrinsic* tmp3 = gtNewSimdHWIntrinsicNode(type, tmp2, gtNewZeroConNode(type), ni,
+                                                        CORINFO_TYPE_UINT, simdSize);
+
+    // Vector256<int> tmp4 = Avx2.Shuffle(tmp3, YWXW);
+    ni = simdSize == 32 ? NI_AVX2_Shuffle : NI_SSE2_Shuffle;
+    GenTreeHWIntrinsic* tmp4 =
+        gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_YWXW, TYP_INT), ni,
+                                 CORINFO_TYPE_UINT, simdSize);
+
+    // result = tmp0 + tmp4;
+    op1 = tmp0;
+    op2 = tmp4;
+    intrinsic = simdSize == 32 ? NI_AVX2_Add : NI_SSE2_Add;
+}

break;
}
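
The sequence above is the standard split of a 64x64->64 multiply into 32-bit pieces: tmp0 is the widening product of the low halves, while tmp1..tmp4 compute the two cross products, sum them, and move that sum into the upper half of each 64-bit lane; the final Add combines the two. A minimal scalar sketch of the identity in C# (the helper name is illustrative only, not part of the change):

    // Per-lane model of the emitted sequence; default unchecked C# ulong arithmetic wraps
    // mod 2^64, and the aHi*bHi term vanishes because it is shifted out entirely.
    static ulong MultiplyLow64(ulong a, ulong b)
    {
        ulong aLo = a & 0xFFFF_FFFF, aHi = a >> 32;
        ulong bLo = b & 0xFFFF_FFFF, bHi = b >> 32;

        ulong low   = aLo * bLo;                         // tmp0: widening 32x32->64 multiply
        ulong cross = ((aLo * bHi) + (aHi * bLo)) << 32; // tmp1..tmp4: cross terms summed, shifted high

        return low + cross;                              // final Add
    }

Only the low 32 bits of the cross-term sum survive the shift, which is why the 32-bit MultiplyLow and HorizontalAdd steps are sufficient.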
22 changes: 15 additions & 7 deletions src/coreclr/jit/hwintrinsicxarch.cpp
@@ -2706,17 +2706,25 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,

if (varTypeIsLong(simdBaseType))
{
-if (simdSize != 64 && !canUseEvexEncoding())
+if (TARGET_POINTER_SIZE == 4)
{
-    // TODO-XARCH-CQ: We should support long/ulong multiplication
+    // TODO-XARCH-CQ: 32bit support

    Member: What's blocking 32-bit support? It doesn't look like we're using any _X64 intrinsics in the fallback logic?

    Member (author): Not sure, to be honest; that check was pre-existing and I only changed the comment.

break;
}
-// else if simdSize == 64 then above assert would check if baseline isa supported

-#if defined(TARGET_X86)
-// TODO-XARCH-CQ: We need to support 64-bit CreateBroadcast
-break;
-#endif // TARGET_X86
+if ((simdSize == 32) && compOpportunisticallyDependsOn(InstructionSet_AVX2))
+{
+    // We'll use AVX2 routine to emulate NI_AVX512DQ_VL_MultiplyLow
+}
+else if ((simdSize == 16) && compOpportunisticallyDependsOn(InstructionSet_SSE41))
+{
+    // We'll use SSE41 routine to emulate NI_AVX512DQ_VL_MultiplyLow
+}
+else
+{
+    // Fallback
+    break;
+}
}

CORINFO_ARG_LIST_HANDLE arg1 = sig->args;
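
Taken together with the gentree.cpp change above, the effective selection order for long/ulong multiplication is: AVX-512DQ+VL (vpmullq) when available, the AVX2 expansion for 32-byte vectors, the SSE4.1 expansion for 16-byte vectors, and the scalar fallback otherwise (including 32-bit targets). A managed-level sketch of the same decision order using only public IsSupported properties; the helper name, its shape, and the use of Environment.Is64BitProcess as a stand-in for the JIT's pointer-size check are illustrative, not part of this change:

    using System;
    using System.Runtime.Intrinsics.X86;

    static bool CanAccelerateLongMultiply(int simdSizeInBytes)
    {
        if (!Environment.Is64BitProcess)
            return false;                  // 32-bit targets still take the scalar fallback

        if (Avx512DQ.VL.IsSupported)
            return true;                   // vpmullq handles 128/256-bit lanes directly
        if (simdSizeInBytes == 32)
            return Avx2.IsSupported;       // emulated with the AVX2 sequence above
        if (simdSizeInBytes == 16)
            return Sse41.IsSupported;      // emulated with the SSE4.1 sequence above
        return false;
    }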
1 change: 1 addition & 0 deletions src/coreclr/jit/simd.h
@@ -1015,6 +1015,7 @@ void BroadcastConstantToSimd(TSimd* result, TBase arg0)
#define SHUFFLE_XYZW 0x1B // 00 01 10 11
#define SHUFFLE_YXYX 0x44 // 01 00 01 00
#define SHUFFLE_YWXZ 0x72 // 01 11 00 10
+#define SHUFFLE_YWXW 0x73 // 01 11 00 11
#define SHUFFLE_YYZZ 0x5A // 01 01 10 10
#define SHUFFLE_ZXXX 0x80 // 10 00 00 00
#define SHUFFLE_ZXXY 0x81 // 10 00 00 01
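
The new constant follows the existing convention in this table: the letters name, from the highest result lane to the lowest, which source lane (X=0, Y=1, Z=2, W=3) is selected, and each letter contributes one 2-bit field of the pshufd control byte, so YWXW encodes 01 11 00 11 = 0x73. In the expansion above it moves each cross-term sum produced by HorizontalAdd into the upper 32 bits of its 64-bit lane and pairs it with a zero in the lower 32 bits. A small managed illustration of the same control value (the input values are arbitrary example data):

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    if (Sse2.IsSupported)
    {
        // Lanes, low to high: X=10, Y=20, Z=30, W=40
        Vector128<int> src = Vector128.Create(10, 20, 30, 40);

        // SHUFFLE_YWXW (0x73): result lanes, low to high, take W, X, W, Y
        Vector128<int> dst = Sse2.Shuffle(src, 0x73);   // <40, 10, 40, 20>
    }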
@@ -263,33 +263,6 @@ public static Vector128<T> Zero
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector128<T> operator *(Vector128<T> left, Vector128<T> right)
{
-    // Multiplication for long/ulong is currently accelerated only with AVX512
-    // We need workarounds for ARM64 and pre-AVX512 hw:
-    // TODO: move this expansion to JIT
-    if (typeof(T) == typeof(long) || typeof(T) == typeof(ulong))
-    {
-        if (Sse41.IsSupported)
-        {
-            Vector128<uint> tmp1 = Sse2.Shuffle(right.AsUInt32(), 0xB1); // _MM_SHUFFLE(2,3,0,1)
-            Vector128<uint> tmp2 = Sse41.MultiplyLow(left.AsUInt32(), tmp1);
-            Vector128<int> tmp3 = Ssse3.HorizontalAdd(tmp2.AsInt32(), Vector128<int>.Zero);
-            Vector128<int> tmp4 = Sse2.Shuffle(tmp3, 0x73); // _MM_SHUFFLE(1,3,0,3)
-            Vector128<ulong> tmp5 = Sse2.Multiply(left.AsUInt32(), right.AsUInt32());
-            return Unsafe.BitCast<Vector128<ulong>, Vector128<T>>(tmp5.AsUInt64() + tmp4.AsUInt64());
-        }
-        if (AdvSimd.Arm64.IsSupported)
-        {
-            Vector64<uint> lHi = AdvSimd.ShiftRightLogicalNarrowingLower(left.AsUInt64(), 32);
-            Vector64<uint> lLo = AdvSimd.ExtractNarrowingLower(left.AsUInt64());
-            Vector64<uint> rHi = AdvSimd.ShiftRightLogicalNarrowingLower(right.AsUInt64(), 32);
-            Vector64<uint> rLo = AdvSimd.ExtractNarrowingLower(right.AsUInt64());
-            Vector128<ulong> ret64 = AdvSimd.MultiplyWideningLower(lHi, rLo);
-            ret64 = AdvSimd.MultiplyWideningLowerAndAdd(ret64, lLo, rHi);
-            return Unsafe.BitCast<Vector128<ulong>, Vector128<T>>(
-                AdvSimd.MultiplyWideningLowerAndAdd(ret64 << 32, lLo, rLo));
-        }
-    }
-
return Vector128.Create(
left._lower * right._lower,
left._upper * right._upper
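
With the managed expansion deleted, the operator body reduces to the element-wise Vector128.Create path kept above, and the SIMD lowering is applied by the JIT whenever the ISA checks added in hwintrinsicxarch.cpp succeed. A minimal caller-side sketch; the exact instructions emitted depend on the host CPU:

    using System.Runtime.Intrinsics;

    Vector128<long> left    = Vector128.Create(3L, -4L);
    Vector128<long> right   = Vector128.Create(5L, 6L);
    Vector128<long> product = left * right;   // <15, -24>; on SSE4.1/AVX2/AVX-512 hardware this is
                                              // expected to lower to the SIMD sequence from gentree.cpp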