[X86] Extend `combinei64TruncSrlAdd` to handle patterns with `or` and `xor` #128435

Conversation
@llvm/pr-subscribers-backend-x86

Author: João Gouveia (joaotgouveia)

Changes

As discussed in #126448, the fold implemented by #126448 / #128353 can be extended to operations other than `add`. This patch extends the fold performed by `combinei64TruncSrlAdd` to include `or` and `xor` (proof: https://alive2.llvm.org/ce/z/AXuaQu). There's no need to extend it to `sub` and `and`, as similar folds are already being performed for those operations.

CC: @phoebewang @RKSimon

Full diff: https://github.com/llvm/llvm-project/pull/128435.diff

2 Files Affected:

- llvm/lib/Target/X86/X86ISelLowering.cpp
- llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c146e1e6c0334..47dc9ffb4b24d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -53733,36 +53733,42 @@ static SDValue combineLRINT_LLRINT(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(X86ISD::CVTP2SI, DL, VT, Src);
}
-// Attempt to fold some (truncate (srl (add X, C1), C2)) patterns to
-// (add (truncate (srl X, C2)), C1'). C1' will be smaller than C1 so we are able
-// to avoid generating code with MOVABS and large constants in certain cases.
-static SDValue combinei64TruncSrlAdd(SDValue N, EVT VT, SelectionDAG &DAG,
- const SDLoc &DL) {
+// Attempt to fold some (truncate (srl (binop X, C1), C2)) patterns to
+// (binop (truncate (srl X, C2)), C1'). C1' will be smaller than C1 so we are
+// able to avoid generating code with MOVABS and large constants in certain
+// cases.
+static SDValue combinei64TruncSrlBinop(SDValue N, EVT VT, SelectionDAG &DAG,
+ const SDLoc &DL) {
using namespace llvm::SDPatternMatch;
- SDValue AddLhs;
- APInt AddConst, SrlConst;
+ SDValue BinopLhs;
+ APInt BinopConst, SrlConst;
if (VT != MVT::i32 ||
- !sd_match(N, m_AllOf(m_SpecificVT(MVT::i64),
- m_Srl(m_OneUse(m_Add(m_Value(AddLhs),
- m_ConstInt(AddConst))),
- m_ConstInt(SrlConst)))))
+ !sd_match(
+ N,
+ m_AllOf(m_SpecificVT(MVT::i64),
+ m_Srl(m_OneUse(m_AnyOf(
+ m_Add(m_Value(BinopLhs), m_ConstInt(BinopConst)),
+ m_Or(m_Value(BinopLhs), m_ConstInt(BinopConst)),
+ m_Xor(m_Value(BinopLhs), m_ConstInt(BinopConst)))),
+ m_ConstInt(SrlConst)))))
return SDValue();
- if (SrlConst.ule(32) || AddConst.countr_zero() < SrlConst.getZExtValue())
+ if (SrlConst.ule(32) || BinopConst.countr_zero() < SrlConst.getZExtValue())
return SDValue();
- SDValue AddLHSSrl =
- DAG.getNode(ISD::SRL, DL, MVT::i64, AddLhs, N.getOperand(1));
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, AddLHSSrl);
+ SDValue BinopLHSSrl =
+ DAG.getNode(ISD::SRL, DL, MVT::i64, BinopLhs, N.getOperand(1));
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, BinopLHSSrl);
- APInt NewAddConstVal = AddConst.lshr(SrlConst).trunc(VT.getSizeInBits());
- SDValue NewAddConst = DAG.getConstant(NewAddConstVal, DL, VT);
- SDValue NewAddNode = DAG.getNode(ISD::ADD, DL, VT, Trunc, NewAddConst);
+ APInt NewBinopConstVal = BinopConst.lshr(SrlConst).trunc(VT.getSizeInBits());
+ SDValue NewBinopConst = DAG.getConstant(NewBinopConstVal, DL, VT);
+ SDValue NewBinopNode =
+ DAG.getNode(N.getOperand(0).getOpcode(), DL, VT, Trunc, NewBinopConst);
EVT CleanUpVT =
EVT::getIntegerVT(*DAG.getContext(), 64 - SrlConst.getZExtValue());
- return DAG.getZeroExtendInReg(NewAddNode, DL, CleanUpVT);
+ return DAG.getZeroExtendInReg(NewBinopNode, DL, CleanUpVT);
}
/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
@@ -53810,11 +53816,9 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
if (!Src.hasOneUse())
return SDValue();
- if (SDValue R = combinei64TruncSrlAdd(Src, VT, DAG, DL))
+ if (SDValue R = combinei64TruncSrlBinop(Src, VT, DAG, DL))
return R;
- // Only support vector truncation for now.
- // TODO: i64 scalar math would benefit as well.
if (!VT.isVector())
return SDValue();
diff --git a/llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll b/llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll
index 14992ca5bf488..ec29cf9d56c29 100644
--- a/llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll
+++ b/llvm/test/CodeGen/X86/combine-i64-trunc-srl-add.ll
@@ -128,6 +128,61 @@ define i32 @test_trunc_add(i64 %x) {
ret i32 %conv
}
+define i32 @test_trunc_sub(i64 %x) {
+; X64-LABEL: test_trunc_sub:
+; X64: # %bb.0:
+; X64-NEXT: shrq $48, %rdi
+; X64-NEXT: addl $65522, %edi # imm = 0xFFF2
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: retq
+ %sub = sub i64 %x, 3940649673949184
+ %shr = lshr i64 %sub, 48
+ %conv = trunc i64 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @test_trunc_and(i64 %x) {
+; X64-LABEL: test_trunc_and:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shrq $48, %rax
+; X64-NEXT: andl $14, %eax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: retq
+ %and = and i64 %x, 3940649673949184
+ %shr = lshr i64 %and, 48
+ %conv = trunc i64 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @test_trunc_or(i64 %x) {
+; X64-LABEL: test_trunc_or:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shrq $48, %rax
+; X64-NEXT: orl $14, %eax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: retq
+ %or = or i64 %x, 3940649673949184
+ %shr = lshr i64 %or, 48
+ %conv = trunc i64 %shr to i32
+ ret i32 %conv
+}
+
+define i32 @test_trunc_xor(i64 %x) {
+; X64-LABEL: test_trunc_xor:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shrq $48, %rax
+; X64-NEXT: xorl $14, %eax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-NEXT: retq
+ %xor = xor i64 %x, 3940649673949184
+ %shr = lshr i64 %xor, 48
+ %conv = trunc i64 %shr to i32
+ ret i32 %conv
+}
+
; Make sure we don't crash on this test case.
define i32 @pr128158(i64 %x) {
@@ -137,10 +192,10 @@ define i32 @pr128158(i64 %x) {
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: shrq $32, %rax
; X64-NEXT: .p2align 4
-; X64-NEXT: .LBB9_1: # %for.body
+; X64-NEXT: .LBB13_1: # %for.body
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: cmpl $9, %eax
-; X64-NEXT: jb .LBB9_1
+; X64-NEXT: jb .LBB13_1
; X64-NEXT: # %bb.2: # %exit
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
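The point of the fold is visible in the immediates above: the 64-bit constant 3940649673949184 would need a MOVABS, while the folded code uses small 32-bit immediates (`orl $14`, `xorl $14`, `addl $65522`). A minimal standalone sketch of that constant arithmetic (illustration only, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // 3940649673949184 == 0xE000000000000 is too wide for a 32-bit immediate,
  // so a 64-bit binop against it needs a MOVABS to materialize the constant.
  const uint64_t C1 = 0xE000000000000ULL;
  const unsigned C2 = 48; // the lshr amount used in the tests

  // After the fold, the binop is performed in 32 bits against C1 >> C2.
  const uint32_t NewC1 = static_cast<uint32_t>(C1 >> C2);
  assert(NewC1 == 14); // matches the `orl $14` / `xorl $14` immediates above

  // test_trunc_sub is the same idea: sub becomes add of the negated constant.
  const uint64_t NegC1 = ~C1 + 1; // two's complement of C1
  const uint32_t NewNegC1 = static_cast<uint32_t>(NegC1 >> C2);
  assert(NewNegC1 == 65522); // matches `addl $65522, %edi` (0xFFF2)
  return 0;
}
```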
  return SDValue();

-  if (SrlConst.ule(32) || AddConst.countr_zero() < SrlConst.getZExtValue())
+  if (SrlConst.ule(32) || BinopConst.countr_zero() < SrlConst.getZExtValue())
For `or`/`xor`, the second condition isn't needed, and without that limitation (https://alive2.llvm.org/ce/z/uEAYxG) the `and` case I showed in https://godbolt.org/z/1za939PKc should also work.
I've added test cases that cover the scenario you mentioned for AND, as well as equivalent cases for OR and XOR. It seems the fold works as-is for these cases, though I'm not entirely sure why.
You are right. I mistook it for `add`.
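A standalone brute-force check (not from the PR, just an 8-bit model of the i64 fold) of why `or`/`xor` don't need the trailing-zero condition: a logical right shift distributes over `or`/`xor`/`and` for any constant, whereas for `add` it only works when the constant's bits below the shift amount are zero, and the narrowed sum still has to be masked (which is what the `getZeroExtendInReg` cleanup does):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const unsigned Shift = 4; // stand-in for the srl amount

  for (unsigned X = 0; X < 256; ++X) {
    for (unsigned C = 0; C < 256; ++C) {
      const uint8_t Or = static_cast<uint8_t>(X | C);
      const uint8_t Xor = static_cast<uint8_t>(X ^ C);
      const uint8_t And = static_cast<uint8_t>(X & C);
      const uint8_t Add = static_cast<uint8_t>(X + C); // wraps like the wide add

      // or/xor/and: the shift distributes unconditionally, and the result
      // already fits in the low bits, so no extra cleanup is needed.
      assert(Or >> Shift == ((X >> Shift) | (C >> Shift)));
      assert(Xor >> Shift == ((X >> Shift) ^ (C >> Shift)));
      assert(And >> Shift == ((X >> Shift) & (C >> Shift)));

      // add: only safe when the constant's low `Shift` bits are zero, and the
      // narrowed sum still has to be masked to model the original wrap-around
      // (the getZeroExtendInReg cleanup in the patch).
      if ((C & 0xF) == 0)
        assert(Add >> Shift == (((X >> Shift) + (C >> Shift)) & 0xF));
    }
  }
  return 0;
}
```

This also matches the later observation in this review that no `movz` is generated for the `or`/`xor` cases: with no carries, the bits above `64 - C2` stay zero without any cleanup.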
@@ -53810,11 +53816,9 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
   if (!Src.hasOneUse())
     return SDValue();

-  if (SDValue R = combinei64TruncSrlAdd(Src, VT, DAG, DL))
+  if (SDValue R = combinei64TruncSrlBinop(Src, VT, DAG, DL))
Does it work for MUL? Add a test case if yes, and exclude it explicitly in the code if not.
We also have other binops like min/max etc., so we should add a switch check like below.
This exact fold doesn't work for MUL: https://alive2.llvm.org/ce/z/Xoz8hb (I had to reduce the size of the ints because Alive2 was timing out).
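For reference, a tiny standalone counterexample (scaled down to 8 bits, independent of the Alive2 link above) of why the rewrite is unsound for `mul` even when the constant's low bits are clear: the low bits of X multiply into the high bits, so they can't be shifted out before the multiply.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const unsigned Shift = 4;
  const uint8_t X = 0x18;
  const uint8_t C = 0x30; // low 4 bits are clear, like the add-case constants

  // Original: multiply in the wide type (wrapping), then shift.
  const unsigned Original = static_cast<uint8_t>(X * C) >> Shift; // ((0x18 * 0x30) & 0xFF) >> 4 == 8

  // Folded form: shift both operands first, multiply narrow, mask.
  const unsigned Folded = ((X >> Shift) * (C >> Shift)) & 0xF;    // (1 * 3) & 0xF == 3

  std::printf("original=%u folded=%u\n", Original, Folded); // 8 vs 3
  return 0;
}
```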
                   m_Add(m_Value(BinopLhs), m_ConstInt(BinopConst)),
                   m_Or(m_Value(BinopLhs), m_ConstInt(BinopConst)),
                   m_Xor(m_Value(BinopLhs), m_ConstInt(BinopConst)))),
Use `m_Binary` once we've limited the binop to add/sub/and/or/xor and/or mul.
I think we can get rid of SDPatternMatch by checking `VT == MVT::i32 && SrcVT == MVT::i64 && isa<ConstantSDNode>(N->getOperand(1)) && isa<ConstantSDNode>(Src.getOperand(1))` before calling `combinei64TruncSrlBinop`.
Wouldn't it be neater to move those checks inside `combinei64TruncSrlBinop`? We could use `m_Binary` to match the pattern and then switch-case the opcode to perform the different checks required for AND/OR/XOR and ADD/SUB.
My assumption is that when we have only one exact match, either `m_Add` or `m_Binary` would be fine. If neither matches, we don't bother, because the check for OR/XOR/ADD/SUB is enough.
I've changed the function according to your suggestions. It could probably be simplified if we always clean up using `getZeroExtendInReg`, as it seems that for OR and XOR the `movz` is not generated, regardless of whether we're zero-extending or any-extending.
-  if (SDValue R = combinei64TruncSrlAdd(Src, VT, DAG, DL))
-    return R;
+  if (VT == MVT::i32 && SrcVT == MVT::i64 && SrcOpcode == ISD::SRL &&
+      Src.getOperand(0).getNumOperands() == 2 &&
We do not need to check `NumOperands == 2`.
Removing that check causes LLVM to crash on a few regression tests (`CodeGen/X86/arg-cast.ll`, `CodeGen/X86/pr49162.ll`, `CodeGen/X86/vector-reduce-xor-bool.ll`, and `ExecutionEngine/MCJIT/non-extern-addend.ll`). I might be mistaken, but at the time `combineTruncatedArithmetic` is called, we have no information about whether the first operand of `Src` is actually a binary operation. Calling `Src.getOperand(0).getOperand(1)` might cause us to attempt to access an invalid operand.
Oh, right! We can check opcode = ADD/OR/AND before it then.
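A rough sketch of what that pre-check could look like inside `combineTruncatedArithmetic` (illustrative only; it reuses the names from the revision under review, restricts to the opcodes the fold actually handles, and is not the code that finally landed):

```cpp
// Hypothetical guard: check the opcodes first, so that by the time we touch
// Src.getOperand(0).getOperand(1) both operand accesses are known to be valid
// (SRL and ADD/OR/XOR nodes always have two operands).
if (VT == MVT::i32 && SrcVT == MVT::i64 && SrcOpcode == ISD::SRL) {
  SDValue Inner = Src.getOperand(0);
  unsigned InnerOpc = Inner.getOpcode();
  bool IsFoldableBinop =
      InnerOpc == ISD::ADD || InnerOpc == ISD::OR || InnerOpc == ISD::XOR;
  if (IsFoldableBinop && isa<ConstantSDNode>(Src.getOperand(1)) &&
      isa<ConstantSDNode>(Inner.getOperand(1)))
    if (SDValue R = combinei64TruncSrlBinop(Src, VT, DAG, DL))
      return R;
}
```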
      Src.getOperand(0).getNumOperands() == 2 &&
      isa<ConstantSDNode>(Src.getOperand(1)) &&
      isa<ConstantSDNode>(Src.getOperand(0).getOperand(1))) {
    if (SDValue R = combinei64TruncSrlBinop(Src, VT, DAG, DL))
Maybe `combinei64TruncSrlConstant`?
 // Attempt to fold some (truncate (srl (binop X, C1), C2)) patterns to
 // (binop (truncate (srl X, C2)), C1'). C1' will be smaller than C1 so we are
We can use `add/or/xor` to replace `binop`.
   SDValue BinopLHSSrl =
       DAG.getNode(ISD::SRL, DL, MVT::i64, Binop.getOperand(0), N.getOperand(1));
   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, BinopLHSSrl);

   APInt NewBinopConstVal = BinopConst.lshr(SrlConst).trunc(VT.getSizeInBits());
   SDValue NewBinopConst = DAG.getConstant(NewBinopConstVal, DL, VT);
   SDValue NewBinopNode = DAG.getNode(Opcode, DL, VT, Trunc, NewBinopConst);
I think we can remove "Binop" from the names; it's a bit misleading, since it suggests this works for all binary operators.
   auto CleanUpFn = +[](SDValue Op, EVT CleanUpVT, EVT VT, SelectionDAG &DAG,
                        const SDLoc &DL) {
     SDValue CleanUp = DAG.getAnyExtOrTrunc(Op, DL, CleanUpVT);
     return DAG.getAnyExtOrTrunc(CleanUp, DL, VT);
   };
   auto ZeroExtCleanUp = +[](SDValue Op, EVT CleanUpVT, EVT VT,
                             SelectionDAG &DAG, const SDLoc &DL) {
     return DAG.getZeroExtendInReg(Op, DL, CleanUpVT);
   };
The lambdas make the readability worse. I'd prefer a simple if/else, or not using AnyExt.
   APInt NewOpConstVal = OpConst.lshr(SrlConst).trunc(VT.getSizeInBits());
`APInt::extractBits`?
If I'm not mistaken, by using `APInt::extractBits` the constant transformation becomes `APInt NewOpConstVal = OpConst.extractBits(64 - SrlConst.getZExtValue(), SrlConst.getZExtValue()).zext(VT.getSizeInBits());`. I believe the current implementation is simpler and easier to understand.
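For what it's worth, a small standalone check (assuming an LLVM build environment, since it links against `libLLVMSupport`) that the two formulations compute the same constant for the values used in the tests:

```cpp
#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

int main() {
  const APInt OpConst(64, 0xE000000000000ULL); // 3940649673949184
  const APInt SrlConst(64, 48);
  const unsigned DstBits = 32; // VT.getSizeInBits() for i32

  // Formulation kept in the patch: shift right, then truncate.
  const APInt A = OpConst.lshr(SrlConst).trunc(DstBits);

  // Suggested alternative: extract the bits above the shift amount, then zero-extend.
  const APInt B =
      OpConst.extractBits(64 - SrlConst.getZExtValue(), SrlConst.getZExtValue())
          .zext(DstBits);

  assert(A == B && A == 14);
  return 0;
}
```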
   SDValue CleanUp = DAG.getAnyExtOrTrunc(NewOpNode, DL, CleanUpVT);
   return DAG.getAnyExtOrTrunc(CleanUp, DL, VT);
Can we `return DAG.getAnyExtOrTrunc(NewOpNode, DL, VT);` directly?
That causes a crash because of an assert in `SelectionDAG::ReplaceAllUsesWith`.
I see the problem, so we should return `NewOpNode` directly, because we don't need to clean up the upper bits.
-  if (SrlConst.ule(32) || AddConst.countr_zero() < SrlConst.getZExtValue())
+  switch (Opcode) {
No need to use a switch, since we have limited it to ADD/OR/XOR in `combineTruncatedArithmetic`.
 ; X64-NEXT: movzwl %di, %eax
 ; X64-NEXT: retq
   %sub = sub i64 %x, 3940649673949184
   %shr = lshr i64 %sub, 48
Let's avoid using 48, so we also test for the problem #128353 was solving.
Sure. Is changing one test enough, or should I change more than one?
I'd like to change them all.
   SDValue CleanUp = DAG.getAnyExtOrTrunc(NewOpNode, DL, CleanUpVT);
   return DAG.getAnyExtOrTrunc(CleanUp, DL, VT);
return NewOpNode;
LGTM.
[X86] Extend `combinei64TruncSrlAdd` to handle patterns with `or` and `xor` (llvm#128435)

As discussed in llvm#126448, the fold implemented by llvm#126448 / llvm#128353 can be extended to operations other than `add`. This patch extends the fold performed by `combinei64TruncSrlAdd` to include `or` and `xor` (proof: https://alive2.llvm.org/ce/z/AXuaQu). There's no need to extend it to `sub` and `and`, as similar folds are already being performed for those operations.

CC: @phoebewang @RKSimon