-
Notifications
You must be signed in to change notification settings - Fork 13.6k
InstCombine: improve optimizations for ceiling division with no overflow #142869
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
InstCombine: improve optimizations for ceiling division with no overflow #142869
Conversation
Thank you for submitting a Pull Request (PR) to the LLVM Project! This PR will be automatically labeled and the relevant teams will be notified. If you wish to, you can add reviewers by using the "Reviewers" section on this page. If this is not working for you, it is probably because you do not have write permissions for the repository. In which case you can instead tag reviewers by name in a comment by using @ followed by their GitHub username. If you have received no comments on your PR for a week, you can request a review by "ping"ing the PR by adding a comment “Ping”. The common courtesy "ping" rate is once a week. Please remember that you are asking for valuable time from other developers. If you have further questions, they may be answered by the LLVM GitHub User Guide. You can also ask questions in a comment on this PR, on the LLVM Discord or on the forums. |
@llvm/pr-subscribers-llvm-transforms Author: None (gaynor-anthropic) Changes: fixes #142497 The contents of this pull request were substantially written using claude-code. I've reviewed to the best of my ability (it's been years since I did any compilers work). Full diff: https://github.com/llvm/llvm-project/pull/142869.diff 2 Files Affected:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index a9ac5ff9b9c89..16ebd7bceff63 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1787,6 +1787,50 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
if (Instruction *Ashr = foldAddToAshr(I))
return Ashr;
+ // Ceiling division by power-of-2:
+ // (X >> log2(N)) + zext(X & (N-1) != 0) --> (X + (N-1)) >> log2(N)
+ // This is valid when adding (N-1) to X doesn't overflow.
+ {
+ Value *X = nullptr, *Cmp = nullptr;
+ const APInt *ShiftAmt = nullptr, *Mask = nullptr;
+ CmpPredicate Pred;
+
+ // Match: (X >> C) + zext((X & Mask) != 0)
+ // or: zext((X & Mask) != 0) + (X >> C)
+ Value *Op0 = I.getOperand(0);
+ Value *Op1 = I.getOperand(1);
+
+ // Try matching with shift on left, zext on right
+ bool Matched = false;
+ if (match(Op0, m_LShr(m_Value(X), m_APInt(ShiftAmt))) &&
+ match(Op1, m_ZExt(m_Value(Cmp)))) {
+ Matched = match(Cmp, m_ICmp(Pred, m_And(m_Specific(X), m_APInt(Mask)),
+ m_ZeroInt()));
+ } else if (match(Op1, m_LShr(m_Value(X), m_APInt(ShiftAmt))) &&
+ match(Op0, m_ZExt(m_Value(Cmp)))) {
+ Matched = match(Cmp, m_ICmp(Pred, m_And(m_Specific(X), m_APInt(Mask)),
+ m_ZeroInt()));
+ }
+
+ if (Matched &&
+ Pred == ICmpInst::ICMP_NE &&
+ ShiftAmt && ShiftAmt->uge(1) && ShiftAmt->ult(BitWidth) &&
+ Mask && *Mask == (APInt(BitWidth, 1) << *ShiftAmt) - 1) {
+
+ // Check if X + Mask doesn't overflow
+ Constant *MaskC = ConstantInt::get(X->getType(), *Mask);
+ bool WillNotOverflowUnsigned = willNotOverflowUnsignedAdd(X, MaskC, I);
+
+ if (WillNotOverflowUnsigned) {
+ // (X + Mask) >> ShiftAmt
+ bool WillNotOverflowSigned = willNotOverflowSignedAdd(X, MaskC, I);
+ Value *Add = Builder.CreateAdd(X, MaskC, "", WillNotOverflowUnsigned,
+ WillNotOverflowSigned);
+ return BinaryOperator::CreateLShr(Add, ConstantInt::get(X->getType(), *ShiftAmt));
+ }
+ }
+ }
+
// (~X) + (~Y) --> -2 - (X + Y)
{
// To ensure we can save instructions we need to ensure that we consume both
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index 495f99824652d..d364082eab317 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -4273,4 +4273,161 @@ define i32 @fold_zext_nneg_add_const_fail2(i8 %x) {
}
declare void @llvm.assume(i1)
+declare i32 @llvm.ctlz.i32(i32, i1)
+
+; Ceiling division by power-of-2: (x >> log2(N)) + ((x & (N-1)) != 0) -> (x + (N-1)) >> log2(N)
+; This is only valid when x + (N-1) doesn't overflow
+
+; Test with known range that prevents overflow
+define noundef range(i32 0, 100) i32 @ceil_div_by_8_known_range(i32 noundef range(i32 0, 100) %x) {
+; CHECK-LABEL: @ceil_div_by_8_known_range(
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i32 [[X:%.*]], 7
+; CHECK-NEXT: [[R:%.*]] = lshr i32 [[TMP1]], 3
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %shr = lshr i32 %x, 3
+ %and = and i32 %x, 7
+ %cmp = icmp ne i32 %and, 0
+ %ext = zext i1 %cmp to i32
+ %r = add i32 %shr, %ext
+ ret i32 %r
+}
+
+; Test with the exact IR from the original testcase
+define noundef range(i32 0, 6) i32 @ceil_div_from_clz(i32 noundef %v) {
+; CHECK-LABEL: @ceil_div_from_clz(
+; CHECK-NEXT: [[CTLZ:%.*]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[V:%.*]], i1 false)
+; CHECK-NEXT: [[TMP1:%.*]] = sub nuw nsw i32 39, [[CTLZ]]
+; CHECK-NEXT: [[R:%.*]] = lshr i32 [[TMP1]], 3
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %ctlz = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 %v, i1 false)
+ %sub = sub nuw nsw i32 32, %ctlz
+ %shr = lshr i32 %sub, 3
+ %and = and i32 %sub, 7
+ %cmp = icmp ne i32 %and, 0
+ %ext = zext i1 %cmp to i32
+ %r = add nuw nsw i32 %shr, %ext
+ ret i32 %r
+}
+
+; Vector version with known range
+define <2 x i32> @ceil_div_by_8_vec_range(<2 x i32> range(i32 0, 1000) %x) {
+; CHECK-LABEL: @ceil_div_by_8_vec_range(
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw <2 x i32> [[X:%.*]], splat (i32 7)
+; CHECK-NEXT: [[R:%.*]] = lshr <2 x i32> [[TMP1]], splat (i32 3)
+; CHECK-NEXT: ret <2 x i32> [[R]]
+;
+ %shr = lshr <2 x i32> %x, <i32 3, i32 3>
+ %and = and <2 x i32> %x, <i32 7, i32 7>
+ %cmp = icmp ne <2 x i32> %and, <i32 0, i32 0>
+ %ext = zext <2 x i1> %cmp to <2 x i32>
+ %r = add <2 x i32> %shr, %ext
+ ret <2 x i32> %r
+}
+
+; Ceiling division by 16 with known range
+define i16 @ceil_div_by_16_i16(i16 range(i16 0, 1000) %x) {
+; CHECK-LABEL: @ceil_div_by_16_i16(
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i16 [[X:%.*]], 15
+; CHECK-NEXT: [[R:%.*]] = lshr i16 [[TMP1]], 4
+; CHECK-NEXT: ret i16 [[R]]
+;
+ %shr = lshr i16 %x, 4
+ %and = and i16 %x, 15
+ %cmp = icmp ne i16 %and, 0
+ %ext = zext i1 %cmp to i16
+ %r = add i16 %shr, %ext
+ ret i16 %r
+}
+
+; Negative test: no overflow guarantee - should NOT optimize
+define i32 @ceil_div_by_8_no_overflow_info(i32 %x) {
+; CHECK-LABEL: @ceil_div_by_8_no_overflow_info(
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 3
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 7
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SHR]], [[EXT]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %shr = lshr i32 %x, 3
+ %and = and i32 %x, 7
+ %cmp = icmp ne i32 %and, 0
+ %ext = zext i1 %cmp to i32
+ %r = add i32 %shr, %ext
+ ret i32 %r
+}
+
+; Negative test: nuw on final add doesn't help
+define i32 @ceil_div_by_8_only_nuw_on_add(i32 %x) {
+; CHECK-LABEL: @ceil_div_by_8_only_nuw_on_add(
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 3
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 7
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SHR]], [[EXT]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %shr = lshr i32 %x, 3
+ %and = and i32 %x, 7
+ %cmp = icmp ne i32 %and, 0
+ %ext = zext i1 %cmp to i32
+ %r = add nuw i32 %shr, %ext ; nuw here doesn't prove x+7 won't overflow
+ ret i32 %r
+}
+
+; Negative test: wrong mask
+define i32 @ceil_div_wrong_mask(i32 range(i32 0, 100) %x) {
+; CHECK-LABEL: @ceil_div_wrong_mask(
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 3
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 6
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SHR]], [[EXT]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %shr = lshr i32 %x, 3
+ %and = and i32 %x, 6 ; Wrong mask: should be 7
+ %cmp = icmp ne i32 %and, 0
+ %ext = zext i1 %cmp to i32
+ %r = add i32 %shr, %ext
+ ret i32 %r
+}
+
+; Negative test: wrong shift amount
+define i32 @ceil_div_wrong_shift(i32 range(i32 0, 100) %x) {
+; CHECK-LABEL: @ceil_div_wrong_shift(
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 4
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 7
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SHR]], [[EXT]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %shr = lshr i32 %x, 4 ; Shift by 4, but mask is 7 (should be 15)
+ %and = and i32 %x, 7
+ %cmp = icmp ne i32 %and, 0
+ %ext = zext i1 %cmp to i32
+ %r = add i32 %shr, %ext
+ ret i32 %r
+}
+
+; Negative test: wrong comparison
+define i32 @ceil_div_wrong_cmp(i32 range(i32 0, 100) %x) {
+; CHECK-LABEL: @ceil_div_wrong_cmp(
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[X:%.*]], 3
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], 7
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i32 [[SHR]], [[EXT]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %shr = lshr i32 %x, 3
+ %and = and i32 %x, 7
+ %cmp = icmp eq i32 %and, 0 ; Wrong: should be ne
+ %ext = zext i1 %cmp to i32
+ %r = add i32 %shr, %ext
+ ret i32 %r
+}
declare void @fake_func(i32)
|
Hmm, unfortunately given the length of the logs, I'm not sure how to find the part that actually failed :-/ |
You can test this locally with the following command:git-clang-format --diff HEAD~1 HEAD --extensions cpp -- llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp View the diff from clang-format here.diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index a2f897080..489ebb208 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1802,9 +1802,8 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
Shift->hasOneUse() &&
match(Cmp, m_ICmp(Pred, m_And(m_Specific(X), m_LowBitMask(Mask)),
m_ZeroInt())) &&
- Pred == ICmpInst::ICMP_NE &&
- ShiftAmt && ShiftAmt->uge(1) && ShiftAmt->ult(BitWidth) &&
- Mask && Mask->popcount() == *ShiftAmt) {
+ Pred == ICmpInst::ICMP_NE && ShiftAmt && ShiftAmt->uge(1) &&
+ ShiftAmt->ult(BitWidth) && Mask && Mask->popcount() == *ShiftAmt) {
// Check if X + Mask doesn't overflow
Constant *MaskC = ConstantInt::get(X->getType(), *Mask);
@@ -1814,8 +1813,9 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
// (X + Mask) >> ShiftAmt
bool WillNotOverflowSigned = willNotOverflowSignedAdd(X, MaskC, I);
Value *Add = Builder.CreateAdd(X, MaskC, "", WillNotOverflowUnsigned,
- WillNotOverflowSigned);
- return BinaryOperator::CreateLShr(Add, ConstantInt::get(X->getType(), *ShiftAmt));
+ WillNotOverflowSigned);
+ return BinaryOperator::CreateLShr(
+ Add, ConstantInt::get(X->getType(), *ShiftAmt));
}
}
}
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can you please provide generalized Alive2 proof?
See also https://llvm.org/docs/InstCombineContributorGuide.html#proofs.
|
||
// Try matching with shift on left, zext on right | ||
bool Matched = false; | ||
if (match(Op0, m_LShr(m_Value(X), m_APInt(ShiftAmt))) && |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please use m_c_Add(m_LShr(xxx), m_ZExt(xxx))
instead.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Missing one-use check.
Oops, sorry — the alive2 proof was on the issue: https://alive2.llvm.org/ce/z/Ys4qAy Let me know if that's not sufficient. Will make the other improvements later this evening. |
I mean "generalized" proof. The proof should replace specific constants (excluding bitwidth) with parameters and preconditions. See https://llvm.org/docs/InstCombineContributorGuide.html#use-generic-values-in-proofs |
https://alive2.llvm.org/ce/z/HRxA3c generalized proof for all integer powers of 2. Hopefully that looks good. Will work on your code review notes next. Thank you! |
Updated: https://alive2.llvm.org/ce/z/CeaHaH |
if (match(&I, m_c_Add(m_Value(Shift), m_ZExt(m_Value(Cmp)))) && | ||
match(Shift, m_LShr(m_Value(X), m_APInt(ShiftAmt))) && | ||
Shift->hasOneUse() && | ||
match(Cmp, m_ICmp(Pred, m_And(m_Specific(X), m_LowBitMask(Mask)), | ||
m_ZeroInt())) && | ||
Pred == ICmpInst::ICMP_NE && | ||
ShiftAmt && ShiftAmt->uge(1) && ShiftAmt->ult(BitWidth) && | ||
Mask && Mask->popcount() == *ShiftAmt) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
if (match(&I, m_c_Add(m_Value(Shift), m_ZExt(m_Value(Cmp)))) && | |
match(Shift, m_LShr(m_Value(X), m_APInt(ShiftAmt))) && | |
Shift->hasOneUse() && | |
match(Cmp, m_ICmp(Pred, m_And(m_Specific(X), m_LowBitMask(Mask)), | |
m_ZeroInt())) && | |
Pred == ICmpInst::ICMP_NE && | |
ShiftAmt && ShiftAmt->uge(1) && ShiftAmt->ult(BitWidth) && | |
Mask && Mask->popcount() == *ShiftAmt) { | |
if (match(&I, m_c_Add(m_OneUse(m_LShr(m_Value(X), m_APInt(ShiftAmt))), m_ZExt(m_SpecificICmp(ICmpInst::ICMP_NE, m_And(m_Deferred(X), m_LowBitMask(Mask)), | |
m_ZeroInt())))) && Mask->popcount() == *ShiftAmt) { |
If the match succeeds, all the out parameters are non-null.
@@ -4273,4 +4273,161 @@ define i32 @fold_zext_nneg_add_const_fail2(i8 %x) { | |||
} | |||
|
|||
declare void @llvm.assume(i1) | |||
declare i32 @llvm.ctlz.i32(i32, i1) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please add multi-use/commuted tests.
bool WillNotOverflowSigned = willNotOverflowSignedAdd(X, MaskC, I); | ||
Value *Add = Builder.CreateAdd(X, MaskC, "", WillNotOverflowUnsigned, | ||
WillNotOverflowSigned); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
bool WillNotOverflowSigned = willNotOverflowSignedAdd(X, MaskC, I); | |
Value *Add = Builder.CreateAdd(X, MaskC, "", WillNotOverflowUnsigned, | |
WillNotOverflowSigned); | |
Value *Add = Builder.CreateNUWAdd(X, MaskC); |
I don't think it is worthwhile to infer NSW flags here. InstCombine will do it later.
IR diff looks good :) |
fixes #142497
The contents of this pull request were substantially written using claude-code. I've reviewed to the best of my ability (it's been years since I did any compilers work).