[ValueTracking] AllowEphemerals for alignment assumptions. #108632
Conversation
@llvm/pr-subscribers-llvm-analysis

Author: Florian Hahn (fhahn)

Changes

Allow AllowEphemerals in isValidAssumeForContext, as the CxtI might be the producer of the pointer in the bundle. At the moment, align assumptions aren't optimized away.

This allows using the assumption in the computeKnownBits call in getConstantMultipleImpl.

We could extend the computeKnownBits API to allow callers to specify if ephemerals are allowed, if the info from computeKnownBitsFromContext is used to remove alignment assumptions.

Full diff: https://github.com/llvm/llvm-project/pull/108632.diff

2 Files Affected:
- llvm/lib/Analysis/ValueTracking.cpp
- llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index ba3ba7cc98136d..2e1b512ad9b0ad 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -820,9 +820,12 @@ void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known,
continue;
if (RetainedKnowledge RK = getKnowledgeFromBundle(
*I, I->bundle_op_info_begin()[Elem.Index])) {
+ // Allow AllowEphemerals in isValidAssumeForContext, as the CxtI might
+ // be the producer of the pointer in the bundle. At the moment, align
+ // assumptions aren't optimized away.
if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
isPowerOf2_64(RK.ArgValue) &&
- isValidAssumeForContext(I, Q.CxtI, Q.DT))
+ isValidAssumeForContext(I, Q.CxtI, Q.DT, /*AllowEphemerals*/ true))
Known.Zero.setLowBits(Log2_64(RK.ArgValue));
}
continue;
diff --git a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll
index 37d6584b1e85f1..476cbc74e4c092 100644
--- a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll
+++ b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll
@@ -1652,10 +1652,7 @@ exit:
ret void
}
-; TODO: It feels like we should be able to calculate the symbolic max
-; exit count for the loop.inc block here, in the same way as
-; ptr_induction_eq_1. The problem seems to be in howFarToZero when the
-; ControlsOnlyExit is set to false.
+; We are not able to compute exit counts, as %a and %b might not be multiples of 8.
define void @ptr_induction_early_exit_eq_1(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: 'ptr_induction_early_exit_eq_1'
; CHECK-NEXT: Classifying expressions for: @ptr_induction_early_exit_eq_1
@@ -1693,6 +1690,179 @@ exit:
ret void
}
+; Variant of @ptr_induction_early_exit_eq_1 with alignment info via load metadata.
+define void @ptr_induction_early_exit_eq_1_with_align_on_load(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: 'ptr_induction_early_exit_eq_1_with_align_on_load'
+; CHECK-NEXT: Classifying expressions for: @ptr_induction_early_exit_eq_1_with_align_on_load
+; CHECK-NEXT: %a_ = load ptr, ptr %a, align 8, !align !0
+; CHECK-NEXT: --> %a_ U: [0,-7) S: [-9223372036854775808,9223372036854775801)
+; CHECK-NEXT: %b_ = load ptr, ptr %b, align 8, !align !0
+; CHECK-NEXT: --> %b_ U: [0,-7) S: [-9223372036854775808,9223372036854775801)
+; CHECK-NEXT: %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a_, %entry ]
+; CHECK-NEXT: --> {%a_,+,8}<nuw><%loop> U: [0,-7) S: [-9223372036854775808,9223372036854775801) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: %ld1 = load ptr, ptr %ptr.iv, align 8
+; CHECK-NEXT: --> %ld1 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
+; CHECK-NEXT: %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8
+; CHECK-NEXT: --> {(8 + %a_),+,8}<nw><%loop> U: [0,-7) S: [-9223372036854775808,9223372036854775801) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: Determining loop execution counts for: @ptr_induction_early_exit_eq_1_with_align_on_load
+; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count.
+; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: exit count for loop.inc: ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 2305843009213693951
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: symbolic max exit count for loop.inc: ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+;
+entry:
+ %a_ = load ptr, ptr %a, !align !{i64 8}
+ %b_ = load ptr, ptr %b, !align !{i64 8}
+ %cmp = icmp eq ptr %a_, %b_
+ br i1 %cmp, label %exit, label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a_, %entry ]
+ %ld1 = load ptr, ptr %ptr.iv, align 8
+ %earlyexitcond = icmp eq ptr %ld1, %c
+ br i1 %earlyexitcond, label %exit, label %loop.inc
+
+loop.inc:
+ %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8
+ %exitcond = icmp eq ptr %ptr.iv.next, %b_
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; Variant of @ptr_induction_early_exit_eq_1 with alignment info via arument attributes.
+define void @ptr_induction_early_exit_eq_1_with_align_on_arguments(ptr align 8 %a, ptr align 8 %b, ptr %c) {
+; CHECK-LABEL: 'ptr_induction_early_exit_eq_1_with_align_on_arguments'
+; CHECK-NEXT: Classifying expressions for: @ptr_induction_early_exit_eq_1_with_align_on_arguments
+; CHECK-NEXT: %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a, %entry ]
+; CHECK-NEXT: --> {%a,+,8}<nuw><%loop> U: [0,-7) S: [-9223372036854775808,9223372036854775801) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: %ld1 = load ptr, ptr %ptr.iv, align 8
+; CHECK-NEXT: --> %ld1 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
+; CHECK-NEXT: %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8
+; CHECK-NEXT: --> {(8 + %a),+,8}<nw><%loop> U: [0,-7) S: [-9223372036854775808,9223372036854775801) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: Determining loop execution counts for: @ptr_induction_early_exit_eq_1_with_align_on_arguments
+; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count.
+; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: exit count for loop.inc: ((-8 + (-1 * (ptrtoint ptr %a to i64)) + (ptrtoint ptr %b to i64)) /u 8)
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 2305843009213693951
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8 + (-1 * (ptrtoint ptr %a to i64)) + (ptrtoint ptr %b to i64)) /u 8)
+; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: symbolic max exit count for loop.inc: ((-8 + (-1 * (ptrtoint ptr %a to i64)) + (ptrtoint ptr %b to i64)) /u 8)
+;
+entry:
+ %cmp = icmp eq ptr %a, %b
+ br i1 %cmp, label %exit, label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a, %entry ]
+ %ld1 = load ptr, ptr %ptr.iv, align 8
+ %earlyexitcond = icmp eq ptr %ld1, %c
+ br i1 %earlyexitcond, label %exit, label %loop.inc
+
+loop.inc:
+ %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8
+ %exitcond = icmp eq ptr %ptr.iv.next, %b
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; Variant of @ptr_induction_early_exit_eq_1 with alignment assumptions.
+define void @ptr_induction_early_exit_eq_1_align_assumption_1(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: 'ptr_induction_early_exit_eq_1_align_assumption_1'
+; CHECK-NEXT: Classifying expressions for: @ptr_induction_early_exit_eq_1_align_assumption_1
+; CHECK-NEXT: %a_ = load ptr, ptr %a, align 8
+; CHECK-NEXT: --> %a_ U: [0,-7) S: [-9223372036854775808,9223372036854775801)
+; CHECK-NEXT: %b_ = load ptr, ptr %b, align 8
+; CHECK-NEXT: --> %b_ U: [0,-7) S: [-9223372036854775808,9223372036854775801)
+; CHECK-NEXT: %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a_, %entry ]
+; CHECK-NEXT: --> {%a_,+,8}<nuw><%loop> U: [0,-7) S: [-9223372036854775808,9223372036854775801) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: %ld1 = load ptr, ptr %ptr.iv, align 8
+; CHECK-NEXT: --> %ld1 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
+; CHECK-NEXT: %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8
+; CHECK-NEXT: --> {(8 + %a_),+,8}<nw><%loop> U: [0,-7) S: [-9223372036854775808,9223372036854775801) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: Determining loop execution counts for: @ptr_induction_early_exit_eq_1_align_assumption_1
+; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count.
+; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: exit count for loop.inc: ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 2305843009213693951
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: symbolic max exit count for loop.inc: ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+;
+entry:
+ %a_ = load ptr, ptr %a
+ call void @llvm.assume(i1 true) [ "align"(ptr %a_, i64 8) ]
+ %b_ = load ptr, ptr %b
+ call void @llvm.assume(i1 true) [ "align"(ptr %b_, i64 8) ]
+ %cmp = icmp eq ptr %a_, %b_
+ br i1 %cmp, label %exit, label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a_, %entry ]
+ %ld1 = load ptr, ptr %ptr.iv, align 8
+ %earlyexitcond = icmp eq ptr %ld1, %c
+ br i1 %earlyexitcond, label %exit, label %loop.inc
+
+loop.inc:
+ %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8
+ %exitcond = icmp eq ptr %ptr.iv.next, %b_
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define void @ptr_induction_early_exit_eq_1_align_assumption_2(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: 'ptr_induction_early_exit_eq_1_align_assumption_2'
+; CHECK-NEXT: Classifying expressions for: @ptr_induction_early_exit_eq_1_align_assumption_2
+; CHECK-NEXT: %a_ = load ptr, ptr %a, align 8
+; CHECK-NEXT: --> %a_ U: [0,-7) S: [-9223372036854775808,9223372036854775801)
+; CHECK-NEXT: %b_ = load ptr, ptr %b, align 8
+; CHECK-NEXT: --> %b_ U: [0,-7) S: [-9223372036854775808,9223372036854775801)
+; CHECK-NEXT: %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a_, %entry ]
+; CHECK-NEXT: --> {%a_,+,8}<nuw><%loop> U: [0,-7) S: [-9223372036854775808,9223372036854775801) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: %ld1 = load ptr, ptr %ptr.iv, align 8
+; CHECK-NEXT: --> %ld1 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
+; CHECK-NEXT: %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8
+; CHECK-NEXT: --> {(8 + %a_),+,8}<nw><%loop> U: [0,-7) S: [-9223372036854775808,9223372036854775801) Exits: <<Unknown>> LoopDispositions: { %loop: Computable }
+; CHECK-NEXT: Determining loop execution counts for: @ptr_induction_early_exit_eq_1_align_assumption_2
+; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count.
+; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: exit count for loop.inc: ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 2305843009213693951
+; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE***
+; CHECK-NEXT: symbolic max exit count for loop.inc: ((-8 + (-1 * (ptrtoint ptr %a_ to i64)) + (ptrtoint ptr %b_ to i64)) /u 8)
+;
+entry:
+ %a_ = load ptr, ptr %a
+ %b_ = load ptr, ptr %b
+ call void @llvm.assume(i1 true) [ "align"(ptr %a_, i64 8) ]
+ call void @llvm.assume(i1 true) [ "align"(ptr %b_, i64 8) ]
+ %cmp = icmp eq ptr %a_, %b_
+ br i1 %cmp, label %exit, label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a_, %entry ]
+ %ld1 = load ptr, ptr %ptr.iv, align 8
+ %earlyexitcond = icmp eq ptr %ld1, %c
+ br i1 %earlyexitcond, label %exit, label %loop.inc
+
+loop.inc:
+ %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8
+ %exitcond = icmp eq ptr %ptr.iv.next, %b_
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
define void @ptr_induction_early_exit_eq_2(ptr %a, i64 %n, ptr %c) {
; CHECK-LABEL: 'ptr_induction_early_exit_eq_2'
; CHECK-NEXT: Classifying expressions for: @ptr_induction_early_exit_eq_2
; Variant of @ptr_induction_early_exit_eq_1 with alignment assumptions.

Add a "via llvm.assume"?
define void @ptr_induction_early_exit_eq_1_align_assumption_2(ptr %a, ptr %b, ptr %c) {

Sorry, I didn't quite understand the difference between this test and the previous one: it just seems like the llvm.assume is in a different place? Should that matter?

Yeah, I wondered that too. I understand that the ordering of the assume and the Ctx (%a_) matters to isValidAssumeForContext, but in both tests the assume always comes after the context.

The intention is to make sure we use the alignment assumption even if it does not directly follow the load.
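To put the two shapes side by side, here is a minimal illustrative sketch (not taken from the patch; function names are made up). Assuming isValidAssumeForContext's same-block handling works as described above, both placements can feed the known-bits query, since the assume only has to come after the context instruction with nothing in between that might fail to reach it:

```llvm
; Sketch only: assume immediately after the producing load ...
define void @assume_right_after(ptr %a) {
entry:
  %p = load ptr, ptr %a
  call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 8) ]
  ret void
}

; ... versus the assume a bit later, with another load in between, as in
; @ptr_induction_early_exit_eq_1_align_assumption_2 above.
define void @assume_later(ptr %a, ptr %b) {
entry:
  %p = load ptr, ptr %a
  %q = load ptr, ptr %b
  call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 8) ]
  ret void
}

declare void @llvm.assume(i1)
```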
; Variant of @ptr_induction_early_exit_eq_1 with alignment info via arument attributes.

s/arument/argument/?

Thanks, in the latest version the comment has been dropped, as @ptr_induction_early_exit_eq_1 has been moved to a different file.
Looks like a nice improvement!
%a_ = load ptr, ptr %a, !align !{i64 8}

What's the difference between !align !{i64 8} and align 8? Is the metadata essentially saying the result is aligned to 8 bytes, rather than the input pointer?

align 8 is about the alignment of the pointer argument %a; !align is about the alignment of the return value %a_.

OK, thanks for explaining!
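For anyone else hitting the same question, a small illustrative snippet (not from the patch; names are made up) showing where each form of alignment information attaches:

```llvm
; The parameter attribute 'align 8' states that the argument %a itself is
; 8-byte aligned.
define void @align_forms(ptr align 8 %a) {
entry:
  ; 'align 8' on the load describes the memory access, i.e. the alignment of
  ; the address %a being loaded from.
  %v0 = load ptr, ptr %a, align 8

  ; '!align' metadata on a load of pointer type describes the loaded value:
  ; %v1 is known to be 8-byte aligned.
  %v1 = load ptr, ptr %a, !align !0

  ; An "align" assume bundle states the same kind of fact about an existing
  ; SSA value, here %v1.
  call void @llvm.assume(i1 true) [ "align"(ptr %v1, i64 8) ]
  ret void
}

declare void @llvm.assume(i1)

!0 = !{i64 8}
```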
      if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
          isPowerOf2_64(RK.ArgValue) &&
-         isValidAssumeForContext(I, Q.CxtI, Q.DT))
+         isValidAssumeForContext(I, Q.CxtI, Q.DT, /*AllowEphemerals*/ true))

I think this should be safe, since computeKnownBitsFromContext isn't actually changing any IR here. It's simply using information in the assume intrinsic to figure out the range of pointer values.

Yes, I suppose in the future instcombine (or elsewhere) could use computeKnownBitsFromContext to remove redundant alignment assumptions, thus possibly using information from the assumption to remove the assumption itself. But not an issue for now.
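For reference, a minimal sketch (not from the patch; names are made up) of the situation the flag covers, as I understand it: the known-bits query for %p can use %p's producing load as its context instruction, and since %p is also an operand of the assume's bundle it used to be rejected as ephemeral; with AllowEphemerals the assumption is taken into account. Nothing is rewritten here, the fact is only read back (e.g. by SCEV's getConstantMultipleImpl):

```llvm
define i64 @use_align_assumption(ptr %a) {
entry:
  ; The only alignment fact about %p is the assume that itself uses %p.
  %p = load ptr, ptr %a
  call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 8) ]
  ; A known-bits query on %p (context: the load above) can now conclude the
  ; low three bits are zero, so %i is a multiple of 8.
  %i = ptrtoint ptr %p to i64
  ret i64 %i
}

declare void @llvm.assume(i1)
```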
A reluctant LGTM. I think overall it's reasonable to ignore ephemerals for operand bundles by default and add a flag if/when we actively want to simplify them.
Always a good time to review our long tradition of "trying to fix the ephemeral value problem in SCEV":
https://reviews.llvm.org/D93974
https://reviews.llvm.org/D97077
https://reviews.llvm.org/D97092
https://reviews.llvm.org/D97099
https://reviews.llvm.org/D155389
I think https://reviews.llvm.org/D97092 has our latest understanding of this problem, and the discussion there at the end was leaning towards something along the lines of this PR.
Thanks for sharing the extra context!
Precommit tests from llvm#108632.
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/73/builds/6556. Here is the relevant piece of the build log for reference:
Allow AllowEphemerals in isValidAssumeForContext, as the CxtI might be the producer of the pointer in the bundle. At the moment, align assumptions aren't optimized away. This allows using the assumption in the computeKnownBits call in getConstantMultipleImpl. We could extend the computeKnownBits API to allow callers to specify if ephemerals are allowed, if the info from computeKnownBitsFromContext is used to remove alignment assumptions. PR: llvm#108632