Skip to content

Commit c490658

Browse files
committed
[VPlan] Use skipCostComputation when pre-computing induction costs.
This ensures we skip any instructions identified to be ignored by the legacy cost model as well. Fixes a divergence between the legacy and VPlan-based cost models. Fixes #106417.
1 parent 66927fb commit c490658

File tree

2 files changed

+194
-1
lines changed

2 files changed

+194
-1
lines changed

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7113,14 +7113,15 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
71137113
IVInsts.push_back(CI);
71147114
}
71157115
for (Instruction *IVInst : IVInsts) {
7116-
if (!CostCtx.SkipCostComputation.insert(IVInst).second)
7116+
if (CostCtx.skipCostComputation(IVInst, VF.isVector()))
71177117
continue;
71187118
InstructionCost InductionCost = CostCtx.getLegacyCost(IVInst, VF);
71197119
LLVM_DEBUG({
71207120
dbgs() << "Cost of " << InductionCost << " for VF " << VF
71217121
<< ": induction instruction " << *IVInst << "\n";
71227122
});
71237123
Cost += InductionCost;
7124+
CostCtx.SkipCostComputation.insert(IVInst);
71247125
}
71257126
}
71267127

Lines changed: 192 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,192 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
2+
; RUN: opt -p loop-vectorize -S %s | FileCheck %s
3+
4+
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
5+
target triple = "riscv64-unknown-linux-gnu"
6+
7+
; Test case for https://github.com/llvm/llvm-project/issues/106417.
8+
define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
9+
; CHECK-LABEL: define void @skip_free_iv_truncate(
10+
; CHECK-SAME: i16 [[X:%.*]], ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
11+
; CHECK-NEXT: [[ENTRY:.*]]:
12+
; CHECK-NEXT: [[X_I32:%.*]] = sext i16 [[X]] to i32
13+
; CHECK-NEXT: [[X_I64:%.*]] = sext i16 [[X]] to i64
14+
; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 -8
15+
; CHECK-NEXT: [[SMAX20:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
16+
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[SMAX20]], [[X_I64]]
17+
; CHECK-NEXT: [[UMIN21:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP0]], i64 1)
18+
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[SMAX20]], [[UMIN21]]
19+
; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[X_I64]]
20+
; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
21+
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN21]], [[TMP3]]
22+
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
23+
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
24+
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 8
25+
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umax.i64(i64 288, i64 [[TMP7]])
26+
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP8]]
27+
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
28+
; CHECK: [[VECTOR_SCEVCHECK]]:
29+
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
30+
; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[SMAX]], [[X_I64]]
31+
; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP9]], i64 1)
32+
; CHECK-NEXT: [[TMP10:%.*]] = sub i64 [[SMAX]], [[UMIN]]
33+
; CHECK-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], [[X_I64]]
34+
; CHECK-NEXT: [[TMP12:%.*]] = udiv i64 [[TMP11]], 3
35+
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[UMIN]], [[TMP12]]
36+
; CHECK-NEXT: [[TMP14:%.*]] = shl nsw i64 [[X_I64]], 1
37+
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP14]]
38+
; CHECK-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 6, i64 [[TMP13]])
39+
; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
40+
; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
41+
; CHECK-NEXT: [[TMP15:%.*]] = sub i64 0, [[MUL_RESULT]]
42+
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[MUL_RESULT]]
43+
; CHECK-NEXT: [[TMP17:%.*]] = icmp ult ptr [[TMP16]], [[SCEVGEP]]
44+
; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP17]], [[MUL_OVERFLOW]]
45+
; CHECK-NEXT: [[TMP19:%.*]] = shl nsw i64 [[X_I64]], 3
46+
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP19]]
47+
; CHECK-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 24, i64 [[TMP13]])
48+
; CHECK-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
49+
; CHECK-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
50+
; CHECK-NEXT: [[TMP20:%.*]] = sub i64 0, [[MUL_RESULT3]]
51+
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[SCEVGEP1]], i64 [[MUL_RESULT3]]
52+
; CHECK-NEXT: [[TMP22:%.*]] = icmp ult ptr [[TMP21]], [[SCEVGEP1]]
53+
; CHECK-NEXT: [[TMP23:%.*]] = or i1 [[TMP22]], [[MUL_OVERFLOW4]]
54+
; CHECK-NEXT: [[TMP24:%.*]] = add nsw i64 [[TMP19]], -8
55+
; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP24]]
56+
; CHECK-NEXT: [[MUL6:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 24, i64 [[TMP13]])
57+
; CHECK-NEXT: [[MUL_RESULT7:%.*]] = extractvalue { i64, i1 } [[MUL6]], 0
58+
; CHECK-NEXT: [[MUL_OVERFLOW8:%.*]] = extractvalue { i64, i1 } [[MUL6]], 1
59+
; CHECK-NEXT: [[TMP25:%.*]] = sub i64 0, [[MUL_RESULT7]]
60+
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SCEVGEP5]], i64 [[MUL_RESULT7]]
61+
; CHECK-NEXT: [[TMP27:%.*]] = icmp ult ptr [[TMP26]], [[SCEVGEP5]]
62+
; CHECK-NEXT: [[TMP28:%.*]] = or i1 [[TMP27]], [[MUL_OVERFLOW8]]
63+
; CHECK-NEXT: [[TMP29:%.*]] = or i1 [[TMP18]], [[TMP23]]
64+
; CHECK-NEXT: [[TMP30:%.*]] = or i1 [[TMP29]], [[TMP28]]
65+
; CHECK-NEXT: br i1 [[TMP30]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
66+
; CHECK: [[VECTOR_MEMCHECK]]:
67+
; CHECK-NEXT: [[TMP31:%.*]] = shl nsw i64 [[X_I64]], 1
68+
; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP31]]
69+
; CHECK-NEXT: [[SMAX10:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
70+
; CHECK-NEXT: [[TMP32:%.*]] = sub i64 [[SMAX10]], [[X_I64]]
71+
; CHECK-NEXT: [[UMIN11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP32]], i64 1)
72+
; CHECK-NEXT: [[TMP33:%.*]] = sub i64 [[SMAX10]], [[UMIN11]]
73+
; CHECK-NEXT: [[TMP34:%.*]] = sub i64 [[TMP33]], [[X_I64]]
74+
; CHECK-NEXT: [[TMP35:%.*]] = udiv i64 [[TMP34]], 3
75+
; CHECK-NEXT: [[TMP36:%.*]] = add i64 [[UMIN11]], [[TMP35]]
76+
; CHECK-NEXT: [[TMP37:%.*]] = mul i64 [[TMP36]], 6
77+
; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[TMP37]], [[TMP31]]
78+
; CHECK-NEXT: [[TMP39:%.*]] = add i64 [[TMP38]], 2
79+
; CHECK-NEXT: [[SCEVGEP12:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP39]]
80+
; CHECK-NEXT: [[TMP40:%.*]] = shl nsw i64 [[X_I64]], 3
81+
; CHECK-NEXT: [[SCEVGEP13:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP40]]
82+
; CHECK-NEXT: [[TMP41:%.*]] = mul i64 [[TMP36]], 24
83+
; CHECK-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], [[TMP40]]
84+
; CHECK-NEXT: [[TMP43:%.*]] = add i64 [[TMP42]], 8
85+
; CHECK-NEXT: [[SCEVGEP14:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP43]]
86+
; CHECK-NEXT: [[TMP44:%.*]] = add nsw i64 [[TMP40]], -8
87+
; CHECK-NEXT: [[SCEVGEP15:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP44]]
88+
; CHECK-NEXT: [[SCEVGEP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP42]]
89+
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP9]], [[SCEVGEP14]]
90+
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP13]], [[SCEVGEP12]]
91+
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
92+
; CHECK-NEXT: [[BOUND017:%.*]] = icmp ult ptr [[SCEVGEP9]], [[SCEVGEP16]]
93+
; CHECK-NEXT: [[BOUND118:%.*]] = icmp ult ptr [[SCEVGEP15]], [[SCEVGEP12]]
94+
; CHECK-NEXT: [[FOUND_CONFLICT19:%.*]] = and i1 [[BOUND017]], [[BOUND118]]
95+
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT19]]
96+
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
97+
; CHECK: [[VECTOR_PH]]:
98+
; CHECK-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
99+
; CHECK-NEXT: [[TMP46:%.*]] = mul i64 [[TMP45]], 8
100+
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP46]]
101+
; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
102+
; CHECK-NEXT: [[TMP48:%.*]] = select i1 [[TMP47]], i64 [[TMP46]], i64 [[N_MOD_VF]]
103+
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP48]]
104+
; CHECK-NEXT: [[TMP49:%.*]] = mul i64 [[N_VEC]], 3
105+
; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[X_I64]], [[TMP49]]
106+
; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
107+
; CHECK-NEXT: [[TMP50:%.*]] = mul i32 [[DOTCAST]], 3
108+
; CHECK-NEXT: [[IND_END22:%.*]] = add i32 [[X_I32]], [[TMP50]]
109+
; CHECK-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
110+
; CHECK-NEXT: [[TMP52:%.*]] = mul i64 [[TMP51]], 8
111+
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X_I64]], i64 0
112+
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
113+
; CHECK-NEXT: [[TMP53:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
114+
; CHECK-NEXT: [[TMP54:%.*]] = add <vscale x 8 x i64> [[TMP53]], zeroinitializer
115+
; CHECK-NEXT: [[TMP55:%.*]] = mul <vscale x 8 x i64> [[TMP54]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
116+
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP55]]
117+
; CHECK-NEXT: [[TMP56:%.*]] = call i64 @llvm.vscale.i64()
118+
; CHECK-NEXT: [[TMP57:%.*]] = mul i64 [[TMP56]], 8
119+
; CHECK-NEXT: [[TMP58:%.*]] = mul i64 3, [[TMP57]]
120+
; CHECK-NEXT: [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0
121+
; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
122+
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
123+
; CHECK: [[VECTOR_BODY]]:
124+
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
125+
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
126+
; CHECK-NEXT: [[TMP59:%.*]] = getelementptr i16, ptr [[A]], <vscale x 8 x i64> [[VEC_IND]]
127+
; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP59]], i32 2, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
128+
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP52]]
129+
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT25]]
130+
; CHECK-NEXT: [[TMP60:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
131+
; CHECK-NEXT: br i1 [[TMP60]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
132+
; CHECK: [[MIDDLE_BLOCK]]:
133+
; CHECK-NEXT: br label %[[SCALAR_PH]]
134+
; CHECK: [[SCALAR_PH]]:
135+
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_SCEVCHECK]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
136+
; CHECK-NEXT: [[BC_RESUME_VAL23:%.*]] = phi i32 [ [[IND_END22]], %[[MIDDLE_BLOCK]] ], [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_SCEVCHECK]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
137+
; CHECK-NEXT: br label %[[LOOP:.*]]
138+
; CHECK: [[LOOP]]:
139+
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
140+
; CHECK-NEXT: [[IV_CONV:%.*]] = phi i32 [ [[BC_RESUME_VAL23]], %[[SCALAR_PH]] ], [ [[TMP64:%.*]], %[[LOOP]] ]
141+
; CHECK-NEXT: [[GEP_I64:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
142+
; CHECK-NEXT: [[TMP61:%.*]] = load i64, ptr [[GEP_I64]], align 8
143+
; CHECK-NEXT: [[TMP62:%.*]] = sext i32 [[IV_CONV]] to i64
144+
; CHECK-NEXT: [[GEP_CONV:%.*]] = getelementptr i64, ptr [[INVARIANT_GEP]], i64 [[TMP62]]
145+
; CHECK-NEXT: [[TMP63:%.*]] = load i64, ptr [[GEP_CONV]], align 8
146+
; CHECK-NEXT: [[GEP_I16:%.*]] = getelementptr i16, ptr [[A]], i64 [[IV]]
147+
; CHECK-NEXT: store i16 0, ptr [[GEP_I16]], align 2
148+
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
149+
; CHECK-NEXT: [[TMP64]] = trunc i64 [[IV_NEXT]] to i32
150+
; CHECK-NEXT: [[C:%.*]] = icmp slt i64 [[IV]], 99
151+
; CHECK-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]]
152+
; CHECK: [[EXIT]]:
153+
; CHECK-NEXT: ret void
154+
;
155+
entry:
156+
%x.i32 = sext i16 %x to i32
157+
%x.i64 = sext i16 %x to i64
158+
%invariant.gep = getelementptr i8, ptr %A, i64 -8
159+
br label %loop
160+
161+
loop:
162+
%iv = phi i64 [ %x.i64, %entry ], [ %iv.next, %loop ]
163+
%iv.conv = phi i32 [ %x.i32, %entry ], [ %5, %loop ]
164+
%gep.i64 = getelementptr i64, ptr %A, i64 %iv
165+
%2 = load i64, ptr %gep.i64, align 8
166+
%3 = sext i32 %iv.conv to i64
167+
%gep.conv = getelementptr i64, ptr %invariant.gep, i64 %3
168+
%4 = load i64, ptr %gep.conv, align 8
169+
%gep.i16 = getelementptr i16, ptr %A, i64 %iv
170+
store i16 0, ptr %gep.i16, align 2
171+
%iv.next = add i64 %iv, 3
172+
%5 = trunc i64 %iv.next to i32
173+
%c = icmp slt i64 %iv, 99
174+
br i1 %c, label %loop, label %exit
175+
176+
exit:
177+
ret void
178+
}
179+
180+
attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
181+
;.
182+
; CHECK: [[META0]] = !{[[META1:![0-9]+]]}
183+
; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
184+
; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
185+
; CHECK: [[META3]] = !{[[META4:![0-9]+]], [[META5:![0-9]+]]}
186+
; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
187+
; CHECK: [[META5]] = distinct !{[[META5]], [[META2]]}
188+
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
189+
; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1}
190+
; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
191+
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]]}
192+
;.

0 commit comments

Comments
 (0)