Commit ea4a119

Reapply "[CodeGen] Fix register pressure computation in MachinePipeli… (#87312)
…ner (#87030)" Fix broken test. This reverts commit b8ead21.
1 parent 04dbf7a commit ea4a119

3 files changed: +182 -166 lines changed
llvm/lib/CodeGen/MachinePipeliner.cpp

Lines changed: 1 addition & 1 deletion
@@ -1268,7 +1268,7 @@ class HighRegisterPressureDetector {
   // Calculate the upper limit of each pressure set
   void computePressureSetLimit(const RegisterClassInfo &RCI) {
     for (unsigned PSet = 0; PSet < PSetNum; PSet++)
-      PressureSetLimit[PSet] = RCI.getRegPressureSetLimit(PSet);
+      PressureSetLimit[PSet] = TRI->getRegPressureSetLimit(MF, PSet);
 
     // We assume fixed registers, such as stack pointer, are already in use.
     // Therefore subtracting the weight of the fixed registers from the limit of
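
For context: judging from the comment kept in this hunk, the detector subtracts the weight of fixed registers from the limit itself, while the limit returned by RegisterClassInfo is already reduced by reserved registers; switching to the raw target limit from TRI->getRegPressureSetLimit(MF, PSet) avoids subtracting that weight twice. The standalone sketch below (not LLVM code) illustrates the arithmetic with made-up numbers; RawTargetLimit and FixedRegWeight are placeholders, not LLVM APIs.

// Standalone illustration of the double-subtraction hazard; all values invented.
#include <cstdio>

int main() {
  unsigned RawTargetLimit = 32; // stands in for TRI->getRegPressureSetLimit(MF, PSet)
  unsigned FixedRegWeight = 3;  // stands in for the weight of fixed registers (e.g. the stack pointer)

  // A limit in the style of RegisterClassInfo: already reduced by reserved registers.
  unsigned AdjustedLimit = RawTargetLimit - FixedRegWeight;

  // Starting from the adjusted limit and subtracting again under-reports the budget.
  unsigned BudgetFromAdjusted = AdjustedLimit - FixedRegWeight; // 26

  // Starting from the raw limit and subtracting once gives the intended budget.
  unsigned BudgetFromRaw = RawTargetLimit - FixedRegWeight;     // 29

  std::printf("from adjusted limit: %u, from raw limit: %u\n",
              BudgetFromAdjusted, BudgetFromRaw);
  return 0;
}
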
Lines changed: 160 additions & 0 deletions
@@ -0,0 +1,160 @@
# RUN: llc --verify-machineinstrs -mtriple=aarch64 -o - %s -run-pass pipeliner -aarch64-enable-pipeliner -pipeliner-max-mii=40 -pipeliner-register-pressure -pipeliner-ii-search-range=30 -debug-only=pipeliner 2>&1 | FileCheck %s

# REQUIRES: asserts

# Check that if the register pressure is too high, the schedule is rejected, II is incremented, and scheduling continues.
# The specific value of II is not important.

# CHECK: {{^ *}}Try to schedule with {{[0-9]+$}}
# CHECK: {{^ *}}Rejected the schedule because of too high register pressure{{$}}
# CHECK: {{^ *}}Try to schedule with {{[0-9]+$}}
# CHECK: {{^ *}}Schedule Found? 1 (II={{[0-9]+}}){{$}}

--- |
  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"

  define dso_local double @kernel(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %n) local_unnamed_addr {
  entry:
    %0 = load double, ptr %a, align 8
    %arrayidx1 = getelementptr inbounds i8, ptr %a, i64 8
    %1 = load double, ptr %arrayidx1, align 8
    %cmp133 = icmp sgt i32 %n, 0
    br i1 %cmp133, label %for.body.preheader, label %for.cond.cleanup

  for.body.preheader:                               ; preds = %entry
    %wide.trip.count = zext nneg i32 %n to i64
    br label %for.body

  for.cond.cleanup:                                 ; preds = %for.body, %entry
    %res.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add54, %for.body ]
    ret double %res.0.lcssa

  for.body:                                         ; preds = %for.body.preheader, %for.body
    %lsr.iv137 = phi i64 [ %wide.trip.count, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
    %lsr.iv = phi ptr [ %b, %for.body.preheader ], [ %scevgep, %for.body ]
    %res.0135 = phi double [ 0.000000e+00, %for.body.preheader ], [ %add54, %for.body ]
    %2 = load double, ptr %lsr.iv, align 8
    %3 = tail call double @llvm.fmuladd.f64(double %0, double %2, double %0)
    %4 = tail call double @llvm.fmuladd.f64(double %3, double %2, double %3)
    %5 = tail call double @llvm.fmuladd.f64(double %4, double %2, double %4)
    %6 = tail call double @llvm.fmuladd.f64(double %5, double %2, double %5)
    %7 = tail call double @llvm.fmuladd.f64(double %6, double %2, double %6)
    %8 = tail call double @llvm.fmuladd.f64(double %7, double %2, double %7)
    %9 = tail call double @llvm.fmuladd.f64(double %8, double %2, double %8)
    %10 = tail call double @llvm.fmuladd.f64(double %9, double %2, double %9)
    %11 = tail call double @llvm.fmuladd.f64(double %10, double %2, double %10)
    %12 = tail call double @llvm.fmuladd.f64(double %11, double %2, double %11)
    %13 = tail call double @llvm.fmuladd.f64(double %12, double %2, double %12)
    %14 = tail call double @llvm.fmuladd.f64(double %13, double %2, double %13)
    %15 = tail call double @llvm.fmuladd.f64(double %14, double %2, double %14)
    %16 = tail call double @llvm.fmuladd.f64(double %15, double %2, double %15)
    %17 = tail call double @llvm.fmuladd.f64(double %16, double %2, double %16)
    %18 = tail call double @llvm.fmuladd.f64(double %17, double %2, double %17)
    %add = fadd double %17, %18
    %19 = tail call double @llvm.fmuladd.f64(double %18, double %2, double %add)
    %add35 = fadd double %10, %19
    %20 = tail call double @llvm.fmuladd.f64(double %3, double %2, double %add35)
    %add38 = fadd double %11, %20
    %21 = tail call double @llvm.fmuladd.f64(double %4, double %2, double %add38)
    %add41 = fadd double %12, %21
    %22 = tail call double @llvm.fmuladd.f64(double %5, double %2, double %add41)
    %add44 = fadd double %14, %15
    %add45 = fadd double %13, %add44
    %add46 = fadd double %add45, %22
    %23 = tail call double @llvm.fmuladd.f64(double %6, double %2, double %add46)
    %mul = fmul double %2, %7
    %mul51 = fmul double %1, %mul
    %24 = tail call double @llvm.fmuladd.f64(double %mul51, double %9, double %23)
    %25 = tail call double @llvm.fmuladd.f64(double %8, double %1, double %24)
    %add54 = fadd double %res.0135, %25
    %scevgep = getelementptr i8, ptr %lsr.iv, i64 8
    %lsr.iv.next = add nsw i64 %lsr.iv137, -1
    %exitcond.not = icmp eq i64 %lsr.iv.next, 0
    br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
  }

  declare double @llvm.fmuladd.f64(double, double, double)

...
---
name: kernel
tracksRegLiveness: true
liveins:
  - { reg: '$x0', virtual-reg: '%10' }
  - { reg: '$x1', virtual-reg: '%11' }
  - { reg: '$w2', virtual-reg: '%12' }
body: |
  bb.0.entry:
    successors: %bb.1, %bb.4
    liveins: $x0, $x1, $w2

    %12:gpr32common = COPY $w2
    %11:gpr64 = COPY $x1
    %10:gpr64common = COPY $x0
    dead $wzr = SUBSWri %12, 1, 0, implicit-def $nzcv
    Bcc 10, %bb.1, implicit $nzcv

  bb.4:
    %13:fpr64 = FMOVD0
    B %bb.2

  bb.1.for.body.preheader:
    %0:fpr64 = LDRDui %10, 0 :: (load (s64) from %ir.a)
    %1:fpr64 = LDRDui %10, 1 :: (load (s64) from %ir.arrayidx1)
    %16:gpr32 = ORRWrs $wzr, %12, 0
    %2:gpr64all = SUBREG_TO_REG 0, killed %16, %subreg.sub_32
    %15:fpr64 = FMOVD0
    B %bb.3

  bb.2.for.cond.cleanup:
    %3:fpr64 = PHI %13, %bb.4, %7, %bb.3
    $d0 = COPY %3
    RET_ReallyLR implicit $d0

  bb.3.for.body:
    successors: %bb.2, %bb.3

    %4:gpr64sp = PHI %2, %bb.1, %9, %bb.3
    %5:gpr64sp = PHI %11, %bb.1, %8, %bb.3
    %6:fpr64 = PHI %15, %bb.1, %7, %bb.3
    early-clobber %17:gpr64sp, %18:fpr64 = LDRDpost %5, 8 :: (load (s64) from %ir.lsr.iv)
    %19:fpr64 = nofpexcept FMADDDrrr %0, %18, %0, implicit $fpcr
    %20:fpr64 = nofpexcept FMADDDrrr %19, %18, %19, implicit $fpcr
    %21:fpr64 = nofpexcept FMADDDrrr %20, %18, %20, implicit $fpcr
    %22:fpr64 = nofpexcept FMADDDrrr %21, %18, %21, implicit $fpcr
    %23:fpr64 = nofpexcept FMADDDrrr %22, %18, %22, implicit $fpcr
    %24:fpr64 = nofpexcept FMADDDrrr %23, %18, %23, implicit $fpcr
    %25:fpr64 = nofpexcept FMADDDrrr %24, %18, %24, implicit $fpcr
    %26:fpr64 = nofpexcept FMADDDrrr %25, %18, %25, implicit $fpcr
    %27:fpr64 = nofpexcept FMADDDrrr %26, %18, %26, implicit $fpcr
    %28:fpr64 = nofpexcept FMADDDrrr %27, %18, %27, implicit $fpcr
    %29:fpr64 = nofpexcept FMADDDrrr %28, %18, %28, implicit $fpcr
    %30:fpr64 = nofpexcept FMADDDrrr %29, %18, %29, implicit $fpcr
    %31:fpr64 = nofpexcept FMADDDrrr %30, %18, %30, implicit $fpcr
    %32:fpr64 = nofpexcept FMADDDrrr %31, %18, %31, implicit $fpcr
    %33:fpr64 = nofpexcept FMADDDrrr %32, %18, %32, implicit $fpcr
    %34:fpr64 = nofpexcept FMADDDrrr %33, %18, %33, implicit $fpcr
    %35:fpr64 = nofpexcept FADDDrr %33, %34, implicit $fpcr
    %36:fpr64 = nofpexcept FMADDDrrr %34, %18, killed %35, implicit $fpcr
    %37:fpr64 = nofpexcept FADDDrr %26, killed %36, implicit $fpcr
    %38:fpr64 = nofpexcept FMADDDrrr %19, %18, killed %37, implicit $fpcr
    %39:fpr64 = nofpexcept FADDDrr %27, killed %38, implicit $fpcr
    %40:fpr64 = nofpexcept FMADDDrrr %20, %18, killed %39, implicit $fpcr
    %41:fpr64 = nofpexcept FADDDrr %28, killed %40, implicit $fpcr
    %42:fpr64 = nofpexcept FMADDDrrr %21, %18, killed %41, implicit $fpcr
    %43:fpr64 = nofpexcept FADDDrr %30, %31, implicit $fpcr
    %44:fpr64 = nofpexcept FADDDrr %29, killed %43, implicit $fpcr
    %45:fpr64 = nofpexcept FADDDrr killed %44, killed %42, implicit $fpcr
    %46:fpr64 = nofpexcept FMADDDrrr %22, %18, killed %45, implicit $fpcr
    %47:fpr64 = nofpexcept FMULDrr %18, %23, implicit $fpcr
    %48:fpr64 = nofpexcept FMULDrr %1, killed %47, implicit $fpcr
    %49:fpr64 = nofpexcept FMADDDrrr killed %48, %25, killed %46, implicit $fpcr
    %50:fpr64 = nofpexcept FMADDDrrr %24, %1, killed %49, implicit $fpcr
    %7:fpr64 = nofpexcept FADDDrr %6, killed %50, implicit $fpcr
    %8:gpr64all = COPY %17
    %51:gpr64 = nsw SUBSXri %4, 1, 0, implicit-def $nzcv
    %9:gpr64all = COPY %51
    Bcc 0, %bb.2, implicit $nzcv
    B %bb.3

...
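
The CHECK lines at the top of the new test describe the pipeliner's retry behaviour: a candidate II is tried, a schedule whose register pressure is too high is rejected, and the search continues with a larger II until one is accepted. Below is a rough standalone sketch of that loop; the types and helpers (tryScheduleWithII, registerPressureTooHigh) are placeholders, not the MachinePipeliner implementation, and the II range and threshold are invented.

// Conceptual sketch of the try/reject/retry search the test checks for.
#include <cstdio>
#include <optional>

struct Schedule { unsigned II; };

// Placeholder: stands in for building a modulo schedule at a given II.
std::optional<Schedule> tryScheduleWithII(unsigned II) {
  std::printf("Try to schedule with %u\n", II);
  return Schedule{II};
}

// Placeholder: stands in for the HighRegisterPressureDetector check.
bool registerPressureTooHigh(const Schedule &S) { return S.II < 25; }

int main() {
  const unsigned MII = 22, MaxII = 52; // made-up search range
  for (unsigned II = MII; II <= MaxII; ++II) {
    std::optional<Schedule> S = tryScheduleWithII(II);
    if (!S)
      continue; // no feasible schedule at this II, widen and retry
    if (registerPressureTooHigh(*S)) {
      std::printf("Rejected the schedule because of too high register pressure\n");
      continue; // reject and retry with a larger II
    }
    std::printf("Schedule Found? 1 (II=%u)\n", II);
    return 0;
  }
  std::printf("Schedule Found? 0\n");
  return 1;
}
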
