Commit 3a5cf6d

[X86] Rename AVX512 VEXTRACT/INSERT??x? to VEXTRACT/INSERT??X? (#116826)
Use uppercase in the subvector description ("32x4" -> "32X4", etc.). This matches what we already do in VBROADCAST??X?, and we try to use uppercase for all x86 instruction mnemonics anyway (lowercase is used only for the argument-description suffix).
1 parent 0394e08 · commit 3a5cf6d

19 files changed: +272, -272 lines
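For illustration only (not part of this commit): a minimal C++ sketch of the mechanical update that code referencing these opcode enums needs after the rename. extractSubvecOpcode is a hypothetical helper, and the sketch assumes the X86:: opcode enum from the generated X86 instruction info is in scope.

// Hypothetical helper, shown only to illustrate the rename; assumes the
// X86:: opcode enum (from the generated X86 instruction info) is in scope.
static unsigned extractSubvecOpcode(unsigned SrcBits, unsigned DstBits) {
  if (SrcBits == 512 && DstBits == 256)
    return X86::VEXTRACTF64X4Zrri;   // was X86::VEXTRACTF64x4Zrri
  if (SrcBits == 512 && DstBits == 128)
    return X86::VEXTRACTF32X4Zrri;   // was X86::VEXTRACTF32x4Zrri
  return X86::VEXTRACTF32X4Z256rri;  // 256 -> 128 case; was X86::VEXTRACTF32x4Z256rri
}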

llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp

Lines changed: 6 additions & 6 deletions
@@ -1254,16 +1254,16 @@ bool X86InstructionSelector::selectExtract(MachineInstr &I,

   if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
     if (HasVLX)
-      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rri));
+      I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));
     else if (HasAVX)
       I.setDesc(TII.get(X86::VEXTRACTF128rri));
     else
       return false;
   } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
     if (DstTy.getSizeInBits() == 128)
-      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrri));
+      I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));
     else if (DstTy.getSizeInBits() == 256)
-      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrri));
+      I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));
     else
       return false;
   } else
@@ -1387,16 +1387,16 @@ bool X86InstructionSelector::selectInsert(MachineInstr &I,

   if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
     if (HasVLX)
-      I.setDesc(TII.get(X86::VINSERTF32x4Z256rri));
+      I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));
     else if (HasAVX)
       I.setDesc(TII.get(X86::VINSERTF128rri));
     else
       return false;
   } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
     if (InsertRegTy.getSizeInBits() == 128)
-      I.setDesc(TII.get(X86::VINSERTF32x4Zrri));
+      I.setDesc(TII.get(X86::VINSERTF32X4Zrri));
     else if (InsertRegTy.getSizeInBits() == 256)
-      I.setDesc(TII.get(X86::VINSERTF64x4Zrri));
+      I.setDesc(TII.get(X86::VINSERTF64X4Zrri));
     else
       return false;
   } else

llvm/lib/Target/X86/X86InstrAVX512.td

Lines changed: 103 additions & 103 deletions
Large diffs are not rendered by default.

llvm/lib/Target/X86/X86InstrInfo.cpp

Lines changed: 4 additions & 4 deletions
@@ -6293,16 +6293,16 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
                            get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
   case X86::VMOVAPSZ128mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
-                            get(X86::VEXTRACTF32x4Zmri), X86::sub_xmm);
+                            get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
   case X86::VMOVUPSZ128mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
-                            get(X86::VEXTRACTF32x4Zmri), X86::sub_xmm);
+                            get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
   case X86::VMOVAPSZ256mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
-                            get(X86::VEXTRACTF64x4Zmri), X86::sub_ymm);
+                            get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
   case X86::VMOVUPSZ256mr_NOVLX:
     return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
-                            get(X86::VEXTRACTF64x4Zmri), X86::sub_ymm);
+                            get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
   case X86::MOV32ri64: {
     Register Reg = MIB.getReg(0);
     Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);

llvm/lib/Target/X86/X86ReplaceableInstrs.def

Lines changed: 24 additions & 24 deletions
@@ -110,30 +110,30 @@ ENTRY(VBROADCASTSDZ256rr, VBROADCASTSDZ256rr, VPBROADCASTQZ256rr)
 ENTRY(VBROADCASTSDZ256rm, VBROADCASTSDZ256rm, VPBROADCASTQZ256rm)
 ENTRY(VBROADCASTSDZrr, VBROADCASTSDZrr, VPBROADCASTQZrr)
 ENTRY(VBROADCASTSDZrm, VBROADCASTSDZrm, VPBROADCASTQZrm)
-ENTRY(VINSERTF32x4Zrri, VINSERTF32x4Zrri, VINSERTI32x4Zrri)
-ENTRY(VINSERTF32x4Zrmi, VINSERTF32x4Zrmi, VINSERTI32x4Zrmi)
-ENTRY(VINSERTF32x8Zrri, VINSERTF32x8Zrri, VINSERTI32x8Zrri)
-ENTRY(VINSERTF32x8Zrmi, VINSERTF32x8Zrmi, VINSERTI32x8Zrmi)
-ENTRY(VINSERTF64x2Zrri, VINSERTF64x2Zrri, VINSERTI64x2Zrri)
-ENTRY(VINSERTF64x2Zrmi, VINSERTF64x2Zrmi, VINSERTI64x2Zrmi)
-ENTRY(VINSERTF64x4Zrri, VINSERTF64x4Zrri, VINSERTI64x4Zrri)
-ENTRY(VINSERTF64x4Zrmi, VINSERTF64x4Zrmi, VINSERTI64x4Zrmi)
-ENTRY(VINSERTF32x4Z256rri, VINSERTF32x4Z256rri, VINSERTI32x4Z256rri)
-ENTRY(VINSERTF32x4Z256rmi, VINSERTF32x4Z256rmi, VINSERTI32x4Z256rmi)
-ENTRY(VINSERTF64x2Z256rri, VINSERTF64x2Z256rri, VINSERTI64x2Z256rri)
-ENTRY(VINSERTF64x2Z256rmi, VINSERTF64x2Z256rmi, VINSERTI64x2Z256rmi)
-ENTRY(VEXTRACTF32x4Zrri, VEXTRACTF32x4Zrri, VEXTRACTI32x4Zrri)
-ENTRY(VEXTRACTF32x4Zmri, VEXTRACTF32x4Zmri, VEXTRACTI32x4Zmri)
-ENTRY(VEXTRACTF32x8Zrri, VEXTRACTF32x8Zrri, VEXTRACTI32x8Zrri)
-ENTRY(VEXTRACTF32x8Zmri, VEXTRACTF32x8Zmri, VEXTRACTI32x8Zmri)
-ENTRY(VEXTRACTF64x2Zrri, VEXTRACTF64x2Zrri, VEXTRACTI64x2Zrri)
-ENTRY(VEXTRACTF64x2Zmri, VEXTRACTF64x2Zmri, VEXTRACTI64x2Zmri)
-ENTRY(VEXTRACTF64x4Zrri, VEXTRACTF64x4Zrri, VEXTRACTI64x4Zrri)
-ENTRY(VEXTRACTF64x4Zmri, VEXTRACTF64x4Zmri, VEXTRACTI64x4Zmri)
-ENTRY(VEXTRACTF32x4Z256rri, VEXTRACTF32x4Z256rri, VEXTRACTI32x4Z256rri)
-ENTRY(VEXTRACTF32x4Z256mri, VEXTRACTF32x4Z256mri, VEXTRACTI32x4Z256mri)
-ENTRY(VEXTRACTF64x2Z256rri, VEXTRACTF64x2Z256rri, VEXTRACTI64x2Z256rri)
-ENTRY(VEXTRACTF64x2Z256mri, VEXTRACTF64x2Z256mri, VEXTRACTI64x2Z256mri)
+ENTRY(VINSERTF32X4Zrri, VINSERTF32X4Zrri, VINSERTI32X4Zrri)
+ENTRY(VINSERTF32X4Zrmi, VINSERTF32X4Zrmi, VINSERTI32X4Zrmi)
+ENTRY(VINSERTF32X8Zrri, VINSERTF32X8Zrri, VINSERTI32X8Zrri)
+ENTRY(VINSERTF32X8Zrmi, VINSERTF32X8Zrmi, VINSERTI32X8Zrmi)
+ENTRY(VINSERTF64X2Zrri, VINSERTF64X2Zrri, VINSERTI64X2Zrri)
+ENTRY(VINSERTF64X2Zrmi, VINSERTF64X2Zrmi, VINSERTI64X2Zrmi)
+ENTRY(VINSERTF64X4Zrri, VINSERTF64X4Zrri, VINSERTI64X4Zrri)
+ENTRY(VINSERTF64X4Zrmi, VINSERTF64X4Zrmi, VINSERTI64X4Zrmi)
+ENTRY(VINSERTF32X4Z256rri, VINSERTF32X4Z256rri, VINSERTI32X4Z256rri)
+ENTRY(VINSERTF32X4Z256rmi, VINSERTF32X4Z256rmi, VINSERTI32X4Z256rmi)
+ENTRY(VINSERTF64X2Z256rri, VINSERTF64X2Z256rri, VINSERTI64X2Z256rri)
+ENTRY(VINSERTF64X2Z256rmi, VINSERTF64X2Z256rmi, VINSERTI64X2Z256rmi)
+ENTRY(VEXTRACTF32X4Zrri, VEXTRACTF32X4Zrri, VEXTRACTI32X4Zrri)
+ENTRY(VEXTRACTF32X4Zmri, VEXTRACTF32X4Zmri, VEXTRACTI32X4Zmri)
+ENTRY(VEXTRACTF32X8Zrri, VEXTRACTF32X8Zrri, VEXTRACTI32X8Zrri)
+ENTRY(VEXTRACTF32X8Zmri, VEXTRACTF32X8Zmri, VEXTRACTI32X8Zmri)
+ENTRY(VEXTRACTF64X2Zrri, VEXTRACTF64X2Zrri, VEXTRACTI64X2Zrri)
+ENTRY(VEXTRACTF64X2Zmri, VEXTRACTF64X2Zmri, VEXTRACTI64X2Zmri)
+ENTRY(VEXTRACTF64X4Zrri, VEXTRACTF64X4Zrri, VEXTRACTI64X4Zrri)
+ENTRY(VEXTRACTF64X4Zmri, VEXTRACTF64X4Zmri, VEXTRACTI64X4Zmri)
+ENTRY(VEXTRACTF32X4Z256rri, VEXTRACTF32X4Z256rri, VEXTRACTI32X4Z256rri)
+ENTRY(VEXTRACTF32X4Z256mri, VEXTRACTF32X4Z256mri, VEXTRACTI32X4Z256mri)
+ENTRY(VEXTRACTF64X2Z256rri, VEXTRACTF64X2Z256rri, VEXTRACTI64X2Z256rri)
+ENTRY(VEXTRACTF64X2Z256mri, VEXTRACTF64X2Z256mri, VEXTRACTI64X2Z256mri)
 ENTRY(VPERMILPSmi, VPERMILPSmi, VPSHUFDmi)
 ENTRY(VPERMILPSri, VPERMILPSri, VPSHUFDri)
 ENTRY(VPERMILPSZ128mi, VPERMILPSZ128mi, VPSHUFDZ128mi)

llvm/lib/Target/X86/X86SchedIceLake.td

Lines changed: 8 additions & 8 deletions
@@ -1591,14 +1591,14 @@ def: InstRW<[ICXWriteResGroup121, ReadAfterVecYLd],
                        "VBROADCASTI64X4Zrm(b?)",
                        "VBROADCASTSD(Z|Z256)rm(b?)",
                        "VBROADCASTSS(Z|Z256)rm(b?)",
-                       "VINSERTF32x4(Z|Z256)rm(b?)",
-                       "VINSERTF32x8Zrm(b?)",
-                       "VINSERTF64x2(Z|Z256)rm(b?)",
-                       "VINSERTF64x4Zrm(b?)",
-                       "VINSERTI32x4(Z|Z256)rm(b?)",
-                       "VINSERTI32x8Zrm(b?)",
-                       "VINSERTI64x2(Z|Z256)rm(b?)",
-                       "VINSERTI64x4Zrm(b?)",
+                       "VINSERTF32X4(Z|Z256)rm(b?)",
+                       "VINSERTF32X8Zrm(b?)",
+                       "VINSERTF64X2(Z|Z256)rm(b?)",
+                       "VINSERTF64X4Zrm(b?)",
+                       "VINSERTI32X4(Z|Z256)rm(b?)",
+                       "VINSERTI32X8Zrm(b?)",
+                       "VINSERTI64X2(Z|Z256)rm(b?)",
+                       "VINSERTI64X4Zrm(b?)",
                        "VMOVAPD(Z|Z256)rm(b?)",
                        "VMOVAPS(Z|Z256)rm(b?)",
                        "VMOVDDUP(Z|Z256)rm(b?)",

llvm/lib/Target/X86/X86SchedSapphireRapids.td

Lines changed: 3 additions & 3 deletions
@@ -1666,8 +1666,8 @@ def : InstRW<[SPRWriteResGroup131], (instregex "^VBROADCAST(F|I)32X(8|2)Zrmk(z?)
                                                "^VMOVDQ(A|U)(32|64)Zrmk(z?)$",
                                                "^VPBROADCAST(D|Q)Zrmk(z?)$")>;
 def : InstRW<[SPRWriteResGroup131, ReadAfterVecLd], (instregex "^MMX_P(ADD|SUB)(B|D|Q|W)rm$")>;
-def : InstRW<[SPRWriteResGroup131, ReadAfterVecYLd], (instregex "^VINSERT(F|I)(32|64)x4Zrmi((k|kz)?)$",
-                                                                "^VINSERT(F|I)(32x8|64x2)Zrmi((k|kz)?)$",
+def : InstRW<[SPRWriteResGroup131, ReadAfterVecYLd], (instregex "^VINSERT(F|I)(32|64)X4Zrmi((k|kz)?)$",
+                                                                "^VINSERT(F|I)(32X8|64X2)Zrmi((k|kz)?)$",
                                                                 "^VP(ADD|SUB)(B|D|Q|W)Zrm$",
                                                                 "^VP(ADD|SUB)(D|Q)Zrm(b|k|kz)$",
                                                                 "^VP(ADD|SUB)(D|Q)Zrmbk(z?)$",
@@ -2710,7 +2710,7 @@ def : InstRW<[SPRWriteResGroup262], (instregex "^VBROADCAST(F|I)32X(2|4)Z256rmk(
                                                "^VMOVDQ(A|U)(32|64)Z256rmk(z?)$",
                                                "^VPBROADCAST(D|Q)Z256rmk(z?)$")>;
 def : InstRW<[SPRWriteResGroup262, ReadAfterVecYLd], (instregex "^VINSERT(F|I)128rmi$",
-                                                                "^VINSERT(F|I)(32x4|64x2)Z256rmi((k|kz)?)$",
+                                                                "^VINSERT(F|I)(32X4|64X2)Z256rmi((k|kz)?)$",
                                                                 "^VP(ADD|SUB)(B|D|Q|W)(Y|Z256)rm$",
                                                                 "^VP(ADD|SUB)(D|Q)Z256rm(b|k|kz)$",
                                                                 "^VP(ADD|SUB)(D|Q)Z256rmbk(z?)$",

llvm/lib/Target/X86/X86SchedSkylakeServer.td

Lines changed: 8 additions & 8 deletions
@@ -1562,14 +1562,14 @@ def: InstRW<[SKXWriteResGroup121, ReadAfterVecYLd],
                        "VBROADCASTI64X4Zrm(b?)",
                        "VBROADCASTSD(Z|Z256)rm(b?)",
                        "VBROADCASTSS(Z|Z256)rm(b?)",
-                       "VINSERTF32x4(Z|Z256)rm(b?)",
-                       "VINSERTF32x8Zrm(b?)",
-                       "VINSERTF64x2(Z|Z256)rm(b?)",
-                       "VINSERTF64x4Zrm(b?)",
-                       "VINSERTI32x4(Z|Z256)rm(b?)",
-                       "VINSERTI32x8Zrm(b?)",
-                       "VINSERTI64x2(Z|Z256)rm(b?)",
-                       "VINSERTI64x4Zrm(b?)",
+                       "VINSERTF32X4(Z|Z256)rm(b?)",
+                       "VINSERTF32X8Zrm(b?)",
+                       "VINSERTF64X2(Z|Z256)rm(b?)",
+                       "VINSERTF64X4Zrm(b?)",
+                       "VINSERTI32X4(Z|Z256)rm(b?)",
+                       "VINSERTI32X8Zrm(b?)",
+                       "VINSERTI64X2(Z|Z256)rm(b?)",
+                       "VINSERTI64X4Zrm(b?)",
                        "VMOVAPD(Z|Z256)rm(b?)",
                        "VMOVAPS(Z|Z256)rm(b?)",
                        "VMOVDDUP(Z|Z256)rm(b?)",

llvm/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ registers:
 # AVX-NEXT: RET 0, implicit $xmm0
 #
 # AVX512VL: %0:vr256x = COPY $ymm1
-# AVX512VL-NEXT: %1:vr128x = VEXTRACTF32x4Z256rri %0, 1
+# AVX512VL-NEXT: %1:vr128x = VEXTRACTF32X4Z256rri %0, 1
 # AVX512VL-NEXT: $xmm0 = COPY %1
 # AVX512VL-NEXT: RET 0, implicit $xmm0
 body: |

llvm/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir

Lines changed: 2 additions & 2 deletions
@@ -59,7 +59,7 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
 # ALL: %0:vr512 = COPY $zmm1
-# ALL-NEXT: %1:vr128x = VEXTRACTF32x4Zrri %0, 1
+# ALL-NEXT: %1:vr128x = VEXTRACTF32X4Zrri %0, 1
 # ALL-NEXT: $xmm0 = COPY %1
 # ALL-NEXT: RET 0, implicit $xmm0
 body: |
@@ -111,7 +111,7 @@ registers:
   - { id: 0, class: vecr }
   - { id: 1, class: vecr }
 # ALL: %0:vr512 = COPY $zmm1
-# ALL-NEXT: %1:vr256x = VEXTRACTF64x4Zrri %0, 1
+# ALL-NEXT: %1:vr256x = VEXTRACTF64X4Zrri %0, 1
 # ALL-NEXT: $ymm0 = COPY %1
 # ALL-NEXT: RET 0, implicit $ymm0
 body: |

llvm/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir

Lines changed: 3 additions & 3 deletions
@@ -36,7 +36,7 @@ registers:
 #
 # AVX512VL: %0:vr256x = COPY $ymm0
 # AVX512VL-NEXT: %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 0
+# AVX512VL-NEXT: %2:vr256x = VINSERTF32X4Z256rri %0, %1, 0
 # AVX512VL-NEXT: $ymm0 = COPY %2
 # AVX512VL-NEXT: RET 0, implicit $ymm0
 body: |
@@ -98,7 +98,7 @@ registers:
 #
 # AVX512VL: %0:vr256x = COPY $ymm0
 # AVX512VL-NEXT: %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 1
+# AVX512VL-NEXT: %2:vr256x = VINSERTF32X4Z256rri %0, %1, 1
 # AVX512VL-NEXT: $ymm0 = COPY %2
 # AVX512VL-NEXT: RET 0, implicit $ymm0
 body: |
@@ -129,7 +129,7 @@ registers:
 #
 # AVX512VL: %0:vr256x = IMPLICIT_DEF
 # AVX512VL-NEXT: %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rri %0, %1, 1
+# AVX512VL-NEXT: %2:vr256x = VINSERTF32X4Z256rri %0, %1, 1
 # AVX512VL-NEXT: $ymm0 = COPY %2
 # AVX512VL-NEXT: RET 0, implicit $ymm0
 body: |

llvm/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir

Lines changed: 12 additions & 12 deletions
@@ -51,8 +51,8 @@ body: |
     ; ALL-LABEL: name: test_insert_128_idx0
     ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
     ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[COPY]], [[COPY1]], 0
-    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
+    ; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[COPY]], [[COPY1]], 0
+    ; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<4 x s32>) = COPY $xmm1
@@ -102,8 +102,8 @@ body: |
     ; ALL-LABEL: name: test_insert_128_idx1
     ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
     ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[COPY]], [[COPY1]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
+    ; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[COPY]], [[COPY1]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<4 x s32>) = COPY $xmm1
@@ -127,8 +127,8 @@ body: |
    ; ALL-LABEL: name: test_insert_128_idx1_undef
     ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
     ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
-    ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[DEF]], [[COPY]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri]]
+    ; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[DEF]], [[COPY]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>) = COPY $xmm1
@@ -152,8 +152,8 @@ body: |
     ; ALL-LABEL: name: test_insert_256_idx0
     ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
     ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[COPY]], [[COPY1]], 0
-    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
+    ; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[COPY]], [[COPY1]], 0
+    ; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<8 x s32>) = COPY $ymm1
@@ -203,8 +203,8 @@ body: |
     ; ALL-LABEL: name: test_insert_256_idx1
     ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
     ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[COPY]], [[COPY1]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
+    ; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[COPY]], [[COPY1]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = COPY $zmm0
     %1(<8 x s32>) = COPY $ymm1
@@ -228,8 +228,8 @@ body: |
     ; ALL-LABEL: name: test_insert_256_idx1_undef
     ; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
     ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
-    ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri [[DEF]], [[COPY]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
+    ; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri [[DEF]], [[COPY]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
     ; ALL: RET 0, implicit $ymm0
     %0(<16 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = COPY $ymm1

llvm/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir

Lines changed: 2 additions & 2 deletions
@@ -29,8 +29,8 @@ body: |
     ; AVX512VL-LABEL: name: test_merge
     ; AVX512VL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
     ; AVX512VL: undef %2.sub_xmm:vr256x = COPY [[DEF]]
-    ; AVX512VL: [[VINSERTF32x4Z256rri:%[0-9]+]]:vr256x = VINSERTF32x4Z256rri %2, [[DEF]], 1
-    ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rri]]
+    ; AVX512VL: [[VINSERTF32X4Z256rri:%[0-9]+]]:vr256x = VINSERTF32X4Z256rri %2, [[DEF]], 1
+    ; AVX512VL: $ymm0 = COPY [[VINSERTF32X4Z256rri]]
     ; AVX512VL: RET 0, implicit $ymm0
     %0(<4 x s32>) = IMPLICIT_DEF
     %1(<8 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>)

llvm/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir

Lines changed: 6 additions & 6 deletions
@@ -24,10 +24,10 @@ body: |
     ; ALL-LABEL: name: test_merge_v128
     ; ALL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
     ; ALL: undef %2.sub_xmm:vr512 = COPY [[DEF]]
-    ; ALL: [[VINSERTF32x4Zrri:%[0-9]+]]:vr512 = VINSERTF32x4Zrri %2, [[DEF]], 1
-    ; ALL: [[VINSERTF32x4Zrri1:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[VINSERTF32x4Zrri]], [[DEF]], 2
-    ; ALL: [[VINSERTF32x4Zrri2:%[0-9]+]]:vr512 = VINSERTF32x4Zrri [[VINSERTF32x4Zrri1]], [[DEF]], 3
-    ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrri2]]
+    ; ALL: [[VINSERTF32X4Zrri:%[0-9]+]]:vr512 = VINSERTF32X4Zrri %2, [[DEF]], 1
+    ; ALL: [[VINSERTF32X4Zrri1:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[VINSERTF32X4Zrri]], [[DEF]], 2
+    ; ALL: [[VINSERTF32X4Zrri2:%[0-9]+]]:vr512 = VINSERTF32X4Zrri [[VINSERTF32X4Zrri1]], [[DEF]], 3
+    ; ALL: $zmm0 = COPY [[VINSERTF32X4Zrri2]]
     ; ALL: RET 0, implicit $zmm0
     %0(<4 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
@@ -49,8 +49,8 @@ body: |
     ; ALL-LABEL: name: test_merge_v256
     ; ALL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
     ; ALL: undef %2.sub_ymm:vr512 = COPY [[DEF]]
-    ; ALL: [[VINSERTF64x4Zrri:%[0-9]+]]:vr512 = VINSERTF64x4Zrri %2, [[DEF]], 1
-    ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrri]]
+    ; ALL: [[VINSERTF64X4Zrri:%[0-9]+]]:vr512 = VINSERTF64X4Zrri %2, [[DEF]], 1
+    ; ALL: $zmm0 = COPY [[VINSERTF64X4Zrri]]
     ; ALL: RET 0, implicit $zmm0
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<16 x s32>) = G_CONCAT_VECTORS %0(<8 x s32>), %0(<8 x s32>)

llvm/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir

Lines changed: 2 additions & 2 deletions
@@ -33,9 +33,9 @@ body: |
     ; AVX512VL-LABEL: name: test_unmerge
     ; AVX512VL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
     ; AVX512VL-NEXT: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm
-    ; AVX512VL-NEXT: [[VEXTRACTF32x4Z256rri:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rri [[DEF]], 1
+    ; AVX512VL-NEXT: [[VEXTRACTF32X4Z256rri:%[0-9]+]]:vr128x = VEXTRACTF32X4Z256rri [[DEF]], 1
     ; AVX512VL-NEXT: $xmm0 = COPY [[COPY]]
-    ; AVX512VL-NEXT: $xmm1 = COPY [[VEXTRACTF32x4Z256rri]]
+    ; AVX512VL-NEXT: $xmm1 = COPY [[VEXTRACTF32X4Z256rri]]
     ; AVX512VL-NEXT: RET 0, implicit $xmm0, implicit $xmm1
     %0(<8 x s32>) = IMPLICIT_DEF
     %1(<4 x s32>), %2(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>)
