AMDGPU/GlobalISel: Improve readanylane combines in regbanklegalize #142789

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open · wants to merge 1 commit into base: users/petar-avramovic/ral-tests
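This patch teaches the AMDGPURegBankLegalizeCombiner copy combine to eliminate G_AMDGPU_READANYLANE in more cases: it now looks through G_BITCAST and through matching G_UNMERGE_VALUES/G_MERGE_VALUES chains around the readanylane, and it handles copies into physical registers by building a COPY from the vgpr source instead of replacing the register. A minimal MIR-style sketch of one of the newly handled patterns, with illustrative register names adapted from the comments in the patch:

    %lo_v:vgpr(s32), %hi_v:vgpr(s32) = G_UNMERGE_VALUES %src_v:vgpr(s64)
    %lo_s:sgpr(s32) = G_AMDGPU_READANYLANE %lo_v:vgpr(s32)
    %hi_s:sgpr(s32) = G_AMDGPU_READANYLANE %hi_v:vgpr(s32)
    %val_s:sgpr(s64) = G_MERGE_VALUES %lo_s:sgpr(s32), %hi_s:sgpr(s32)
    %dst:vgpr(s64) = COPY %val_s:sgpr(s64)

The combine rewrites users of %dst to use %src_v directly, so both readanylanes become dead; for a physical destination, a COPY from %src_v is built instead. The v_readfirstlane_b32/v_mov_b32 pairs removed from the tests below show the effect.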
122 changes: 104 additions & 18 deletions llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp
@@ -23,6 +23,7 @@
#include "GCNSubtarget.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineUniformityAnalysis.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -137,7 +138,109 @@ class AMDGPURegBankLegalizeCombiner {
return {MatchMI, MatchMI->getOperand(1).getReg()};
}

std::pair<GUnmerge *, int> tryMatchRALFromUnmerge(Register Src) {
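// If Src is defined by G_AMDGPU_READANYLANE whose input is one def of a
// G_UNMERGE_VALUES, return that unmerge and the index of the matched def;
// otherwise return {nullptr, -1}.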
MachineInstr *ReadAnyLane = MRI.getVRegDef(Src);
if (ReadAnyLane->getOpcode() == AMDGPU::G_AMDGPU_READANYLANE) {
Register RALSrc = ReadAnyLane->getOperand(1).getReg();
if (auto *UnMerge = getOpcodeDef<GUnmerge>(RALSrc, MRI))
return {UnMerge, UnMerge->findRegisterDefOperandIdx(RALSrc, nullptr)};
}
return {nullptr, -1};
}

Register getReadAnyLaneSrc(Register Src) {
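// Return the vgpr source feeding the readanylane pattern that defines Src,
// or an invalid register if no supported pattern matches.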
// Src = G_AMDGPU_READANYLANE RALSrc
auto [RAL, RALSrc] = tryMatch(Src, AMDGPU::G_AMDGPU_READANYLANE);
if (RAL)
return RALSrc;

// LoVgpr, HiVgpr = G_UNMERGE_VALUES UnmergeSrc
// LoSgpr = G_AMDGPU_READANYLANE LoVgpr
// HiSgpr = G_AMDGPU_READANYLANE HiVgpr
// Src = G_MERGE_VALUES LoSgpr, HiSgpr
auto *Merge = getOpcodeDef<GMergeLikeInstr>(Src, MRI);
if (Merge) {
unsigned NumElts = Merge->getNumSources();
auto [Unmerge, Idx] = tryMatchRALFromUnmerge(Merge->getSourceReg(0));
if (!Unmerge || Unmerge->getNumDefs() != NumElts || Idx != 0)
return {};

// Check that all elements come from the same unmerge and there is no shuffling.
for (unsigned I = 1; I < NumElts; ++I) {
auto [UnmergeI, IdxI] = tryMatchRALFromUnmerge(Merge->getSourceReg(I));
if (UnmergeI != Unmerge || (unsigned)IdxI != I)
return {};
}
return Unmerge->getSourceReg();
}

// ..., VgprI, ... = G_UNMERGE_VALUES VgprLarge
// SgprI = G_AMDGPU_READANYLANE VgprI
// SgprLarge = G_MERGE_VALUES ..., SgprI, ...
// ..., Src, ... = G_UNMERGE_VALUES SgprLarge
auto *UnMerge = getOpcodeDef<GUnmerge>(Src, MRI);
if (UnMerge) {
int Idx = UnMerge->findRegisterDefOperandIdx(Src, nullptr);
auto *Merge = getOpcodeDef<GMergeLikeInstr>(UnMerge->getSourceReg(), MRI);
if (Merge) {
auto [RAL, RALSrc] =
tryMatch(Merge->getSourceReg(Idx), AMDGPU::G_AMDGPU_READANYLANE);
if (RAL)
return RALSrc;
}
}

return {};
}

void replaceRegWithOrBuildCopy(Register Dst, Register Src) {
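// A virtual Dst can simply be replaced; a physical Dst (e.g. an ABI return
// register) still needs an explicit COPY.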
if (Dst.isVirtual())
MRI.replaceRegWith(Dst, Src);
else
B.buildCopy(Dst, Src);
}

bool tryEliminateReadAnyLane(MachineInstr &Copy) {
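// Fold a COPY of a READANYLANE result (possibly hidden behind a bitcast or
// a matching merge/unmerge chain) to use the original vgpr value directly.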
Register Dst = Copy.getOperand(0).getReg();
Register Src = Copy.getOperand(1).getReg();
if (!Src.isVirtual())
return false;

Register RALDst = Src;
MachineInstr &SrcMI = *MRI.getVRegDef(Src);
if (SrcMI.getOpcode() == AMDGPU::G_BITCAST)
RALDst = SrcMI.getOperand(1).getReg();

Register RALSrc = getReadAnyLaneSrc(RALDst);
if (!RALSrc)
return false;

B.setInstr(Copy);
if (SrcMI.getOpcode() != AMDGPU::G_BITCAST) {
// Src = READANYLANE RALSrc       Src = READANYLANE RALSrc
// Dst = Copy Src                 $Dst = Copy Src
// ->                             ->
// Dst = RALSrc                   $Dst = Copy RALSrc
replaceRegWithOrBuildCopy(Dst, RALSrc);
} else {
// RALDst = READANYLANE RALSrc    RALDst = READANYLANE RALSrc
// Src = G_BITCAST RALDst         Src = G_BITCAST RALDst
// Dst = Copy Src                 $Dst = Copy Src
// ->                             ->
// NewVgpr = G_BITCAST RALSrc     NewVgpr = G_BITCAST RALSrc
// Dst = NewVgpr                  $Dst = Copy NewVgpr
auto Bitcast = B.buildBitcast({VgprRB, MRI.getType(Src)}, RALSrc);
replaceRegWithOrBuildCopy(Dst, Bitcast.getReg(0));
}

eraseInstr(Copy, MRI, nullptr);
return true;
}

void tryCombineCopy(MachineInstr &MI) {
if (tryEliminateReadAnyLane(MI))
return;

Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
// Skip copies of physical registers.
@@ -160,24 +263,7 @@ class AMDGPURegBankLegalizeCombiner {
auto One = B.buildConstant({SgprRB, S32}, 1);
auto BoolSrc = B.buildAnd({SgprRB, S32}, TruncS32Src, One);
B.buildInstr(AMDGPU::G_AMDGPU_COPY_VCC_SCC, {Dst}, {BoolSrc});
cleanUpAfterCombine(MI, Trunc);
return;
}

// Src = G_AMDGPU_READANYLANE RALSrc
// Dst = COPY Src
// ->
// Dst = RALSrc
if (MRI.getRegBankOrNull(Dst) == VgprRB &&
MRI.getRegBankOrNull(Src) == SgprRB) {
auto [RAL, RALSrc] = tryMatch(Src, AMDGPU::G_AMDGPU_READANYLANE);
if (!RAL)
return;

assert(MRI.getRegBank(RALSrc) == VgprRB);
MRI.replaceRegWith(Dst, RALSrc);
cleanUpAfterCombine(MI, RAL);
return;
eraseInstr(MI, MRI, nullptr);
}
}

25 changes: 2 additions & 23 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/readanylane-combines.ll
@@ -20,8 +20,6 @@ define amdgpu_ps float @readanylane_to_physical_vgpr(ptr addrspace(1) inreg %ptr
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: global_load_dword v0, v0, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: ; return to shader part epilog
%load = load volatile float, ptr addrspace(1) %ptr
ret float %load
@@ -33,8 +31,6 @@ define amdgpu_ps void @readanylane_to_bitcast_to_virtual_vgpr(ptr addrspace(1) i
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v1
; CHECK-NEXT: v_mov_b32_e32 v1, s0
; CHECK-NEXT: global_store_dword v0, v1, s[2:3]
; CHECK-NEXT: s_endpgm
%load = load volatile <2 x i16>, ptr addrspace(1) %ptr0
@@ -49,8 +45,6 @@ define amdgpu_ps float @readanylane_to_bitcast_to_physical_vgpr(ptr addrspace(1)
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: global_load_dword v0, v0, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: ; return to shader part epilog
%load = load volatile <2 x i16>, ptr addrspace(1) %ptr0
%bitcast = bitcast <2 x i16> %load to float
@@ -63,10 +57,6 @@ define amdgpu_ps void @unmerge_readanylane_merge_to_virtual_vgpr(ptr addrspace(1
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: global_load_dwordx2 v[0:1], v2, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_readfirstlane_b32 s1, v1
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; CHECK-NEXT: s_endpgm
%load = load volatile i64, ptr addrspace(1) %ptr0
@@ -85,10 +75,6 @@ define amdgpu_ps void @unmerge_readanylane_merge_bitcast_to_virtual_vgpr(ptr add
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: global_load_dwordx2 v[0:1], v2, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_readfirstlane_b32 s1, v1
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; CHECK-NEXT: s_endpgm
%load = load volatile <2 x i32>, ptr addrspace(1) %ptr0
@@ -109,9 +95,7 @@ define amdgpu_ps void @unmerge_readanylane_merge_extract_to_virtual_vgpr(ptr add
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: global_load_dwordx2 v[0:1], v2, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v1
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: global_store_dword v2, v0, s[2:3]
; CHECK-NEXT: global_store_dword v2, v1, s[2:3]
; CHECK-NEXT: s_endpgm
%load = load volatile <2 x i32>, ptr addrspace(1) %ptr0
%extracted = extractelement <2 x i32> %load, i32 1
@@ -125,8 +109,7 @@ define amdgpu_ps float @unmerge_readanylane_merge_extract_to_physical_vgpr(ptr a
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: global_load_dwordx2 v[0:1], v0, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v1
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: v_mov_b32_e32 v0, v1
; CHECK-NEXT: ; return to shader part epilog
%load = load volatile <2 x float>, ptr addrspace(1) %ptr0
%extracted = extractelement <2 x float> %load, i32 1
@@ -139,8 +122,6 @@ define amdgpu_ps void @unmerge_readanylane_merge_extract_bitcast_to_virtual_vgpr
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: global_load_dwordx2 v[0:1], v2, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: global_store_dword v2, v0, s[2:3]
; CHECK-NEXT: s_endpgm
%load = load volatile <4 x i16>, ptr addrspace(1) %ptr0
@@ -156,8 +137,6 @@ define amdgpu_ps float @unmerge_readanylane_merge_extract_bitcast_to_physical_vg
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: global_load_dwordx2 v[0:1], v0, s[0:1] glc dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: v_mov_b32_e32 v0, s0
; CHECK-NEXT: ; return to shader part epilog
%load = load volatile <4 x i16>, ptr addrspace(1) %ptr0
%extracted = shufflevector <4 x i16> %load, <4 x i16> %load, <2 x i32> <i32 0, i32 1>