Commit db070bb
[AMDGPU] Increased vector length for global/constant loads.

Summary: GCN ISA supports instructions that can read 16 consecutive dwords from memory through the scalar data cache; the LoadStoreVectorizer should take advantage of the wider vector length and pack 16/8 elements of dwords/quadwords.

Author: FarhanaAleen

Reviewed By: rampitec

Subscribers: llvm-commits, AMDGPU

Differential Revision: https://reviews.llvm.org/D43275

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@325518 91177308-0d34-0410-b5e6-96231b3b80d8
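For orientation, the width arithmetic behind "16/8 elements of dwords/quadwords": a 512-bit scalar load covers sixteen 32-bit or eight 64-bit elements. A minimal standalone sketch of that arithmetic (maxFactor is a hypothetical helper for this note, not an LLVM API):

// Hypothetical helper illustrating the load-width budget arithmetic.
#include <cassert>

// Number of whole elements of EltBits bits that fit in one scalar
// load of RegBits bits.
unsigned maxFactor(unsigned RegBits, unsigned EltBits) {
  return RegBits / EltBits;
}

int main() {
  assert(maxFactor(512, 32) == 16); // 16 dwords    -> one s_load_dwordx16
  assert(maxFactor(512, 64) == 8);  // 8 quadwords  -> one s_load_dwordx16
  assert(maxFactor(128, 32) == 4);  // older targets keep the 128-bit cap
  return 0;
}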
1 parent 7c93b6c commit db070bb

File tree: 5 files changed, +105 -3 lines

5 files changed

+105
-3
lines changed

lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp

Lines changed: 28 additions & 2 deletions

@@ -233,12 +233,38 @@ unsigned AMDGPUTTIImpl::getMinVectorRegisterBitWidth() const {
   return 32;
 }
 
+unsigned AMDGPUTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+                                            unsigned ChainSizeInBytes,
+                                            VectorType *VecTy) const {
+  unsigned VecRegBitWidth = VF * LoadSize;
+  if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
+    // TODO: Support element-size less than 32bit?
+    return 128 / LoadSize;
+
+  return VF;
+}
+
+unsigned AMDGPUTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+                                             unsigned ChainSizeInBytes,
+                                             VectorType *VecTy) const {
+  unsigned VecRegBitWidth = VF * StoreSize;
+  if (VecRegBitWidth > 128)
+    return 128 / StoreSize;
+
+  return VF;
+}
+
 unsigned AMDGPUTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
   AMDGPUAS AS = ST->getAMDGPUAS();
   if (AddrSpace == AS.GLOBAL_ADDRESS ||
       AddrSpace == AS.CONSTANT_ADDRESS ||
-      AddrSpace == AS.CONSTANT_ADDRESS_32BIT ||
-      AddrSpace == AS.FLAT_ADDRESS)
+      AddrSpace == AS.CONSTANT_ADDRESS_32BIT) {
+    if (ST->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+      return 128;
+    return 512;
+  }
+
+  if (AddrSpace == AS.FLAT_ADDRESS)
     return 128;
   if (AddrSpace == AS.LOCAL_ADDRESS ||
       AddrSpace == AS.REGION_ADDRESS)
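Note that the new hooks cap the re-vectorization factor rather than raise it: chains of sub-32-bit elements, and all store chains, stay within 128 bits, so only loads of 32-bit-and-wider elements can use the new 512-bit budget. A minimal sketch of the load-side clamp (a free function for illustration only; the real member function queries VecTy and the subtarget):

// Illustration-only mirror of getLoadVectorFactor's clamp. VF * LoadSize
// is the chain width in bits; chains of sub-dword elements fall back to
// a 128-bit vector.
#include <cassert>

unsigned loadVectorFactor(unsigned VF, unsigned LoadSize,
                          unsigned ScalarSizeInBits) {
  unsigned VecRegBitWidth = VF * LoadSize;
  if (VecRegBitWidth > 128 && ScalarSizeInBits < 32)
    return 128 / LoadSize;
  return VF;
}

int main() {
  assert(loadVectorFactor(16, 16, 16) == 8);  // 16 x i16 (256 bits) clamped to 128 bits
  assert(loadVectorFactor(16, 32, 32) == 16); // 16 x i32 kept: one 512-bit load
  return 0;
}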

lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h

Lines changed: 6 additions & 0 deletions

@@ -118,6 +118,12 @@ class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
   unsigned getNumberOfRegisters(bool Vector) const;
   unsigned getRegisterBitWidth(bool Vector) const;
   unsigned getMinVectorRegisterBitWidth() const;
+  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
+                               unsigned ChainSizeInBytes,
+                               VectorType *VecTy) const;
+  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
+                                unsigned ChainSizeInBytes,
+                                VectorType *VecTy) const;
   unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
 
   bool isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
Lines changed: 37 additions & 0 deletions

@@ -0,0 +1,37 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+; Tests whether a load chain of 8 constants gets vectorized into a wider load.
+; FUNC-LABEL: {{^}}constant_load_v8f32:
+; GCN: s_load_dwordx8
+; EG: VTX_READ_128
+; EG: VTX_READ_128
+define amdgpu_kernel void @constant_load_v8f32(float addrspace(4)* noalias nocapture readonly %weights, float addrspace(1)* noalias nocapture %out_ptr) {
+entry:
+  %out_ptr.promoted = load float, float addrspace(1)* %out_ptr, align 4
+  %tmp = load float, float addrspace(4)* %weights, align 4
+  %add = fadd float %tmp, %out_ptr.promoted
+  %arrayidx.1 = getelementptr inbounds float, float addrspace(4)* %weights, i64 1
+  %tmp1 = load float, float addrspace(4)* %arrayidx.1, align 4
+  %add.1 = fadd float %tmp1, %add
+  %arrayidx.2 = getelementptr inbounds float, float addrspace(4)* %weights, i64 2
+  %tmp2 = load float, float addrspace(4)* %arrayidx.2, align 4
+  %add.2 = fadd float %tmp2, %add.1
+  %arrayidx.3 = getelementptr inbounds float, float addrspace(4)* %weights, i64 3
+  %tmp3 = load float, float addrspace(4)* %arrayidx.3, align 4
+  %add.3 = fadd float %tmp3, %add.2
+  %arrayidx.4 = getelementptr inbounds float, float addrspace(4)* %weights, i64 4
+  %tmp4 = load float, float addrspace(4)* %arrayidx.4, align 4
+  %add.4 = fadd float %tmp4, %add.3
+  %arrayidx.5 = getelementptr inbounds float, float addrspace(4)* %weights, i64 5
+  %tmp5 = load float, float addrspace(4)* %arrayidx.5, align 4
+  %add.5 = fadd float %tmp5, %add.4
+  %arrayidx.6 = getelementptr inbounds float, float addrspace(4)* %weights, i64 6
+  %tmp6 = load float, float addrspace(4)* %arrayidx.6, align 4
+  %add.6 = fadd float %tmp6, %add.5
+  %arrayidx.7 = getelementptr inbounds float, float addrspace(4)* %weights, i64 7
+  %tmp7 = load float, float addrspace(4)* %arrayidx.7, align 4
+  %add.7 = fadd float %tmp7, %add.6
+  store float %add.7, float addrspace(1)* %out_ptr, align 4
+  ret void
+}
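The eight f32 loads above form a 256-bit chain. With the new 512-bit constant-address budget the whole chain fits in one scalar load, hence the s_load_dwordx8 check on GCN, while the r600/redwood run still expects the chain as two 128-bit vertex fetches (the two VTX_READ_128 checks).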

test/CodeGen/AMDGPU/load-constant-f64.ll

Lines changed: 33 additions & 0 deletions

@@ -13,3 +13,36 @@ define amdgpu_kernel void @constant_load_f64(double addrspace(1)* %out, double a
 }
 
 attributes #0 = { nounwind }
+
+; Tests whether a load-chain of 8 constants of 64bit each gets vectorized into a wider load.
+; FUNC-LABEL: {{^}}constant_load_2v4f64:
+; GCN: s_load_dwordx16
+define amdgpu_kernel void @constant_load_2v4f64(double addrspace(4)* noalias nocapture readonly %weights, double addrspace(1)* noalias nocapture %out_ptr) {
+entry:
+  %out_ptr.promoted = load double, double addrspace(1)* %out_ptr, align 4
+  %tmp = load double, double addrspace(4)* %weights, align 4
+  %add = fadd double %tmp, %out_ptr.promoted
+  %arrayidx.1 = getelementptr inbounds double, double addrspace(4)* %weights, i64 1
+  %tmp1 = load double, double addrspace(4)* %arrayidx.1, align 4
+  %add.1 = fadd double %tmp1, %add
+  %arrayidx.2 = getelementptr inbounds double, double addrspace(4)* %weights, i64 2
+  %tmp2 = load double, double addrspace(4)* %arrayidx.2, align 4
+  %add.2 = fadd double %tmp2, %add.1
+  %arrayidx.3 = getelementptr inbounds double, double addrspace(4)* %weights, i64 3
+  %tmp3 = load double, double addrspace(4)* %arrayidx.3, align 4
+  %add.3 = fadd double %tmp3, %add.2
+  %arrayidx.4 = getelementptr inbounds double, double addrspace(4)* %weights, i64 4
+  %tmp4 = load double, double addrspace(4)* %arrayidx.4, align 4
+  %add.4 = fadd double %tmp4, %add.3
+  %arrayidx.5 = getelementptr inbounds double, double addrspace(4)* %weights, i64 5
+  %tmp5 = load double, double addrspace(4)* %arrayidx.5, align 4
+  %add.5 = fadd double %tmp5, %add.4
+  %arrayidx.6 = getelementptr inbounds double, double addrspace(4)* %weights, i64 6
+  %tmp6 = load double, double addrspace(4)* %arrayidx.6, align 4
+  %add.6 = fadd double %tmp6, %add.5
+  %arrayidx.7 = getelementptr inbounds double, double addrspace(4)* %weights, i64 7
+  %tmp7 = load double, double addrspace(4)* %arrayidx.7, align 4
+  %add.7 = fadd double %tmp7, %add.6
+  store double %add.7, double addrspace(1)* %out_ptr, align 4
+  ret void
+}
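Here the chain is 8 x 64 bits = 512 bits, exactly the widened constant-address budget, so the whole chain collapses into a single s_load_dwordx16.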

test/CodeGen/AMDGPU/waitcnt-looptest.ll

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global | FileCheck --check-prefix=GCN %s
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global -amdgpu-load-store-vectorizer=0 | FileCheck --check-prefix=GCN %s
 
 ; Check that the waitcnt insertion algorithm correctly propagates wait counts
 ; from before a loop to the loop header.
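Presumably the wider vectorization would merge this test's loads and perturb the s_waitcnt placement it checks, so the RUN line pins the old behavior by disabling the LoadStoreVectorizer with -amdgpu-load-store-vectorizer=0.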
