[Attributor] Skip AS specialization for volatile memory instructions #107250


Merged · 1 commit · Sep 6, 2024
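This patch teaches the Attributor's address-space inference (AAAddressSpace) to skip volatile loads and stores unless the target reports a volatile variant of the instruction in the inferred address space, queried via TargetTransformInfo::hasVolatileVariant. The per-use rewrite logic also moves out of a local lambda into a templated makeChange helper shared by loads and stores.

A condensed IR sketch of the new behavior (illustrative, distilled from the aa-as-infer.ll test added below; there the volatile accesses keep their flat pointers on AMDGPU, implying its TTI reports no volatile variants):

define void @example(ptr addrspace(1) %p) {
  %f = addrspacecast ptr addrspace(1) %p to ptr
  ; Non-volatile: eligible for specialization back to addrspace(1).
  %v0 = load i32, ptr %f, align 4
  ; Volatile: now skipped unless TTI->hasVolatileVariant(I, NewAS) holds.
  %v1 = load volatile i32, ptr %f, align 4
  ret void
}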
56 changes: 37 additions & 19 deletions llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -12492,6 +12492,33 @@ struct AAIndirectCallInfoCallSite : public AAIndirectCallInfo {

/// ------------------------ Address Space ------------------------------------
namespace {

template <typename InstType>
static bool makeChange(Attributor &A, InstType *MemInst, const Use &U,
Value *OriginalValue, PointerType *NewPtrTy,
bool UseOriginalValue) {
if (U.getOperandNo() != InstType::getPointerOperandIndex())
return false;

if (MemInst->isVolatile()) {
auto *TTI = A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(
*MemInst->getFunction());
unsigned NewAS = NewPtrTy->getPointerAddressSpace();
if (!TTI || !TTI->hasVolatileVariant(MemInst, NewAS))
return false;
}

if (UseOriginalValue) {
A.changeUseAfterManifest(const_cast<Use &>(U), *OriginalValue);
return true;
}

Instruction *CastInst = new AddrSpaceCastInst(OriginalValue, NewPtrTy);
CastInst->insertBefore(MemInst);
A.changeUseAfterManifest(const_cast<Use &>(U), *CastInst);
return true;
}

struct AAAddressSpaceImpl : public AAAddressSpace {
AAAddressSpaceImpl(const IRPosition &IRP, Attributor &A)
: AAAddressSpace(IRP, A) {}
@@ -12535,25 +12562,15 @@ struct AAAddressSpaceImpl : public AAAddressSpace {
getAssociatedType()->getPointerAddressSpace())
return ChangeStatus::UNCHANGED;

Type *NewPtrTy = PointerType::get(getAssociatedType()->getContext(),
static_cast<uint32_t>(getAddressSpace()));
PointerType *NewPtrTy =
PointerType::get(getAssociatedType()->getContext(),
static_cast<uint32_t>(getAddressSpace()));
bool UseOriginalValue =
OriginalValue->getType()->getPointerAddressSpace() ==
static_cast<uint32_t>(getAddressSpace());

bool Changed = false;

auto MakeChange = [&](Instruction *I, Use &U) {
Changed = true;
if (UseOriginalValue) {
A.changeUseAfterManifest(U, *OriginalValue);
return;
}
Instruction *CastInst = new AddrSpaceCastInst(OriginalValue, NewPtrTy);
CastInst->insertBefore(cast<Instruction>(I));
A.changeUseAfterManifest(U, *CastInst);
};

auto Pred = [&](const Use &U, bool &) {
if (U.get() != AssociatedValue)
return true;
@@ -12564,12 +12581,13 @@ struct AAAddressSpaceImpl : public AAAddressSpace {
// CGSCC if the AA is run on CGSCC instead of the entire module.
if (!A.isRunOn(Inst->getFunction()))
return true;
if (isa<LoadInst>(Inst))
MakeChange(Inst, const_cast<Use &>(U));
if (isa<StoreInst>(Inst)) {
// We only make changes if the use is the pointer operand.
if (U.getOperandNo() == 1)
MakeChange(Inst, const_cast<Use &>(U));
if (auto *LI = dyn_cast<LoadInst>(Inst)) {
Changed |=
makeChange(A, LI, U, OriginalValue, NewPtrTy, UseOriginalValue);
}
if (auto *SI = dyn_cast<StoreInst>(Inst)) {
Changed |=
makeChange(A, SI, U, OriginalValue, NewPtrTy, UseOriginalValue);
}
return true;
};
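For accesses that are still rewritten, the makeChange helper above either substitutes the original value directly (when its address space already matches the inferred one) or materializes an addrspacecast immediately before the memory instruction. A minimal sketch of the two manifest shapes (names are illustrative, not taken from the tests):

define i32 @manifest_shapes(ptr addrspace(1) %g, ptr %p) {
  ; UseOriginalValue == true: the original addrspace(1) value replaces the
  ; flat use directly; no new instruction is created.
  %v0 = load i32, ptr addrspace(1) %g, align 4
  ; UseOriginalValue == false: an addrspacecast to the inferred pointer
  ; type is inserted right before the memory instruction.
  %cast = addrspacecast ptr %p to ptr addrspace(1)
  %v1 = load i32, ptr addrspace(1) %cast, align 4
  %sum = add i32 %v0, %v1
  ret i32 %sum
}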
7 changes: 3 additions & 4 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.ll
@@ -7,11 +7,10 @@ target triple = "amdgcn-amd-amdhsa"
; Make sure flat_scratch_init is set

; GCN-LABEL: {{^}}stack_object_addrspacecast_in_kernel_no_calls:
; RW-FLAT: s_add_u32 s0, s0, s7
; RW-FLAT: s_addc_u32 s1, s1, 0
; RW-FLAT: s_add_u32 flat_scratch_lo, s4, s7
; RW-FLAT: s_addc_u32 flat_scratch_hi, s5, 0
; RO-FLAT-NOT: flat_scratch
; RW-FLAT: buffer_store_dword
; RO-FLAT: scratch_store_dword
; GCN: flat_store_dword
; RO-FLAT-NOT: .amdhsa_user_sgpr_private_segment_buffer
; RW-FLAT: .amdhsa_user_sgpr_flat_scratch_init 1
; RO-FLAT-NOT: .amdhsa_user_sgpr_flat_scratch_init
88 changes: 88 additions & 0 deletions llvm/test/CodeGen/AMDGPU/aa-as-infer.ll
@@ -0,0 +1,88 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor -S %s -o - | FileCheck %s

@g1 = protected addrspace(1) externally_initialized global i32 0, align 4

define internal void @volatile_load_store_as_0(ptr %p) {
; CHECK-LABEL: define internal void @volatile_load_store_as_0(
; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[VAL_0:%.*]] = load i32, ptr addrspace(1) @g1, align 4
; CHECK-NEXT: [[VAL_1:%.*]] = load volatile i32, ptr [[P]], align 4
; CHECK-NEXT: store i32 [[VAL_1]], ptr addrspace(1) @g1, align 4
; CHECK-NEXT: store volatile i32 [[VAL_0]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
%val.0 = load i32, ptr addrspace(1) @g1, align 4
%val.1 = load volatile i32, ptr %p, align 4
store i32 %val.1, ptr addrspace(1) @g1, align 4
store volatile i32 %val.0, ptr %p, align 4
ret void
}

define void @call_volatile_load_store_as_0(ptr %p1, ptr %p2) {
; CHECK-LABEL: define void @call_volatile_load_store_as_0(
; CHECK-SAME: ptr [[P1:%.*]], ptr [[P2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: call void @volatile_load_store_as_0(ptr [[P1]])
; CHECK-NEXT: call void @volatile_load_store_as_0(ptr [[P2]])
; CHECK-NEXT: ret void
;
call void @volatile_load_store_as_0(ptr %p1)
call void @volatile_load_store_as_0(ptr %p2)
ret void
}

define internal void @volatile_load_store_as_1(ptr %p) {
; CHECK-LABEL: define internal void @volatile_load_store_as_1(
; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[VAL_0:%.*]] = load i32, ptr addrspace(1) @g1, align 4
; CHECK-NEXT: [[VAL_1:%.*]] = load volatile i32, ptr [[P]], align 4
; CHECK-NEXT: store i32 [[VAL_1]], ptr addrspace(1) @g1, align 4
; CHECK-NEXT: store volatile i32 [[VAL_0]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
%val.0 = load i32, ptr addrspace(1) @g1, align 4
%val.1 = load volatile i32, ptr %p, align 4
store i32 %val.1, ptr addrspace(1) @g1, align 4
store volatile i32 %val.0, ptr %p, align 4
ret void
}

define void @call_volatile_load_store_as_1(ptr addrspace(1) %p1, ptr addrspace(1) %p2) {
; CHECK-LABEL: define void @call_volatile_load_store_as_1(
; CHECK-SAME: ptr addrspace(1) [[P1:%.*]], ptr addrspace(1) [[P2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[P1_CAST:%.*]] = addrspacecast ptr addrspace(1) [[P1]] to ptr
; CHECK-NEXT: [[P2_CAST:%.*]] = addrspacecast ptr addrspace(1) [[P2]] to ptr
; CHECK-NEXT: call void @volatile_load_store_as_1(ptr [[P1_CAST]])
; CHECK-NEXT: call void @volatile_load_store_as_1(ptr [[P2_CAST]])
; CHECK-NEXT: ret void
;
%p1.cast = addrspacecast ptr addrspace(1) %p1 to ptr
%p2.cast = addrspacecast ptr addrspace(1) %p2 to ptr
call void @volatile_load_store_as_1(ptr %p1.cast)
call void @volatile_load_store_as_1(ptr %p2.cast)
ret void
}

define internal void @volatile_load_store_as_4(ptr %p) {
%val.0 = load i32, ptr addrspace(1) @g1, align 4
%val.1 = load volatile i32, ptr %p, align 4
store i32 %val.1, ptr addrspace(1) @g1, align 4
store volatile i32 %val.0, ptr %p, align 4
ret void
}

define void @call_volatile_load_store_as_4(ptr addrspace(4) %p1, ptr addrspace(4) %p2) {
; CHECK-LABEL: define void @call_volatile_load_store_as_4(
; CHECK-SAME: ptr addrspace(4) [[P1:%.*]], ptr addrspace(4) [[P2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[P1_CAST:%.*]] = addrspacecast ptr addrspace(4) [[P1]] to ptr
; CHECK-NEXT: [[P2_CAST:%.*]] = addrspacecast ptr addrspace(4) [[P2]] to ptr
; CHECK-NEXT: call void @volatile_load_store_as_1(ptr [[P1_CAST]])
; CHECK-NEXT: call void @volatile_load_store_as_1(ptr [[P2_CAST]])
; CHECK-NEXT: ret void
;
%p1.cast = addrspacecast ptr addrspace(4) %p1 to ptr
%p2.cast = addrspacecast ptr addrspace(4) %p2 to ptr
call void @volatile_load_store_as_1(ptr %p1.cast)
call void @volatile_load_store_as_1(ptr %p2.cast)
ret void
}
120 changes: 73 additions & 47 deletions llvm/test/CodeGen/AMDGPU/addrspacecast.ll
@@ -5,11 +5,22 @@ target triple = "amdgcn-amd-amdhsa"

; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast:
Contributor review comment:
I don't know why this test is running amdgpu-attributor, but it probably shouldn't be: this ought to just be testing the codegen of addrspacecast, and optimizing out cases just makes it more confusing.

Anyway, this is unbreaking the test, since the cast now remains.


; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[6:7], 0x0{{$}}
; GFX9-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x0{{$}}
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], [[APERTURE]]
; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
; CI-DAG: s_cmp_lg_u32 [[PTR]], -1
; CI-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[APERTURE]], 0
; CI-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0

; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_shared_base

; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; HSA-DAG: ds_write_b32 [[PTR]], [[K]]
; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}

; GFX9: s_cmp_lg_u32 [[PTR]], -1
; GFX9-DAG: s_cselect_b32 s[[LO:[0-9]+]], s[[HIBASE]], 0
; GFX9-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[PTR]], 0

; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]

; HSA: .amdhsa_user_sgpr_private_segment_buffer 1
; HSA: .amdhsa_user_sgpr_dispatch_ptr 0
@@ -28,8 +39,22 @@ define amdgpu_kernel void @use_group_to_flat_addrspacecast(ptr addrspace(3) %ptr

; Test handling inside a non-kernel
; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast_func:
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[6:7], 0x10{{$}}
; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
; CI-DAG: v_cmp_ne_u32_e32 vcc, -1, v0
; CI-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, v0

; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_shared_base

; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; HSA-DAG: ds_write_b32 v0, [[K]]

; GFX9-DAG: v_mov_b32_e32 v[[VREG_HIBASE:[0-9]+]], s[[HIBASE]]
; GFX9-DAG: v_cmp_ne_u32_e32 vcc, -1, v0
; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, v0, vcc
; GFX9-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, v[[VREG_HIBASE]], vcc

; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
define void @use_group_to_flat_addrspacecast_func(ptr addrspace(3) %ptr) #0 {
%stof = addrspacecast ptr addrspace(3) %ptr to ptr
store volatile i32 7, ptr %stof
@@ -38,16 +63,23 @@ define void @use_group_to_flat_addrspacecast_func(ptr addrspace(3) %ptr) #0 {

; HSA-LABEL: {{^}}use_private_to_flat_addrspacecast:

; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[6:7], 0x0{{$}}
; GFX9-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x0{{$}}
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], [[APERTURE]]
; HSA-DAG: s_mov_b64 s[{{[0-9]+}}:[[RSRCHI:[0-9]+]]], s[2:3]
; HSA-DAG: s_mov_b64 s[[[BASELO:[0-9]+]]:[[BASEHI:[0-9]+]]], s[0:1]
; SI-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s9
; GFX9-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s7
; HSA-DAG: s_addc_u32 s[[BASEHI]], s[[BASEHI]], 0
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; HSA: buffer_store_dword [[K]], [[PTR]], s[[[BASELO]]:[[RSRCHI]]], 0 offen
; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}

; CI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; CI-DAG: s_cmp_lg_u32 [[PTR]], -1
; CI-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[APERTURE]], 0
; CI-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0

; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_private_base

; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; GFX9: s_cmp_lg_u32 [[PTR]], -1
; GFX9: s_cselect_b32 s[[LO:[0-9]+]], s[[HIBASE]], 0
; GFX9: s_cselect_b32 s[[HI:[0-9]+]], [[PTR]], 0

; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]

; HSA: .amdhsa_user_sgpr_private_segment_buffer 1
; HSA: .amdhsa_user_sgpr_dispatch_ptr 0
@@ -65,12 +97,10 @@ define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %p
; HSA-LABEL: {{^}}use_global_to_flat_addrspacecast:

; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]]
; CI-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; CI-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; CI: flat_store_dword v[[[VPTRLO]]:[[VPTRHI]]], [[K]]
; GFX9-DAG: v_mov_b32_e32 [[ADDR:v[0-9]+]], 0
; GFX9: global_store_dword [[ADDR]], [[K]], s[[[PTRLO]]:[[PTRHI]]]
; HSA: flat_store_dword v[[[VPTRLO]]:[[VPTRHI]]], [[K]]

; HSA: .amdhsa_user_sgpr_queue_ptr 0
define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %ptr) #0 {
@@ -82,7 +112,9 @@ define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %pt
; no-op
; HSA-LABEL: {{^}}use_constant_to_flat_addrspacecast:
; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]]
; HSA-DAG: s_load_dword s0, s[[[PTRLO]]:[[PTRHI]]], 0x0
; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA: flat_load_dword v{{[0-9]+}}, v[[[VPTRLO]]:[[VPTRHI]]]
define amdgpu_kernel void @use_constant_to_flat_addrspacecast(ptr addrspace(4) %ptr) #0 {
%stof = addrspacecast ptr addrspace(4) %ptr to ptr
%ld = load volatile i32, ptr %stof
@@ -183,9 +215,11 @@ define amdgpu_kernel void @use_flat_to_constant_addrspacecast(ptr %ptr) #0 {
}

; HSA-LABEL: {{^}}cast_0_group_to_flat_addrspacecast:

; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: ds_write_b32 v[[LO]], v[[K]]
; HSA: flat_store_dword v[[[LO]]:[[HI]]], v[[K]]
define amdgpu_kernel void @cast_0_group_to_flat_addrspacecast() #0 {
%cast = addrspacecast ptr addrspace(3) null to ptr
store volatile i32 7, ptr %cast
@@ -203,9 +237,10 @@ define amdgpu_kernel void @cast_0_flat_to_group_addrspacecast() #0 {
}

; HSA-LABEL: {{^}}cast_neg1_group_to_flat_addrspacecast:
; HSA: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], -1
; HSA: ds_write_b32 v[[LO]], v[[K]]
; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
define amdgpu_kernel void @cast_neg1_group_to_flat_addrspacecast() #0 {
%cast = addrspacecast ptr addrspace(3) inttoptr (i32 -1 to ptr addrspace(3)) to ptr
store volatile i32 7, ptr %cast
@@ -224,13 +259,10 @@ define amdgpu_kernel void @cast_neg1_flat_to_group_addrspacecast() #0 {

; FIXME: Shouldn't need to enable queue ptr
; HSA-LABEL: {{^}}cast_0_private_to_flat_addrspacecast:
; HSA-DAG: s_mov_b64 s[{{[0-9]+}}:[[RSRCHI:[0-9]+]]], s[2:3]
; HSA-DAG: s_mov_b64 s[[[BASELO:[0-9]+]]:[[BASEHI:[0-9]+]]], s[0:1]
; CI-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s7
; GFX9-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s5
; HSA-DAG: s_addc_u32 s[[BASEHI]], s[[BASEHI]], 0
; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: buffer_store_dword v[[K]], off, s[[[BASELO]]:[[RSRCHI]]], 0
; HSA: flat_store_dword v[[[LO]]:[[HI]]], v[[K]]
define amdgpu_kernel void @cast_0_private_to_flat_addrspacecast() #0 {
%cast = addrspacecast ptr addrspace(5) null to ptr
store volatile i32 7, ptr %cast
@@ -249,14 +281,10 @@ define amdgpu_kernel void @cast_0_flat_to_private_addrspacecast() #0 {

; HSA-LABEL: {{^}}cast_neg1_private_to_flat_addrspacecast:

; HSA-DAG: s_mov_b64 s[{{[0-9]+}}:[[RSRCHI:[0-9]+]]], s[2:3]
; HSA-DAG: s_mov_b64 s[[[BASELO:[0-9]+]]:[[BASEHI:[0-9]+]]], s[0:1]
; CI-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s7
; GFX9-DAG: s_add_u32 s[[BASELO]], s[[BASELO]], s5
; HSA-DAG: s_addc_u32 s[[BASEHI]], s[[BASEHI]], 0
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: buffer_store_dword v[[K]], [[PTR]], s[[[BASELO]]:[[RSRCHI]]], 0 offen
; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]

; CI: .amdhsa_user_sgpr_queue_ptr 1
; GFX9: .amdhsa_user_sgpr_queue_ptr 0
@@ -306,18 +334,16 @@ end:

; Check for prologue initializing special SGPRs pointing to scratch.
; HSA-LABEL: {{^}}store_flat_scratch:
; CI-DAG: s_mov_b32 flat_scratch_lo, s9
; CI-DAG: s_add_i32 [[ADD:s[0-9]+]], s8, s11
; CI-DAG: s_lshr_b32 flat_scratch_hi, [[ADD]], 8
; HSA: buffer_store_dword
; HSA: s_barrier
; HSA: buffer_load_dword [[K:v[0-9]+]], v{{[0-9]+}}, s[0:3], 0 offen glc
; HSA-DAG: s_load_dwordx2
; CI-DAG: s_mov_b32 flat_scratch_lo, s9
; CI-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], s4
; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], s5
; GFX9-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], 0
; CI: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
; GFX9: global_store_dword [[PTR]], [[K]]

; GFX9: s_add_u32 flat_scratch_lo, s6, s9
; GFX9: s_addc_u32 flat_scratch_hi, s7, 0

; HSA: {{flat|global}}_store_dword
; HSA: s_barrier
; HSA: {{flat|global}}_load_dword
define amdgpu_kernel void @store_flat_scratch(ptr addrspace(1) noalias %out, i32) #0 {
%alloca = alloca i32, i32 9, align 4, addrspace(5)
%x = call i32 @llvm.amdgcn.workitem.id.x() #2