; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -global-isel -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope --check-prefixes=SAMEC,CHECK %s
-; RUN: llc -global-isel -stop-after=irtranslator -attributor-assume-closed-world -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope --check-prefixes=SAMEC,CWRLD %s
+; RUN: llc -global-isel -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s

define amdgpu_kernel void @test_indirect_call_sgpr_ptr(ptr %fptr) {
-  ; SAMEC-LABEL: name: test_indirect_call_sgpr_ptr
-  ; SAMEC: bb.1 (%ir-block.0):
-  ; SAMEC-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9
-  ; SAMEC-NEXT: {{ $}}
-  ; SAMEC-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
-  ; SAMEC-NEXT: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
-  ; SAMEC-NEXT: [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
-  ; SAMEC-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr14
-  ; SAMEC-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr13
-  ; SAMEC-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr12
-  ; SAMEC-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
-  ; SAMEC-NEXT: [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; SAMEC-NEXT: [[COPY8:%[0-9]+]]:_(p4) = COPY $sgpr6_sgpr7
-  ; SAMEC-NEXT: [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
-  ; SAMEC-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load (p0) from %ir.fptr.kernarg.offset1, align 16, addrspace 4)
-  ; SAMEC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
-  ; SAMEC-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; SAMEC-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
-  ; SAMEC-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
-  ; SAMEC-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-  ; SAMEC-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY10]], [[C]](s64)
-  ; SAMEC-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY6]]
-  ; SAMEC-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY5]]
-  ; SAMEC-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY4]]
-  ; SAMEC-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; SAMEC-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-  ; SAMEC-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
-  ; SAMEC-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
-  ; SAMEC-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
-  ; SAMEC-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY16]], [[C1]](s32)
-  ; SAMEC-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY15]], [[SHL]]
-  ; SAMEC-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; SAMEC-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
-  ; SAMEC-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY17]], [[C2]](s32)
-  ; SAMEC-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
-  ; SAMEC-NEXT: [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
-  ; SAMEC-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
-  ; SAMEC-NEXT: $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; SAMEC-NEXT: $sgpr6_sgpr7 = COPY [[DEF]](p4)
-  ; SAMEC-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
-  ; SAMEC-NEXT: $sgpr10_sgpr11 = COPY [[COPY11]](s64)
-  ; SAMEC-NEXT: $sgpr12 = COPY [[COPY12]](s32)
-  ; SAMEC-NEXT: $sgpr13 = COPY [[COPY13]](s32)
-  ; SAMEC-NEXT: $sgpr14 = COPY [[COPY14]](s32)
-  ; SAMEC-NEXT: $sgpr15 = COPY [[DEF1]](s32)
-  ; SAMEC-NEXT: $vgpr31 = COPY [[OR1]](s32)
-  ; SAMEC-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[LOAD]](p0), 0, csr_amdgpu, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
-  ; SAMEC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; SAMEC-NEXT: S_ENDPGM 0
+  ; CHECK-LABEL: name: test_indirect_call_sgpr_ptr
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT: liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
+  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
+  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr14
+  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr13
+  ; CHECK-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr12
+  ; CHECK-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
+  ; CHECK-NEXT: [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
+  ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(p4) = COPY $sgpr6_sgpr7
+  ; CHECK-NEXT: [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
+  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load (p0) from %ir.fptr.kernarg.offset1, align 16, addrspace 4)
+  ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
+  ; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]](p4)
+  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+  ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY10]], [[C]](s64)
+  ; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY6]]
+  ; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY5]]
+  ; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY4]]
+  ; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
+  ; CHECK-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+  ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+  ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY16]], [[C1]](s32)
+  ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY15]], [[SHL]]
+  ; CHECK-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY17]], [[C2]](s32)
+  ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
+  ; CHECK-NEXT: [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
+  ; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; CHECK-NEXT: $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT: $sgpr6_sgpr7 = COPY [[DEF]](p4)
+  ; CHECK-NEXT: $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
+  ; CHECK-NEXT: $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT: $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT: $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT: $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT: $sgpr15 = COPY [[DEF1]](s32)
+  ; CHECK-NEXT: $vgpr31 = COPY [[OR1]](s32)
+  ; CHECK-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[LOAD]](p0), 0, csr_amdgpu, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $vgpr31
+  ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK-NEXT: S_ENDPGM 0
  call void %fptr()
  ret void
}

define amdgpu_gfx void @test_gfx_indirect_call_sgpr_ptr(ptr %fptr) {
-  ; SAMEC-LABEL: name: test_gfx_indirect_call_sgpr_ptr
-  ; SAMEC: bb.1 (%ir-block.0):
-  ; SAMEC-NEXT: liveins: $vgpr0, $vgpr1
-  ; SAMEC-NEXT: {{ $}}
-  ; SAMEC-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; SAMEC-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; SAMEC-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; SAMEC-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
-  ; SAMEC-NEXT: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; SAMEC-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
-  ; SAMEC-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[MV]](p0), 0, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3
-  ; SAMEC-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; SAMEC-NEXT: SI_RETURN
+  ; CHECK-LABEL: name: test_gfx_indirect_call_sgpr_ptr
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc
+  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
+  ; CHECK-NEXT: $sgpr30_sgpr31 = noconvergent G_SI_CALL [[MV]](p0), 0, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
+  ; CHECK-NEXT: SI_RETURN
  call amdgpu_gfx void %fptr()
  ret void
}