[Attributor] Add support for atomic operations in AAAddressSpace #106927


Merged

merged 1 commit into main from users/shiltian/as-atomic on Sep 6, 2024

Conversation

shiltian
Contributor

@shiltian shiltian commented Sep 1, 2024

No description provided.

@shiltian shiltian marked this pull request as ready for review September 1, 2024 22:53
Contributor Author

shiltian commented Sep 1, 2024

This stack of pull requests is managed by Graphite.

@llvmbot
Member

llvmbot commented Sep 1, 2024

@llvm/pr-subscribers-llvm-transforms

Author: Shilei Tian (shiltian)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/106927.diff

3 Files Affected:

  • (modified) llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp (+7-2)
  • (modified) llvm/lib/Transforms/IPO/AttributorAttributes.cpp (+2)
  • (added) llvm/test/CodeGen/AMDGPU/aa-as-infer.ll (+164)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index 72049f0aa6b86e..97d438d9703c23 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -1084,10 +1084,15 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
       if (auto *LI = dyn_cast<LoadInst>(&I)) {
         A.getOrCreateAAFor<AAAddressSpace>(
             IRPosition::value(*LI->getPointerOperand()));
-      }
-      if (auto *SI = dyn_cast<StoreInst>(&I)) {
+      } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
         A.getOrCreateAAFor<AAAddressSpace>(
             IRPosition::value(*SI->getPointerOperand()));
+      } else if (auto *AI = dyn_cast<AtomicRMWInst>(&I)) {
+        A.getOrCreateAAFor<AAAddressSpace>(
+            IRPosition::value(*AI->getPointerOperand()));
+      } else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I)) {
+        A.getOrCreateAAFor<AAAddressSpace>(
+            IRPosition::value(*CmpX->getPointerOperand()));
       }
     }
   }
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 69d29b6c042349..cf23bd57b5a670 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -12588,6 +12588,8 @@ struct AAAddressSpaceImpl : public AAAddressSpace {
         if (U.getOperandNo() == 1)
           MakeChange(Inst, const_cast<Use &>(U));
       }
+      if (isa<AtomicRMWInst>(Inst) || isa<AtomicCmpXchgInst>(Inst))
+        MakeChange(Inst, const_cast<Use &>(U));
       return true;
     };
 
diff --git a/llvm/test/CodeGen/AMDGPU/aa-as-infer.ll b/llvm/test/CodeGen/AMDGPU/aa-as-infer.ll
new file mode 100644
index 00000000000000..f244011863eec7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/aa-as-infer.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor -S %s -o - | FileCheck %s
+
+@g1 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g2 = protected addrspace(1) externally_initialized global i32 0, align 4
+
+define internal void @can_infer_cmpxchg(ptr %word) {
+; CHECK-LABEL: define internal void @can_infer_cmpxchg(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_0:%.*]] = cmpxchg ptr addrspace(1) [[TMP1]], i32 0, i32 4 monotonic monotonic, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_1:%.*]] = cmpxchg ptr addrspace(1) [[TMP2]], i32 0, i32 5 acq_rel monotonic, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_2:%.*]] = cmpxchg ptr addrspace(1) [[TMP3]], i32 0, i32 6 acquire monotonic, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_3:%.*]] = cmpxchg ptr addrspace(1) [[TMP4]], i32 0, i32 7 release monotonic, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_4:%.*]] = cmpxchg ptr addrspace(1) [[TMP5]], i32 0, i32 8 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_5:%.*]] = cmpxchg weak ptr addrspace(1) [[TMP6]], i32 0, i32 9 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_6:%.*]] = cmpxchg volatile ptr addrspace(1) [[TMP7]], i32 0, i32 10 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_7:%.*]] = cmpxchg weak volatile ptr addrspace(1) [[TMP8]], i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+; CHECK-NEXT:    ret void
+;
+  %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic, align 4
+  %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic, align 4
+  %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic, align 4
+  %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic, align 4
+  %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic, align 4
+  %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic, align 4
+  %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic, align 4
+  %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+  ret void
+}
+
+define internal void @can_not_infer_cmpxchg(ptr %word) {
+; CHECK-LABEL: define internal void @can_not_infer_cmpxchg(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[CMPXCHG_0:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 4 monotonic monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_1:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 5 acq_rel monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_2:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 6 acquire monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_3:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 7 release monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_4:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 8 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_5:%.*]] = cmpxchg weak ptr [[WORD]], i32 0, i32 9 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_6:%.*]] = cmpxchg volatile ptr [[WORD]], i32 0, i32 10 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_7:%.*]] = cmpxchg weak volatile ptr [[WORD]], i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+; CHECK-NEXT:    ret void
+;
+  %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic, align 4
+  %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic, align 4
+  %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic, align 4
+  %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic, align 4
+  %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic, align 4
+  %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic, align 4
+  %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic, align 4
+  %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+  ret void
+}
+
+define internal void @can_infer_atomicrmw(ptr %word) {
+; CHECK-LABEL: define internal void @can_infer_atomicrmw(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_XCHG:%.*]] = atomicrmw xchg ptr addrspace(1) [[TMP1]], i32 12 monotonic, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_ADD:%.*]] = atomicrmw add ptr addrspace(1) [[TMP2]], i32 13 monotonic, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_SUB:%.*]] = atomicrmw sub ptr addrspace(1) [[TMP3]], i32 14 monotonic, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_AND:%.*]] = atomicrmw and ptr addrspace(1) [[TMP4]], i32 15 monotonic, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_NAND:%.*]] = atomicrmw nand ptr addrspace(1) [[TMP5]], i32 16 monotonic, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_OR:%.*]] = atomicrmw or ptr addrspace(1) [[TMP6]], i32 17 monotonic, align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_XOR:%.*]] = atomicrmw xor ptr addrspace(1) [[TMP7]], i32 18 monotonic, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_MAX:%.*]] = atomicrmw max ptr addrspace(1) [[TMP8]], i32 19 monotonic, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_MIN:%.*]] = atomicrmw volatile min ptr addrspace(1) [[TMP9]], i32 20 monotonic, align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_UMAX:%.*]] = atomicrmw umax ptr addrspace(1) [[TMP10]], i32 21 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_UMIN:%.*]] = atomicrmw volatile umin ptr addrspace(1) [[TMP11]], i32 22 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT:    ret void
+;
+  %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic, align 4
+  %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic, align 4
+  %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic, align 4
+  %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic, align 4
+  %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic, align 4
+  %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic, align 4
+  %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic, align 4
+  %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic, align 4
+  %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic, align 4
+  %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic, align 4
+  %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic, align 4
+  ret void
+}
+
+define internal void @can_not_infer_atomicrmw(ptr %word) {
+; CHECK-LABEL: define internal void @can_not_infer_atomicrmw(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[ATOMICRMW_XCHG:%.*]] = atomicrmw xchg ptr [[WORD]], i32 12 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_ADD:%.*]] = atomicrmw add ptr [[WORD]], i32 13 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_SUB:%.*]] = atomicrmw sub ptr [[WORD]], i32 14 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_AND:%.*]] = atomicrmw and ptr [[WORD]], i32 15 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_NAND:%.*]] = atomicrmw nand ptr [[WORD]], i32 16 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_OR:%.*]] = atomicrmw or ptr [[WORD]], i32 17 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_XOR:%.*]] = atomicrmw xor ptr [[WORD]], i32 18 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_MAX:%.*]] = atomicrmw max ptr [[WORD]], i32 19 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_MIN:%.*]] = atomicrmw volatile min ptr [[WORD]], i32 20 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_UMAX:%.*]] = atomicrmw umax ptr [[WORD]], i32 21 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_UMIN:%.*]] = atomicrmw volatile umin ptr [[WORD]], i32 22 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT:    ret void
+;
+  %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic, align 4
+  %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic, align 4
+  %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic, align 4
+  %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic, align 4
+  %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic, align 4
+  %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic, align 4
+  %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic, align 4
+  %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic, align 4
+  %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic, align 4
+  %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic, align 4
+  %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic, align 4
+  ret void
+}
+
+define void @foo(ptr addrspace(3) %val) {
+; CHECK-LABEL: define void @foo(
+; CHECK-SAME: ptr addrspace(3) [[VAL:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:    [[VAL_CAST:%.*]] = addrspacecast ptr addrspace(3) [[VAL]] to ptr
+; CHECK-NEXT:    call void @can_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT:    call void @can_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr [[VAL_CAST]])
+; CHECK-NEXT:    call void @can_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT:    call void @can_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr [[VAL_CAST]])
+; CHECK-NEXT:    ret void
+;
+  %g1.cast = addrspacecast ptr addrspace(1) @g1 to ptr
+  %g2.cast = addrspacecast ptr addrspace(1) @g2 to ptr
+  %val.cast = addrspacecast ptr addrspace(3) %val to ptr
+  call void @can_infer_cmpxchg(ptr %g1.cast)
+  call void @can_infer_cmpxchg(ptr %g2.cast)
+  call void @can_not_infer_cmpxchg(ptr %g1.cast)
+  call void @can_not_infer_cmpxchg(ptr %g2.cast)
+  call void @can_not_infer_cmpxchg(ptr %val.cast)
+  call void @can_infer_atomicrmw(ptr %g1.cast)
+  call void @can_infer_atomicrmw(ptr %g2.cast)
+  call void @can_not_infer_atomicrmw(ptr %g1.cast)
+  call void @can_not_infer_atomicrmw(ptr %g2.cast)
+  call void @can_not_infer_atomicrmw(ptr %val.cast)
+  ret void
+}
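
Taken together, the two changes seed AAAddressSpace at the pointer operand of atomicrmw and cmpxchg instructions and let the attribute rewrite that operand once the flat pointer is proven to address a single space. Note the operand layout: for a store the address is operand 1 (hence the existing getOperandNo() == 1 check), while for atomicrmw and cmpxchg the address is operand 0, which the new isa<> checks cover. A minimal before/after sketch in LLVM IR, distilled from the test above (function and value names here are illustrative, not taken from the patch):

; Before: a flat-pointer atomic. Every caller passes an addrspacecast of
; an addrspace(1) global, so the address space can be inferred.
define internal void @rmw(ptr %word) {
  %old = atomicrmw add ptr %word, i32 1 monotonic, align 4
  ret void
}

; After amdgpu-attributor: the pointer is cast back to addrspace(1) and
; the operation becomes a global-memory atomic.
define internal void @rmw.inferred(ptr %word) {
  %word.global = addrspacecast ptr %word to ptr addrspace(1)
  %old = atomicrmw add ptr addrspace(1) %word.global, i32 1 monotonic, align 4
  ret void
}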

@llvmbot
Member

llvmbot commented Sep 1, 2024

@llvm/pr-subscribers-backend-amdgpu


@shiltian shiltian force-pushed the users/shiltian/as-atomic branch 2 times, most recently from 1501ad6 to c9e48a9 on September 1, 2024 22:58
@shiltian shiltian requested a review from ssahasra September 4, 2024 03:39
@shiltian shiltian force-pushed the users/shiltian/as-atomic branch 2 times, most recently from 66b49c7 to 63b8051 on September 6, 2024 15:31
; CHECK-NEXT: [[CMPXCHG_4:%.*]] = cmpxchg ptr addrspace(1) [[TMP5]], i32 0, i32 8 seq_cst monotonic, align 4
; CHECK-NEXT: [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT: [[CMPXCHG_5:%.*]] = cmpxchg weak ptr addrspace(1) [[TMP6]], i32 0, i32 9 seq_cst monotonic, align 4
; CHECK-NEXT: [[TMP7:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
Contributor

It's not ideal that this ends up creating the same addrspacecast for every instance

Contributor Author

yeah, need a follow-up patch to optimize this
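
Concretely, the rewrite currently materializes one addrspacecast per rewritten atomic, as in the CHECK lines quoted above. One possible shape for that follow-up, assuming the cast is simply hoisted and reused per pointer (a sketch of future work, not part of this patch):

; Today: one cast per atomic.
  %tmp1 = addrspacecast ptr %word to ptr addrspace(1)
  %cmpxchg.0 = cmpxchg ptr addrspace(1) %tmp1, i32 0, i32 4 monotonic monotonic, align 4
  %tmp2 = addrspacecast ptr %word to ptr addrspace(1)
  %cmpxchg.1 = cmpxchg ptr addrspace(1) %tmp2, i32 0, i32 5 acq_rel monotonic, align 4

; Hypothetical follow-up: a single cast, reused by both atomics.
  %word.global = addrspacecast ptr %word to ptr addrspace(1)
  %cmpxchg.a = cmpxchg ptr addrspace(1) %word.global, i32 0, i32 4 monotonic monotonic, align 4
  %cmpxchg.b = cmpxchg ptr addrspace(1) %word.global, i32 0, i32 5 acq_rel monotonic, align 4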

@shiltian shiltian force-pushed the users/shiltian/as-atomic branch from 63b8051 to 78ede9f on September 6, 2024 16:45
@shiltian shiltian merged commit ce2e386 into main Sep 6, 2024
5 of 7 checks passed
@shiltian shiltian deleted the users/shiltian/as-atomic branch September 6, 2024 16:45