@@ -35,11 +35,11 @@ _CLC_OVERLOAD _CLC_DECL void __spirv_MemoryBarrier(unsigned int, unsigned int);
}

#define __CLC_NVVM_ATOMIC_STORE_IMPL( \
-    TYPE, TYPE_MANGLED, TYPE_NV, TYPE_MANGLED_NV, ADDR_SPACE, \
+    TYPE, TYPE_MANGLED, SUBSTITUTION, TYPE_NV, TYPE_MANGLED_NV, ADDR_SPACE, \
    POINTER_AND_ADDR_SPACE_MANGLED, ADDR_SPACE_NV) \
  __attribute__((always_inline)) _CLC_DECL void _Z19__spirv_\
AtomicStore##POINTER_AND_ADDR_SPACE_MANGLED##TYPE_MANGLED##N5__spv5Scope4FlagEN\
-S1_19MemorySemanticsMask4FlagE##TYPE_MANGLED( \
+S##SUBSTITUTION##_19MemorySemanticsMask4FlagE##TYPE_MANGLED( \
      volatile ADDR_SPACE TYPE *pointer, enum Scope scope, \
      enum MemorySemanticsMask semantics, TYPE value) { \
    /* Semantics mask may include memory order, storage class and other info \
@@ -74,12 +74,12 @@ Memory order is stored in the lowest 5 bits */ \
  }

#define __CLC_NVVM_ATOMIC_STORE(TYPE, TYPE_MANGLED, TYPE_NV, TYPE_MANGLED_NV) \
-  __CLC_NVVM_ATOMIC_STORE_IMPL(TYPE, TYPE_MANGLED, TYPE_NV, TYPE_MANGLED_NV, \
+  __CLC_NVVM_ATOMIC_STORE_IMPL(TYPE, TYPE_MANGLED, 1, TYPE_NV, TYPE_MANGLED_NV,\
                               __global, PU3AS1, _global_) \
-  __CLC_NVVM_ATOMIC_STORE_IMPL(TYPE, TYPE_MANGLED, TYPE_NV, TYPE_MANGLED_NV, \
+  __CLC_NVVM_ATOMIC_STORE_IMPL(TYPE, TYPE_MANGLED, 1, TYPE_NV, TYPE_MANGLED_NV,\
                               __local, PU3AS3, _shared_) \
-  __CLC_NVVM_ATOMIC_STORE_IMPL(TYPE, TYPE_MANGLED, TYPE_NV, TYPE_MANGLED_NV, , \
-                              P, _gen_)
+  __CLC_NVVM_ATOMIC_STORE_IMPL(TYPE, TYPE_MANGLED, 0, TYPE_NV, TYPE_MANGLED_NV,\
+                              , P, _gen_)

__CLC_NVVM_ATOMIC_STORE(int, i, int, i)
__CLC_NVVM_ATOMIC_STORE(uint, j, int, i)
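Note (not part of the diff, a sketch of my reading of the change): the new SUBSTITUTION argument selects which Itanium-mangling substitution index refers back to the __spv namespace in the pasted symbol name. The assumption here is that for an address-space-qualified pointer (PU3AS1, PU3AS3) the qualified pointee type takes an extra substitution slot, so __spv becomes S1_, while the plain generic pointer P leaves it at S0_. Expanding the macro for TYPE = int (TYPE_MANGLED = i) should therefore yield declarations along these lines:

/* Illustrative expansions only; the symbol names follow from the ## token
   pasting in __CLC_NVVM_ATOMIC_STORE_IMPL above. */
void _Z19__spirv_AtomicStorePU3AS1iN5__spv5Scope4FlagENS1_19MemorySemanticsMask4FlagEi(
    volatile __global int *pointer, enum Scope scope,
    enum MemorySemanticsMask semantics, int value); /* SUBSTITUTION == 1 */
void _Z19__spirv_AtomicStorePiN5__spv5Scope4FlagENS0_19MemorySemanticsMask4FlagEi(
    volatile int *pointer, enum Scope scope,
    enum MemorySemanticsMask semantics, int value); /* SUBSTITUTION == 0 */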