#pragma once

#ifndef __CUDACC_RTC__
#include <cuda_runtime.h>
#include <type_traits> // std::is_same_v, used by the atomic helpers below
#endif

#include <cuda/atomic>
#include <cutlass/numeric_types.h>

using cutlass::bfloat16_t;
using cutlass::half_t;

#define TL_DEVICE __forceinline__ __device__

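// Map the cutlass fp16 / bf16 wrapper types to the CUDA built-in types that
// the atomic intrinsics below expect; every other type maps to itself.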
template <typename T> struct normalize_atomic_type {
  using type = T;
};

template <> struct normalize_atomic_type<half_t> {
  using type = half;
};

#if (defined(__CUDA_ARCH_LIST__) && (__CUDA_ARCH_LIST__ > 750))
template <> struct normalize_atomic_type<bfloat16_t> {
  using type = __nv_bfloat16;
};
#endif

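// cuda_cast: generic value conversion, specialized to use the fast
// float-to-half / float-to-bfloat16 conversion intrinsics where available.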
template <typename T1, typename T2> TL_DEVICE T1 cuda_cast(T2 val) {
  return T1(val);
}

template <> TL_DEVICE half cuda_cast<half, float>(float val) {
  return __float2half(val);
}

#if (defined(__CUDA_ARCH_LIST__) && (__CUDA_ARCH_LIST__ > 750))
template <> TL_DEVICE __nv_bfloat16 cuda_cast<__nv_bfloat16, float>(float val) {
  return __float2bfloat16(val);
}
#endif

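// Scalar atomic read-modify-write helpers. For half / __nv_bfloat16 the
// operation is forwarded to an atomicMax / atomicMin / atomicAdd overload
// taking the native type; for all other types it goes through
// cuda::atomic_ref with the requested memory order (relaxed by default).
// The *Ret variants return the value held at the address before the update.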
template <typename T1, typename T2>
TL_DEVICE void AtomicMax(T1 &ref, T2 val,
                         int memory_order = int(cuda::memory_order_relaxed)) {
  using NT1 = typename normalize_atomic_type<T1>::type;
  T1 *address = &ref;
  if constexpr (std::is_same_v<NT1, half> ||
                std::is_same_v<NT1, __nv_bfloat16>) {
    atomicMax(reinterpret_cast<NT1 *>(address), static_cast<NT1>(val));
  } else {
    cuda::atomic_ref<NT1, cuda::thread_scope_device> aref(*address);
    aref.fetch_max(cuda_cast<NT1>(val), cuda::memory_order(memory_order));
  }
}

template <typename T1, typename T2>
TL_DEVICE T1 AtomicMaxRet(T1 &ref, T2 val,
                          int memory_order = int(cuda::memory_order_relaxed)) {
  using NT1 = typename normalize_atomic_type<T1>::type;
  T1 *address = &ref;
  if constexpr (std::is_same_v<NT1, half> ||
                std::is_same_v<NT1, __nv_bfloat16>) {
    return static_cast<T1>(
        atomicMax(reinterpret_cast<NT1 *>(address), static_cast<NT1>(val)));
  } else {
    cuda::atomic_ref<NT1, cuda::thread_scope_device> aref(*address);
    return static_cast<T1>(
        aref.fetch_max(cuda_cast<NT1>(val), cuda::memory_order(memory_order)));
  }
}

template <typename T1, typename T2>
TL_DEVICE void AtomicMin(T1 &ref, T2 val,
                         int memory_order = int(cuda::memory_order_relaxed)) {
  using NT1 = typename normalize_atomic_type<T1>::type;
  T1 *address = &ref;
  if constexpr (std::is_same_v<NT1, half> ||
                std::is_same_v<NT1, __nv_bfloat16>) {
    atomicMin(reinterpret_cast<NT1 *>(address), static_cast<NT1>(val));
  } else {
    cuda::atomic_ref<NT1, cuda::thread_scope_device> aref(*address);
    aref.fetch_min(cuda_cast<NT1>(val), cuda::memory_order(memory_order));
  }
}

template <typename T1, typename T2>
TL_DEVICE T1 AtomicMinRet(T1 &ref, T2 val,
                          int memory_order = int(cuda::memory_order_relaxed)) {
  using NT1 = typename normalize_atomic_type<T1>::type;
  T1 *address = &ref;
  if constexpr (std::is_same_v<NT1, half> ||
                std::is_same_v<NT1, __nv_bfloat16>) {
    return static_cast<T1>(
        atomicMin(reinterpret_cast<NT1 *>(address), static_cast<NT1>(val)));
  } else {
    cuda::atomic_ref<NT1, cuda::thread_scope_device> aref(*address);
    return static_cast<T1>(
        aref.fetch_min(cuda_cast<NT1>(val), cuda::memory_order(memory_order)));
  }
}

template <typename T1, typename T2>
TL_DEVICE void AtomicAdd(T1 &ref, T2 val,
                         int memory_order = int(cuda::memory_order_relaxed)) {
  using NT1 = typename normalize_atomic_type<T1>::type;
  T1 *address = &ref;
  if constexpr (std::is_same_v<NT1, half> ||
                std::is_same_v<NT1, __nv_bfloat16>) {
    atomicAdd(reinterpret_cast<NT1 *>(address), static_cast<NT1>(val));
  } else {
    cuda::atomic_ref<NT1, cuda::thread_scope_device> aref(*address);
    aref.fetch_add(cuda_cast<NT1>(val), cuda::memory_order(memory_order));
  }
}

template <typename T1, typename T2>
TL_DEVICE T1 AtomicAddRet(T1 &ref, T2 val,
                          int memory_order = int(cuda::memory_order_relaxed)) {
  using NT1 = typename normalize_atomic_type<T1>::type;
  T1 *address = &ref;
  if constexpr (std::is_same_v<NT1, half> ||
                std::is_same_v<NT1, __nv_bfloat16>) {
    return static_cast<T1>(
        atomicAdd(reinterpret_cast<NT1 *>(address), static_cast<NT1>(val)));
  } else {
    cuda::atomic_ref<NT1, cuda::thread_scope_device> aref(*address);
    return static_cast<T1>(
        aref.fetch_add(cuda_cast<NT1>(val), cuda::memory_order(memory_order)));
  }
}

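// Illustrative usage of the scalar helpers (hypothetical kernel, not part of
// this header):
//
//   __global__ void reduce_sum(float *acc, const float *x, int n) {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n)
//       AtomicAdd(acc[0], x[i]); // relaxed fetch_add via cuda::atomic_ref
//   }
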
// Vectorized atomic adds: each overload reinterprets `ref` and `val` as a
// packed two-element vector (half2 / __nv_bfloat162) and issues a single
// vector atomicAdd. Both pointers should be naturally aligned for the
// vector type.
TL_DEVICE void AtomicAddx2(half_t *ref, half_t *val) {
  atomicAdd(reinterpret_cast<half2 *>(ref),
            static_cast<half2>(*reinterpret_cast<half2 *>(val)));
}

TL_DEVICE half2 AtomicAddx2Ret(half_t *ref, half_t *val) {
  return atomicAdd(reinterpret_cast<half2 *>(ref),
                   static_cast<half2>(*reinterpret_cast<half2 *>(val)));
}

#if (defined(__CUDA_ARCH_LIST__) && (__CUDA_ARCH_LIST__ > 750))
TL_DEVICE void AtomicAddx2(bfloat16_t *ref, bfloat16_t *val) {
  atomicAdd(
      reinterpret_cast<__nv_bfloat162 *>(ref),
      static_cast<__nv_bfloat162>(*reinterpret_cast<__nv_bfloat162 *>(val)));
}

TL_DEVICE __nv_bfloat162 AtomicAddx2Ret(bfloat16_t *ref, bfloat16_t *val) {
  return atomicAdd(
      reinterpret_cast<__nv_bfloat162 *>(ref),
      static_cast<__nv_bfloat162>(*reinterpret_cast<__nv_bfloat162 *>(val)));
}
#endif

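// float2 / float4 atomicAdd is only available on devices of compute
// capability 9.0 or higher, hence the stricter architecture guard below.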
#if (defined(__CUDA_ARCH_LIST__) && (__CUDA_ARCH_LIST__ >= 900))
TL_DEVICE void AtomicAddx2(float *ref, float *val) {
  atomicAdd(reinterpret_cast<float2 *>(ref),
            static_cast<float2>(*reinterpret_cast<float2 *>(val)));
}

TL_DEVICE float2 AtomicAddx2Ret(float *ref, float *val) {
  return atomicAdd(reinterpret_cast<float2 *>(ref),
                   static_cast<float2>(*reinterpret_cast<float2 *>(val)));
}

TL_DEVICE void AtomicAddx4(float *ref, float *val) {
  atomicAdd(reinterpret_cast<float4 *>(ref),
            static_cast<float4>(*reinterpret_cast<float4 *>(val)));
}

TL_DEVICE float4 AtomicAddx4Ret(float *ref, float *val) {
  return atomicAdd(reinterpret_cast<float4 *>(ref),
                   static_cast<float4>(*reinterpret_cast<float4 *>(val)));
}
#endif

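// Atomic load / store with an explicit caller-supplied cuda::memory_order,
// implemented through cuda::atomic_ref at device scope.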
template <typename T> TL_DEVICE T AtomicLoad(T &ref, int memory_order) {
  cuda::atomic_ref<T, cuda::thread_scope_device> aref(ref);
  return aref.load(cuda::memory_order(memory_order));
}

template <typename T1, typename T2>
TL_DEVICE void AtomicStore(T1 &ref, T2 value, int memory_order) {
  using NT1 = typename normalize_atomic_type<T1>::type;
  // View the storage as the normalized native type so that half_t /
  // bfloat16_t references are accepted as well.
  cuda::atomic_ref<NT1, cuda::thread_scope_device> aref(
      *reinterpret_cast<NT1 *>(&ref));
  aref.store(cuda_cast<NT1>(value), cuda::memory_order(memory_order));
}
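
// Illustrative usage (hypothetical, not part of this header): release/acquire
// hand-off of an int flag between a producer and a consumer thread.
//
//   AtomicStore(flag, 1, int(cuda::memory_order_release));            // producer
//   while (AtomicLoad(flag, int(cuda::memory_order_acquire)) == 0) {} // consumer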