|
1 | 1 | #include "malloc.h"
|
2 |
| -#include "default_allocator.h" |
| 2 | +#include <glog/logging.h> |
3 | 3 |
|
4 | 4 | #ifndef PADDLE_ONLY_CPU
|
5 | 5 | #include <cuda.h>
|
6 | 6 | #endif
|
7 | 7 |
|
// Evaluate a CUDA runtime call and abort (via glog's CHECK_EQ) with the
// CUDA error string when it does not return cudaSuccess.  Wrapped in
// do { ... } while (0) so the macro expands to a single statement and is
// safe inside unbraced if/else bodies.
#define CHECK_CUDA(cudaFunc)                               \
  do {                                                     \
    cudaError_t cudaStat = cudaFunc;                       \
    CHECK_EQ(cudaSuccess, cudaStat)                        \
        << "Cuda Error: " << cudaGetErrorString(cudaStat); \
  } while (0)
| 14 | + |
8 | 15 | namespace majel {
|
9 | 16 | namespace malloc {
|
| 17 | +namespace detail { |
// Place-aware allocator: dispatches malloc/free to the CPU heap or to GPU
// device memory depending on the concrete type held by the majel::Place
// variant (dispatch happens via the visitor classes below).
class DefaultAllocator {
public:
  // Allocate `size` bytes on the memory space designated by `place`.
  // Aborts (CHECK) on failure, so a successful return is non-null.
  static void* malloc(majel::Place place, size_t size);

  // Release memory previously obtained from malloc() for the same place.
  // A null `ptr` is a no-op on the CPU path.
  static void free(majel::Place, void* ptr);
};
| 24 | + |
| 25 | +class DefaultAllocatorMallocVisitor : public boost::static_visitor<void*> { |
| 26 | +public: |
| 27 | + DefaultAllocatorMallocVisitor(size_t size) : size_(size) {} |
| 28 | + |
| 29 | + void* operator()(majel::CpuPlace p) { |
| 30 | + void* address; |
| 31 | + CHECK_EQ(posix_memalign(&address, 32ul, size_), 0); |
| 32 | + CHECK(address) << "Fail to allocate CPU memory: size=" << size_; |
| 33 | + return address; |
| 34 | + } |
| 35 | + |
| 36 | +#ifndef PADDLE_ONLY_CPU |
| 37 | + void* operator()(majel::GpuPlace p) { |
| 38 | + void* address = hl_malloc_device(size_); |
| 39 | + CHECK(address) << "Fail to allocate GPU memory " << size_ << " bytes"; |
| 40 | + return address; |
| 41 | + } |
| 42 | +#else |
| 43 | + void* operator()(majel::GpuPlace p) { |
| 44 | + CHECK(majel::is_cpu_place(p)) << "GPU Place not supported"; |
| 45 | + return nullptr; |
| 46 | + } |
| 47 | +#endif |
| 48 | + |
| 49 | +private: |
| 50 | + size_t size_; |
| 51 | +}; |
| 52 | + |
| 53 | +class DefaultAllocatorFreeVisitor : public boost::static_visitor<void> { |
| 54 | +public: |
| 55 | + DefaultAllocatorFreeVisitor(void* ptr) : ptr_(ptr) {} |
| 56 | + void operator()(majel::CpuPlace p) { |
| 57 | + if (ptr_) { |
| 58 | + ::free(ptr_); |
| 59 | + } |
| 60 | + } |
| 61 | + |
| 62 | +#ifndef PADDLE_ONLY_CPU |
| 63 | + void operator()(majel::GpuPlace p) { |
| 64 | + if (ptr_) { |
| 65 | + hl_free_mem_device(ptr_); |
| 66 | + } |
| 67 | + } |
| 68 | + |
| 69 | +#else |
| 70 | + void operator()(majel::GpuPlace p) { |
| 71 | + CHECK(majel::is_cpu_place(p)) << "GPU Place not supported"; |
| 72 | + } |
| 73 | +#endif |
| 74 | + |
| 75 | +private: |
| 76 | + void* ptr_; |
| 77 | +}; |
| 78 | + |
| 79 | +void* DefaultAllocator::malloc(majel::Place place, size_t size) { |
| 80 | + DefaultAllocatorMallocVisitor visitor(size); |
| 81 | + return boost::apply_visitor(visitor, place); |
| 82 | +} |
10 | 83 |
|
| 84 | +void DefaultAllocator::free(majel::Place place, void* ptr) { |
| 85 | + DefaultAllocatorFreeVisitor visitor(ptr); |
| 86 | + boost::apply_visitor(visitor, place); |
| 87 | +} |
| 88 | + |
| 89 | +} // namespace detail |
| 90 | + |
| 91 | +#ifndef PADDLE_ONLY_CPU |
| 92 | +void* hl_malloc_device(size_t size) { |
| 93 | + void* dest_d; |
| 94 | + |
| 95 | + CHECK(size) << __func__ << ": the size for device memory is 0, please check."; |
| 96 | + CHECK_CUDA(cudaMalloc((void**)&dest_d, size)); |
| 97 | + |
| 98 | + return dest_d; |
| 99 | +} |
| 100 | + |
// Free GPU device memory previously allocated with hl_malloc_device.
// `dest_d` must be non-null (enforced by CHECK_NOTNULL).
void hl_free_mem_device(void* dest_d) {
  CHECK_NOTNULL(dest_d);

  cudaError_t err = cudaFree(dest_d);
  // cudaErrorCudartUnloading is tolerated on purpose: it is returned when
  // the CUDA runtime is already tearing down (e.g. frees that run during
  // process exit), and treating it as fatal would abort a clean shutdown.
  CHECK(cudaSuccess == err || cudaErrorCudartUnloading == err)
      << hl_get_device_error_string();
}
| 108 | + |
| 109 | +const char* hl_get_device_error_string() { |
| 110 | + cudaError_t err = cudaGetLastError(); |
| 111 | + return cudaGetErrorString(err); |
| 112 | +} |
| 113 | + |
| 114 | +const char* hl_get_device_error_string(size_t err) { |
| 115 | + return cudaGetErrorString((cudaError_t)err); |
| 116 | +} |
| 117 | +#endif |
11 | 118 | void* malloc(majel::Place place, size_t size) {
|
12 | 119 | return detail::DefaultAllocator::malloc(place, size);
|
13 | 120 | }
|
|
0 commit comments