Skip to content

Commit

Permalink
Enhance `memory_summary` call, returning structure and get_server_status
Browse files Browse the repository at this point in the history
API.
  • Loading branch information
Vraj Pandya authored and asuhan committed Sep 13, 2017
1 parent ce946f9 commit dd7d49b
Show file tree
Hide file tree
Showing 10 changed files with 403 additions and 81 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,9 @@ build/
Debug/
.settings/

# VS code settings files
.vscode/

# Thrift-generated files
java/src/gen/
gen-js/
Expand Down
16 changes: 16 additions & 0 deletions DataMgr/BufferMgr/BufferMgr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -449,6 +449,10 @@ bool BufferMgr::isAllocationCapped() {
return allocationsCapped_;
}

// Accessor: the page size this buffer manager was configured with.
size_t BufferMgr::getPageSize() {
  const size_t bytesPerPage = pageSize_;
  return bytesPerPage;
}

// return the size of the chunks in use in bytes
size_t BufferMgr::getInUseSize() {
size_t inUse = 0;
Expand Down Expand Up @@ -782,6 +786,14 @@ size_t BufferMgr::size() {
return numPagesAllocated_;
}

// Accessor: upper bound on the size of a single buffer, per maxBufferSize_.
size_t BufferMgr::getMaxBufferSize() {
  const size_t bufferLimit = maxBufferSize_;
  return bufferLimit;
}

// Accessor: upper bound on the size of a single slab, per maxSlabSize_.
size_t BufferMgr::getMaxSlabSize() {
  const size_t slabLimit = maxSlabSize_;
  return slabLimit;
}

void BufferMgr::getChunkMetadataVec(std::vector<std::pair<ChunkKey, ChunkMetadata>>& chunkMetadataVec) {
LOG(FATAL) << "getChunkMetadataVec not supported for BufferMgr.";
}
Expand All @@ -790,4 +802,8 @@ void BufferMgr::getChunkMetadataVecForKeyPrefix(std::vector<std::pair<ChunkKey,
const ChunkKey& keyPrefix) {
LOG(FATAL) << "getChunkMetadataVecForPrefix not supported for BufferMgr.";
}

// Read-only view of the per-slab segment lists; the reference stays valid
// only as long as this BufferMgr does.
const std::vector<BufferList>& BufferMgr::getSlabSegments() {
  const std::vector<BufferList>& segments = slabSegments_;
  return segments;
}
}
4 changes: 4 additions & 0 deletions DataMgr/BufferMgr/BufferMgr.h
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,11 @@ class BufferMgr : public AbstractBufferMgr { // implements
size_t getInUseSize();
size_t getMaxSize();
size_t getAllocated();
size_t getMaxBufferSize();
size_t getMaxSlabSize();
size_t getPageSize();
bool isAllocationCapped();
const std::vector<BufferList>& getSlabSegments();

/// Creates a chunk with the specified key and page size.
virtual AbstractBuffer* createBuffer(const ChunkKey& key, const size_t pageSize = 0, const size_t initialSize = 0);
Expand Down
71 changes: 66 additions & 5 deletions DataMgr/DataMgr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -174,9 +174,68 @@ void DataMgr::createTopLevelMetadata() const { // create metadata shared by all
fm_top->createTopLevelMetadata();
}

memorySummary DataMgr::getMemorySummary() {
memorySummary ms;
ms.cpuMemoryInUse = bufferMgrs_[MemoryLevel::CPU_LEVEL][0]->getInUseSize();
std::vector<MemoryInfo> DataMgr::getMemoryInfo(const MemoryLevel memLevel) {
// TODO (vraj) : Reduce the duplicate code
std::vector<MemoryInfo> memInfo;
if (memLevel == MemoryLevel::CPU_LEVEL) {
CpuBufferMgr* cpuBuffer = dynamic_cast<CpuBufferMgr*>(bufferMgrs_[MemoryLevel::CPU_LEVEL][0]);
MemoryInfo mi;

mi.pageSize = cpuBuffer->getPageSize();
mi.maxNumPages = cpuBuffer->getMaxSize() / mi.pageSize;
mi.isAllocationCapped = cpuBuffer->isAllocationCapped();
mi.numPageAllocated = cpuBuffer->getAllocated() / mi.pageSize;

const std::vector<BufferList> slab_segments = cpuBuffer->getSlabSegments();
size_t numSlabs = slab_segments.size();

for (size_t slabNum = 0; slabNum != numSlabs; ++slabNum) {
for (auto segIt : slab_segments[slabNum]) {
MemoryData md;
md.slabNum = slabNum;
md.startPage = segIt.startPage;
md.numPages = segIt.numPages;
md.touch = segIt.lastTouched;
md.isFree = segIt.memStatus;
md.chunk_key.insert(md.chunk_key.end(), segIt.chunkKey.begin(), segIt.chunkKey.end());
mi.nodeMemoryData.push_back(md);
}
}
memInfo.push_back(mi);
} else if (hasGpus_) {
int numGpus = cudaMgr_->getDeviceCount();
for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
GpuCudaBufferMgr* gpuBuffer = dynamic_cast<GpuCudaBufferMgr*>(bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]);
MemoryInfo mi;

mi.pageSize = gpuBuffer->getPageSize();
mi.maxNumPages = gpuBuffer->getMaxSize() / mi.pageSize;
mi.isAllocationCapped = gpuBuffer->isAllocationCapped();
mi.numPageAllocated = gpuBuffer->getAllocated() / mi.pageSize;
const std::vector<BufferList> slab_segments = gpuBuffer->getSlabSegments();
size_t numSlabs = slab_segments.size();

for (size_t slabNum = 0; slabNum != numSlabs; ++slabNum) {
for (auto segIt : slab_segments[slabNum]) {
MemoryData md;
md.slabNum = slabNum;
md.startPage = segIt.startPage;
md.numPages = segIt.numPages;
md.touch = segIt.lastTouched;
md.chunk_key.insert(md.chunk_key.end(), segIt.chunkKey.begin(), segIt.chunkKey.end());
md.isFree = segIt.memStatus;
mi.nodeMemoryData.push_back(md);
}
}
memInfo.push_back(mi);
}
}
return memInfo;
}

/*
std::vector<MemoryData> DataMgr::getGpuMemory() {
std::vector<MemoryData> memInfo;
if (hasGpus_) {
int numGpus = cudaMgr_->getDeviceCount();
for (int gpuNum = 0; gpuNum < numGpus; ++gpuNum) {
Expand All @@ -185,11 +244,13 @@ memorySummary DataMgr::getMemorySummary() {
gms.inUse = bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]->getInUseSize();
gms.allocated = bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]->getAllocated();
gms.isAllocationCapped = bufferMgrs_[MemoryLevel::GPU_LEVEL][gpuNum]->isAllocationCapped();
ms.gpuSummary.push_back(gms);
memInfo.push_back(gms);
}
}
return ms;
return memInfo;
}
*/
// std::ostringstream tss;
// size_t mb = 1024 * 1024;
// tss << std::endl;
Expand Down
25 changes: 16 additions & 9 deletions DataMgr/DataMgr.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@

#include "AbstractBuffer.h"
#include "AbstractBufferMgr.h"
#include "BufferMgr/Buffer.h"
#include "BufferMgr/BufferMgr.h"
#include "MemoryLevel.h"
#include "../Shared/mapd_shared_mutex.h"

Expand All @@ -42,16 +44,21 @@ class CudaMgr;

namespace Data_Namespace {

struct gpuMemorySummary {
int64_t max;
int64_t inUse;
int64_t allocated;
bool isAllocationCapped; // mean allocation request failed
// One buffer segment within a slab, as reported by DataMgr::getMemoryInfo.
// Fix: `u_int32_t` is a non-standard BSD/glibc typedef; use the standard
// `uint32_t` (the struct already relies on `int32_t`, so the <cstdint>
// fixed-width names are demonstrably in scope and layout is unchanged).
struct MemoryData {
  size_t slabNum;                      // index of the slab this segment lives in
  int32_t startPage;                   // first page of the segment within its slab
  size_t numPages;                     // segment length in pages
  uint32_t touch;                      // copied from the segment's lastTouched counter
  std::vector<int32_t> chunk_key;      // chunk key of the occupying buffer (copied from segment)
  Buffer_Namespace::MemStatus isFree;  // segment status copied from memStatus
};

struct memorySummary {
int64_t cpuMemoryInUse;
std::vector<gpuMemorySummary> gpuSummary;
// Per-device memory report returned by DataMgr::getMemoryInfo: page-level
// accounting for one buffer pool plus a flattened list of its segments.
struct MemoryInfo {
size_t pageSize;  // page size of the pool, from BufferMgr::getPageSize()
size_t maxNumPages;  // pool capacity in pages: getMaxSize() / pageSize
size_t numPageAllocated;  // pages currently allocated: getAllocated() / pageSize
bool isAllocationCapped;  // true if an allocation request has failed (BufferMgr::isAllocationCapped)
std::vector<MemoryData> nodeMemoryData;  // one entry per segment across all slabs
};

class DataMgr {
Expand Down Expand Up @@ -81,7 +88,7 @@ class DataMgr {
// copies one buffer to another
void copy(AbstractBuffer* destBuffer, AbstractBuffer* srcBuffer);
bool isBufferOnDevice(const ChunkKey& key, const MemoryLevel memLevel, const int deviceId);
memorySummary getMemorySummary();
std::vector<MemoryInfo> getMemoryInfo(const MemoryLevel memLevel);
std::string dumpLevel(const MemoryLevel memLevel);
void clearMemory(const MemoryLevel memLevel);

Expand Down
11 changes: 11 additions & 0 deletions LeafAggregator.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

#include "LeafHostInfo.h"
#include "gen-cpp/MapD.h"
#include "DataMgr/MemoryLevel.h"
#include "QueryEngine/CompilationOptions.h"
#include "QueryEngine/TargetMetaInfo.h"

Expand Down Expand Up @@ -57,6 +58,16 @@ class LeafAggregator {

void interrupt(const TSessionId session) { CHECK(false); }

// NOTE(review): appears to be a stub for builds without leaf/distributed
// support — CHECK(false) aborts, so the return is never reached; confirm
// against the distributed implementation.
std::vector<TServerStatus> getLeafStatus(TSessionId session) {
CHECK(false);
return {};
}

// NOTE(review): stub counterpart to DataMgr::getMemoryInfo for leaf nodes;
// CHECK(false) aborts, so the return is never reached — confirm against the
// distributed implementation.
std::vector<TNodeMemoryInfo> getLeafMemoryInfo(TSessionId session, Data_Namespace::MemoryLevel memory_level) {
CHECK(false);
return {};
}

size_t leafCount() const { return 0; }

void set_execution_mode(const TSessionId session, const TExecuteMode::type mode) { CHECK(false); }
Expand Down
Loading

0 comments on commit dd7d49b

Please sign in to comment.