Skip to content

Fixes issues #75 and #76

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 9 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
271 changes: 172 additions & 99 deletions cachelib/allocator/CacheAllocator-inl.h

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions cachelib/allocator/CacheAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -1045,6 +1045,9 @@ class CacheAllocator : public CacheBase {
// get cache name
const std::string getCacheName() const override final;

// combined pool size for all memory tiers
size_t getPoolSize(PoolId pid) const;

// pool stats by pool id
PoolStats getPoolStats(PoolId pid) const override final;

Expand Down
6 changes: 4 additions & 2 deletions cachelib/allocator/memory/MemoryAllocator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,8 @@ void* MemoryAllocator::allocateZeroedSlab(PoolId id) {
PoolId MemoryAllocator::addPool(folly::StringPiece name,
size_t size,
const std::set<uint32_t>& allocSizes,
bool ensureProvisionable) {
bool ensureProvisionable,
size_t* extraBytes) {
const std::set<uint32_t>& poolAllocSizes =
allocSizes.empty() ? config_.allocSizes : allocSizes;

Expand All @@ -100,7 +101,8 @@ PoolId MemoryAllocator::addPool(folly::StringPiece name,
size));
}

return memoryPoolManager_.createNewPool(name, size, poolAllocSizes);
return memoryPoolManager_.createNewPool(name, size, poolAllocSizes,
extraBytes);
}

PoolId MemoryAllocator::getPoolId(const std::string& name) const noexcept {
Expand Down
17 changes: 8 additions & 9 deletions cachelib/allocator/memory/MemoryAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,8 @@ class MemoryAllocator {
PoolId addPool(folly::StringPiece name,
size_t size,
const std::set<uint32_t>& allocSizes = {},
bool ensureProvisionable = false);
bool ensureProvisionable = false,
size_t* extraBytes = nullptr);

// shrink the existing pool by _bytes_ .
// @param id the id for the pool
Expand Down Expand Up @@ -515,14 +516,12 @@ class MemoryAllocator {

using CompressedPtr = facebook::cachelib::CompressedPtr;
template <typename PtrType>
using PtrCompressor =
facebook::cachelib::PtrCompressor<PtrType,
std::vector<std::unique_ptr<MemoryAllocator>>>;
using PtrCompressor = facebook::cachelib::
PtrCompressor<PtrType, std::vector<std::unique_ptr<MemoryAllocator>>>;

template <typename PtrType>
using SingleTierPtrCompressor =
facebook::cachelib::PtrCompressor<PtrType,
SlabAllocator>;
facebook::cachelib::PtrCompressor<PtrType, SlabAllocator>;

// compress a given pointer to a valid allocation made out of this allocator
// through an allocate() or nullptr. Calling this otherwise with invalid
Expand Down Expand Up @@ -636,9 +635,9 @@ class MemoryAllocator {

// returns true if ptr points to memory which is managed by this
// allocator
bool isMemoryInAllocator(const void* ptr) {
  // A null pointer can never belong to the allocator. Otherwise the
  // pointer must fall inside the slab memory region [begin, end).
  if (!ptr) {
    return false;
  }
  const void* const memBegin = slabAllocator_.getSlabMemoryBegin();
  const void* const memEnd = slabAllocator_.getSlabMemoryEnd();
  return ptr >= memBegin && ptr < memEnd;
}

private:
Expand Down
13 changes: 12 additions & 1 deletion cachelib/allocator/memory/MemoryPoolManager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,8 @@ size_t MemoryPoolManager::getRemainingSizeLocked() const noexcept {

PoolId MemoryPoolManager::createNewPool(folly::StringPiece name,
size_t poolSize,
const std::set<uint32_t>& allocSizes) {
const std::set<uint32_t>& allocSizes,
size_t* extraBytes) {
folly::SharedMutex::WriteHolder l(lock_);
if (poolsByName_.find(name) != poolsByName_.end()) {
throw std::invalid_argument("Duplicate pool");
Expand All @@ -109,6 +110,16 @@ PoolId MemoryPoolManager::createNewPool(folly::StringPiece name,
poolSize));
}

// If the caller requested extra capacity on top of the pool size, grant as
// much of it as the remaining memory allows and report the unsatisfied
// remainder back through *extraBytes.
if (extraBytes && (*extraBytes)) {
  if (remaining >= poolSize + *extraBytes) {
    // Enough headroom: absorb all of the extra bytes into this pool.
    poolSize += *extraBytes;
    *extraBytes = 0;
  } else {
    // Grow the pool only up to the remaining capacity. Compute the grant
    // BEFORE mutating poolSize: the previous code updated poolSize first,
    // so the subsequent `*extraBytes -= (remaining - poolSize)` always
    // subtracted zero, leaving *extraBytes overstated for the next tier.
    const size_t granted = remaining - poolSize;
    poolSize += granted;
    *extraBytes -= granted;
  }
}

const PoolId id = nextPoolId_;
pools_[id].reset(new MemoryPool(id, poolSize, slabAlloc_, allocSizes));
poolsByName_.insert({name.str(), id});
Expand Down
3 changes: 2 additions & 1 deletion cachelib/allocator/memory/MemoryPoolManager.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,8 @@ class MemoryPoolManager {
// std::logic_error if we have run out the allowed number of pools.
PoolId createNewPool(folly::StringPiece name,
size_t size,
const std::set<uint32_t>& allocSizes);
const std::set<uint32_t>& allocSizes,
size_t* extraBytes = nullptr);

// shrink the existing pool by _bytes_ .
// @param bytes the number of bytes to be taken away from the pool
Expand Down
84 changes: 59 additions & 25 deletions cachelib/allocator/tests/MemoryTiersTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,18 +26,18 @@ namespace tests {
using LruAllocatorConfig = CacheAllocatorConfig<LruAllocator>;
using LruMemoryTierConfigs = LruAllocatorConfig::MemoryTierConfigs;
using Strings = std::vector<std::string>;

constexpr size_t MB = 1024ULL * 1024ULL;
constexpr size_t GB = MB * 1024ULL;

using SizePair = std::tuple<size_t, size_t>;
using SizePairs = std::vector<SizePair>;

const size_t defaultTotalCacheSize{1 * 1024 * 1024 * 1024};
const size_t defaultTotalCacheSize{1 * GB};
const std::string defaultCacheDir{"/var/metadataDir"};
const std::string defaultPmemPath{"/dev/shm/p1"};
const std::string defaultDaxPath{"/dev/dax0.0"};

const size_t metaDataSize = 4194304;
constexpr size_t MB = 1024ULL * 1024ULL;
constexpr size_t GB = MB * 1024ULL;

template <typename Allocator>
class MemoryTiersTest : public AllocatorTest<Allocator> {
public:
Expand Down Expand Up @@ -124,6 +124,13 @@ class MemoryTiersTest : public AllocatorTest<Allocator> {
dramConfig.setCacheSize(totalCacheSize);
return dramConfig;
}

// Assert that the allocator reports the expected combined size for a pool.
void validatePoolSize(PoolId poolId,
                      std::unique_ptr<LruAllocator>& allocator,
                      size_t expectedSize) {
  const size_t reportedSize = allocator->getPoolSize(poolId);
  EXPECT_EQ(reportedSize, expectedSize);
}
};

using LruMemoryTiersTest = MemoryTiersTest<LruAllocator>;
Expand Down Expand Up @@ -225,30 +232,57 @@ TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigSizesNeCacheSize) {
std::invalid_argument);
}

TEST_F(LruMemoryTiersTest, TestTieredCacheSize) {
size_t totalSizes[] = {50 * MB, 77 * MB, 100 * MB, 101 * MB + MB / 2,
1 * GB, 4 * GB, 8 * GB, 9 * GB};
size_t numTiers[] = {2, 3, 4};

auto getCacheSize = [&](size_t cacheSize, size_t tiers) {
std::unique_ptr<LruAllocator> alloc;
if (tiers < 2) {
alloc = std::unique_ptr<LruAllocator>(
new LruAllocator(createDramCacheConfig(cacheSize)));
TEST_F(LruMemoryTiersTest, TestPoolAllocations) {
std::vector<size_t> totalCacheSizes = {48 * MB, 51 * MB, 256 * MB,
1 * GB, 5 * GB, 8 * GB};
std::vector<size_t> numTiers = {2, 3, 4};
std::vector<SizePairs> sizePairs = {
{},
{std::make_tuple(1, 0)},
{std::make_tuple(3, 0), std::make_tuple(2, 0)},
{std::make_tuple(2, 0), std::make_tuple(2, 0), std::make_tuple(1, 0)},
{std::make_tuple(1, 0), std::make_tuple(5, 0), std::make_tuple(1, 0),
std::make_tuple(2, 0)}};
const std::string path = "/tmp/tier";
std::vector<Strings> paths = {
{},
{path + "0"},
{path + "0", path + "1"},
{path + "0", path + "1", path + "2"},
{path + "0", path + "1", path + "2", path + "3"}};

auto testAddPool = [&](LruAllocatorConfig& config, size_t poolSize,
bool isSizeValid = true, bool isTestMaxSize = false) {
if (isTestMaxSize) {
std::unique_ptr<LruAllocator> alloc = std::unique_ptr<LruAllocator>(
new LruAllocator(LruAllocator::SharedMemNew, config));
auto pool =
alloc->addPool("maxPoolSize", alloc->getCacheMemoryStats().cacheSize);
validatePoolSize(pool, alloc, alloc->getCacheMemoryStats().cacheSize);
}
std::unique_ptr<LruAllocator> alloc = std::unique_ptr<LruAllocator>(
new LruAllocator(LruAllocator::SharedMemNew, config));
if (isSizeValid) {
auto pool = alloc->addPool("validPoolSize", poolSize);
validatePoolSize(pool, alloc, poolSize);
} else {
alloc = std::unique_ptr<LruAllocator>(
new LruAllocator(LruAllocator::SharedMemNew,
createTieredCacheConfig(cacheSize, tiers)));
EXPECT_THROW(alloc->addPool("invalidPoolSize", poolSize),
std::invalid_argument);
}
return alloc->getCacheMemoryStats().cacheSize;
};

for (auto totalSize : totalSizes) {
auto dramCacheSize = getCacheSize(totalSize, 1);
for (auto n : numTiers) {
auto tieredCacheSize = getCacheSize(totalSize, n);
EXPECT_GT(dramCacheSize, tieredCacheSize);
EXPECT_GE(metaDataSize * n * 2, dramCacheSize - tieredCacheSize);
for (auto nTiers : numTiers) {
for (auto totalCacheSize : totalCacheSizes) {
if (totalCacheSize <= nTiers * Slab::kSize * 4)
continue;
LruAllocatorConfig cfg =
createTestCacheConfig(paths[nTiers], sizePairs[nTiers],
/* usePosix */ true, totalCacheSize);
basicCheck(cfg, paths[nTiers], totalCacheSize);
testAddPool(cfg, 0, /* isSizeValid */ true, /* isTestMaxSize */ true);
testAddPool(cfg, 1);
testAddPool(cfg, totalCacheSize, /* isSizeValid */ false);
testAddPool(cfg, totalCacheSize / nTiers);
}
}
}
Expand Down