
Commit

Revert "[scudo] Add partial chunk heuristic to retrieval algorithm." …
Browse files Browse the repository at this point in the history
…(#104894)

Reverts llvm/llvm-project#104807
ChiaHungDuan authored Aug 20, 2024
1 parent a9ce181 commit f9031f0
Showing 2 changed files with 34 additions and 123 deletions.
121 changes: 30 additions & 91 deletions compiler-rt/lib/scudo/standalone/secondary.h
@@ -72,14 +72,6 @@ namespace {
 struct CachedBlock {
   static constexpr u16 CacheIndexMax = UINT16_MAX;
   static constexpr u16 InvalidEntry = CacheIndexMax;
-  // * ReleaseMemoryUpperBound default is currently 16 KB
-  //   - We arrived at this value after noticing that mapping
-  //     in larger memory regions performs better than releasing
-  //     memory and forcing a cache hit. According to the data,
-  //     it suggests that beyond 16KB, the release execution time is
-  //     longer than the map execution time. In this way, the default
-  //     is dependent on the platform.
-  static constexpr uptr ReleaseMemoryUpperBound = 1 << 14;

   uptr CommitBase = 0;
   uptr CommitSize = 0;
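
For context on the constant removed above: the reverted change released the excess unused bytes of a retrieved block back to the OS, but capped the release at ReleaseMemoryUpperBound (16 KB), since measurements showed that releasing larger ranges costs more than mapping fresh memory. A minimal sketch of that cap, using standard C++ types in place of scudo's uptr, roundUp, and Min helpers (the helper names below are illustrative, not scudo's API):

    #include <algorithm>
    #include <cstdint>

    // Round Value up to the next multiple of Boundary (a power of two).
    static uintptr_t roundUpTo(uintptr_t Value, uintptr_t Boundary) {
      return (Value + Boundary - 1) & ~(Boundary - 1);
    }

    // Sketch of the reverted cap: release only the fragmented bytes that
    // exceed what the cache tolerates, and never more than 16 KB.
    static uintptr_t bytesToRelease(uintptr_t FragmentedBytes,
                                    uintptr_t MaxUnusedCacheBytes,
                                    uintptr_t PageSize) {
      constexpr uintptr_t ReleaseMemoryUpperBound = 1 << 14; // 16 KB default
      if (FragmentedBytes <= MaxUnusedCacheBytes)
        return 0; // within tolerance, nothing to release
      const uintptr_t Excess = FragmentedBytes - MaxUnusedCacheBytes;
      return roundUpTo(std::min(ReleaseMemoryUpperBound, Excess), PageSize);
    }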
@@ -98,9 +90,8 @@ struct CachedBlock {
 template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
-  CachedBlock retrieve(UNUSED uptr MaxAllowedFragmentedBytes, UNUSED uptr Size,
-                       UNUSED uptr Alignment, UNUSED uptr HeadersSize,
-                       UNUSED uptr &EntryHeaderPos) {
+  CachedBlock retrieve(UNUSED uptr Size, UNUSED uptr Alignment,
+                       UNUSED uptr HeadersSize, UNUSED uptr &EntryHeaderPos) {
     return {};
   }
   void store(UNUSED Options Options, UNUSED uptr CommitBase,
@@ -343,103 +334,61 @@ class MapAllocatorCache {
     }
   }

-  CachedBlock retrieve(uptr MaxAllowedFragmentedBytes, uptr Size,
-                       uptr Alignment, uptr HeadersSize, uptr &EntryHeaderPos)
-      EXCLUDES(Mutex) {
+  CachedBlock retrieve(uptr Size, uptr Alignment, uptr HeadersSize,
+                       uptr &EntryHeaderPos) EXCLUDES(Mutex) {
     const uptr PageSize = getPageSizeCached();
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool FoundOptimalFit = false;
+    bool Found = false;
     CachedBlock Entry;
     EntryHeaderPos = 0;
     {
       ScopedLock L(Mutex);
       CallsToRetrieve++;
       if (EntriesCount == 0)
         return {};
-      u16 RetrievedIndex = CachedBlock::InvalidEntry;
+      u32 OptimalFitIndex = 0;
       uptr MinDiff = UINTPTR_MAX;
-
-      //  Since allocation sizes don't always match cached memory chunk sizes
-      //  we allow some memory to be unused (called fragmented bytes). The
-      //  amount of unused bytes is exactly EntryHeaderPos - CommitBase.
-      //
-      //        CommitBase               CommitBase + CommitSize
-      //          V                               V
-      //      +---+------------+-----------------+---+
-      //      |   |            |                 |   |
-      //      +---+------------+-----------------+---+
-      //      ^                ^                     ^
-      //    Guard          EntryHeaderPos       Guard-page-end
-      //    page-begin
-      //
-      //  [EntryHeaderPos, CommitBase + CommitSize) contains the user data as
-      //  well as the header metadata. If EntryHeaderPos - CommitBase exceeds
-      //  MaxAllowedFragmentedBytes, the cached memory chunk is not considered
-      //  valid for retrieval.
-      for (u16 I = LRUHead; I != CachedBlock::InvalidEntry;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
            I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
         const uptr AllocPos =
             roundDown(CommitBase + CommitSize - Size, Alignment);
         const uptr HeaderPos = AllocPos - HeadersSize;
-        if (HeaderPos > CommitBase + CommitSize || HeaderPos < CommitBase)
+        if (HeaderPos > CommitBase + CommitSize)
           continue;
-
-        const uptr Diff = roundDown(HeaderPos, PageSize) - CommitBase;
-
-        if (Diff > MaxAllowedFragmentedBytes || Diff >= MinDiff)
+        if (HeaderPos < CommitBase ||
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
           continue;
-
-        MinDiff = Diff;
-        RetrievedIndex = I;
-        EntryHeaderPos = HeaderPos;
-
-        const uptr OptimalFitThesholdBytes =
+        }
+        Found = true;
+        const uptr Diff = HeaderPos - CommitBase;
+        // immediately use a cached block if it's size is close enough to the
+        // requested size.
+        const uptr MaxAllowedFragmentedBytes =
             (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
-        if (Diff <= OptimalFitThesholdBytes) {
-          FoundOptimalFit = true;
+        if (Diff <= MaxAllowedFragmentedBytes) {
+          OptimalFitIndex = I;
+          EntryHeaderPos = HeaderPos;
           break;
         }
+        // keep track of the smallest cached block
+        // that is greater than (AllocSize + HeaderSize)
+        if (Diff > MinDiff)
+          continue;
+        OptimalFitIndex = I;
+        MinDiff = Diff;
+        EntryHeaderPos = HeaderPos;
       }
-      if (RetrievedIndex != CachedBlock::InvalidEntry) {
-        Entry = Entries[RetrievedIndex];
-        remove(RetrievedIndex);
+      if (Found) {
+        Entry = Entries[OptimalFitIndex];
+        remove(OptimalFitIndex);
         SuccessfulRetrieves++;
       }
     }
-
-    // The difference between the retrieved memory chunk and the request
-    // size is at most MaxAllowedFragmentedBytes
-    //
-    //   /    MaxAllowedFragmentedBytes   \
-    //  +--------------------------+-----------+
-    //  |                          |           |
-    //  +--------------------------+-----------+
-    //   \   Bytes to be released  /     ^
-    //                                   |
-    //                    (may or may not be commited)
-    //
-    // The maximum number of bytes released to the OS is capped by
-    // ReleaseMemoryUpperBound
-    //
-    // TODO : Consider making ReleaseMemoryUpperBound configurable since
-    // the release to OS API can vary across systems.
-    if (!FoundOptimalFit && Entry.Time != 0) {
-      const uptr FragmentedBytes =
-          roundDown(EntryHeaderPos, PageSize) - Entry.CommitBase;
-      const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
-      if (FragmentedBytes > MaxUnusedCacheBytes) {
-        uptr BytesToRelease =
-            roundUp(Min<uptr>(CachedBlock::ReleaseMemoryUpperBound,
-                              FragmentedBytes - MaxUnusedCacheBytes),
-                    PageSize);
-        Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, BytesToRelease);
-      }
-    }
-
     return Entry;
   }
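
Both the removed and the restored loops above rely on the same placement geometry: the user allocation is placed as high as possible in the cached block's committed range, the header sits immediately below it, and everything from CommitBase up to the header is unused ("fragmented") bytes. A small self-contained sketch of that arithmetic, with plain C++ types standing in for scudo's uptr and roundDown (illustrative only, not the scudo implementation):

    #include <cstdint>

    // Round Value down to a multiple of Alignment (a power of two).
    static uintptr_t roundDownTo(uintptr_t Value, uintptr_t Alignment) {
      return Value & ~(Alignment - 1);
    }

    // For a cached block [CommitBase, CommitBase + CommitSize), compute where
    // a request of Size bytes would land and how many bytes would go unused.
    // Returns false if the request does not fit in the block.
    static bool fragmentedBytesFor(uintptr_t CommitBase, uintptr_t CommitSize,
                                   uintptr_t Size, uintptr_t Alignment,
                                   uintptr_t HeadersSize,
                                   uintptr_t &FragmentedBytes) {
      const uintptr_t AllocPos =
          roundDownTo(CommitBase + CommitSize - Size, Alignment);
      const uintptr_t HeaderPos = AllocPos - HeadersSize;
      if (HeaderPos > CommitBase + CommitSize || HeaderPos < CommitBase)
        return false; // request too large (unsigned math wraps) or no room
      FragmentedBytes = HeaderPos - CommitBase; // unused bytes if handed out
      return true;
    }

The restored code accepts a block on the spot when these unused bytes are at most one tenth of the bytes from the header to the end of the block, and otherwise keeps the closest fit seen so far; the reverted code instead compared them against the caller-supplied MaxAllowedFragmentedBytes and released the excess after retrieval.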

@@ -710,18 +659,8 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
                                            FillContentsMode FillContents) {
   CachedBlock Entry;
   uptr EntryHeaderPos;
-  uptr MaxAllowedFragmentedBytes;
-  const uptr PageSize = getPageSizeCached();
-
-  if (LIKELY(!useMemoryTagging<Config>(Options))) {
-    MaxAllowedFragmentedBytes =
-        MaxUnusedCachePages * PageSize + CachedBlock::ReleaseMemoryUpperBound;
-  } else {
-    MaxAllowedFragmentedBytes = MaxUnusedCachePages * PageSize;
-  }
-
-  Entry = Cache.retrieve(MaxAllowedFragmentedBytes, Size, Alignment,
-                         getHeadersSize(), EntryHeaderPos);
+  Entry = Cache.retrieve(Size, Alignment, getHeadersSize(), EntryHeaderPos);
   if (!Entry.isValid())
     return nullptr;
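
The block removed here is the caller-side half of the heuristic: without memory tagging, the caller widened the fragmentation budget by ReleaseMemoryUpperBound because the excess could later be released; with tagging enabled it stayed at MaxUnusedCachePages worth of pages. A hedged sketch of that budget computation (types simplified; the MaxUnusedCachePages value of 4 is an assumption for the sketch, not taken from this diff):

    #include <cstdint>

    // Sketch of the budget computation the revert removes (illustrative).
    static uintptr_t maxAllowedFragmentedBytes(bool MemoryTaggingEnabled,
                                               uintptr_t PageSize) {
      constexpr uintptr_t MaxUnusedCachePages = 4;           // assumed value
      constexpr uintptr_t ReleaseMemoryUpperBound = 1 << 14; // 16 KB
      uintptr_t Budget = MaxUnusedCachePages * PageSize;
      // Without memory tagging the excess can be released back to the OS
      // after retrieval, so up to 16 KB of extra fragmentation is tolerated.
      if (!MemoryTaggingEnabled)
        Budget += ReleaseMemoryUpperBound;
      return Budget;
    }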

36 changes: 4 additions & 32 deletions compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -281,8 +281,8 @@ struct MapAllocatorCacheTest : public Test {
   std::unique_ptr<CacheT> Cache = std::make_unique<CacheT>();

   const scudo::uptr PageSize = scudo::getPageSizeCached();
-  // The current test allocation size is set to the maximum
-  // cache entry size
+  // The current test allocation size is set to the minimum size
+  // needed for the scudo allocator to fall back to the secondary allocator
   static constexpr scudo::uptr TestAllocSize =
       CacheConfig::getDefaultMaxEntrySize();

@@ -327,8 +327,7 @@ TEST_F(MapAllocatorCacheTest, CacheOrder) {
   for (scudo::uptr I = CacheConfig::getEntriesArraySize(); I > 0; I--) {
     scudo::uptr EntryHeaderPos;
     scudo::CachedBlock Entry =
-        Cache->retrieve(scudo::MaxUnusedCachePages * PageSize, TestAllocSize,
-                        PageSize, 0, EntryHeaderPos);
+        Cache->retrieve(TestAllocSize, PageSize, 0, EntryHeaderPos);
     EXPECT_EQ(Entry.MemMap.getBase(), MemMaps[I - 1].getBase());
   }

@@ -337,32 +336,6 @@ TEST_F(MapAllocatorCacheTest, CacheOrder) {
     MemMap.unmap();
 }

-TEST_F(MapAllocatorCacheTest, PartialChunkHeuristicRetrievalTest) {
-  const scudo::uptr MaxUnusedCacheBytes = PageSize;
-  const scudo::uptr FragmentedBytes =
-      MaxUnusedCacheBytes + scudo::CachedBlock::ReleaseMemoryUpperBound;
-
-  scudo::uptr EntryHeaderPos;
-  scudo::CachedBlock Entry;
-  scudo::MemMapT MemMap = allocate(PageSize + FragmentedBytes);
-  Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
-               MemMap.getBase(), MemMap);
-
-  // FragmentedBytes > MaxAllowedFragmentedBytes so PageSize
-  // cannot be retrieved from the cache
-  Entry = Cache->retrieve(/*MaxAllowedFragmentedBytes=*/0, PageSize, PageSize,
-                          0, EntryHeaderPos);
-  EXPECT_FALSE(Entry.isValid());
-
-  // FragmentedBytes <= MaxAllowedFragmentedBytes so PageSize
-  // can be retrieved from the cache
-  Entry =
-      Cache->retrieve(FragmentedBytes, PageSize, PageSize, 0, EntryHeaderPos);
-  EXPECT_TRUE(Entry.isValid());
-
-  MemMap.unmap();
-}
-

TEST_F(MapAllocatorCacheTest, MemoryLeakTest) {
std::vector<scudo::MemMapT> MemMaps;
// Fill the cache above MaxEntriesCount to force an eviction
@@ -378,8 +351,7 @@ TEST_F(MapAllocatorCacheTest, MemoryLeakTest) {
   for (scudo::uptr I = CacheConfig::getDefaultMaxEntriesCount(); I > 0; I--) {
     scudo::uptr EntryHeaderPos;
     RetrievedEntries.push_back(
-        Cache->retrieve(scudo::MaxUnusedCachePages * PageSize, TestAllocSize,
-                        PageSize, 0, EntryHeaderPos));
+        Cache->retrieve(TestAllocSize, PageSize, 0, EntryHeaderPos));
     EXPECT_EQ(MemMaps[I].getBase(), RetrievedEntries.back().MemMap.getBase());
   }

