Skip to content

[scudo] Support the mode of disabling primary cache #121351

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions compiler-rt/lib/scudo/standalone/allocator_config.def
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,8 @@ PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)

// PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
//
// Indicates support for the per-thread cache of free blocks. When `false`,
// the primary uses the cache-less path (NoCache) instead of
// SizeClassAllocatorLocalCache.
PRIMARY_OPTIONAL(const bool, EnableCache, true)

// The scale of a compact pointer. E.g., Ptr = Base + (CompactPtr << Scale).
PRIMARY_OPTIONAL(const uptr, CompactPtrScale, SCUDO_MIN_ALIGNMENT_LOG)

Expand Down
147 changes: 142 additions & 5 deletions compiler-rt/lib/scudo/standalone/local_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -161,11 +161,6 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
}
}

void destroyBatch(uptr ClassId, void *B) {
if (ClassId != BatchClassId)
deallocate(BatchClassId, B);
}

NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
const u16 NumBlocksRefilled =
Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
Expand All @@ -184,6 +179,148 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
}
};

// A drop-in replacement for SizeClassAllocatorLocalCache used when the
// primary config sets `EnableCache = false`: allocations and deallocations
// are forwarded straight to the primary allocator instead of being batched
// in per-class free lists. The only blocks buffered locally are BatchClass
// blocks (see BatchClassStorage), which must be moved in bulk.
template <class SizeClassAllocator> struct NoCache {
typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

// Links this cache's LocalStats into the global stats list (if provided),
// records the backing allocator and sets up the per-class metadata.
void init(GlobalStats *S, SizeClassAllocator *A) {
Stats.init();
if (LIKELY(S))
S->link(&Stats);
Allocator = A;
initCache();
}

// Unlinks the LocalStats registered by init(). Note that, unlike
// SizeClassAllocatorLocalCache, there are no cached user blocks to drain
// here; only BatchClass blocks may remain (see drain()).
void destroy(GlobalStats *S) {
if (LIKELY(S))
S->unlink(&Stats);
}

// Pops a single block of class `ClassId` directly from the primary.
// Returns nullptr if the primary is out of blocks for this class.
void *allocate(uptr ClassId) {
CompactPtrT CompactPtr;
uptr NumBlocksPopped = Allocator->popBlocks(this, ClassId, &CompactPtr, 1U);
if (NumBlocksPopped == 0)
return nullptr;
DCHECK_EQ(NumBlocksPopped, 1U);
const PerClass *C = &PerClassArray[ClassId];
Stats.add(StatAllocated, C->ClassSize);
Stats.sub(StatFree, C->ClassSize);
return Allocator->decompactPtr(ClassId, CompactPtr);
}

// Pushes `P` back to the primary immediately. Returns true when the caller
// should consider draining/releasing pages for this size class (the same
// hint rate SizeClassAllocatorLocalCache produces by emptying its cache).
bool deallocate(uptr ClassId, void *P) {
CHECK_LT(ClassId, NumClasses);

if (ClassId == BatchClassId)
return deallocateBatchClassBlock(P);

CompactPtrT CompactPtr =
Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
Allocator->pushBlocks(this, ClassId, &CompactPtr, 1U);
PerClass *C = &PerClassArray[ClassId];
Stats.sub(StatAllocated, C->ClassSize);
Stats.add(StatFree, C->ClassSize);

// The following adopts the same strategy of allocator draining as
// SizeClassAllocatorLocalCache so that they have the same hint for doing
// page release.
++C->Count;
const bool SuggestDraining = C->Count == C->MaxCount;
if (SuggestDraining)
C->Count = 0;
return SuggestDraining;
}

// Returns one BatchClass block, refilling BatchClassStorage from the
// primary when it's empty. Reports OOM if the primary cannot supply any.
void *getBatchClassBlock() {
PerClass *C = &PerClassArray[BatchClassId];
if (C->Count == 0) {
const u16 NumBlocksRefilled = Allocator->popBlocks(
this, BatchClassId, BatchClassStorage, C->MaxCount);
if (NumBlocksRefilled == 0)
reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
DCHECK_LE(NumBlocksRefilled, SizeClassMap::MaxNumCachedHint);
C->Count = NumBlocksRefilled;
}

// ClassSize of BatchClass is 0 (see initCache()), so these stats updates
// are deliberately no-ops for user-visible accounting.
const uptr ClassSize = C->ClassSize;
CompactPtrT CompactP = BatchClassStorage[--C->Count];
Stats.add(StatAllocated, ClassSize);
Stats.sub(StatFree, ClassSize);

return Allocator->decompactPtr(BatchClassId, CompactP);
}

LocalStats &getStats() { return Stats; }

void getStats(ScopedString *Str) { Str->append(" No block is cached.\n"); }

// Only BatchClass blocks can be cached here, so "empty" means no locally
// held BatchClass blocks.
bool isEmpty() const {
const PerClass *C = &PerClassArray[BatchClassId];
return C->Count == 0;
}
// Returns any locally held BatchClass blocks to the primary.
void drain() {
PerClass *C = &PerClassArray[BatchClassId];
if (C->Count > 0) {
Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
C->Count = 0;
}
}

// Kept for interface parity with SizeClassAllocatorLocalCache; used by the
// primary to size block transfers.
static u16 getMaxCached(uptr Size) {
return Min(SizeClassMap::MaxNumCachedHint,
SizeClassMap::getMaxCachedHint(Size));
}

private:
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr BatchClassId = SizeClassMap::BatchClassId;
// Per-class bookkeeping: Count/MaxCount drive the page-release hint in
// deallocate() (and the BatchClass buffer fill level); ClassSize feeds the
// malloc/free stats.
struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
u16 Count = 0;
u16 MaxCount;
// Note: ClassSize is zero for the transfer batch.
uptr ClassSize;
};
PerClass PerClassArray[NumClasses] = {};
// Popping BatchClass blocks requires taking a certain amount of blocks at
// once. This restriction comes from how we manage the storing of BatchClass
// in the primary allocator. See more details in `popBlocksImpl` in the
// primary allocator.
CompactPtrT BatchClassStorage[SizeClassMap::MaxNumCachedHint];
LocalStats Stats;
SizeClassAllocator *Allocator = nullptr;

// Buffers `P` into BatchClassStorage, flushing the whole buffer to the
// primary first when it is full.
bool deallocateBatchClassBlock(void *P) {
PerClass *C = &PerClassArray[BatchClassId];
// Drain all the blocks.
if (C->Count == C->MaxCount) {
Allocator->pushBlocks(this, BatchClassId, BatchClassStorage, C->Count);
C->Count = 0;
}
BatchClassStorage[C->Count++] =
Allocator->compactPtr(BatchClassId, reinterpret_cast<uptr>(P));

// Currently, BatchClass doesn't support page releasing, so we always return
// false.
return false;
}

// Initializes PerClassArray. MaxCount mirrors the sizing policy of
// SizeClassAllocatorLocalCache so both cache modes emit page-release hints
// at the same rate.
NOINLINE void initCache() {
for (uptr I = 0; I < NumClasses; I++) {
PerClass *P = &PerClassArray[I];
const uptr Size = SizeClassAllocator::getSizeByClassId(I);
if (I != BatchClassId) {
P->ClassSize = Size;
P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
} else {
// ClassSize in this struct is only used for malloc/free stats, which
// should only track user allocations, not internal movements.
P->ClassSize = 0;
P->MaxCount = SizeClassMap::MaxNumCachedHint;
}
}
}
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_
4 changes: 3 additions & 1 deletion compiler-rt/lib/scudo/standalone/primary64.h
Original file line number Diff line number Diff line change
Expand Up @@ -57,9 +57,11 @@ template <typename Config> class SizeClassAllocator64 {
"Group size shouldn't be greater than the region size");
static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef TransferBatch<ThisT> TransferBatchT;
typedef BatchGroup<ThisT> BatchGroupT;
using CacheT = typename Conditional<Config::getEnableCache(),
SizeClassAllocatorLocalCache<ThisT>,
NoCache<ThisT>>::type;

// BatchClass is used to store internal metadata so it needs to be at least as
// large as the largest data structure.
Expand Down
44 changes: 43 additions & 1 deletion compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,47 @@ struct TestConditionVariableConfig {
};
template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};

// Combined-allocator test config exercising the cache-less primary mode:
// Primary::EnableCache = false selects NoCache as the primary's CacheT, and
// the secondary likewise runs without a cache (MapAllocatorNoCache).
struct TestNoCacheConfig {
static const bool MaySupportMemoryTagging = true;
template <class A>
using TSDRegistryT =
scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.

struct Primary {
using SizeClassMap = scudo::AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
static const scudo::uptr RegionSizeLog = 28U;
typedef scudo::u32 CompactPtrT;
static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
static const scudo::uptr GroupSizeLog = 20U;
static const bool EnableRandomOffset = true;
static const scudo::uptr MapSizeIncrement = 1UL << 18;
#else
static const scudo::uptr RegionSizeLog = 18U;
static const scudo::uptr GroupSizeLog = 18U;
typedef scudo::uptr CompactPtrT;
#endif
// The option under test: disable the primary's per-thread block cache.
static const bool EnableCache = false;
static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
};

#if SCUDO_CAN_USE_PRIMARY64
template <typename Config>
using PrimaryT = scudo::SizeClassAllocator64<Config>;
#else
template <typename Config>
using PrimaryT = scudo::SizeClassAllocator32<Config>;
#endif

struct Secondary {
template <typename Config>
using CacheT = scudo::MapAllocatorNoCache<Config>;
};
template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};

} // namespace scudo

#if SCUDO_FUCHSIA
Expand All @@ -219,7 +260,8 @@ struct TestConditionVariableConfig {
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig) \
SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestNoCacheConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE) \
Expand Down
8 changes: 8 additions & 0 deletions compiler-rt/lib/scudo/standalone/type_traits.h
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,14 @@ template <typename T> struct isPointer<T *> {
static constexpr bool value = true;
};

// Compile-time type selection, mirroring std::conditional: yields the first
// type when the condition is true, the second when it is false. Provided
// locally because scudo cannot depend on the C++ standard library.
template <bool Condition, typename TrueType, typename FalseType>
struct Conditional {
  // Primary template handles the Condition == true case.
  using type = TrueType;
};

// Partial specialization selecting the second type when Condition is false.
template <typename TrueType, typename FalseType>
struct Conditional<false, TrueType, FalseType> {
  using type = FalseType;
};

} // namespace scudo

#endif // SCUDO_TYPE_TRAITS_H_
Loading