
Commit fdc4955

Revert "[scudo] Separated committed and decommitted entries." (#104045)
Reverts #101409. This caused some memory usage regressions, and it has a known bug in page releasing.
1 parent 234cb4c commit fdc4955

File tree

1 file changed: +76 −144 lines changed


compiler-rt/lib/scudo/standalone/secondary.h

Lines changed: 76 additions & 144 deletions
@@ -184,14 +184,6 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
 class MapAllocatorCache {
 public:
-  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
-
-  // TODO: Refactor the intrusive list to support non-pointer link type
-  typedef struct {
-    u16 Head;
-    u16 Tail;
-  } ListInfo;
-
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -209,18 +201,13 @@ class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

-    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
-      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        CachedBlock &Entry = Entries[I];
-        Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                    "BlockSize: %zu %s\n",
-                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-      }
-    };
-    printList(COMMITTED);
-    printList(DECOMMITTED);
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                  "BlockSize: %zu %s\n",
+                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+    }
   }

   // Ensure the default maximum specified fits the array.
@@ -244,10 +231,8 @@ class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

     // The cache is initially empty
-    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
-    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
-    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
-    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;

     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -265,6 +250,7 @@ class MapAllocatorCache {
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     u64 Time;
     CachedBlock Entry;
+
     Entry.CommitBase = CommitBase;
     Entry.CommitSize = CommitSize;
     Entry.BlockBegin = BlockBegin;
@@ -326,27 +312,18 @@ class MapAllocatorCache {
         Entry = PrevEntry;
       }

-      // All excess entries are evicted from the cache.
-      // DECOMMITTED entries, being older than the COMMITTED
-      // entries, are evicted first in least recently used (LRU)
-      // fashioned followed by the COMMITTED entries
+      // All excess entries are evicted from the cache
       while (needToEvict()) {
-        EntryListT EvictionListType;
-        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
-          EvictionListType = COMMITTED;
-        else
-          EvictionListType = DECOMMITTED;
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(
-            Entries[EntryLists[EvictionListType].Tail].MemMap);
-        remove(EntryLists[EvictionListType].Tail, EvictionListType);
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }

-      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
+      insert(Entry);

       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0); // ScopedLock L(Mutex);
+    } while (0);

     for (MemMapT &EvictMemMap : EvictionMemMaps)
       unmapCallBack(EvictMemMap);
@@ -363,14 +340,17 @@ class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
+    bool Found = false;
     CachedBlock Entry;
-    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
-    uptr MinDiff = UINTPTR_MAX;
-    EntryListT OptimalFitListType = NONE;
     EntryHeaderPos = 0;
-
-    auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
-      for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+    {
+      ScopedLock L(Mutex);
+      CallsToRetrieve++;
+      if (EntriesCount == 0)
+        return {};
+      u32 OptimalFitIndex = 0;
+      uptr MinDiff = UINTPTR_MAX;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
            I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
@@ -380,48 +360,34 @@ class MapAllocatorCache {
         if (HeaderPos > CommitBase + CommitSize)
           continue;
         if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
           continue;
-
+        }
+        Found = true;
         const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to
-        // the requested size.
+        // immediately use a cached block if it's size is close enough to the
+        // requested size.
         const uptr MaxAllowedFragmentedBytes =
             (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
         if (Diff <= MaxAllowedFragmentedBytes) {
           OptimalFitIndex = I;
           EntryHeaderPos = HeaderPos;
-          OptimalFitListType = ListType;
-          return true;
+          break;
         }
-
         // keep track of the smallest cached block
         // that is greater than (AllocSize + HeaderSize)
         if (Diff > MinDiff)
           continue;
         OptimalFitIndex = I;
         MinDiff = Diff;
-        OptimalFitListType = ListType;
         EntryHeaderPos = HeaderPos;
       }
-      return (OptimalFitIndex != CachedBlock::InvalidEntry);
-    };
-
-    {
-      ScopedLock L(Mutex);
-      CallsToRetrieve++;
-      if (EntriesCount == 0)
-        return {};
-
-      // Prioritize valid fit from COMMITTED entries over
-      // optimal fit from DECOMMITTED entries
-      if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
-        return {};
-
-      Entry = Entries[OptimalFitIndex];
-      remove(OptimalFitIndex, OptimalFitListType);
-      SuccessfulRetrieves++;
-    } // ScopedLock L(Mutex);
+      if (Found) {
+        Entry = Entries[OptimalFitIndex];
+        remove(OptimalFitIndex);
+        SuccessfulRetrieves++;
+      }
+    }

     return Entry;
   }
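The hunk above restores the single-pass best-fit scan over the LRU list. As a rough standalone illustration (not the scudo code; Candidate, pickBlock, and the vector input are made-up names), the restored selection rule amounts to: take the first block whose wasted bytes are within roughly 10% of its usable space, otherwise fall back to the block with the least waste seen during the scan.

// Standalone sketch of the block-selection rule restored above (illustrative
// names only). "Waste" plays the role of HeaderPos - CommitBase and "Usable"
// the role of CommitBase + CommitSize - HeaderPos in the real code.
#include <cstdint>
#include <vector>

struct Candidate {
  uint64_t Waste;  // bytes lost at the front of the block
  uint64_t Usable; // bytes available from the header position to the end
};

// Returns the index of the chosen candidate, or -1 if there are none.
int pickBlock(const std::vector<Candidate> &Cands) {
  constexpr uint64_t FragmentedBytesDivisor = 10; // waste <= 10% of usable
  int Best = -1;
  uint64_t MinWaste = UINT64_MAX;
  for (int I = 0; I < (int)Cands.size(); I++) {
    // Good enough: stop scanning immediately.
    if (Cands[I].Waste <= Cands[I].Usable / FragmentedBytesDivisor)
      return I;
    // Otherwise remember the smallest-waste candidate seen so far.
    if (Cands[I].Waste < MinWaste) {
      MinWaste = Cands[I].Waste;
      Best = I;
    }
  }
  return Best;
}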
@@ -466,15 +432,10 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
-      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
-    };
-    disableLists(COMMITTED);
-    disableLists(DECOMMITTED);
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
+    }
     QuarantinePos = -1U;
   }

@@ -489,7 +450,7 @@ class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }

-  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));

     // Cache should be populated with valid entries when not empty
@@ -498,86 +459,66 @@ class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;

+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
     Entries[FreeIndex] = Entry;
-    pushFront(FreeIndex, ListType);
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
     EntriesCount++;

-    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
-      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
-                Entries[Entries[EntryLists[ListType].Head].Next].Time);
-    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }

-  // Joins the entries adjacent to Entries[I], effectively
-  // unlinking Entries[I] from the list
-  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    if (I == EntryLists[ListType].Head)
-      EntryLists[ListType].Head = Entries[I].Next;
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;

-    if (I == EntryLists[ListType].Tail)
-      EntryLists[ListType].Tail = Entries[I].Prev;
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
-  }
-
-  // Invalidates Entries[I], removes Entries[I] from list, and pushes
-  // Entries[I] onto the stack of available entries
-  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();

-    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;

     // Cache should not have valid entries when not empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
     }
   }

-  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
-      EntryLists[ListType].Tail = static_cast<u16>(I);
-    else
-      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
-
-    Entries[I].Next = EntryLists[ListType].Head;
-    Entries[I].Prev = CachedBlock::InvalidEntry;
-    EntryLists[ListType].Head = static_cast<u16>(I);
-  }
-
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
-        for (uptr I = EntryLists[ListType].Head;
-             I != CachedBlock::InvalidEntry;) {
-          uptr ToRemove = I;
-          I = Entries[I].Next;
-          MapInfo[N] = Entries[ToRemove].MemMap;
-          remove(ToRemove, ListType);
-          N++;
-        }
-      };
-      emptyList(COMMITTED);
-      emptyList(DECOMMITTED);
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+        if (!Entries[I].isValid())
+          continue;
+        MapInfo[N] = Entries[I].MemMap;
+        remove(I);
+        N++;
+      }
       EntriesCount = 0;
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
-        DCHECK(!Entries[I].isValid());
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -604,14 +545,8 @@ class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (u16 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;
-         I = Entries[I].Next) {
-      if (Entries[I].Time && Entries[I].Time <= Time) {
-        unlink(I, COMMITTED);
-        pushFront(I, DECOMMITTED);
-      }
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
       releaseIfOlderThan(Entries[I], Time);
-    }
   }

   HybridMutex Mutex;
@@ -628,12 +563,10 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};

-  // EntryLists stores the head and tail indices of all
-  // lists being used to store valid cache entries.
-  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
-  // COMMITTED entries have memory chunks that have not been released to the OS
-  // DECOMMITTED entries have memory chunks that have been released to the OS
-  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
@@ -773,7 +706,6 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
   }
   return Ptr;
 }
-
 // As with the Primary, the size passed to this function includes any desired
 // alignment, so that the frontend can align the user allocation. The hint
 // parameter allows us to unmap spurious memory when dealing with larger
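For context on the data structure this revert reinstates: valid entries live in a fixed array and are threaded into a single doubly linked LRU list via u16 indices, while free slots form a singly linked availability stack reusing the same Next field. Below is a minimal standalone sketch of that layout, assuming a tiny fixed array, no locking, and illustrative names (LruCache, Entry, kArraySize); it is not the scudo implementation.

// Standalone sketch of an index-linked LRU cache with a free-slot stack.
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint16_t kInvalidEntry = UINT16_MAX;
constexpr uint16_t kArraySize = 4;

struct Entry {
  uint64_t Value = 0;
  bool Valid = false;
  uint16_t Next = kInvalidEntry;
  uint16_t Prev = kInvalidEntry;
};

struct LruCache {
  Entry Entries[kArraySize];
  uint16_t LRUHead = kInvalidEntry; // most recently inserted entry
  uint16_t LRUTail = kInvalidEntry; // least recently inserted entry
  uint16_t AvailableHead = 0;       // top of the free-slot stack
  uint16_t EntriesCount = 0;

  LruCache() {
    // Chain all slots into the availability stack.
    for (uint16_t I = 0; I + 1 < kArraySize; I++)
      Entries[I].Next = I + 1;
    Entries[kArraySize - 1].Next = kInvalidEntry;
  }

  bool insert(uint64_t Value) {
    if (AvailableHead == kInvalidEntry)
      return false; // full; the real cache evicts LRUTail instead
    uint16_t FreeIndex = AvailableHead;
    AvailableHead = Entries[FreeIndex].Next;

    if (EntriesCount == 0)
      LRUTail = FreeIndex;
    else
      Entries[LRUHead].Prev = FreeIndex;

    Entries[FreeIndex] = {Value, true, LRUHead, kInvalidEntry};
    LRUHead = FreeIndex;
    EntriesCount++;
    return true;
  }

  void remove(uint16_t I) {
    assert(Entries[I].Valid);
    Entries[I].Valid = false;

    // Unlink from the doubly linked LRU list.
    if (I == LRUHead)
      LRUHead = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == LRUTail)
      LRUTail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;

    // Push the slot back onto the availability stack.
    Entries[I].Next = AvailableHead;
    AvailableHead = I;
    EntriesCount--;
  }
};

int main() {
  LruCache Cache;
  Cache.insert(10);
  Cache.insert(20);
  Cache.insert(30);
  Cache.remove(Cache.LRUTail); // drop the least recently inserted entry (10)
  for (uint16_t I = Cache.LRUHead; I != kInvalidEntry; I = Cache.Entries[I].Next)
    std::printf("%llu\n", (unsigned long long)Cache.Entries[I].Value);
  return 0;
}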
