@@ -180,6 +180,14 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
+  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
+
+  // TODO: Refactor the intrusive list to support non-pointer link type
+  typedef struct {
+    u16 Head;
+    u16 Tail;
+  } ListInfo;
+
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
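The `EntryListT`/`ListInfo` pair replaces the single LRU list with two doubly-linked lists that live entirely inside the fixed `Entries` array: links are `u16` indices rather than pointers (hence the TODO about the intrusive list). Below is a minimal standalone sketch of that idiom; `Node`, `List`, and the `main` driver are illustrative names, not part of the patch:

```cpp
// Array-backed doubly-linked list addressed by u16 indices, mirroring the
// ListInfo/EntryListT layout above. Hypothetical sketch, not scudo code.
#include <cstdint>
#include <cstdio>

constexpr uint16_t InvalidEntry = UINT16_MAX; // stand-in for CachedBlock::InvalidEntry

struct Node {
  uint16_t Next, Prev;
  int Payload;
};

struct List {
  uint16_t Head = InvalidEntry;
  uint16_t Tail = InvalidEntry;
};

// Push slot I onto the front of L; Nodes[] plays the role of Entries[].
void pushFront(Node *Nodes, List &L, uint16_t I) {
  if (L.Tail == InvalidEntry)
    L.Tail = I; // first element is both head and tail
  else
    Nodes[L.Head].Prev = I;
  Nodes[I].Next = L.Head;
  Nodes[I].Prev = InvalidEntry;
  L.Head = I;
}

int main() {
  Node Nodes[4];
  List L;
  for (uint16_t I = 0; I < 4; I++) {
    Nodes[I].Payload = I;
    pushFront(Nodes, L, I);
  }
  // Walk most-recent-first, as getStats() does below: prints 3 2 1 0
  for (uint16_t I = L.Head; I != InvalidEntry; I = Nodes[I].Next)
    printf("%d ", Nodes[I].Payload);
  printf("\n");
}
```

Keeping links as 16-bit indices into a fixed array avoids per-node allocation and keeps the cache metadata compact.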
@@ -197,13 +205,18 @@ template <typename Config> class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");
 
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      CachedBlock &Entry = Entries[I];
-      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                  "BlockSize: %zu %s\n",
-                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-    }
+    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        CachedBlock &Entry = Entries[I];
+        Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                    "BlockSize: %zu %s\n",
+                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+      }
+    };
+    printList(COMMITTED);
+    printList(DECOMMITTED);
   }
 
   // Ensure the default maximum specified fits the array.
@@ -227,8 +240,10 @@ template <typename Config> class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
     // The cache is initially empty
-    LRUHead = CachedBlock::InvalidEntry;
-    LRUTail = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
+    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
 
     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -309,16 +324,22 @@ template <typename Config> class MapAllocatorCache {
 
       // All excess entries are evicted from the cache
       while (needToEvict()) {
+        EntryListT EvictionListType;
+        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
+          EvictionListType = COMMITTED;
+        else
+          EvictionListType = DECOMMITTED;
        // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
-        remove(LRUTail);
+        EvictionMemMaps.push_back(
+            Entries[EntryLists[EvictionListType].Tail].MemMap);
+        remove(EntryLists[EvictionListType].Tail, EvictionListType);
       }
 
-      insert(Entry);
+      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
 
       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0);
+    } while (0); // ScopedLock L(Mutex);
 
     for (MemMapT &EvictMemMap : EvictionMemMaps)
       EvictMemMap.unmap(EvictMemMap.getBase(), EvictMemMap.getCapacity());
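The eviction loop above always drains the DECOMMITTED list before touching COMMITTED entries, and `insert` files released blocks (`Entry.Time == 0`) under DECOMMITTED. A compilable standalone model of just the selection policy, with stand-in types (not the scudo definitions):

```cpp
// Hypothetical model of the eviction-order choice above: prefer dropping
// entries whose pages were already returned to the OS; fall back to
// committed entries only when no decommitted ones remain.
#include <cstdint>

enum EntryListT { COMMITTED = 0, DECOMMITTED = 1 };
constexpr uint16_t InvalidEntry = UINT16_MAX;
struct ListInfo {
  uint16_t Head = InvalidEntry;
  uint16_t Tail = InvalidEntry;
};

EntryListT pickEvictionList(const ListInfo Lists[2]) {
  // An empty list has an invalid tail, so checking Tail alone suffices.
  return Lists[DECOMMITTED].Tail == InvalidEntry ? COMMITTED : DECOMMITTED;
}
```

Evicting decommitted entries first loses less: their backing pages are already released, while a committed entry can still satisfy a retrieval without a fresh commit.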
@@ -335,17 +356,14 @@ template <typename Config> class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
-    bool Found = false;
     CachedBlock Entry;
     uptr EntryHeaderPos = 0;
-    {
-      ScopedLock L(Mutex);
-      CallsToRetrieve++;
-      if (EntriesCount == 0)
-        return false;
-      u32 OptimalFitIndex = 0;
-      uptr MinDiff = UINTPTR_MAX;
-      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
+    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
+    uptr MinDiff = UINTPTR_MAX;
+    EntryListT OptimalFitListType = NONE;
+
+    auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
+      for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
           I = Entries[I].Next) {
        const uptr CommitBase = Entries[I].CommitBase;
        const uptr CommitSize = Entries[I].CommitSize;
@@ -355,36 +373,48 @@ template <typename Config> class MapAllocatorCache {
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
          continue;
-        }
-        Found = true;
+
        const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to the
-        // requested size.
+        // immediately use a cached block if its size is close enough to
+        // the requested size.
        const uptr MaxAllowedFragmentedBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= MaxAllowedFragmentedBytes) {
          OptimalFitIndex = I;
          EntryHeaderPos = HeaderPos;
-          break;
+          OptimalFitListType = ListType;
+          return true;
        }
+
        // keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize)
        if (Diff > MinDiff)
          continue;
        OptimalFitIndex = I;
        MinDiff = Diff;
+        OptimalFitListType = ListType;
        EntryHeaderPos = HeaderPos;
      }
-      if (Found) {
-        Entry = Entries[OptimalFitIndex];
-        remove(OptimalFitIndex);
-        SuccessfulRetrieves++;
-      }
-    }
-    if (!Found)
-      return false;
+      return (OptimalFitIndex != CachedBlock::InvalidEntry);
+    };
+
+    {
+      ScopedLock L(Mutex);
+      CallsToRetrieve++;
+      if (EntriesCount == 0)
+        return false;
+
+      // Prioritize valid fit from COMMITTED entries over
+      // optimal fit from DECOMMITTED entries
+      if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
+        return false;
+
+      Entry = Entries[OptimalFitIndex];
+      remove(OptimalFitIndex, OptimalFitListType);
+      SuccessfulRetrieves++;
+    } // ScopedLock L(Mutex);
 
    *H = reinterpret_cast<LargeBlock::Header *>(
        LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
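The retrieval path now runs the same best-fit scan over each list, and scanning COMMITTED first means a workable committed block wins even when a decommitted one would fit more tightly. A simplified standalone model of the fit rule follows; the names and the flat `BlockSizes` array are illustrative, and the patch itself computes the threshold from the usable span past the header rather than the raw block size:

```cpp
// Best-fit scan with a "good enough" cutoff, modelling FindAvailableEntry
// above: accept the first block wasting at most 1/Divisor of its size,
// otherwise remember the tightest fit seen. Hypothetical sketch.
#include <cstddef>
#include <cstdint>

constexpr size_t Divisor = 10; // mirrors FragmentedBytesDivisor

// Returns the chosen index, or SIZE_MAX when nothing is large enough.
size_t bestFit(const size_t *BlockSizes, size_t N, size_t Want) {
  size_t Best = SIZE_MAX, MinWaste = SIZE_MAX;
  for (size_t I = 0; I < N; I++) {
    if (BlockSizes[I] < Want)
      continue; // too small to hold the request
    const size_t Waste = BlockSizes[I] - Want;
    if (Waste <= BlockSizes[I] / Divisor)
      return I; // close enough: stop scanning immediately
    if (Waste < MinWaste) {
      MinWaste = Waste;
      Best = I;
    }
  }
  return Best;
}
```

The early exit keeps the common case cheap; the fallback only pays for a full walk when no entry lands inside the fragmentation budget.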
@@ -448,10 +478,15 @@ template <typename Config> class MapAllocatorCache {
        Quarantine[I].invalidate();
      }
    }
-    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
-      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                            Entries[I].CommitSize, 0);
-    }
+    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
+      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
+           I = Entries[I].Next) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    };
+    disableLists(COMMITTED);
+    disableLists(DECOMMITTED);
    QuarantinePos = -1U;
  }
 
@@ -466,7 +501,7 @@ template <typename Config> class MapAllocatorCache {
    return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
  }
 
-  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
    DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));
 
    // Cache should be populated with valid entries when not empty
@@ -475,66 +510,86 @@ template <typename Config> class MapAllocatorCache {
    u32 FreeIndex = AvailableHead;
    AvailableHead = Entries[AvailableHead].Next;
 
-    if (EntriesCount == 0) {
-      LRUTail = static_cast<u16>(FreeIndex);
-    } else {
-      // Check list order
-      if (EntriesCount > 1)
-        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
-      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
-    }
-
    Entries[FreeIndex] = Entry;
-    Entries[FreeIndex].Next = LRUHead;
-    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
-    LRUHead = static_cast<u16>(FreeIndex);
+    pushFront(FreeIndex, ListType);
    EntriesCount++;
 
+    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
+      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
+                Entries[Entries[EntryLists[ListType].Head].Next].Time);
+    }
    // Availability stack should not have available entries when all entries
    // are in use
    if (EntriesCount == Config::getEntriesArraySize())
      DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
  }
 
-  void remove(uptr I) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();
-
-    if (I == LRUHead)
-      LRUHead = Entries[I].Next;
+  // Joins the entries adjacent to Entries[I], effectively
+  // unlinking Entries[I] from the list
+  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (I == EntryLists[ListType].Head)
+      EntryLists[ListType].Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
 
-    if (I == LRUTail)
-      LRUTail = Entries[I].Prev;
+    if (I == EntryLists[ListType].Tail)
+      EntryLists[ListType].Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
+  }
 
+  // Invalidates Entries[I], removes Entries[I] from list, and pushes
+  // Entries[I] onto the stack of available entries
+  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    unlink(I, ListType);
    Entries[I].Next = AvailableHead;
    AvailableHead = static_cast<u16>(I);
    EntriesCount--;
 
    // Cache should not have valid entries when empty
    if (EntriesCount == 0) {
-      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
-      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
+      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
    }
  }
 
+  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
+    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
+      EntryLists[ListType].Tail = static_cast<u16>(I);
+    else
+      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
+
+    Entries[I].Next = EntryLists[ListType].Head;
+    Entries[I].Prev = CachedBlock::InvalidEntry;
+    EntryLists[ListType].Head = static_cast<u16>(I);
+  }
+
  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
    {
      ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
-        if (!Entries[I].isValid())
-          continue;
-        MapInfo[N] = Entries[I].MemMap;
-        remove(I);
-        N++;
-      }
+      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
+        for (uptr I = EntryLists[ListType].Head;
+             I != CachedBlock::InvalidEntry;) {
+          uptr ToRemove = I;
+          I = Entries[I].Next;
+          MapInfo[N] = Entries[ToRemove].MemMap;
+          remove(ToRemove, ListType);
+          N++;
+        }
+      };
+      emptyList(COMMITTED);
+      emptyList(DECOMMITTED);
      EntriesCount = 0;
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+        DCHECK(!Entries[I].isValid());
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
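`remove()` threads freed slots onto `AvailableHead` through the same `Next` field the LRU lists use, so a slot needs no extra storage while it sits in the free stack. A standalone sketch of that handoff (names hypothetical):

```cpp
// Free-slot stack reusing the list link, modelling the AvailableHead
// handling in remove()/insert() above. Hypothetical sketch.
#include <cstdint>
#include <cstdio>

constexpr uint16_t InvalidEntry = UINT16_MAX;
struct Slot {
  uint16_t Next = InvalidEntry;
};

struct FreeStack {
  uint16_t Top = InvalidEntry;
  void push(Slot *Slots, uint16_t I) {
    Slots[I].Next = Top; // reuse the list link as the stack link
    Top = I;
  }
  uint16_t pop(Slot *Slots) {
    const uint16_t I = Top;
    Top = Slots[I].Next;
    return I;
  }
};

int main() {
  Slot Slots[3];
  FreeStack S;
  for (uint16_t I = 0; I < 3; I++)
    S.push(Slots, I);
  while (S.Top != InvalidEntry)
    printf("%d ", S.pop(Slots)); // LIFO: prints 2 1 0
  printf("\n");
}
```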
@@ -561,8 +616,17 @@ template <typename Config> class MapAllocatorCache {
    OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
      releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
+    // Capture the successor before relinking: pushFront() rewrites
+    // Entries[I].Next to point into the DECOMMITTED list.
+    for (u16 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;) {
+      const u16 Next = Entries[I].Next;
+      if (Entries[I].Time && Entries[I].Time <= Time) {
+        unlink(I, COMMITTED);
+        pushFront(I, DECOMMITTED);
+      }
      releaseIfOlderThan(Entries[I], Time);
+      I = Next;
+    }
  }
 
  HybridMutex Mutex;
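Moving a node between index-based lists rewrites its `Next` link, so a traversal that relinks nodes must capture the successor first — the pattern `releaseOlderThan()` needs here. A standalone illustration of the hazard and the idiom, with all names hypothetical:

```cpp
// Save-the-successor idiom for relinking during traversal. pushFront()
// rewrites Next, so reading it after the move would continue the walk in
// the destination list and skip the rest of the source list. Sketch only.
#include <cstdint>

constexpr uint16_t InvalidEntry = UINT16_MAX;
struct Node {
  uint16_t Next, Prev;
  bool Stale;
};
struct List {
  uint16_t Head = InvalidEntry;
  uint16_t Tail = InvalidEntry;
};

void unlink(Node *N, List &L, uint16_t I) {
  if (I == L.Head)
    L.Head = N[I].Next;
  else
    N[N[I].Prev].Next = N[I].Next;
  if (I == L.Tail)
    L.Tail = N[I].Prev;
  else
    N[N[I].Next].Prev = N[I].Prev;
}

void pushFront(Node *N, List &L, uint16_t I) {
  if (L.Tail == InvalidEntry)
    L.Tail = I;
  else
    N[L.Head].Prev = I;
  N[I].Next = L.Head;
  N[I].Prev = InvalidEntry;
  L.Head = I;
}

void moveStale(Node *Nodes, List &From, List &To) {
  for (uint16_t I = From.Head; I != InvalidEntry;) {
    const uint16_t Next = Nodes[I].Next; // capture before any relinking
    if (Nodes[I].Stale) {
      unlink(Nodes, From, I);
      pushFront(Nodes, To, I); // rewrites Nodes[I].Next
    }
    I = Next;
  }
}
```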
@@ -579,10 +640,12 @@ template <typename Config> class MapAllocatorCache {
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};
 
-  // The LRUHead of the cache is the most recently used cache entry
-  u16 LRUHead GUARDED_BY(Mutex) = 0;
-  // The LRUTail of the cache is the least recently used cache entry
-  u16 LRUTail GUARDED_BY(Mutex) = 0;
+  // EntryLists stores the head and tail indices of all
+  // lists being used to store valid cache entries.
+  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
+  // COMMITTED entries have memory chunks that have not been released to the OS
+  // DECOMMITTED entries have memory chunks that have been released to the OS
+  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
  // The AvailableHead is the top of the stack of available entries
  u16 AvailableHead GUARDED_BY(Mutex) = 0;
};