@@ -184,14 +184,6 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
 class MapAllocatorCache {
 public:
-  typedef enum { COMMITTED = 0, DECOMMITTED = 1, NONE } EntryListT;
-
-  // TODO: Refactor the intrusive list to support non-pointer link type
-  typedef struct {
-    u16 Head;
-    u16 Tail;
-  } ListInfo;
-
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -209,18 +201,13 @@ class MapAllocatorCache {
                 SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
     Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

-    auto printList = [&](EntryListT ListType) REQUIRES(Mutex) {
-      for (u32 I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        CachedBlock &Entry = Entries[I];
-        Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
-                    "BlockSize: %zu %s\n",
-                    Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
-                    Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
-      }
-    };
-    printList(COMMITTED);
-    printList(DECOMMITTED);
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      CachedBlock &Entry = Entries[I];
+      Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+                  "BlockSize: %zu %s\n",
+                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+    }
   }

   // Ensure the default maximum specified fits the array.
@@ -244,10 +231,8 @@ class MapAllocatorCache {
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

     // The cache is initially empty
-    EntryLists[COMMITTED].Head = CachedBlock::InvalidEntry;
-    EntryLists[COMMITTED].Tail = CachedBlock::InvalidEntry;
-    EntryLists[DECOMMITTED].Head = CachedBlock::InvalidEntry;
-    EntryLists[DECOMMITTED].Tail = CachedBlock::InvalidEntry;
+    LRUHead = CachedBlock::InvalidEntry;
+    LRUTail = CachedBlock::InvalidEntry;

     // Available entries will be retrieved starting from the beginning of the
     // Entries array
@@ -265,6 +250,7 @@ class MapAllocatorCache {
     const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
     u64 Time;
     CachedBlock Entry;
+
     Entry.CommitBase = CommitBase;
     Entry.CommitSize = CommitSize;
     Entry.BlockBegin = BlockBegin;
@@ -326,27 +312,18 @@ class MapAllocatorCache {
         Entry = PrevEntry;
       }

-      // All excess entries are evicted from the cache.
-      // DECOMMITTED entries, being older than the COMMITTED
-      // entries, are evicted first in least recently used (LRU)
-      // fashioned followed by the COMMITTED entries
+      // All excess entries are evicted from the cache
       while (needToEvict()) {
-        EntryListT EvictionListType;
-        if (EntryLists[DECOMMITTED].Tail == CachedBlock::InvalidEntry)
-          EvictionListType = COMMITTED;
-        else
-          EvictionListType = DECOMMITTED;
         // Save MemMaps of evicted entries to perform unmap outside of lock
-        EvictionMemMaps.push_back(
-            Entries[EntryLists[EvictionListType].Tail].MemMap);
-        remove(EntryLists[EvictionListType].Tail, EvictionListType);
+        EvictionMemMaps.push_back(Entries[LRUTail].MemMap);
+        remove(LRUTail);
       }

-      insert(Entry, (Entry.Time == 0) ? DECOMMITTED : COMMITTED);
+      insert(Entry);

       if (OldestTime == 0)
         OldestTime = Entry.Time;
-    } while (0); // ScopedLock L(Mutex);
+    } while (0);

     for (MemMapT &EvictMemMap : EvictionMemMaps)
       unmapCallBack(EvictMemMap);
@@ -363,14 +340,17 @@ class MapAllocatorCache {
     // 10% of the requested size proved to be the optimal choice for
     // retrieving cached blocks after testing several options.
     constexpr u32 FragmentedBytesDivisor = 10;
+    bool Found = false;
     CachedBlock Entry;
-    uptr OptimalFitIndex = CachedBlock::InvalidEntry;
-    uptr MinDiff = UINTPTR_MAX;
-    EntryListT OptimalFitListType = NONE;
     EntryHeaderPos = 0;
-
-    auto FindAvailableEntry = [&](EntryListT ListType) REQUIRES(Mutex) {
-      for (uptr I = EntryLists[ListType].Head; I != CachedBlock::InvalidEntry;
+    {
+      ScopedLock L(Mutex);
+      CallsToRetrieve++;
+      if (EntriesCount == 0)
+        return {};
+      u32 OptimalFitIndex = 0;
+      uptr MinDiff = UINTPTR_MAX;
+      for (u32 I = LRUHead; I != CachedBlock::InvalidEntry;
            I = Entries[I].Next) {
         const uptr CommitBase = Entries[I].CommitBase;
         const uptr CommitSize = Entries[I].CommitSize;
@@ -380,48 +360,34 @@ class MapAllocatorCache {
         if (HeaderPos > CommitBase + CommitSize)
           continue;
         if (HeaderPos < CommitBase ||
-            AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+            AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
           continue;
-
+        }
+        Found = true;
         const uptr Diff = HeaderPos - CommitBase;
-        // immediately use a cached block if it's size is close enough to
-        // the requested size.
+        // immediately use a cached block if it's size is close enough to the
+        // requested size.
         const uptr MaxAllowedFragmentedBytes =
             (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
         if (Diff <= MaxAllowedFragmentedBytes) {
           OptimalFitIndex = I;
           EntryHeaderPos = HeaderPos;
-          OptimalFitListType = ListType;
-          return true;
+          break;
         }
-
         // keep track of the smallest cached block
         // that is greater than (AllocSize + HeaderSize)
         if (Diff > MinDiff)
           continue;
         OptimalFitIndex = I;
         MinDiff = Diff;
-        OptimalFitListType = ListType;
         EntryHeaderPos = HeaderPos;
       }
-      return (OptimalFitIndex != CachedBlock::InvalidEntry);
-    };
-
-    {
-      ScopedLock L(Mutex);
-      CallsToRetrieve++;
-      if (EntriesCount == 0)
-        return {};
-
-      // Prioritize valid fit from COMMITTED entries over
-      // optimal fit from DECOMMITTED entries
-      if (!FindAvailableEntry(COMMITTED) && !FindAvailableEntry(DECOMMITTED))
-        return {};
-
-      Entry = Entries[OptimalFitIndex];
-      remove(OptimalFitIndex, OptimalFitListType);
-      SuccessfulRetrieves++;
-    } // ScopedLock L(Mutex);
+      if (Found) {
+        Entry = Entries[OptimalFitIndex];
+        remove(OptimalFitIndex);
+        SuccessfulRetrieves++;
+      }
+    }

     return Entry;
   }
@@ -466,15 +432,10 @@ class MapAllocatorCache {
         Quarantine[I].invalidate();
       }
     }
-    auto disableLists = [&](EntryListT EntryList) REQUIRES(Mutex) {
-      for (u32 I = EntryLists[EntryList].Head; I != CachedBlock::InvalidEntry;
-           I = Entries[I].Next) {
-        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
-                                              Entries[I].CommitSize, 0);
-      }
-    };
-    disableLists(COMMITTED);
-    disableLists(DECOMMITTED);
+    for (u32 I = LRUHead; I != CachedBlock::InvalidEntry; I = Entries[I].Next) {
+      Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                            Entries[I].CommitSize, 0);
+    }
     QuarantinePos = -1U;
   }

@@ -489,7 +450,7 @@ class MapAllocatorCache {
     return (EntriesCount >= atomic_load_relaxed(&MaxEntriesCount));
   }

-  void insert(const CachedBlock &Entry, EntryListT ListType) REQUIRES(Mutex) {
+  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
     DCHECK_LT(EntriesCount, atomic_load_relaxed(&MaxEntriesCount));

     // Cache should be populated with valid entries when not empty
@@ -498,86 +459,66 @@ class MapAllocatorCache {
     u32 FreeIndex = AvailableHead;
     AvailableHead = Entries[AvailableHead].Next;

+    if (EntriesCount == 0) {
+      LRUTail = static_cast<u16>(FreeIndex);
+    } else {
+      // Check list order
+      if (EntriesCount > 1)
+        DCHECK_GE(Entries[LRUHead].Time, Entries[Entries[LRUHead].Next].Time);
+      Entries[LRUHead].Prev = static_cast<u16>(FreeIndex);
+    }
+
     Entries[FreeIndex] = Entry;
-    pushFront(FreeIndex, ListType);
+    Entries[FreeIndex].Next = LRUHead;
+    Entries[FreeIndex].Prev = CachedBlock::InvalidEntry;
+    LRUHead = static_cast<u16>(FreeIndex);
     EntriesCount++;

-    if (Entries[EntryLists[ListType].Head].Next != CachedBlock::InvalidEntry) {
-      DCHECK_GE(Entries[EntryLists[ListType].Head].Time,
-                Entries[Entries[EntryLists[ListType].Head].Next].Time);
-    }
     // Availability stack should not have available entries when all entries
     // are in use
     if (EntriesCount == Config::getEntriesArraySize())
       DCHECK_EQ(AvailableHead, CachedBlock::InvalidEntry);
   }

-  // Joins the entries adjacent to Entries[I], effectively
-  // unlinking Entries[I] from the list
-  void unlink(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    if (I == EntryLists[ListType].Head)
-      EntryLists[ListType].Head = Entries[I].Next;
+  void remove(uptr I) REQUIRES(Mutex) {
+    DCHECK(Entries[I].isValid());
+
+    Entries[I].invalidate();
+
+    if (I == LRUHead)
+      LRUHead = Entries[I].Next;
     else
       Entries[Entries[I].Prev].Next = Entries[I].Next;

-    if (I == EntryLists[ListType].Tail)
-      EntryLists[ListType].Tail = Entries[I].Prev;
+    if (I == LRUTail)
+      LRUTail = Entries[I].Prev;
     else
       Entries[Entries[I].Next].Prev = Entries[I].Prev;
-  }
-
-  // Invalidates Entries[I], removes Entries[I] from list, and pushes
-  // Entries[I] onto the stack of available entries
-  void remove(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    DCHECK(Entries[I].isValid());
-
-    Entries[I].invalidate();

-    unlink(I, ListType);
     Entries[I].Next = AvailableHead;
     AvailableHead = static_cast<u16>(I);
     EntriesCount--;

     // Cache should not have valid entries when not empty
     if (EntriesCount == 0) {
-      DCHECK_EQ(EntryLists[COMMITTED].Head, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[COMMITTED].Tail, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[DECOMMITTED].Head, CachedBlock::InvalidEntry);
-      DCHECK_EQ(EntryLists[DECOMMITTED].Tail, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUHead, CachedBlock::InvalidEntry);
+      DCHECK_EQ(LRUTail, CachedBlock::InvalidEntry);
     }
   }

-  inline void pushFront(uptr I, EntryListT ListType) REQUIRES(Mutex) {
-    if (EntryLists[ListType].Tail == CachedBlock::InvalidEntry)
-      EntryLists[ListType].Tail = static_cast<u16>(I);
-    else
-      Entries[EntryLists[ListType].Head].Prev = static_cast<u16>(I);
-
-    Entries[I].Next = EntryLists[ListType].Head;
-    Entries[I].Prev = CachedBlock::InvalidEntry;
-    EntryLists[ListType].Head = static_cast<u16>(I);
-  }
-
   void empty() {
     MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      auto emptyList = [&](EntryListT ListType) REQUIRES(Mutex) {
-        for (uptr I = EntryLists[ListType].Head;
-             I != CachedBlock::InvalidEntry;) {
-          uptr ToRemove = I;
-          I = Entries[I].Next;
-          MapInfo[N] = Entries[ToRemove].MemMap;
-          remove(ToRemove, ListType);
-          N++;
-        }
-      };
-      emptyList(COMMITTED);
-      emptyList(DECOMMITTED);
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
+        if (!Entries[I].isValid())
+          continue;
+        MapInfo[N] = Entries[I].MemMap;
+        remove(I);
+        N++;
+      }
       EntriesCount = 0;
-      for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
-        DCHECK(!Entries[I].isValid());
     }
     for (uptr I = 0; I < N; I++) {
       MemMapT &MemMap = MapInfo[I];
@@ -604,14 +545,8 @@ class MapAllocatorCache {
     OldestTime = 0;
     for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (u16 I = EntryLists[COMMITTED].Head; I != CachedBlock::InvalidEntry;
-         I = Entries[I].Next) {
-      if (Entries[I].Time && Entries[I].Time <= Time) {
-        unlink(I, COMMITTED);
-        pushFront(I, DECOMMITTED);
-      }
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
       releaseIfOlderThan(Entries[I], Time);
-    }
   }

   HybridMutex Mutex;
@@ -628,12 +563,10 @@ class MapAllocatorCache {
   NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};

-  // EntryLists stores the head and tail indices of all
-  // lists being used to store valid cache entries.
-  // Currently there are lists storing COMMITTED and DECOMMITTED entries.
-  // COMMITTED entries have memory chunks that have not been released to the OS
-  // DECOMMITTED entries have memory chunks that have been released to the OS
-  ListInfo EntryLists[2] GUARDED_BY(Mutex) = {};
+  // The LRUHead of the cache is the most recently used cache entry
+  u16 LRUHead GUARDED_BY(Mutex) = 0;
+  // The LRUTail of the cache is the least recently used cache entry
+  u16 LRUTail GUARDED_BY(Mutex) = 0;
   // The AvailableHead is the top of the stack of available entries
   u16 AvailableHead GUARDED_BY(Mutex) = 0;
 };
@@ -773,7 +706,6 @@ MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
   }
   return Ptr;
 }
-
 // As with the Primary, the size passed to this function includes any desired
 // alignment, so that the frontend can align the user allocation. The hint
 // parameter allows us to unmap spurious memory when dealing with larger
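
For readers unfamiliar with the structure this change restores, the sketch below illustrates an intrusive LRU list kept inside a fixed array and linked by u16 indices instead of pointers. It is a standalone illustration, not the scudo code: the names (Node, LRUList, pushFront, remove), the array size, and the sentinel value are simplified assumptions.

// Minimal sketch of an index-linked intrusive LRU list over a fixed array.
#include <cassert>
#include <cstdint>

struct Node {
  static constexpr uint16_t InvalidEntry = UINT16_MAX; // assumed sentinel
  uint16_t Next = InvalidEntry;
  uint16_t Prev = InvalidEntry;
  bool Valid = false;
};

struct LRUList {
  static constexpr int Size = 32; // hypothetical capacity
  Node Entries[Size];
  uint16_t Head = Node::InvalidEntry; // most recently used
  uint16_t Tail = Node::InvalidEntry; // least recently used

  // Link Entries[I] at the front (most recently used position).
  void pushFront(uint16_t I) {
    Entries[I].Valid = true;
    Entries[I].Prev = Node::InvalidEntry;
    Entries[I].Next = Head;
    if (Head == Node::InvalidEntry)
      Tail = I; // list was empty
    else
      Entries[Head].Prev = I;
    Head = I;
  }

  // Unlink Entries[I] from wherever it sits in the list.
  void remove(uint16_t I) {
    assert(Entries[I].Valid);
    Entries[I].Valid = false;
    if (I == Head)
      Head = Entries[I].Next;
    else
      Entries[Entries[I].Prev].Next = Entries[I].Next;
    if (I == Tail)
      Tail = Entries[I].Prev;
    else
      Entries[Entries[I].Next].Prev = Entries[I].Prev;
  }
};

int main() {
  LRUList L;
  L.pushFront(0);
  L.pushFront(1); // 1 is now most recent, 0 least recent
  L.remove(0);    // evict the least recently used entry
  assert(L.Head == 1 && L.Tail == 1);
  return 0;
}

Storing links as two-byte indices rather than pointers keeps the per-entry overhead small and mirrors the LRUHead/LRUTail/AvailableHead fields reinstated above.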