@@ -1303,19 +1303,6 @@ size_t CacheAllocator<CacheTrait>::wakeUpWaitersLocked(folly::StringPiece key,
 template <typename CacheTrait>
 bool CacheAllocator<CacheTrait>::moveRegularItemWithSync(
     Item& oldItem, WriteHandle& newItemHdl) {
-  // on function exit - the new item handle is no longer moving
-  // and other threads may access it - but in case where
-  // we failed to replace in access container we can give the
-  // new item back to the allocator
-  auto guard = folly::makeGuard([&]() {
-    auto ref = newItemHdl->unmarkMoving();
-    if (UNLIKELY(ref == 0)) {
-      const auto res =
-          releaseBackToAllocator(*newItemHdl, RemoveContext::kNormal, false);
-      XDCHECK(res == ReleaseRes::kReleased);
-    }
-  });
-
   XDCHECK(oldItem.isMoving());
   XDCHECK(!oldItem.isExpired());
   // TODO: should we introduce new latency tracker. E.g. evictRegularLatency_
@@ -1330,20 +1317,26 @@ bool CacheAllocator<CacheTrait>::moveRegularItemWithSync(
     newItemHdl->markNvmClean();
   }
 
-  // mark new item as moving to block readers until the data is copied
-  // (moveCb is called). Mark item in MMContainer temporarily (TODO: should
-  // we remove markMoving requirement for the item to be linked?)
-  newItemHdl->markInMMContainer();
-  auto marked = newItemHdl->markMoving(false /* there is already a handle */);
-  newItemHdl->unmarkInMMContainer();
-  XDCHECK(marked);
-
   auto predicate = [&](const Item& item){
     // we rely on moving flag being set (it should block all readers)
     XDCHECK(item.getRefCount() == 0);
     return true;
   };
 
+  if (config_.moveCb) {
+    // Execute the move callback. We cannot make any guarantees about the
+    // consistency of the old item beyond this point, because the callback can
+    // do more than a simple memcpy() e.g. update external references. If there
+    // are any remaining handles to the old item, it is the caller's
+    // responsibility to invalidate them. The move can only fail after this
+    // statement if the old item has been removed or replaced, in which case it
+    // should be fine for it to be left in an inconsistent state.
+    config_.moveCb(oldItem, *newItemHdl, nullptr);
+  } else {
+    std::memcpy(newItemHdl->getMemory(), oldItem.getMemory(),
+                oldItem.getSize());
+  }
+
   auto replaced = accessContainer_->replaceIf(oldItem, *newItemHdl,
                                               predicate);
   // another thread may have called insertOrReplace which could have
@@ -1363,26 +1356,13 @@ bool CacheAllocator<CacheTrait>::moveRegularItemWithSync(
   // 3. replaced handle is returned and eventually drops
   //    ref to 0 and the item is recycled back to allocator.
 
-  if (config_.moveCb) {
-    // Execute the move callback. We cannot make any guarantees about the
-    // consistency of the old item beyond this point, because the callback can
-    // do more than a simple memcpy() e.g. update external references. If there
-    // are any remaining handles to the old item, it is the caller's
-    // responsibility to invalidate them. The move can only fail after this
-    // statement if the old item has been removed or replaced, in which case it
-    // should be fine for it to be left in an inconsistent state.
-    config_.moveCb(oldItem, *newItemHdl, nullptr);
-  } else {
-    std::memcpy(newItemHdl->getMemory(), oldItem.getMemory(),
-                oldItem.getSize());
-  }
-
   // Adding the item to mmContainer has to succeed since no one can remove the item
   auto& newContainer = getMMContainer(*newItemHdl);
   auto mmContainerAdded = newContainer.add(*newItemHdl);
   XDCHECK(mmContainerAdded);
 
   // no one can add or remove chained items at this point
+  // TODO: is this a race? new Item is already accessible but not yet marked with hasChainedItem()
   if (oldItem.hasChainedItem()) {
     // safe to acquire handle for a moving Item
     auto incRes = incRef(oldItem, false);
@@ -1612,7 +1592,7 @@ CacheAllocator<CacheTrait>::findEviction(TierId tid, PoolId pid, ClassId cid) {
     if (lastTier && shouldWriteToNvmCache(*candidate_) && !token.isValid()) {
       stats_.evictFailConcurrentFill.inc();
     } else if ( (lastTier && candidate_->markForEviction()) ||
-                (!lastTier && candidate_->markMoving(true)) ) {
+                (!lastTier && candidate_->markMoving()) ) {
       XDCHECK(candidate_->isMoving() || candidate_->isMarkedForEviction());
       // markForEviction to make sure no other thead is evicting the item
       // nor holding a handle to that item if this is last tier
@@ -3499,10 +3479,7 @@ bool CacheAllocator<CacheTrait>::markMovingForSlabRelease(
     // Since this callback is executed, the item is not yet freed
     itemFreed = false;
     Item* item = static_cast<Item*>(memory);
-    // TODO: for chained items, moving bit is only used to avoid
-    // freeing the item prematurely
-    auto failIfRefNotZero = numTiers > 1 && !item->isChainedItem();
-    if (item->markMoving(failIfRefNotZero)) {
+    if (item->markMoving()) {
       markedMoving = true;
     }
   };
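
For reference, the config_.moveCb branch that this diff reorders ahead of the access-container replace is driven by the move callback registered on the cache config. Below is a minimal sketch of wiring one up; it assumes CacheLib's public enableMovingOnSlabRelease() config API and its (oldItem, newItem, parentPtr) callback signature, and is an illustration rather than part of this change.

#include <cstring>

#include "cachelib/allocator/CacheAllocator.h"

using Cache = facebook::cachelib::LruAllocator;

// Sketch: register a move callback so that moveRegularItemWithSync() takes the
// config_.moveCb branch instead of the std::memcpy() fallback shown above.
// The parent pointer is only non-null when a chained item is being moved.
void configureMoving(Cache::Config& config) {
  config.enableMovingOnSlabRelease(
      [](Cache::Item& oldItem, Cache::Item& newItem, Cache::Item* /*parent*/) {
        // A trivial callback copies the payload; a real one would also fix up
        // any external references that point into the old item's memory.
        std::memcpy(newItem.getMemory(), oldItem.getMemory(),
                    oldItem.getSize());
      });
}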