Commit a30f2ac

Block readers when item is moving
This simplifies the moving code considerably. We no longer need to worry about any failure other than allocation failure, since the item is protected from all readers (and, by extension, from removes).
1 parent eaf1f35 commit a30f2ac
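
For orientation before the per-file diffs: the protocol introduced here is that a mover first wins exclusive ownership of an unreferenced item, readers that arrive while the move is in flight block on a wait context instead of failing, and the mover wakes them with a handle to the new copy once the move completes. Below is a minimal, self-contained illustration of that hand-off in plain C++ (standard library threads and a condition variable). The MovingGate type and its method names are hypothetical stand-ins for the MoveCtx / handleWithWaitContextForMovingItem / wakeUpWaiters machinery declared further down, not the actual implementation.

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>

// Hypothetical stand-in for CacheLib's MoveCtx: readers that encounter an
// item which is being moved park here until the mover publishes the new copy.
struct MovingGate {
  std::mutex m;
  std::condition_variable cv;
  bool done = false;
  std::string newLocation;  // stands in for the WriteHandle to the new copy

  // Reader side: block until the move has finished (wait-context analogue).
  std::string waitForMove() {
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [this] { return done; });
    return newLocation;
  }

  // Mover side: publish the new copy and wake every parked reader
  // (analogue of wakeUpWaiters(key, std::move(handle))).
  void finishMove(std::string where) {
    {
      std::lock_guard<std::mutex> lk(m);
      newLocation = std::move(where);
      done = true;
    }
    cv.notify_all();
  }
};

int main() {
  MovingGate gate;
  // A reader arrives while the item is marked moving and simply blocks.
  std::thread reader([&] {
    std::cout << "reader got item at: " << gate.waitForMove() << "\n";
  });
  // The mover copies the item to the next tier, then wakes all waiters.
  std::thread mover([&] { gate.finishMove("next-tier copy"); });
  reader.join();
  mover.join();
  return 0;
}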

File tree

8 files changed (+168, -184 lines)

cachelib/allocator/CacheAllocator-inl.h

Lines changed: 132 additions & 118 deletions
Large diffs are not rendered by default.

cachelib/allocator/CacheAllocator.h

Lines changed: 11 additions & 9 deletions
@@ -1389,7 +1389,7 @@ class CacheAllocator : public CacheBase {
   double slabsApproxFreePercentage(TierId tid) const;
 
   // wrapper around Item's refcount and active handle tracking
-  FOLLY_ALWAYS_INLINE bool incRef(Item& it);
+  FOLLY_ALWAYS_INLINE RefcountWithFlags::incResult incRef(Item& it, bool failIfMoving);
   FOLLY_ALWAYS_INLINE RefcountWithFlags::Value decRef(Item& it);
 
   // drops the refcount and if needed, frees the allocation back to the memory
@@ -1622,8 +1622,7 @@ class CacheAllocator : public CacheBase {
   //
   // @return true If the move was completed, and the containers were updated
   //         successfully.
-  template <typename P>
-  bool moveRegularItemWithSync(Item& oldItem, WriteHandle& newItemHdl, P&& predicate);
+  void moveRegularItemWithSync(Item& oldItem, WriteHandle& newItemHdl);
 
   // Moves a regular item to a different slab. This should only be used during
   // slab release after the item's exclusive bit has been set. The user supplied
@@ -1792,19 +1791,19 @@ class CacheAllocator : public CacheBase {
   //
   // @return valid handle to the item. This will be the last
   //         handle to the item. On failure an empty handle.
-  bool tryEvictToNextMemoryTier(TierId tid, PoolId pid, Item& item, bool fromBgThread);
+  WriteHandle tryEvictToNextMemoryTier(TierId tid, PoolId pid, Item& item, bool fromBgThread);
 
-  bool tryPromoteToNextMemoryTier(TierId tid, PoolId pid, Item& item, bool fromBgThread);
+  WriteHandle tryPromoteToNextMemoryTier(TierId tid, PoolId pid, Item& item, bool fromBgThread);
 
-  bool tryPromoteToNextMemoryTier(Item& item, bool fromBgThread);
+  WriteHandle tryPromoteToNextMemoryTier(Item& item, bool fromBgThread);
 
   // Try to move the item down to the next memory tier
   //
   // @param item the item to evict
   //
   // @return valid handle to the item. This will be the last
   //         handle to the item. On failure an empty handle.
-  bool tryEvictToNextMemoryTier(Item& item, bool fromBgThread);
+  WriteHandle tryEvictToNextMemoryTier(Item& item, bool fromBgThread);
 
   size_t memoryTierSize(TierId tid) const;
 
@@ -2235,8 +2234,9 @@ class CacheAllocator : public CacheBase {
     return memoryTierConfigs.size();
   }
 
-  bool addWaitContextForMovingItem(
-      folly::StringPiece key, std::shared_ptr<WaitContext<ReadHandle>> waiter);
+  WriteHandle handleWithWaitContextForMovingItem(Item& item);
+
+  size_t wakeUpWaiters(folly::StringPiece key, WriteHandle&& handle);
 
   class MoveCtx {
    public:
@@ -2261,6 +2261,8 @@ class CacheAllocator : public CacheBase {
       waiters.push_back(std::move(waiter));
     }
 
+    size_t numWaiters() const { return waiters.size(); }
+
    private:
     // notify all pending waiters that are waiting for the fetch.
     void wakeUpWaiters() {
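
The two new private declarations above replace the old addWaitContextForMovingItem path: a reader that hits a moving item now asks the allocator for a handle backed by a wait context, and the mover later hands the finished handle to every waiter registered under that key. A rough, self-contained model of that bookkeeping is sketched below; the WaiterRegistry name and the use of std::promise in place of WriteHandle and the real wait context are assumptions for illustration, not CacheLib's implementation (locking is also elided).

#include <future>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical per-key waiter registry modelling the MoveCtx bookkeeping
// implied by handleWithWaitContextForMovingItem / wakeUpWaiters / numWaiters.
class WaiterRegistry {
 public:
  // Reader side: register interest in a key that is currently being moved and
  // get a future that resolves once the mover publishes the new handle.
  std::future<std::string> addWaiter(const std::string& key) {
    auto& ctx = moves_[key];
    ctx.waiters.emplace_back();
    return ctx.waiters.back().get_future();
  }

  size_t numWaiters(const std::string& key) const {
    auto it = moves_.find(key);
    return it == moves_.end() ? 0 : it->second.waiters.size();
  }

  // Mover side: hand the finished "handle" to every waiter and report how
  // many were woken, mirroring wakeUpWaiters(key, std::move(handle)).
  size_t wakeUpWaiters(const std::string& key, const std::string& handle) {
    auto it = moves_.find(key);
    if (it == moves_.end()) {
      return 0;
    }
    for (auto& waiter : it->second.waiters) {
      waiter.set_value(handle);
    }
    size_t woken = it->second.waiters.size();
    moves_.erase(it);
    return woken;
  }

 private:
  struct MoveCtx {
    std::vector<std::promise<std::string>> waiters;
  };
  std::unordered_map<std::string, MoveCtx> moves_;
};

int main() {
  WaiterRegistry registry;
  auto pending = registry.addWaiter("key1");                               // reader parks
  std::cout << "waiters: " << registry.numWaiters("key1") << "\n";         // 1
  std::cout << "woken: " << registry.wakeUpWaiters("key1", "new copy") << "\n";  // 1
  std::cout << "reader sees: " << pending.get() << "\n";                   // "new copy"
  return 0;
}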

cachelib/allocator/CacheItem-inl.h

Lines changed: 2 additions & 17 deletions
@@ -237,8 +237,8 @@ bool CacheItem<CacheTrait>::markExclusiveWhenMoving() {
 }
 
 template <typename CacheTrait>
-bool CacheItem<CacheTrait>::markMoving() {
-  return ref_.markMoving();
+bool CacheItem<CacheTrait>::markMoving(bool failIfRefNotZero) {
+  return ref_.markMoving(failIfRefNotZero);
 }
 
 template <typename CacheTrait>
@@ -286,21 +286,6 @@ bool CacheItem<CacheTrait>::isNvmEvicted() const noexcept {
   return ref_.isNvmEvicted();
 }
 
-template <typename CacheTrait>
-void CacheItem<CacheTrait>::markIncomplete() noexcept {
-  ref_.markIncomplete();
-}
-
-template <typename CacheTrait>
-void CacheItem<CacheTrait>::unmarkIncomplete() noexcept {
-  ref_.unmarkIncomplete();
-}
-
-template <typename CacheTrait>
-bool CacheItem<CacheTrait>::isIncomplete() const noexcept {
-  return ref_.isIncomplete();
-}
-
 template <typename CacheTrait>
 void CacheItem<CacheTrait>::markIsChainedItem() noexcept {
   XDCHECK(!hasChainedItem());

cachelib/allocator/CacheItem.h

Lines changed: 3 additions & 11 deletions
@@ -242,14 +242,6 @@ class CACHELIB_PACKED_ATTR CacheItem {
   void unmarkNvmEvicted() noexcept;
   bool isNvmEvicted() const noexcept;
 
-  /**
-   * Marks that the item is migrating between memory tiers and
-   * not ready for access now. Accessing thread should wait.
-   */
-  void markIncomplete() noexcept;
-  void unmarkIncomplete() noexcept;
-  bool isIncomplete() const noexcept;
-
   /**
    * Function to set the timestamp for when to expire an item
    *
@@ -318,9 +310,9 @@ class CACHELIB_PACKED_ATTR CacheItem {
   //
   // @return true on success, failure if item is marked as exclusive
   // @throw exception::RefcountOverflow on ref count overflow
-  FOLLY_ALWAYS_INLINE bool incRef() {
+  FOLLY_ALWAYS_INLINE RefcountWithFlags::incResult incRef(bool failIfMoving) {
     try {
-      return ref_.incRef();
+      return ref_.incRef(failIfMoving);
     } catch (exception::RefcountOverflow& e) {
       throw exception::RefcountOverflow(
           folly::sformat("{} item: {}", e.what(), toString()));
@@ -387,7 +379,7 @@ class CACHELIB_PACKED_ATTR CacheItem {
    * Unmarking moving will also return the refcount at the moment of
    * unmarking.
    */
-  bool markMoving();
+  bool markMoving(bool failIfRefNotZero);
   RefcountWithFlags::Value unmarkMoving() noexcept;
   bool isMoving() const noexcept;
   bool isOnlyMoving() const noexcept;

cachelib/allocator/Handle.h

Lines changed: 0 additions & 7 deletions
@@ -488,13 +488,6 @@ struct ReadHandleImpl {
   // Handle which has the item already
   FOLLY_ALWAYS_INLINE ReadHandleImpl(Item* it, CacheT& alloc) noexcept
       : alloc_(&alloc), it_(it) {
-    if (it_ && it_->isIncomplete()) {
-      waitContext_ = std::make_shared<ItemWaitContext>(alloc);
-      if (!alloc_->addWaitContextForMovingItem(it->getKey(), waitContext_)) {
-        waitContext_->discard();
-        waitContext_.reset();
-      }
-    }
   }
 
   // handle that has a wait context allocated. Used for async handles

cachelib/allocator/Refcount.h

Lines changed: 17 additions & 17 deletions
@@ -116,10 +116,6 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
     // unevictable in the past.
     kUnevictable_NOOP,
 
-    // Item is accecible but content is not ready yet. Used by eviction
-    // when Item is moved between memory tiers.
-    kIncomplete,
-
     // Unused. This is just to indciate the maximum number of flags
     kFlagMax,
   };
@@ -135,12 +131,18 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
   RefcountWithFlags(RefcountWithFlags&&) = delete;
   RefcountWithFlags& operator=(RefcountWithFlags&&) = delete;
 
+  enum incResult {
+    incOk,
+    incFailedMoving,
+    incFailedExclusive
+  };
+
   // Bumps up the reference count only if the new count will be strictly less
   // than or equal to the maxCount and the item is not exclusive
   // @return true if refcount is bumped. false otherwise (if item is exclusive)
   // @throw exception::RefcountOverflow if new count would be greater than
   //        maxCount
-  FOLLY_ALWAYS_INLINE bool incRef() {
+  FOLLY_ALWAYS_INLINE incResult incRef(bool failIfMoving) {
     Value* const refPtr = &refCount_;
     unsigned int nCASFailures = 0;
     constexpr bool isWeak = false;
@@ -155,12 +157,15 @@
         throw exception::RefcountOverflow("Refcount maxed out.");
       }
       if (alreadyExclusive && (oldVal & kAccessRefMask) == 0) {
-        return false;
+        return incFailedExclusive;
+      }
+      else if (alreadyExclusive && failIfMoving) {
+        return incFailedMoving;
       }
 
       if (__atomic_compare_exchange_n(refPtr, &oldVal, newCount, isWeak,
                                       __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)) {
-        return true;
+        return incOk;
       }
 
       if ((++nCASFailures % 4) == 0) {
@@ -327,8 +332,8 @@
    *
    * Unmarking moving does not depend on `isInMMContainer`
    */
-  bool markMoving() {
-    auto predicate = [](const Value curValue) {
+  bool markMoving(bool failIfRefNotZero) {
+    auto predicate = [failIfRefNotZero](const Value curValue) {
       Value conditionBitMask = getAdminRef<kLinked>();
       const bool flagSet = curValue & conditionBitMask;
       const bool alreadyExclusive = curValue & getAdminRef<kExclusive>();
@@ -337,6 +342,9 @@
       if (!flagSet || alreadyExclusive) {
        return false;
      }
+      if (failIfRefNotZero && (curValue & kAccessRefMask) != 0) {
+        return false;
+      }
      if (UNLIKELY((curValue & kAccessRefMask) == (kAccessRefMask))) {
        throw exception::RefcountOverflow("Refcount maxed out.");
      }
@@ -424,14 +432,6 @@
   void unmarkNvmEvicted() noexcept { return unSetFlag<kNvmEvicted>(); }
   bool isNvmEvicted() const noexcept { return isFlagSet<kNvmEvicted>(); }
 
-  /**
-   * Marks that the item is migrating between memory tiers and
-   * not ready for access now. Accessing thread should wait.
-   */
-  void markIncomplete() noexcept { return setFlag<kIncomplete>(); }
-  void unmarkIncomplete() noexcept { return unSetFlag<kIncomplete>(); }
-  bool isIncomplete() const noexcept { return isFlagSet<kIncomplete>(); }
-
   // Whether or not an item is completely drained of access
   // Refcount is 0 and the item is not linked, accessible, nor exclusive
   bool isDrained() const noexcept { return getRefWithAccessAndAdmin() == 0; }
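
Taken together, the Refcount.h changes let a mover claim an item only while nobody holds a reference to it, and let readers detect that state cheaply instead of consulting a separate kIncomplete flag. The following is a minimal, self-contained model of that protocol built on a single std::atomic word; it is a sketch under simplified assumptions (one exclusive bit, no linked/accessible admin bits, no flag storage, no overflow handling) and not CacheLib's actual RefcountWithFlags.

#include <atomic>
#include <cassert>
#include <cstdint>

class MiniRefcount {
 public:
  enum incResult { incOk, incFailedMoving, incFailedExclusive };

  // Readers: bump the refcount unless the item is exclusively owned.
  incResult incRef(bool failIfMoving) {
    uint32_t old = v_.load(std::memory_order_relaxed);
    while (true) {
      const bool exclusive = old & kExclusiveBit;
      if (exclusive && (old & kRefMask) == 0) {
        return incFailedExclusive;  // exclusively owned for eviction/free
      }
      if (exclusive && failIfMoving) {
        return incFailedMoving;     // mid-move: the caller should wait instead
      }
      if (v_.compare_exchange_weak(old, old + 1, std::memory_order_acq_rel,
                                   std::memory_order_relaxed)) {
        return incOk;
      }
    }
  }

  void decRef() { v_.fetch_sub(1, std::memory_order_acq_rel); }

  // Mover: set the exclusive bit and take one reference so the item cannot be
  // freed underneath us. With failIfRefNotZero (the new behaviour), this only
  // succeeds when no reader currently holds the item.
  bool markMoving(bool failIfRefNotZero) {
    uint32_t old = v_.load(std::memory_order_relaxed);
    while (true) {
      if (old & kExclusiveBit) {
        return false;
      }
      if (failIfRefNotZero && (old & kRefMask) != 0) {
        return false;
      }
      if (v_.compare_exchange_weak(old, (old + 1) | kExclusiveBit,
                                   std::memory_order_acq_rel,
                                   std::memory_order_relaxed)) {
        return true;
      }
    }
  }

  // Drop the mover's reference and clear the bit (two steps here for brevity;
  // a production implementation would fold this into one atomic update).
  void unmarkMoving() {
    v_.fetch_and(~kExclusiveBit, std::memory_order_acq_rel);
    v_.fetch_sub(1, std::memory_order_acq_rel);
  }

 private:
  static constexpr uint32_t kExclusiveBit = 1u << 31;
  static constexpr uint32_t kRefMask = kExclusiveBit - 1;
  std::atomic<uint32_t> v_{0};
};

int main() {
  MiniRefcount r;
  assert(r.incRef(true) == MiniRefcount::incOk);            // a reader takes a ref
  assert(!r.markMoving(true));                              // mover must back off: ref != 0
  r.decRef();
  assert(r.markMoving(true));                               // item unreferenced, mover wins
  assert(r.incRef(true) == MiniRefcount::incFailedMoving);  // readers are now blocked
  r.unmarkMoving();
  assert(r.incRef(true) == MiniRefcount::incOk);            // back to normal reads
  r.decRef();
  return 0;
}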

cachelib/allocator/tests/ItemHandleTest.cpp

Lines changed: 0 additions & 2 deletions
@@ -41,8 +41,6 @@ struct TestItem {
   void reset() {}
 
   folly::StringPiece getKey() const { return folly::StringPiece(); }
-
-  bool isIncomplete() const { return false; }
 };
 
 struct TestNvmCache;

cachelib/allocator/tests/RefCountTest.cpp

Lines changed: 3 additions & 3 deletions
@@ -51,7 +51,7 @@ void RefCountTest::testMultiThreaded() {
         nLocalRef--;
         ref.markAccessible();
       } else {
-        ref.incRef();
+        ref.incRef(true);
         nLocalRef++;
         ref.unmarkAccessible();
       }
@@ -100,12 +100,12 @@ void RefCountTest::testBasic() {
   ASSERT_FALSE(ref.template isFlagSet<RefcountWithFlags::Flags::kMMFlag1>());
 
   for (uint32_t i = 0; i < RefcountWithFlags::kAccessRefMask; i++) {
-    ASSERT_TRUE(ref.incRef());
+    ASSERT_TRUE(ref.incRef(true));
   }
 
   // Incrementing past the max will fail
   auto rawRef = ref.getRaw();
-  ASSERT_THROW(ref.incRef(), std::overflow_error);
+  ASSERT_THROW(ref.incRef(true), std::overflow_error);
   ASSERT_EQ(rawRef, ref.getRaw());
 
   // Bumping up access ref shouldn't affect admin ref and flags

0 commit comments
