
HBASE-28211 BucketCache.blocksByHFile may leak on allocationFailure or if we reach io errors tolerated #5530

Merged: 1 commit, Nov 29, 2023
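
Background: blocksByHFile is a secondary index over backingMap, ordered by HFile name so that all cached blocks of a file can be located for eviction. Before this change, doDrain() added the key to blocksByHFile while the entry was still only in the RAM cache; if the bucket allocation then failed, or the writer thread bailed out after exceeding the tolerated IO errors, the key never reached backingMap but stayed in the set, i.e. it leaked. The patch instead adds the key in putIntoBackingMap(), the single point where an entry actually becomes live (blockEvicted() already removes it), and also clears the set in disableCache(). The sketch below is not the HBase code, just a minimal illustration of the resulting invariant with placeholder names: the secondary index is touched only where the primary map is touched, so it can never reference an entry that is not (or no longer) in the map.

// Minimal sketch, not the HBase implementation: the class and field names are placeholders.
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;

class CacheIndexSketch<K extends Comparable<K>, V> {
  private final ConcurrentHashMap<K, V> backingMap = new ConcurrentHashMap<>();
  // Secondary index; must never hold a key that is absent from backingMap.
  private final NavigableSet<K> index = new ConcurrentSkipListSet<>();

  void putIntoBackingMap(K key, V value) {
    backingMap.put(key, value);
    index.add(key); // add only once the entry is actually live in the map
  }

  void blockEvicted(K key) {
    index.remove(key);
    backingMap.remove(key);
  }

  void disableCache() {
    backingMap.clear();
    index.clear(); // clearing only the map would leak keys in the index
  }
}
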
Changes from all commits
@@ -221,7 +221,7 @@ public class BucketCache implements BlockCache, HeapSize {
*/
transient final IdReadWriteLock<Long> offsetLock;

- private final NavigableSet<BlockCacheKey> blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> {
+ final NavigableSet<BlockCacheKey> blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> {
int nameComparison = a.getHfileName().compareTo(b.getHfileName());
if (nameComparison != 0) {
return nameComparison;
@@ -643,12 +643,14 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decre
blocksByHFile.remove(cacheKey);
if (decrementBlockNumber) {
this.blockNumber.decrement();
+ if (ioEngine.isPersistent()) {
+   removeFileFromPrefetch(cacheKey.getHfileName());
+ }
}
if (evictedByEvictionProcess) {
cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
}
if (ioEngine.isPersistent()) {
- removeFileFromPrefetch(cacheKey.getHfileName());
setCacheInconsistent(true);
}
}
@@ -1083,6 +1085,7 @@ public void run() {
*/
protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) {
BucketEntry previousEntry = backingMap.put(key, bucketEntry);
+ blocksByHFile.add(key);
if (previousEntry != null && previousEntry != bucketEntry) {
previousEntry.withWriteLock(offsetLock, () -> {
blockEvicted(key, previousEntry, false, false);
@@ -1163,10 +1166,6 @@ void doDrain(final List<RAMQueueEntry> entries, ByteBuffer metaBuff) throws Inte
index++;
continue;
}
- BlockCacheKey cacheKey = re.getKey();
- if (ramCache.containsKey(cacheKey)) {
-   blocksByHFile.add(cacheKey);
- }
// Reset the position for reuse.
// It should be guaranteed that the data in the metaBuff has been transferred to the
// ioEngine safely. Otherwise, this reuse is problematic. Fortunately, the data is already
@@ -1517,6 +1516,7 @@ private void disableCache() {
if (!ioEngine.isPersistent() || persistencePath == null) {
// If persistent ioengine and a path, we will serialize out the backingMap.
this.backingMap.clear();
+ this.blocksByHFile.clear();
this.fullyCachedFiles.clear();
this.regionCachedSizeMap.clear();
}

@@ -122,6 +122,8 @@ public void testTooBigEntry() throws InterruptedException {
Mockito.when(tooBigCacheable.getSerializedLength()).thenReturn(Integer.MAX_VALUE);
this.bc.cacheBlock(this.plainKey, tooBigCacheable);
doDrainOfOneEntry(this.bc, this.wt, this.q);
+ assertTrue(bc.blocksByHFile.isEmpty());
+ assertTrue(bc.getBackingMap().isEmpty());
}

/**
@@ -138,6 +140,8 @@ public void testIOE() throws IOException, InterruptedException {
Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
this.q.add(spiedRqe);
doDrainOfOneEntry(bc, wt, q);
+ assertTrue(bc.blocksByHFile.isEmpty());
+ assertTrue(bc.getBackingMap().isEmpty());
// Cache disabled when ioes w/o ever healing.
assertTrue(!bc.isCacheEnabled());
}
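
The new assertions pin down the invariant behind the fix: after a drain that fails, whether because the entry is too large to allocate or because repeated IOExceptions exceeded the tolerated error duration and disabled the cache, neither backingMap nor blocksByHFile may still hold the key. A more general check one could add in the same test class is sketched below; the helper name assertIndexConsistent is hypothetical, not part of the HBase test suite, and it assumes assertTrue is statically imported as in the existing tests.

// Hypothetical helper (placeholder name): every key in the secondary index must
// still be present in the backing map, otherwise blocksByHFile has leaked.
private static void assertIndexConsistent(BucketCache cache) {
  for (BlockCacheKey key : cache.blocksByHFile) {
    assertTrue("blocksByHFile leaked key " + key, cache.getBackingMap().containsKey(key));
  }
}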