
Commit 9414e3b

comnetwork authored and Apache9 committed
HBASE-26281 DBB got from BucketCache would be freed unexpectedly before RPC completed (#3680)
Signed-off-by: Duo Zhang <zhangduo@apache.org>
1 parent 74eff79 commit 9414e3b

File tree

8 files changed (+758, -198 lines changed)


hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java

Lines changed: 218 additions & 154 deletions
Large diffs are not rendered by default.

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketEntry.java

Lines changed: 10 additions & 16 deletions
@@ -25,8 +25,10 @@
 import java.util.Comparator;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Function;

 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
 import org.apache.hadoop.hbase.io.hfile.BlockPriority;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
@@ -88,16 +90,21 @@ class BucketEntry implements HBaseReferenceCounted {
   private final long cachedTime = System.nanoTime();

   BucketEntry(long offset, int length, long accessCounter, boolean inMemory) {
-    this(offset, length, accessCounter, inMemory, RefCnt.create(), ByteBuffAllocator.HEAP);
+    this(offset, length, accessCounter, inMemory, null, ByteBuffAllocator.HEAP);
   }

-  BucketEntry(long offset, int length, long accessCounter, boolean inMemory, RefCnt refCnt,
+  BucketEntry(long offset, int length, long accessCounter, boolean inMemory,
+      Function<BucketEntry, Recycler> createRecycler,
       ByteBuffAllocator allocator) {
     setOffset(offset);
     this.length = length;
     this.accessCounter = accessCounter;
     this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.MULTI;
-    this.refCnt = refCnt;
+    if (createRecycler == null) {
+      this.refCnt = RefCnt.create();
+    } else {
+      this.refCnt = RefCnt.create(createRecycler.apply(this));
+    }
     this.markedAsEvicted = new AtomicBoolean(false);
     this.allocator = allocator;
   }
@@ -165,19 +172,6 @@ boolean markAsEvicted() {
     return false;
   }

-  /**
-   * Mark as evicted only when NO RPC references. Mainly used for eviction when cache size exceed
-   * the max acceptable size.
-   * @return true if we deallocate this entry successfully.
-   */
-  boolean markStaleAsEvicted() {
-    if (!markedAsEvicted.get() && this.refCnt() == 1) {
-      // The only reference was coming from backingMap, now release the stale entry.
-      return this.markAsEvicted();
-    }
-    return false;
-  }
-
   /**
    * Check whether have some RPC patch referring this block. There're two case: <br>
    * 1. If current refCnt is greater than 1, there must be at least one referring RPC path; <br>
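
The constructor change above replaces the ready-made RefCnt argument with a Function<BucketEntry, Recycler> factory, so the recycler is created only once the entry exists and can capture it. A minimal, self-contained sketch of that deferred wiring; DeferredRecyclerSketch, SimpleRefCnt and Entry are illustrative stand-ins, not the HBase classes:

```java
import java.util.function.Function;

public class DeferredRecyclerSketch {
  /** Stand-in for ByteBuffAllocator.Recycler: runs once when the last reference is released. */
  interface Recycler {
    void free();
  }

  /** Stand-in for RefCnt: counts references and invokes the recycler on the final release. */
  static final class SimpleRefCnt {
    private final Recycler recycler;
    private int refs = 1;

    SimpleRefCnt(Recycler recycler) {
      this.recycler = recycler;
    }

    synchronized void retain() {
      refs++;
    }

    synchronized void release() {
      if (--refs == 0 && recycler != null) {
        recycler.free();
      }
    }
  }

  /** Stand-in for BucketEntry: the recycler is built only after 'this' exists. */
  static final class Entry {
    final long offset;
    final SimpleRefCnt refCnt;

    Entry(long offset, Function<Entry, Recycler> createRecycler) {
      this.offset = offset;
      // Passing a factory instead of a ready-made RefCnt lets the recycler close over the
      // entry being constructed, so freeing happens only when every holder is done with it.
      this.refCnt = new SimpleRefCnt(
          createRecycler == null ? null : createRecycler.apply(this));
    }
  }

  public static void main(String[] args) {
    Entry entry = new Entry(42L,
        e -> () -> System.out.println("freeing bucket space at offset " + e.offset));
    entry.refCnt.retain();   // e.g. an RPC path starts reading the block
    entry.refCnt.release();  // the cache drops its reference: nothing is freed yet
    entry.refCnt.release();  // the RPC completes: now the recycler runs
  }
}
```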

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java

Lines changed: 24 additions & 1 deletion
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.client;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -43,6 +44,8 @@
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
+import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
+import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.regionserver.DelegatingInternalScanner;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -372,7 +375,11 @@ public void run() {
           CachedBlock next = iterator.next();
           BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
           cacheList.add(cacheKey);
-          cache.evictBlock(cacheKey);
+          /**
+           * There is only one Block referenced by rpc,here we evict blocks which have no rpc
+           * referenced.
+           */
+          evictBlock(cache, cacheKey);
         }
         try {
           Thread.sleep(1);
@@ -437,4 +444,20 @@ public void run() {
       table.close();
     }
   }
+
+  /**
+   * For {@link BucketCache},we only evict Block if there is no rpc referenced.
+   */
+  private void evictBlock(BlockCache blockCache, BlockCacheKey blockCacheKey) {
+    assertTrue(blockCache instanceof CombinedBlockCache);
+    BlockCache[] blockCaches = blockCache.getBlockCaches();
+    for (BlockCache currentBlockCache : blockCaches) {
+      if (currentBlockCache instanceof BucketCache) {
+        ((BucketCache) currentBlockCache).evictBlockIfNoRpcReferenced(blockCacheKey);
+      } else {
+        currentBlockCache.evictBlock(blockCacheKey);
+      }
+    }
+
+  }
 }
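
The evictBlock helper above only asks the BucketCache to drop a block when no RPC still references it (evictBlockIfNoRpcReferenced). A minimal, self-contained sketch of that kind of check, with illustrative stand-in types (EvictIfNoRpcReferencedSketch, Entry) rather than the HBase classes; here a refCnt of 1 means only the cache itself still holds the block:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class EvictIfNoRpcReferencedSketch {
  /** Stand-in for a cached entry whose refCnt includes the cache's own reference. */
  static final class Entry {
    final AtomicInteger refCnt = new AtomicInteger(1); // 1 == only the cache holds it
  }

  static final ConcurrentHashMap<String, Entry> backingMap = new ConcurrentHashMap<>();

  /** Evict only when no RPC reference exists, i.e. the cache's reference is the last one. */
  static boolean evictIfNoRpcReferenced(String key) {
    Entry entry = backingMap.get(key);
    if (entry == null) {
      return false;
    }
    // Drop the entry only while refCnt == 1; an in-flight RPC raises it above 1.
    if (entry.refCnt.compareAndSet(1, 0)) {
      backingMap.remove(key);
      return true;
    }
    return false;
  }

  public static void main(String[] args) {
    backingMap.put("block-1", new Entry());
    backingMap.get("block-1").refCnt.incrementAndGet();    // an RPC starts reading the block
    System.out.println(evictIfNoRpcReferenced("block-1")); // false: still referenced by the RPC
    backingMap.get("block-1").refCnt.decrementAndGet();    // the RPC completes
    System.out.println(evictIfNoRpcReferenced("block-1")); // true: safe to evict now
  }
}
```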

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java

Lines changed: 24 additions & 6 deletions
@@ -249,8 +249,26 @@ public void run() {
     assertEquals(1, cache.getBlockCount());
     lock.writeLock().unlock();
     evictThread.join();
-    assertEquals(0, cache.getBlockCount());
-    assertEquals(cache.getCurrentSize(), 0L);
+    /**
+     * <pre>
+     * The asserts here before HBASE-21957 are:
+     * assertEquals(1L, cache.getBlockCount());
+     * assertTrue(cache.getCurrentSize() > 0L);
+     * assertTrue("We should have a block!", cache.iterator().hasNext());
+     *
+     * The asserts here after HBASE-21957 are:
+     * assertEquals(0, cache.getBlockCount());
+     * assertEquals(cache.getCurrentSize(), 0L);
+     *
+     * I think the asserts before HBASE-21957 is more reasonable,because
+     * {@link BucketCache#evictBlock} should only evict the {@link BucketEntry}
+     * it had seen, and newly added Block after the {@link BucketEntry}
+     * it had seen should not be evicted.
+     * </pre>
+     */
+    assertEquals(1L, cache.getBlockCount());
+    assertTrue(cache.getCurrentSize() > 0L);
+    assertTrue("We should have a block!", cache.iterator().hasNext());
   }

   @Test
@@ -503,8 +521,8 @@ public void testRAMCache() {
       HFileBlock.FILL_HEADER, -1, 52, -1, meta, ByteBuffAllocator.HEAP);
     HFileBlock blk2 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
       HFileBlock.FILL_HEADER, -1, -1, -1, meta, ByteBuffAllocator.HEAP);
-    RAMQueueEntry re1 = new RAMQueueEntry(key1, blk1, 1, false, ByteBuffAllocator.NONE);
-    RAMQueueEntry re2 = new RAMQueueEntry(key1, blk2, 1, false, ByteBuffAllocator.NONE);
+    RAMQueueEntry re1 = new RAMQueueEntry(key1, blk1, 1, false);
+    RAMQueueEntry re2 = new RAMQueueEntry(key1, blk2, 1, false);

     assertFalse(cache.containsKey(key1));
     assertNull(cache.putIfAbsent(key1, re1));
@@ -551,11 +569,11 @@ public void testFreeBlockWhenIOEngineWriteFailure() throws IOException {
     BucketAllocator allocator = new BucketAllocator(availableSpace, null);

     BlockCacheKey key = new BlockCacheKey("dummy", 1L);
-    RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true, ByteBuffAllocator.NONE);
+    RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true);

     Assert.assertEquals(0, allocator.getUsedSize());
     try {
-      re.writeToCache(ioEngine, allocator, null);
+      re.writeToCache(ioEngine, allocator, null, null);
       Assert.fail();
     } catch (Exception e) {
     }
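
The comment added in the first TestBucketCache hunk argues that BucketCache#evictBlock should only evict the exact BucketEntry it had already seen, so a block re-cached under the same key afterwards must not be removed. A minimal sketch of that "remove only what you saw" idea using ConcurrentHashMap's two-argument remove; EvictOnlySeenEntrySketch and Entry are illustrative stand-ins, not the HBase types:

```java
import java.util.concurrent.ConcurrentHashMap;

public class EvictOnlySeenEntrySketch {
  static final class Entry {
    final long offset;
    Entry(long offset) {
      this.offset = offset;
    }
  }

  public static void main(String[] args) {
    ConcurrentHashMap<String, Entry> backingMap = new ConcurrentHashMap<>();
    Entry seen = new Entry(0L);
    backingMap.put("block-1", seen);

    // Another writer re-caches the same key after we captured 'seen'.
    Entry fresh = new Entry(1024L);
    backingMap.put("block-1", fresh);

    // Two-argument remove evicts only if the map still holds the entry we saw,
    // so the newly cached entry survives.
    boolean removed = backingMap.remove("block-1", seen);
    System.out.println(removed);                           // false
    System.out.println(backingMap.containsKey("block-1")); // true: 'fresh' is still cached
  }
}
```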
