Skip to content

Commit 9f4177f

Browse files
authored
HBASE-25698 Fixing IllegalReferenceCountException when using TinyLfuBlockCache (#3215)
Signed-off-by: Andrew Purtell <apurtell@apache.org> Signed-off-by: Anoop Sam John <anoopsamjohn@apache.org> Signed-off-by: Michael Stack <stack@apache.org>
1 parent d292375 commit 9f4177f

File tree

2 files changed

+112
-8
lines changed

2 files changed

+112
-8
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java

Lines changed: 40 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,13 @@ public boolean containsBlock(BlockCacheKey cacheKey) {
158158
@Override
159159
public Cacheable getBlock(BlockCacheKey cacheKey,
160160
boolean caching, boolean repeat, boolean updateCacheMetrics) {
161-
Cacheable value = cache.getIfPresent(cacheKey);
161+
Cacheable value = cache.asMap().computeIfPresent(cacheKey, (blockCacheKey, cacheable) -> {
162+
// It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside
163+
// this block. because if retain outside the map#computeIfPresent, the evictBlock may remove
164+
// the block and release, then we're retaining a block with refCnt=0 which is disallowed.
165+
cacheable.retain();
166+
return cacheable;
167+
});
162168
if (value == null) {
163169
if (repeat) {
164170
return null;
@@ -169,9 +175,6 @@ public Cacheable getBlock(BlockCacheKey cacheKey,
169175
if (victimCache != null) {
170176
value = victimCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
171177
if ((value != null) && caching) {
172-
if ((value instanceof HFileBlock) && ((HFileBlock) value).isSharedMem()) {
173-
value = HFileBlock.deepCloneOnHeap((HFileBlock) value);
174-
}
175178
cacheBlock(cacheKey, value);
176179
}
177180
}
@@ -192,17 +195,48 @@ public void cacheBlock(BlockCacheKey key, Cacheable value) {
192195
// If there are a lot of blocks that are too big this can make the logs too noisy (2% logged)
193196
if (stats.failInsert() % 50 == 0) {
194197
LOG.warn(String.format(
195-
"Trying to cache too large a block %s @ %,d is %,d which is larger than %,d",
196-
key.getHfileName(), key.getOffset(), value.heapSize(), DEFAULT_MAX_BLOCK_SIZE));
198+
"Trying to cache too large a block %s @ %,d is %,d which is larger than %,d",
199+
key.getHfileName(), key.getOffset(), value.heapSize(), DEFAULT_MAX_BLOCK_SIZE));
197200
}
198201
} else {
202+
value = asReferencedHeapBlock(value);
199203
cache.put(key, value);
200204
}
201205
}
202206

207+
/**
208+
* The block cached in TinyLfuBlockCache will always be a heap block: on the one hand, the heap
209+
* access will be faster than off-heap, the small index block or meta block cached in
210+
* CombinedBlockCache will benefit a lot. On the other hand, the TinyLfuBlockCache size is always
211+
* calculated based on the total heap size, if caching an off-heap block in TinyLfuBlockCache, the
212+
* heap size will be messed up. Here we will clone the block into a heap block if it's an
213+
* off-heap block, otherwise just use the original block. The key point is to maintain the refCnt of
214+
* the block (HBASE-22127): <br>
215+
* 1. if cache the cloned heap block, its refCnt is a totally new one, it's easy to handle; <br>
216+
* 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's
217+
* reservoir, if both RPC and TinyLfuBlockCache release the block, then it can be
218+
* garbage collected by JVM, so need a retain here.
219+
*
220+
* @param buf the original block
221+
* @return a block with a heap memory backend.
222+
*/
223+
private Cacheable asReferencedHeapBlock(Cacheable buf) {
224+
if (buf instanceof HFileBlock) {
225+
HFileBlock blk = ((HFileBlock) buf);
226+
if (blk.isSharedMem()) {
227+
return HFileBlock.deepCloneOnHeap(blk);
228+
}
229+
}
230+
// The block will be referenced by this TinyLfuBlockCache, so should increase its refCnt here.
231+
return buf.retain();
232+
}
233+
203234
@Override
204235
public boolean evictBlock(BlockCacheKey cacheKey) {
205236
Cacheable value = cache.asMap().remove(cacheKey);
237+
if (value != null) {
238+
value.release();
239+
}
206240
return (value != null);
207241
}
208242

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java

Lines changed: 72 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.BUFFER_SIZE_KEY;
2323
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MAX_BUFFER_COUNT_KEY;
2424
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MIN_ALLOCATE_SIZE_KEY;
25+
import static org.apache.hadoop.hbase.io.hfile.BlockCacheFactory.BLOCKCACHE_POLICY_KEY;
2526
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
2627
import static org.junit.Assert.assertEquals;
2728
import static org.junit.Assert.assertFalse;
@@ -205,10 +206,11 @@ public void testReaderWithLRUBlockCache() throws Exception {
205206
lru.shutdown();
206207
}
207208

208-
private BlockCache initCombinedBlockCache() {
209+
private BlockCache initCombinedBlockCache(final String l1CachePolicy) {
209210
Configuration that = HBaseConfiguration.create(conf);
210211
that.setFloat(BUCKET_CACHE_SIZE_KEY, 32); // 32MB for bucket cache.
211212
that.set(BUCKET_CACHE_IOENGINE_KEY, "offheap");
213+
that.set(BLOCKCACHE_POLICY_KEY, l1CachePolicy);
212214
BlockCache bc = BlockCacheFactory.createBlockCache(that);
213215
Assert.assertNotNull(bc);
214216
Assert.assertTrue(bc instanceof CombinedBlockCache);
@@ -225,7 +227,7 @@ public void testReaderWithCombinedBlockCache() throws Exception {
225227
fillByteBuffAllocator(alloc, bufCount);
226228
Path storeFilePath = writeStoreFile();
227229
// Open the file reader with CombinedBlockCache
228-
BlockCache combined = initCombinedBlockCache();
230+
BlockCache combined = initCombinedBlockCache("LRU");
229231
conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
230232
CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
231233
HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
@@ -890,4 +892,72 @@ public void testDBEShipped() throws IOException {
890892
writer.close();
891893
}
892894
}
895+
896+
/**
897+
* Test case for CombinedBlockCache with TinyLfu as L1 cache
898+
*/
899+
@Test
900+
public void testReaderWithTinyLfuCombinedBlockCache() throws Exception {
901+
testReaderCombinedCache("TinyLfu");
902+
}
903+
904+
/**
905+
* Test case for CombinedBlockCache with AdaptiveLRU as L1 cache
906+
*/
907+
@Test
908+
public void testReaderWithAdaptiveLruCombinedBlockCache() throws Exception {
909+
testReaderCombinedCache("AdaptiveLRU");
910+
}
911+
912+
/**
913+
* Test case for CombinedBlockCache with LRU as L1 cache
914+
*/
915+
@Test
916+
public void testReaderWithLruCombinedBlockCache() throws Exception {
917+
testReaderCombinedCache("LRU");
918+
}
919+
920+
private void testReaderCombinedCache(final String l1CachePolicy) throws Exception {
921+
int bufCount = 1024;
922+
int blockSize = 64 * 1024;
923+
ByteBuffAllocator alloc = initAllocator(true, bufCount, blockSize, 0);
924+
fillByteBuffAllocator(alloc, bufCount);
925+
Path storeFilePath = writeStoreFile();
926+
// Open the file reader with CombinedBlockCache
927+
BlockCache combined = initCombinedBlockCache(l1CachePolicy);
928+
conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
929+
CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
930+
HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
931+
long offset = 0;
932+
Cacheable cachedBlock = null;
933+
while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
934+
BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
935+
HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
936+
offset += block.getOnDiskSizeWithHeader();
937+
// Read the cached block.
938+
cachedBlock = combined.getBlock(key, false, false, true);
939+
try {
940+
Assert.assertNotNull(cachedBlock);
941+
Assert.assertTrue(cachedBlock instanceof HFileBlock);
942+
HFileBlock hfb = (HFileBlock) cachedBlock;
943+
// Data block will be cached in BucketCache, so it should be an off-heap block.
944+
if (hfb.getBlockType().isData()) {
945+
Assert.assertTrue(hfb.isSharedMem());
946+
} else if (!l1CachePolicy.equals("TinyLfu")) {
947+
Assert.assertFalse(hfb.isSharedMem());
948+
}
949+
} finally {
950+
cachedBlock.release();
951+
}
952+
block.release(); // return back the ByteBuffer back to allocator.
953+
}
954+
reader.close();
955+
combined.shutdown();
956+
if (cachedBlock != null) {
957+
Assert.assertEquals(0, cachedBlock.refCnt());
958+
}
959+
Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
960+
alloc.clean();
961+
}
962+
893963
}

0 commit comments

Comments
 (0)