Skip to content

Commit b22201f

Browse files
committed
HBASE-22463 Some paths in HFileScannerImpl did not consider block#release which will exhaust the ByteBuffAllocator
1 parent b673000 commit b22201f

19 files changed

+398
-168
lines changed

hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,6 @@
3737
import org.apache.hadoop.conf.Configuration;
3838
import org.apache.hadoop.hbase.HConstants;
3939
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
40-
import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
4140
import org.apache.hadoop.hbase.nio.ByteBuff;
4241
import org.apache.hadoop.hbase.nio.SingleByteBuff;
4342
import org.apache.hadoop.hbase.trace.TraceUtil;
@@ -272,8 +271,7 @@ public CachedData encode(HFileBlock block) {
272271
public HFileBlock decode(CachedData d) {
273272
try {
274273
ByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(d.getData()));
275-
return (HFileBlock) HFileBlock.BLOCK_DESERIALIZER.deserialize(buf, ByteBuffAllocator.HEAP,
276-
MemoryType.EXCLUSIVE);
274+
return (HFileBlock) HFileBlock.BLOCK_DESERIALIZER.deserialize(buf, ByteBuffAllocator.HEAP);
277275
} catch (IOException e) {
278276
LOG.warn("Failed to deserialize data from memcached", e);
279277
}

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Cacheable.java

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -63,20 +63,6 @@ public interface Cacheable extends HeapSize, HBaseReferenceCounted {
6363
*/
6464
BlockType getBlockType();
6565

66-
/**
67-
* @return the {@code MemoryType} of this Cacheable
68-
*/
69-
MemoryType getMemoryType();
70-
71-
/**
72-
* SHARED means when this Cacheable is read back from cache it refers to the same memory area as
73-
* used by the cache for caching it. EXCLUSIVE means when this Cacheable is read back from cache,
74-
* the data was copied to an exclusive memory area of this Cacheable.
75-
*/
76-
enum MemoryType {
77-
SHARED, EXCLUSIVE
78-
}
79-
8066
/******************************* ReferenceCounted Interfaces ***********************************/
8167

8268
/**

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121

2222
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
2323
import org.apache.yetus.audience.InterfaceAudience;
24-
import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
2524
import org.apache.hadoop.hbase.nio.ByteBuff;
2625

2726
/**
@@ -33,11 +32,10 @@ public interface CacheableDeserializer<T extends Cacheable> {
3332
/**
3433
* @param b ByteBuff to deserialize the Cacheable.
3534
* @param allocator to manage NIO ByteBuffers for future allocation or de-allocation.
36-
* @param memType the {@link MemoryType} of the buffer
3735
* @return T the deserialized object.
3836
* @throws IOException
3937
*/
40-
T deserialize(ByteBuff b, ByteBuffAllocator allocator, MemoryType memType) throws IOException;
38+
T deserialize(ByteBuff b, ByteBuffAllocator allocator) throws IOException;
4139

4240
/**
4341
* Get the identifier of this deserializer. Identifier is unique for each deserializer and

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java

Lines changed: 5 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -201,8 +201,6 @@ static class Header {
201201
*/
202202
private long offset = UNSET;
203203

204-
private MemoryType memType = MemoryType.EXCLUSIVE;
205-
206204
/**
207205
* The on-disk size of the next block, including the header and checksums if present.
208206
* UNSET if unknown.
@@ -274,7 +272,7 @@ private BlockDeserializer() {
274272
}
275273

276274
@Override
277-
public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc, MemoryType memType)
275+
public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc)
278276
throws IOException {
279277
// The buf has the file block followed by block metadata.
280278
// Set limit to just before the BLOCK_METADATA_SPACE then rewind.
@@ -287,8 +285,7 @@ public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc, MemoryType
287285
boolean usesChecksum = buf.get() == (byte) 1;
288286
long offset = buf.getLong();
289287
int nextBlockOnDiskSize = buf.getInt();
290-
return new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null,
291-
alloc);
288+
return new HFileBlock(newByteBuff, usesChecksum, offset, nextBlockOnDiskSize, null, alloc);
292289
}
293290

294291
@Override
@@ -366,7 +363,7 @@ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
366363
* to that point.
367364
* @param buf Has header, content, and trailing checksums if present.
368365
*/
369-
HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, MemoryType memType, final long offset,
366+
HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, final long offset,
370367
final int nextBlockOnDiskSize, HFileContext fileContext, ByteBuffAllocator allocator)
371368
throws IOException {
372369
buf.rewind();
@@ -398,7 +395,6 @@ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
398395
assert usesHBaseChecksum == fileContext.isUseHBaseChecksum();
399396
init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, offset,
400397
onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext, allocator);
401-
this.memType = memType;
402398
this.offset = offset;
403399
this.buf = buf;
404400
this.buf.rewind();
@@ -1785,8 +1781,8 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
17851781
// The onDiskBlock will become the headerAndDataBuffer for this block.
17861782
// If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
17871783
// contains the header of next block, so no need to set next block's header in it.
1788-
HFileBlock hFileBlock = new HFileBlock(curBlock, checksumSupport, MemoryType.EXCLUSIVE,
1789-
offset, nextBlockOnDiskSize, fileContext, intoHeap ? HEAP: allocator);
1784+
HFileBlock hFileBlock = new HFileBlock(curBlock, checksumSupport, offset,
1785+
nextBlockOnDiskSize, fileContext, intoHeap ? HEAP : allocator);
17901786
// Run check on uncompressed sizings.
17911787
if (!fileContext.isCompressedOrEncrypted()) {
17921788
hFileBlock.sanityCheckUncompressed();
@@ -2060,18 +2056,6 @@ HFileContext getHFileContext() {
20602056
return this.fileContext;
20612057
}
20622058

2063-
@Override
2064-
public MemoryType getMemoryType() {
2065-
return this.memType;
2066-
}
2067-
2068-
/**
2069-
* @return true if this block is backed by a shared memory area(such as that of a BucketCache).
2070-
*/
2071-
boolean usesSharedMemory() {
2072-
return this.memType == MemoryType.SHARED;
2073-
}
2074-
20752059
/**
20762060
* Convert the contents of the block header into a human readable string.
20772061
* This is mostly helpful for debugging. This assumes that the block

0 commit comments

Comments (0)