@@ -285,7 +285,7 @@ public HFileBlock deserialize(ByteBuff buf, ByteBuffAllocator alloc)
boolean usesChecksum = buf.get() == (byte) 1;
long offset = buf.getLong();
int nextBlockOnDiskSize = buf.getInt();
- return new HFileBlock(newByteBuff, usesChecksum, offset, nextBlockOnDiskSize, null, alloc);
+ return createFromBuff(newByteBuff, usesChecksum, offset, nextBlockOnDiskSize, null, alloc);
}

@Override
@@ -300,28 +300,6 @@ public int getDeserializerIdentifier() {
CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
}

- /**
- * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
- */
- private HFileBlock(HFileBlock that) {
- this(that, false);
- }
-
- /**
- * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
- * param.
- */
- private HFileBlock(HFileBlock that, boolean bufCopy) {
- init(that.blockType, that.onDiskSizeWithoutHeader, that.uncompressedSizeWithoutHeader,
- that.prevBlockOffset, that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize,
- that.fileContext, that.allocator);
- if (bufCopy) {
- this.buf = ByteBuff.wrap(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
- } else {
- this.buf = that.buf.duplicate();
- }
- }
-
/**
* Creates a new {@link HFile} block from the given fields. This constructor
* is used only while writing blocks and caching,
@@ -337,20 +315,27 @@ private HFileBlock(HFileBlock that, boolean bufCopy) {
* @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader}
* @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
* @param prevBlockOffset see {@link #prevBlockOffset}
- * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
+ * @param buf block buffer with header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
* @param fillHeader when true, write the first 4 header fields into passed buffer.
* @param offset the file offset the block was read from
* @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
* @param fileContext HFile meta data
*/
@VisibleForTesting
public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
- int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuffer b, boolean fillHeader,
- long offset, final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader,
- HFileContext fileContext, ByteBuffAllocator allocator) {
- init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, offset,
- onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext, allocator);
- this.buf = new SingleByteBuff(b);
+ int uncompressedSizeWithoutHeader, long prevBlockOffset, ByteBuff buf, boolean fillHeader,
+ long offset, int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext,
+ ByteBuffAllocator allocator) {
+ this.blockType = blockType;
+ this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
+ this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
+ this.prevBlockOffset = prevBlockOffset;
+ this.offset = offset;
+ this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
+ this.nextBlockOnDiskSize = nextBlockOnDiskSize;
+ this.fileContext = fileContext;
+ this.allocator = allocator;
+ this.buf = buf;
if (fillHeader) {
overwriteHeader();
}
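Note on the signature change above: the constructor now takes an org.apache.hadoop.hbase.nio.ByteBuff rather than a raw java.nio.ByteBuffer, so the wrapping that the old code did internally via new SingleByteBuff(b) moves to the call site. A minimal caller-side sketch, assuming a heap-backed byte[] payload; the class and method names below are illustrative, not part of the patch.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.ByteBuff;

// Illustrative helper only: shows the caller-side wrapping the new signature expects.
final class ConstructorMigrationSketch {
  static ByteBuff wrapForNewConstructor(byte[] headerAndData) {
    // The old signature accepted a ByteBuffer and wrapped it itself; callers now pass a ByteBuff.
    return ByteBuff.wrap(ByteBuffer.wrap(headerAndData));
  }
}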
@@ -364,7 +349,7 @@ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
* to that point.
* @param buf Has header, content, and trailing checksums if present.
*/
- HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, final long offset,
+ static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final long offset,
final int nextBlockOnDiskSize, HFileContext fileContext, ByteBuffAllocator allocator)
throws IOException {
buf.rewind();
@@ -375,15 +360,15 @@ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
// This constructor is called when we deserialize a block from cache and when we read a block in
// from the fs. fileCache is null when deserialized from cache so need to make up one.
- HFileContextBuilder fileContextBuilder = fileContext != null ?
- new HFileContextBuilder(fileContext): new HFileContextBuilder();
+ HFileContextBuilder fileContextBuilder =
+ fileContext != null ? new HFileContextBuilder(fileContext) : new HFileContextBuilder();
fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
int onDiskDataSizeWithHeader;
if (usesHBaseChecksum) {
byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
- // Use the checksum type and bytes per checksum from header, not from filecontext.
+ // Use the checksum type and bytes per checksum from header, not from fileContext.
fileContextBuilder.withChecksumType(ChecksumType.codeToType(checksumType));
fileContextBuilder.withBytesPerCheckSum(bytesPerChecksum);
} else {
@@ -394,29 +379,19 @@ public HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader,
}
fileContext = fileContextBuilder.build();
assert usesHBaseChecksum == fileContext.isUseHBaseChecksum();
- init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader, prevBlockOffset, offset,
- onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext, allocator);
- this.offset = offset;
- this.buf = buf;
- this.buf.rewind();
- }
-
- /**
- * Called from constructors.
- */
- private void init(BlockType blockType, int onDiskSizeWithoutHeader,
- int uncompressedSizeWithoutHeader, long prevBlockOffset, long offset,
- int onDiskDataSizeWithHeader, final int nextBlockOnDiskSize, HFileContext fileContext,
- ByteBuffAllocator allocator) {
- this.blockType = blockType;
- this.onDiskSizeWithoutHeader = onDiskSizeWithoutHeader;
- this.uncompressedSizeWithoutHeader = uncompressedSizeWithoutHeader;
- this.prevBlockOffset = prevBlockOffset;
- this.offset = offset;
- this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader;
- this.nextBlockOnDiskSize = nextBlockOnDiskSize;
- this.fileContext = fileContext;
- this.allocator = allocator;
+ return new HFileBlockBuilder()
+ .withBlockType(blockType)
+ .withOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader)
+ .withUncompressedSizeWithoutHeader(uncompressedSizeWithoutHeader)
+ .withPrevBlockOffset(prevBlockOffset)
+ .withOffset(offset)
+ .withOnDiskDataSizeWithHeader(onDiskDataSizeWithHeader)
+ .withNextBlockOnDiskSize(nextBlockOnDiskSize)
+ .withHFileContext(fileContext)
+ .withByteBuffAllocator(allocator)
+ .withByteBuff(buf.rewind())
+ .withShared(!buf.hasArray())
+ .build();
}

/**
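HFileBlockBuilder itself is added in a separate file of this change and is not visible in these hunks. Below is a rough sketch of its shape, inferred only from the with* calls used in this diff and from the public HFileBlock constructor shown earlier; the real build() presumably also chooses between SharedMemHFileBlock and ExclusiveMemHFileBlock based on the shared flag, which this sketch does not attempt.

package org.apache.hadoop.hbase.io.hfile;

import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.nio.ByteBuff;

// Sketch only: fluent setters mirror the with* calls in this diff; the shipped class may differ.
public class HFileBlockBuilder {
  private BlockType blockType;
  private int onDiskSizeWithoutHeader;
  private int uncompressedSizeWithoutHeader;
  private long prevBlockOffset;
  private ByteBuff buf;
  private boolean fillHeader = false;
  private long offset = -1;
  private int nextBlockOnDiskSize = -1;
  private int onDiskDataSizeWithHeader;
  private HFileContext fileContext;
  private ByteBuffAllocator allocator = ByteBuffAllocator.HEAP;
  private boolean isShared;

  public HFileBlockBuilder withBlockType(BlockType v) { this.blockType = v; return this; }
  public HFileBlockBuilder withOnDiskSizeWithoutHeader(int v) { this.onDiskSizeWithoutHeader = v; return this; }
  public HFileBlockBuilder withUncompressedSizeWithoutHeader(int v) { this.uncompressedSizeWithoutHeader = v; return this; }
  public HFileBlockBuilder withPrevBlockOffset(long v) { this.prevBlockOffset = v; return this; }
  public HFileBlockBuilder withByteBuff(ByteBuff v) { this.buf = v; return this; }
  public HFileBlockBuilder withFillHeader(boolean v) { this.fillHeader = v; return this; }
  public HFileBlockBuilder withOffset(long v) { this.offset = v; return this; }
  public HFileBlockBuilder withNextBlockOnDiskSize(int v) { this.nextBlockOnDiskSize = v; return this; }
  public HFileBlockBuilder withOnDiskDataSizeWithHeader(int v) { this.onDiskDataSizeWithHeader = v; return this; }
  public HFileBlockBuilder withHFileContext(HFileContext v) { this.fileContext = v; return this; }
  public HFileBlockBuilder withByteBuffAllocator(ByteBuffAllocator v) { this.allocator = v; return this; }
  public HFileBlockBuilder withShared(boolean v) { this.isShared = v; return this; }

  public HFileBlock build() {
    // The real builder presumably returns a SharedMemHFileBlock or ExclusiveMemHFileBlock
    // depending on isShared; this sketch simply calls the public constructor shown above.
    return new HFileBlock(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
      prevBlockOffset, buf, fillHeader, offset, nextBlockOnDiskSize, onDiskDataSizeWithHeader,
      fileContext, allocator);
  }
}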
@@ -640,7 +615,7 @@ public String toString() {
.append("(").append(onDiskSizeWithoutHeader)
.append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")");
}
- String dataBegin = null;
+ String dataBegin;
if (buf.hasArray()) {
dataBegin = Bytes.toStringBinary(buf.array(), buf.arrayOffset() + headerSize(),
Math.min(32, buf.limit() - buf.arrayOffset() - headerSize()));
@@ -674,7 +649,7 @@ HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException
return this;
}

- HFileBlock unpacked = new HFileBlock(this);
+ HFileBlock unpacked = shallowClone(this);
unpacked.allocateBuffer(); // allocates space for the decompressed block
boolean succ = false;
try {
@@ -762,10 +737,16 @@ public long heapSize() {
}

/**
- * @return true to indicate the block is allocated from JVM heap, otherwise from off-heap.
+ * Will be overridden by {@link SharedMemHFileBlock} or {@link ExclusiveMemHFileBlock}. Returns
+ * true by default.
*/
- boolean isOnHeap() {
- return buf.hasArray();
+ public boolean isSharedMem() {
+ if (this instanceof SharedMemHFileBlock) {
+ return true;
+ } else if (this instanceof ExclusiveMemHFileBlock) {
+ return false;
+ }
+ return true;
}

/**
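SharedMemHFileBlock and ExclusiveMemHFileBlock are likewise introduced elsewhere in the change. The practical use of isSharedMem() is that shared (possibly pooled, off-heap) blocks have to be copied before being retained outside the read path, while exclusive on-heap blocks can be kept as-is. A hedged sketch of that consumer-side pattern, assuming same-package access to the static clone helpers added at the bottom of this diff; the helper name is illustrative.

// Illustrative only: how a cache or other long-lived consumer might use isSharedMem().
static HFileBlock toRetainableBlock(HFileBlock block) {
  // Shared-memory blocks are detached onto the JVM heap before being kept around;
  // exclusive blocks already own their bytes and can be stored directly.
  return block.isSharedMem() ? HFileBlock.deepCloneOnHeap(block) : block;
}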
@@ -1040,8 +1021,7 @@ void writeHeaderAndData(FSDataOutputStream out) throws IOException {
+ offset);
}
startOffset = offset;
-
- finishBlockAndWriteHeaderAndData((DataOutputStream) out);
+ finishBlockAndWriteHeaderAndData(out);
}

/**
@@ -1252,13 +1232,27 @@ HFileBlock getBlockForCaching(CacheConfig cacheConf) {
.withIncludesMvcc(fileContext.isIncludesMvcc())
.withIncludesTags(fileContext.isIncludesTags())
.build();
- return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
- getUncompressedSizeWithoutHeader(), prevOffset,
- cacheConf.shouldCacheCompressed(blockType.getCategory()) ? cloneOnDiskBufferWithHeader()
- : cloneUncompressedBufferWithHeader(),
- FILL_HEADER, startOffset, UNSET,
- onDiskBlockBytesWithHeader.size() + onDiskChecksum.length, newContext,
- cacheConf.getByteBuffAllocator());
+ // Build the HFileBlock.
+ HFileBlockBuilder builder = new HFileBlockBuilder();
+ ByteBuffer buffer;
+ if (cacheConf.shouldCacheCompressed(blockType.getCategory())) {
+ buffer = cloneOnDiskBufferWithHeader();
+ } else {
+ buffer = cloneUncompressedBufferWithHeader();
+ }
+ return builder.withBlockType(blockType)
+ .withOnDiskSizeWithoutHeader(getOnDiskSizeWithoutHeader())
+ .withUncompressedSizeWithoutHeader(getUncompressedSizeWithoutHeader())
+ .withPrevBlockOffset(prevOffset)
+ .withByteBuff(ByteBuff.wrap(buffer))
+ .withFillHeader(FILL_HEADER)
+ .withOffset(startOffset)
+ .withNextBlockOnDiskSize(UNSET)
+ .withOnDiskDataSizeWithHeader(onDiskBlockBytesWithHeader.size() + onDiskChecksum.length)
+ .withHFileContext(newContext)
+ .withByteBuffAllocator(cacheConf.getByteBuffAllocator())
+ .withShared(!buffer.hasArray())
+ .build();
}
}
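The withShared(!buffer.hasArray()) call above keys the shared flag off whether the cloned buffer is heap-backed. A JDK-only illustration of that distinction (not HBase code):

import java.nio.ByteBuffer;

// Heap buffers expose a backing array; direct (off-heap) buffers do not, which is what
// the !buffer.hasArray() test above relies on.
public class HasArrayDemo {
  public static void main(String[] args) {
    System.out.println(ByteBuffer.allocate(64).hasArray());       // true  -> withShared(false)
    System.out.println(ByteBuffer.allocateDirect(64).hasArray()); // false -> withShared(true)
  }
}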
@@ -1782,8 +1776,8 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
// The onDiskBlock will become the headerAndDataBuffer for this block.
// If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
// contains the header of next block, so no need to set next block's header in it.
- HFileBlock hFileBlock = new HFileBlock(curBlock, checksumSupport, offset,
- nextBlockOnDiskSize, fileContext, intoHeap ? HEAP : allocator);
+ HFileBlock hFileBlock = createFromBuff(curBlock, checksumSupport, offset,
+ nextBlockOnDiskSize, fileContext, intoHeap ? HEAP : allocator);
// Run check on uncompressed sizings.
if (!fileContext.isCompressedOrEncrypted()) {
hFileBlock.sanityCheckUncompressed();
@@ -1948,7 +1942,7 @@ public boolean equals(Object comparison) {
if (comparison == null) {
return false;
}
- if (comparison.getClass() != this.getClass()) {
+ if (!(comparison instanceof HFileBlock)) {
return false;
}
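Relaxing the getClass() comparison to instanceof matters once HFileBlock has the SharedMemHFileBlock and ExclusiveMemHFileBlock subclasses: a block and its on-heap clone may be different concrete classes yet hold identical contents, and a strict class check would report them unequal. A toy, self-contained illustration of the difference (not HBase classes):

// Toy example: with instanceof, a Base and a Sub holding the same field compare equal;
// a getClass() check would reject the pair purely because the concrete classes differ.
class Base {
  final int value;
  Base(int value) { this.value = value; }
  @Override
  public boolean equals(Object other) {
    if (!(other instanceof Base)) {
      return false;
    }
    return ((Base) other).value == this.value;
  }
  @Override
  public int hashCode() { return value; }
}

class Sub extends Base {
  Sub(int value) { super(value); }
}

// new Base(1).equals(new Sub(1)) -> true with instanceof; false if equals() used getClass().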
@@ -2085,7 +2079,27 @@ static String toStringHeader(ByteBuff buf) throws IOException {
" onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
}

- public HFileBlock deepCloneOnHeap() {
- return new HFileBlock(this, true);
+ private static HFileBlockBuilder createBuilder(HFileBlock blk) {
+ return new HFileBlockBuilder()
+ .withBlockType(blk.blockType)
+ .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader)
+ .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader)
+ .withPrevBlockOffset(blk.prevBlockOffset)
+ .withByteBuff(blk.buf.duplicate()) // Duplicate the buffer.
+ .withOffset(blk.offset)
+ .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader)
+ .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize)
+ .withHFileContext(blk.fileContext)
+ .withByteBuffAllocator(blk.allocator)
+ .withShared(blk.isSharedMem());
+ }
+
+ static HFileBlock shallowClone(HFileBlock blk) {
+ return createBuilder(blk).build();
+ }
+
+ static HFileBlock deepCloneOnHeap(HFileBlock blk) {
+ ByteBuff deepCloned = ByteBuff.wrap(ByteBuffer.wrap(blk.buf.toBytes(0, blk.buf.limit())));
+ return createBuilder(blk).withByteBuff(deepCloned).withShared(false).build();
}
}
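Usage note on the two clone flavours above: shallowClone() only duplicates the ByteBuff view, so position and limit are independent while the bytes stay shared (and the clone keeps the original's shared/exclusive nature via withShared(blk.isSharedMem())), whereas deepCloneOnHeap() copies the bytes into a fresh heap array and forces withShared(false). A small hedged sketch, assuming same-package access; the helper names are illustrative.

// Illustrative helpers contrasting the two clone paths added above.
final class CloneUsageSketch {
  /** Cheap clone for a reader that just needs an independent cursor over the same bytes. */
  static HFileBlock independentCursor(HFileBlock blk) {
    return HFileBlock.shallowClone(blk); // shares the underlying buffer
  }

  /** Detached copy for anything that must outlive the original (possibly pooled) buffer. */
  static HFileBlock detachedCopy(HFileBlock blk) {
    return HFileBlock.deepCloneOnHeap(blk); // bytes copied onto the JVM heap
  }
}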