5151import org .apache .hadoop .hbase .client .RegionInfoBuilder ;
5252import org .apache .hadoop .hbase .fs .HFileSystem ;
5353import org .apache .hadoop .hbase .io .ByteBuffAllocator ;
54+ import org .apache .hadoop .hbase .io .HFileLink ;
5455import org .apache .hadoop .hbase .io .hfile .BlockCache ;
5556import org .apache .hadoop .hbase .io .hfile .BlockCacheFactory ;
5657import org .apache .hadoop .hbase .io .hfile .BlockCacheKey ;
7172import org .apache .hadoop .hbase .regionserver .HRegionFileSystem ;
7273import org .apache .hadoop .hbase .regionserver .HStoreFile ;
7374import org .apache .hadoop .hbase .regionserver .StoreContext ;
75+ import org .apache .hadoop .hbase .regionserver .StoreFileInfo ;
7476import org .apache .hadoop .hbase .regionserver .StoreFileWriter ;
7577import org .apache .hadoop .hbase .regionserver .storefiletracker .StoreFileTracker ;
7678import org .apache .hadoop .hbase .regionserver .storefiletracker .StoreFileTrackerFactory ;
7779import org .apache .hadoop .hbase .testclassification .IOTests ;
7880import org .apache .hadoop .hbase .testclassification .MediumTests ;
7981import org .apache .hadoop .hbase .util .Bytes ;
82+ import org .apache .hadoop .hbase .util .CommonFSUtils ;
8083import org .junit .After ;
8184import org .junit .Before ;
8285import org .junit .ClassRule ;
@@ -363,7 +366,7 @@ public void testPrefetchMetricProgress() throws Exception {
363366 BucketCache bc = BucketCache .getBucketCacheFromCacheConfig (cacheConf ).get ();
364367 MutableLong regionCachedSize = new MutableLong (0 );
365368 // Our file should have 6 DATA blocks. We should wait for all of them to be cached
366- long waitedTime = Waiter .waitFor (conf , 300 , () -> {
369+ Waiter .waitFor (conf , 300 , () -> {
367370 if (bc .getBackingMap ().size () > 0 ) {
368371 long currentSize = bc .getRegionCachedInfo ().get ().get (regionName );
369372 assertTrue (regionCachedSize .getValue () <= currentSize );
@@ -374,6 +377,132 @@ public void testPrefetchMetricProgress() throws Exception {
374377 });
375378 }
376379
  /**
   * Verifies prefetch accounting when a store file is read through an HFileLink whose target has
   * already been fully prefetched: the link should resolve to the already-cached blocks, so no new
   * blocks are inserted into the bucket cache and the per-region cached-size metric for the target
   * file's region stays unchanged.
   */
  @Test
  public void testPrefetchMetricProgressForLinks() throws Exception {
    conf.setLong(BUCKET_CACHE_SIZE_KEY, 200);
    blockCache = BlockCacheFactory.createBlockCache(conf);
    cacheConf = new CacheConfig(conf, blockCache);
    final RegionInfo hri =
      RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
    Configuration testConf = new Configuration(this.conf);
    Path testDir = TEST_UTIL.getDataTestDir(name.getMethodName());
    CommonFSUtils.setRootDir(testConf, testDir);
    Path tableDir = CommonFSUtils.getTableDir(testDir, hri.getTable());
    // NOTE(review): "region" is a second RegionInfo built from the same table name as "hri" but
    // with its own (different) encoded name; the store file below lives under "region"'s dir.
    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(tableDir.getName())).build();
    Path regionDir = new Path(tableDir, region.getEncodedName());
    Path cfDir = new Path(regionDir, "cf");
    HRegionFileSystem regionFS =
      HRegionFileSystem.createRegionOnFileSystem(testConf, fs, tableDir, region);
    Path storeFile = writeStoreFile(100, cfDir);
    // Prefetches the file blocks
    LOG.debug("First read should prefetch the blocks.");
    readStoreFile(storeFile);
    BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
    // Our file should have 6 DATA blocks. We should wait for all of them to be cached
    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 6);
    // Snapshot the cached size for the target file's region; it must not grow after the link read.
    long cachedSize = bc.getRegionCachedInfo().get().get(region.getEncodedName());

    final RegionInfo dstHri =
      RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem dstRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
      CommonFSUtils.getTableDir(testDir, dstHri.getTable()), dstHri);

    // Destination family dir where the link file will live.
    Path dstPath = new Path(regionFS.getTableDir(), new Path(dstHri.getRegionNameAsString(), "cf"));

    // Path of the link, named so it resolves to "region"/storeFile.
    Path linkFilePath =
      new Path(dstPath, HFileLink.createHFileLinkName(region, storeFile.getName()));

    StoreFileTracker sft = StoreFileTrackerFactory.create(testConf, false,
      StoreContext.getBuilder().withFamilyStoreDirectoryPath(dstPath)
        .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of("cf"))
        .withRegionFileSystem(dstRegionFs).build());
    // NOTE(review): this creates a link referencing hri.getEncodedName(), while linkFilePath above
    // was built from "region"'s encoded name — the two likely differ, so the link file created
    // here may not be the one read back below (resolution appears to rely on parsing
    // linkFilePath's name). Confirm whether region.getEncodedName() was intended here.
    sft.createHFileLink(hri.getTable(), hri.getEncodedName(), storeFile.getName(), true);
    StoreFileInfo sfi = sft.getStoreFileInfo(linkFilePath, true);

    HStoreFile hsf = new HStoreFile(sfi, BloomType.NONE, cacheConf);
    assertTrue(sfi.isLink());
    hsf.initReader();
    HFile.Reader reader = hsf.getReader().getHFileReader();
    while (!reader.prefetchComplete()) {
      // Sleep for a bit
      Thread.sleep(1000);
    }
    // HFileLink use the path of the target file to create a reader, so it should resolve to the
    // already cached blocks and not insert new blocks in the cache.
    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 6);

    assertEquals(cachedSize, (long) bc.getRegionCachedInfo().get().get(region.getEncodedName()));
  }
437+
438+ @ Test
439+ public void testPrefetchMetricProgressForLinksToArchived () throws Exception {
440+ conf .setLong (BUCKET_CACHE_SIZE_KEY , 200 );
441+ blockCache = BlockCacheFactory .createBlockCache (conf );
442+ cacheConf = new CacheConfig (conf , blockCache );
443+
444+ // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
445+ Configuration testConf = new Configuration (this .conf );
446+ Path testDir = TEST_UTIL .getDataTestDir (name .getMethodName ());
447+ CommonFSUtils .setRootDir (testConf , testDir );
448+
449+ final RegionInfo hri =
450+ RegionInfoBuilder .newBuilder (TableName .valueOf (name .getMethodName ())).build ();
451+ Path tableDir = CommonFSUtils .getTableDir (testDir , hri .getTable ());
452+ RegionInfo region = RegionInfoBuilder .newBuilder (TableName .valueOf (tableDir .getName ())).build ();
453+ Path regionDir = new Path (tableDir , region .getEncodedName ());
454+ Path cfDir = new Path (regionDir , "cf" );
455+
456+ Path storeFile = writeStoreFile (100 , cfDir );
457+ // Prefetches the file blocks
458+ LOG .debug ("First read should prefetch the blocks." );
459+ readStoreFile (storeFile );
460+ BucketCache bc = BucketCache .getBucketCacheFromCacheConfig (cacheConf ).get ();
461+ // Our file should have 6 DATA blocks. We should wait for all of them to be cached
462+ Waiter .waitFor (testConf , 300 , () -> bc .getBackingMap ().size () == 6 );
463+ long cachedSize = bc .getRegionCachedInfo ().get ().get (region .getEncodedName ());
464+
465+ // create another file, but in the archive dir, hence it won't be cached
466+ Path archiveRoot = new Path (testDir , "archive" );
467+ Path archiveTableDir = CommonFSUtils .getTableDir (archiveRoot , hri .getTable ());
468+ Path archiveRegionDir = new Path (archiveTableDir , region .getEncodedName ());
469+ Path archiveCfDir = new Path (archiveRegionDir , "cf" );
470+ Path archivedFile = writeStoreFile (100 , archiveCfDir );
471+
472+ final RegionInfo testRegion =
473+ RegionInfoBuilder .newBuilder (TableName .valueOf (tableDir .getName ())).build ();
474+ final HRegionFileSystem testRegionFs = HRegionFileSystem .createRegionOnFileSystem (testConf , fs ,
475+ CommonFSUtils .getTableDir (testDir , testRegion .getTable ()), testRegion );
476+ // Just create a link to the archived file
477+ Path dstPath = new Path (tableDir , new Path (testRegion .getEncodedName (), "cf" ));
478+
479+ Path linkFilePath =
480+ new Path (dstPath , HFileLink .createHFileLinkName (region , archivedFile .getName ()));
481+
482+ StoreFileTracker sft = StoreFileTrackerFactory .create (testConf , false ,
483+ StoreContext .getBuilder ().withFamilyStoreDirectoryPath (dstPath )
484+ .withColumnFamilyDescriptor (ColumnFamilyDescriptorBuilder .of ("cf" ))
485+ .withRegionFileSystem (testRegionFs ).build ());
486+ sft .createHFileLink (hri .getTable (), hri .getEncodedName (), storeFile .getName (), true );
487+ StoreFileInfo sfi = sft .getStoreFileInfo (linkFilePath , true );
488+
489+ HStoreFile hsf = new HStoreFile (sfi , BloomType .NONE , cacheConf );
490+ assertTrue (sfi .isLink ());
491+ hsf .initReader ();
492+ HFile .Reader reader = hsf .getReader ().getHFileReader ();
493+ while (!reader .prefetchComplete ()) {
494+ // Sleep for a bit
495+ Thread .sleep (1000 );
496+ }
497+ // HFileLink use the path of the target file to create a reader, but the target file is in the
498+ // archive, so it wasn't cached previously and should be cached when we open the link.
499+ Waiter .waitFor (testConf , 300 , () -> bc .getBackingMap ().size () == 12 );
500+ // cached size for the region of target file shouldn't change
501+ assertEquals (cachedSize , (long ) bc .getRegionCachedInfo ().get ().get (region .getEncodedName ()));
502+ // cached size for the region with link pointing to archive dir shouldn't be updated
503+ assertNull (bc .getRegionCachedInfo ().get ().get (testRegion .getEncodedName ()));
504+ }
505+
377506 private void readStoreFile (Path storeFilePath ) throws Exception {
378507 readStoreFile (storeFilePath , (r , o ) -> {
379508 HFileBlock block = null ;
0 commit comments