Commit b76c7f5

HBASE-28806 ExportSnapshot failed if reference file presented
1 parent 31da0f2 commit b76c7f5

File tree

2 files changed: +57 -1 lines


hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java

Lines changed: 10 additions & 1 deletion
@@ -26,8 +26,10 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;

@@ -656,6 +658,7 @@ private static List<Pair<SnapshotFileInfo, Long>> getSnapshotFiles(final Configu
 
     // Get snapshot files
     LOG.info("Loading Snapshot '" + snapshotDesc.getName() + "' hfile list");
+    Set<String> addedFiles = new HashSet<>();
     SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc,
       new SnapshotReferenceUtil.SnapshotVisitor() {
         @Override

@@ -675,7 +678,13 @@ public void storeFile(final RegionInfo regionInfo, final String family,
             snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, referencedRegion, family,
               referencedHFile, storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
           }
-          files.add(snapshotFileAndSize);
+          String fileToExport = snapshotFileAndSize.getFirst().getHfile();
+          if (!addedFiles.contains(fileToExport)) {
+            files.add(snapshotFileAndSize);
+            addedFiles.add(fileToExport);
+          } else {
+            LOG.debug("Skip the existing file: {}.", fileToExport);
+          }
         }
       });
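The change keys de-duplication on the resolved hfile path from snapshotFileAndSize.getFirst().getHfile(). When the snapshot covers a split region, the daughter regions carry reference files that resolve back to the same parent hfile, so the visitor can emit the same physical file more than once and, as the new test below notes, different mappers can then try to copy the same target file concurrently. The following standalone sketch only illustrates that de-duplication pattern; the class and method names (FileAndSize, dedupByHfilePath) are made up for the example and are not the actual ExportSnapshot types.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative sketch of the patch's de-duplication idea: two snapshot reference
// files that resolve to the same underlying hfile must produce only one entry in
// the list handed to the export mappers.
public final class SnapshotFileDedupSketch {

  // Stand-in for Pair<SnapshotFileInfo, Long>; the real types live in hbase-mapreduce.
  static final class FileAndSize {
    final String hfilePath;
    final long size;

    FileAndSize(String hfilePath, long size) {
      this.hfilePath = hfilePath;
      this.size = size;
    }
  }

  static List<FileAndSize> dedupByHfilePath(List<FileAndSize> resolved) {
    Set<String> addedFiles = new HashSet<>();
    List<FileAndSize> unique = new ArrayList<>();
    for (FileAndSize f : resolved) {
      // Set.add returns false when the path was already seen, i.e. this entry is a
      // second reference to an hfile that is already queued for export.
      if (addedFiles.add(f.hfilePath)) {
        unique.add(f);
      }
    }
    return unique;
  }

  public static void main(String[] args) {
    List<FileAndSize> resolved = List.of(
      new FileAndSize("cf/parent-hfile", 1024),  // the real hfile
      new FileAndSize("cf/parent-hfile", 1024),  // same hfile, reached via a daughter reference
      new FileAndSize("cf/other-hfile", 2048));
    System.out.println(dedupByHfilePath(resolved).size()); // prints 2, not 3
  }
}

Keeping only the first occurrence matches the patch itself, which records each resolved path in a HashSet and skips later duplicates with a debug log.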

hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java

Lines changed: 47 additions & 0 deletions
@@ -51,10 +51,12 @@
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
+import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.AfterClass;

@@ -206,6 +208,51 @@ public void testExportFileSystemStateWithMergeRegion() throws Exception {
     TEST_UTIL.deleteTable(tableName0);
   }
 
+  @Test
+  public void testExportFileSystemStateWithSplitRegion() throws Exception {
+    // disable compaction
+    admin.compactionSwitch(false,
+      admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
+    // create table
+    TableName splitTableName = TableName.valueOf(testName.getMethodName());
+    String splitTableSnap = "snapshot-" + testName.getMethodName();
+    admin.createTable(TableDescriptorBuilder.newBuilder(splitTableName).setColumnFamilies(
+      Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build());
+
+    Path output = TEST_UTIL.getDataTestDir("output/cf");
+    TEST_UTIL.getTestFileSystem().mkdirs(output);
+    // Create and load a large hfile so that the MR job runs long enough.
+    HFileTestUtil.createHFile(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(),
+      new Path(output, "test_file"), FAMILY, Bytes.toBytes("q"), Bytes.toBytes("1"),
+      Bytes.toBytes("9"), 9999999);
+    BulkLoadHFilesTool tool = new BulkLoadHFilesTool(TEST_UTIL.getConfiguration());
+    tool.run(new String[] { output.getParent().toString(), splitTableName.getNameAsString() });
+
+    List<RegionInfo> regions = admin.getRegions(splitTableName);
+    assertEquals(1, regions.size());
+    tableNumFiles = regions.size();
+
+    // split region
+    admin.splitRegionAsync(regions.get(0).getEncodedNameAsBytes(), Bytes.toBytes("5")).get();
+    regions = admin.getRegions(splitTableName);
+    assertEquals(2, regions.size());
+
+    // take a snapshot
+    admin.snapshot(splitTableSnap, splitTableName);
+    // export snapshot and verify
+    Configuration tmpConf = TEST_UTIL.getConfiguration();
+    // Decrease the copier buffer size so the export tasks do not finish too quickly.
+    tmpConf.setInt("snapshot.export.buffer.size", 1);
+    // Decrease the maximum number of files per mapper so the three files (1 hfile + 2 reference
+    // files) are copied by different mappers concurrently.
+    tmpConf.setInt("snapshot.export.default.map.group", 1);
+    testExportFileSystemState(tmpConf, splitTableName, Bytes.toBytes(splitTableSnap),
+      Bytes.toBytes(splitTableSnap), tableNumFiles, TEST_UTIL.getDefaultRootDirPath(),
+      getHdfsDestinationDir(), false, false, getBypassRegionPredicate(), true, false);
+    // delete table
+    TEST_UTIL.deleteTable(splitTableName);
+  }
+
   @Test
   public void testExportFileSystemStateWithSkipTmp() throws Exception {
     TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true);
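The test reproduces the failure by bulk loading one large hfile, splitting the region so the snapshot resolves to one hfile plus two reference files, and then lowering snapshot.export.buffer.size and snapshot.export.default.map.group to 1 so those files are copied slowly and by separate mappers. The sketch below shows how roughly the same knobs could be applied when driving the tool directly; it is a hedged example that assumes ExportSnapshot can be run through Hadoop's ToolRunner as an ordinary Tool, and the snapshot name and destination URI are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public final class ExportSplitSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same knobs the test lowers to widen the race window: a tiny copy buffer and at
    // most one file per mapper, so the hfile and the two reference files are handled
    // by separate, concurrent mappers. The defaults should be kept in normal use.
    conf.setInt("snapshot.export.buffer.size", 1);
    conf.setInt("snapshot.export.default.map.group", 1);

    // "snapshot-of-split-table" and the destination URI are placeholder values.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
      "-snapshot", "snapshot-of-split-table",
      "-copy-to", "hdfs://backup-cluster:8020/hbase" });
    System.exit(exitCode);
  }
}

Before this fix, such an export of a snapshot taken right after a split could hand the same resolved hfile to more than one mapper; with the de-duplication in ExportSnapshot.java the file list contains each hfile only once.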
