Skip to content

Commit 3c12f7b

Browse files
committed
Revert "HBASE-28806 ExportSnapshot failed if reference file presented"
This reverts commit 1780ab6.
1 parent 1780ab6 commit 3c12f7b

File tree

2 files changed

+1
-53
lines changed

2 files changed

+1
-53
lines changed

hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,6 @@
2828
import java.util.Comparator;
2929
import java.util.LinkedList;
3030
import java.util.List;
31-
import java.util.Set;
32-
import java.util.TreeSet;
3331
import java.util.concurrent.ExecutionException;
3432
import java.util.concurrent.ExecutorService;
3533
import java.util.concurrent.Executors;
@@ -661,7 +659,6 @@ private static List<Pair<SnapshotFileInfo, Long>> getSnapshotFiles(final Configu
661659

662660
// Get snapshot files
663661
LOG.info("Loading Snapshot '" + snapshotDesc.getName() + "' hfile list");
664-
Set<String> existingFiles = new TreeSet<>();
665662
SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc,
666663
new SnapshotReferenceUtil.SnapshotVisitor() {
667664
@Override
@@ -681,13 +678,7 @@ public void storeFile(final RegionInfo regionInfo, final String family,
681678
snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, referencedRegion, family,
682679
referencedHFile, storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
683680
}
684-
String fileToExport = snapshotFileAndSize.getFirst().getHfile();
685-
if (!existingFiles.contains(fileToExport)) {
686-
files.add(snapshotFileAndSize);
687-
existingFiles.add(fileToExport);
688-
} else {
689-
LOG.debug("Skip the existing file: {}.", fileToExport);
690-
}
681+
files.add(snapshotFileAndSize);
691682
}
692683
});
693684

hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,6 @@ public static void setUpBaseConf(Configuration conf) {
106106
// If a single node has enough failures (default 3), resource manager will blacklist it.
107107
// With only 2 nodes and tests injecting faults, we don't want that.
108108
conf.setInt("mapreduce.job.maxtaskfailures.per.tracker", 100);
109-
conf.setInt("snapshot.export.default.map.group", 1);
110109
}
111110

112111
@BeforeClass
@@ -207,48 +206,6 @@ public void testExportFileSystemStateWithMergeRegion() throws Exception {
207206
TEST_UTIL.deleteTable(tableName0);
208207
}
209208

210-
@Test
211-
public void testExportFileSystemStateWithSplitRegion() throws Exception {
212-
// disable compaction
213-
admin.compactionSwitch(false,
214-
admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
215-
// create Table
216-
TableName splitTableName = TableName.valueOf(testName.getMethodName());
217-
String splitTableSnap = "snapshot-" + testName.getMethodName();
218-
admin.createTable(TableDescriptorBuilder.newBuilder(splitTableName).setColumnFamilies(
219-
Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build());
220-
221-
// put some data
222-
try (Table table = admin.getConnection().getTable(splitTableName)) {
223-
table.put(new Put(Bytes.toBytes("row1")).addColumn(FAMILY, null, Bytes.toBytes("value1")));
224-
table.put(new Put(Bytes.toBytes("row2")).addColumn(FAMILY, null, Bytes.toBytes("value2")));
225-
table.put(new Put(Bytes.toBytes("row3")).addColumn(FAMILY, null, Bytes.toBytes("value3")));
226-
table.put(new Put(Bytes.toBytes("row4")).addColumn(FAMILY, null, Bytes.toBytes("value4")));
227-
table.put(new Put(Bytes.toBytes("row5")).addColumn(FAMILY, null, Bytes.toBytes("value5")));
228-
table.put(new Put(Bytes.toBytes("row6")).addColumn(FAMILY, null, Bytes.toBytes("value6")));
229-
table.put(new Put(Bytes.toBytes("row7")).addColumn(FAMILY, null, Bytes.toBytes("value7")));
230-
table.put(new Put(Bytes.toBytes("row8")).addColumn(FAMILY, null, Bytes.toBytes("value8")));
231-
// Flush to HFile
232-
admin.flush(tableName);
233-
}
234-
235-
List<RegionInfo> regions = admin.getRegions(splitTableName);
236-
assertEquals(1, regions.size());
237-
tableNumFiles = regions.size();
238-
239-
// split region
240-
admin.split(splitTableName, Bytes.toBytes("row5"));
241-
regions = admin.getRegions(splitTableName);
242-
assertEquals(2, regions.size());
243-
244-
// take a snapshot
245-
admin.snapshot(splitTableSnap, splitTableName);
246-
// export snapshot and verify
247-
testExportFileSystemState(splitTableName, splitTableSnap, splitTableSnap, tableNumFiles);
248-
// delete table
249-
TEST_UTIL.deleteTable(splitTableName);
250-
}
251-
252209
@Test
253210
public void testExportFileSystemStateWithSkipTmp() throws Exception {
254211
TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true);

0 commit comments

Comments (0)