@@ -51,10 +51,12 @@
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
+import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -206,6 +208,51 @@ public void testExportFileSystemStateWithMergeRegion() throws Exception {
     TEST_UTIL.deleteTable(tableName0);
   }
 
+  @Test
+  public void testExportFileSystemStateWithSplitRegion() throws Exception {
+    // disable compaction
+    admin.compactionSwitch(false,
+      admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
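+    // Keeping compactions off ensures the reference files created by the split
+    // below still exist when the snapshot is taken.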
+    // create the table
+    TableName splitTableName = TableName.valueOf(testName.getMethodName());
+    String splitTableSnap = "snapshot-" + testName.getMethodName();
+    admin.createTable(TableDescriptorBuilder.newBuilder(splitTableName).setColumnFamilies(
+      Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build());
+
+    Path output = TEST_UTIL.getDataTestDir("output/cf");
+    TEST_UTIL.getTestFileSystem().mkdirs(output);
+    // Create and load a large hfile so the MR export job runs long enough to be observable.
+    HFileTestUtil.createHFile(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(),
+      new Path(output, "test_file"), FAMILY, Bytes.toBytes("q"), Bytes.toBytes("1"),
+      Bytes.toBytes("9"), 9999999);
+    BulkLoadHFilesTool tool = new BulkLoadHFilesTool(TEST_UTIL.getConfiguration());
+    tool.run(new String[] { output.getParent().toString(), splitTableName.getNameAsString() });
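+    // The bulk load places the single hfile into the table's only region.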
+
+    List<RegionInfo> regions = admin.getRegions(splitTableName);
+    assertEquals(1, regions.size());
+    tableNumFiles = regions.size();
+
+    // split region
+    admin.splitRegionAsync(regions.get(0).getEncodedNameAsBytes(), Bytes.toBytes("5")).get();
+    regions = admin.getRegions(splitTableName);
+    assertEquals(2, regions.size());
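+    // Each daughter region now holds a reference file pointing at the parent's
+    // hfile, so the snapshot will contain 1 hfile plus 2 reference files.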
+
+    // take a snapshot
+    admin.snapshot(splitTableSnap, splitTableName);
+    // export the snapshot and verify it
+    Configuration tmpConf = TEST_UTIL.getConfiguration();
+    // Shrink the copy buffer so the export tasks do not finish too quickly.
+    tmpConf.setInt("snapshot.export.buffer.size", 1);
+    // Cap each mapper at one file so the three files (1 hfile + 2 reference files)
+    // are copied by different mappers concurrently.
+    tmpConf.setInt("snapshot.export.default.map.group", 1);
+    testExportFileSystemState(tmpConf, splitTableName, Bytes.toBytes(splitTableSnap),
+      Bytes.toBytes(splitTableSnap), tableNumFiles, TEST_UTIL.getDefaultRootDirPath(),
+      getHdfsDestinationDir(), false, false, getBypassRegionPredicate(), true, false);
+    // delete the table
+    TEST_UTIL.deleteTable(splitTableName);
+  }
+
   @Test
   public void testExportFileSystemStateWithSkipTmp() throws Exception {
     TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true);