  */
 package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 ...
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.ClassRule;
@@ -306,4 +315,143 @@ private static void verifyRow(Result result) throws IOException {
     }
   }
 
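+  /**
+   * Take a snapshot of a merged region, clean up the merge parents, and verify
+   * that scanning the snapshot afterwards does not fail with FileNotFoundException.
+   */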
+  @Test
+  public void testMergeRegion() throws Exception {
+    setupCluster();
+    TableName tableName = TableName.valueOf("testMergeRegion");
+    String snapshotName = tableName.getNameAsString() + "_snapshot";
+    Configuration conf = UTIL.getConfiguration();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    long timeout = 20000; // 20s
+    try (Admin admin = UTIL.getAdmin()) {
+      List<String> serverList = admin.getRegionServers().stream().map(sn -> sn.getServerName())
+        .collect(Collectors.toList());
+      // create a table with 3 regions, split at bbb and yyy
+      Table table = UTIL.createTable(tableName, FAMILIES, 1, bbb, yyy, 3);
+      List<RegionInfo> regions = admin.getRegions(tableName);
+      Assert.assertEquals(3, regions.size());
+      RegionInfo region0 = regions.get(0);
+      RegionInfo region1 = regions.get(1);
+      RegionInfo region2 = regions.get(2);
+      // put some data in the table and flush it
+      UTIL.loadTable(table, FAMILIES);
+      admin.flush(tableName);
+      // wait until the flush is finished: each family dir should hold exactly one store file
+      UTIL.waitFor(timeout, () -> {
+        try {
+          Path tableDir = FSUtils.getTableDir(rootDir, tableName);
+          for (RegionInfo region : regions) {
+            Path regionDir = new Path(tableDir, region.getEncodedName());
+            for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
+              if (fs.listStatus(familyDir).length != 1) {
+                return false;
+              }
+            }
+          }
+          return true;
+        } catch (IOException e) {
+          LOG.warn("Failed to check if the flush is finished", e);
+          return false;
+        }
+      });
+      // merge two of the three regions
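+      // switch compactions off so the merged region keeps the reference files
+      // to its parents until we explicitly major-compact it below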
+      admin.compactionSwitch(false, serverList);
+      admin.mergeRegionsAsync(region0.getEncodedNameAsBytes(), region1.getEncodedNameAsBytes(),
+        true);
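+      // the merge is asynchronous, so poll until only two regions remain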
+      UTIL.waitFor(timeout, () -> admin.getRegions(tableName).size() == 2);
+      List<RegionInfo> mergedRegions = admin.getRegions(tableName);
+      RegionInfo mergedRegion =
+        mergedRegions.get(0).getEncodedName().equals(region2.getEncodedName())
+          ? mergedRegions.get(1)
+          : mergedRegions.get(0);
+      // take a snapshot while the merged region still references its parents' files
+      admin.snapshot(snapshotName, tableName);
+      Assert.assertEquals(1, admin.listSnapshots().size());
+      // re-enable compactions and major compact the merged region to rewrite the
+      // referenced parent data into the merged region's own store files
+      admin.compactionSwitch(true, serverList);
+      admin.majorCompactRegion(mergedRegion.getRegionName());
+      // wait until the merged region has no references to its parents
+      UTIL.waitFor(timeout, () -> {
+        try {
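+          // make each region server discharge compacted store files, so the
+          // reference files created by the merge can be archived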
+          for (RegionServerThread regionServerThread : UTIL.getMiniHBaseCluster()
+            .getRegionServerThreads()) {
+            HRegionServer regionServer = regionServerThread.getRegionServer();
+            for (HRegion subRegion : regionServer.getRegions(tableName)) {
+              if (subRegion.getRegionInfo().getEncodedName()
+                .equals(mergedRegion.getEncodedName())) {
+                regionServer.getCompactedHFilesDischarger().chore();
+              }
+            }
+          }
+          Path tableDir = FSUtils.getTableDir(rootDir, tableName);
+          HRegionFileSystem regionFs = HRegionFileSystem
+            .openRegionFromFileSystem(UTIL.getConfiguration(), fs, tableDir, mergedRegion, true);
+          return !regionFs.hasReferences(admin.getDescriptor(tableName));
+        } catch (IOException e) {
+          LOG.warn("Failed to check if the merged region still has references", e);
+          return false;
+        }
+      });
+      // run the catalog janitor to clean up the merge, then wait until the
+      // parent regions are moved out of the table dir (i.e. archived)
+      UTIL.getMiniHBaseCluster().getMaster().getCatalogJanitor().choreForTesting();
+      UTIL.waitFor(timeout, () -> {
+        try {
+          Path tableDir = FSUtils.getTableDir(rootDir, tableName);
+          for (FileStatus fileStatus : fs.listStatus(tableDir)) {
+            String name = fileStatus.getPath().getName();
+            if (name.equals(region0.getEncodedName()) || name.equals(region1.getEncodedName())) {
+              return false;
+            }
+          }
+          return true;
+        } catch (IOException e) {
+          LOG.warn("Failed to check if the parent regions are archived", e);
+          return false;
+        }
+      });
+      // age every archived file past the cleaner TTL, then run the HFile cleaner
+      long time = System.currentTimeMillis() - TimeToLiveHFileCleaner.DEFAULT_TTL * 1000;
+      traverseAndSetFileTime(HFileArchiveUtil.getArchivePath(conf), time);
+      UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().runCleaner();
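+      // files still referenced by the snapshot must survive the cleaner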
+      // scan the snapshot; this must not fail with FileNotFoundException
+      try (TableSnapshotScanner scanner = new TableSnapshotScanner(conf,
+          UTIL.getDataTestDirOnTestFS(snapshotName), snapshotName, new Scan(bbb, yyy))) {
+        verifyScanner(scanner, bbb, yyy);
+      }
+    } catch (Exception e) {
+      LOG.error("scan snapshot error", e);
+      Assert.fail("Should not throw FileNotFoundException, but got: " + e);
+    } finally {
+      tearDownCluster();
+    }
+  }
+
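+  /** Recursively set the modification time of {@code path} and everything under it. */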
+  private void traverseAndSetFileTime(Path path, long time) throws IOException {
+    fs.setTimes(path, time, -1);
+    if (fs.isDirectory(path)) {
+      List<FileStatus> allPaths = Arrays.asList(fs.listStatus(path));
+      List<FileStatus> subDirs =
+        allPaths.stream().filter(FileStatus::isDirectory).collect(Collectors.toList());
+      List<FileStatus> files =
+        allPaths.stream().filter(FileStatus::isFile).collect(Collectors.toList());
+      for (FileStatus subDir : subDirs) {
+        traverseAndSetFileTime(subDir.getPath(), time);
+      }
+      for (FileStatus file : files) {
+        fs.setTimes(file.getPath(), time, -1);
+      }
+    }
+  }
 }