|
19 | 19 |
|
20 | 20 | import static org.junit.Assert.assertEquals; |
21 | 21 | import static org.junit.Assert.assertFalse; |
| 22 | +import static org.junit.Assert.assertNotEquals; |
22 | 23 | import static org.junit.Assert.assertNotNull; |
23 | 24 | import static org.junit.Assert.assertNotSame; |
24 | 25 | import static org.junit.Assert.assertNull; |
25 | 26 | import static org.junit.Assert.assertTrue; |
26 | 27 | import static org.junit.Assert.fail; |
27 | 28 |
|
28 | 29 | import java.io.IOException; |
| 30 | +import java.lang.reflect.Field; |
| 31 | +import java.util.ArrayList; |
29 | 32 | import java.util.Collection; |
30 | 33 | import java.util.List; |
31 | 34 | import java.util.Map; |
|
75 | 78 | import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; |
76 | 79 | import org.apache.hadoop.hbase.coprocessor.MasterObserver; |
77 | 80 | import org.apache.hadoop.hbase.coprocessor.ObserverContext; |
| 81 | +import org.apache.hadoop.hbase.io.Reference; |
78 | 82 | import org.apache.hadoop.hbase.master.HMaster; |
79 | 83 | import org.apache.hadoop.hbase.master.LoadBalancer; |
80 | 84 | import org.apache.hadoop.hbase.master.MasterRpcServices; |
|
86 | 90 | import org.apache.hadoop.hbase.master.assignment.RegionStates; |
87 | 91 | import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; |
88 | 92 | import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; |
| 93 | +import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; |
89 | 94 | import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; |
90 | 95 | import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; |
91 | 96 | import org.apache.hadoop.hbase.testclassification.LargeTests; |
|
110 | 115 | import org.junit.Test; |
111 | 116 | import org.junit.experimental.categories.Category; |
112 | 117 | import org.junit.rules.TestName; |
| 118 | +import org.mockito.Mockito; |
113 | 119 | import org.slf4j.Logger; |
114 | 120 | import org.slf4j.LoggerFactory; |
115 | 121 |
|
@@ -280,6 +286,79 @@ public void testSplitFailedCompactionAndSplit() throws Exception { |
280 | 286 | assertEquals(2, cluster.getRegions(tableName).size()); |
281 | 287 | } |
282 | 288 |
|
| 289 | + @Test |
| 290 | + public void testSplitCompactWithPriority() throws Exception { |
| 291 | + final TableName tableName = TableName.valueOf(name.getMethodName()); |
| 292 | + // Create the table, then get the single region for our new table. |
| 293 | + byte[] cf = Bytes.toBytes("cf"); |
| 294 | + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) |
| 295 | + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); |
| 296 | + admin.createTable(htd); |
| 297 | + |
| 298 | + assertNotEquals("Unable to retrieve regions of the table", -1, |
| 299 | + TESTING_UTIL.waitFor(10000, () -> cluster.getRegions(tableName).size() == 1)); |
| 300 | + |
| 301 | + HRegion region = cluster.getRegions(tableName).get(0); |
| 302 | + HStore store = region.getStore(cf); |
| 303 | + int regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName()); |
| 304 | + HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); |
| 305 | + |
| 306 | + Table table = TESTING_UTIL.getConnection().getTable(tableName); |
| 307 | + // insert data |
| 308 | + insertData(tableName, admin, table); |
| 309 | + insertData(tableName, admin, table, 20); |
| 310 | + insertData(tableName, admin, table, 40); |
| 311 | + |
| 312 | + // Compaction Request |
| 313 | + store.triggerMajorCompaction(); |
| 314 | + Optional<CompactionContext> compactionContext = store.requestCompaction(); |
| 315 | + assertTrue(compactionContext.isPresent()); |
| 316 | + assertFalse(compactionContext.get().getRequest().isAfterSplit()); |
| 317 | + assertEquals(compactionContext.get().getRequest().getPriority(), 13); |
| 318 | + |
| 319 | + // Split |
| 320 | + long procId = |
| 321 | + cluster.getMaster().splitRegion(region.getRegionInfo(), Bytes.toBytes("row4"), 0, 0); |
| 322 | + |
| 323 | + // wait for the split to complete or get interrupted. If the split completes successfully, |
| 324 | + // the procedure will finish; if the split fails, the procedure will throw an exception. |
| 325 | + ProcedureTestingUtility.waitProcedure(cluster.getMaster().getMasterProcedureExecutor(), |
| 326 | + procId); |
| 327 | + |
| 328 | + assertEquals(2, cluster.getRegions(tableName).size()); |
| 329 | + // we have 2 daughter regions |
| 330 | + HRegion hRegion1 = cluster.getRegions(tableName).get(0); |
| 331 | + HRegion hRegion2 = cluster.getRegions(tableName).get(1); |
| 332 | + HStore hStore1 = hRegion1.getStore(cf); |
| 333 | + HStore hStore2 = hRegion2.getStore(cf); |
| 334 | + |
| 335 | + // For hStore1 and hStore2, set a mock reference on one of their store files |
| 336 | + StoreFileInfo storeFileInfo1 = new ArrayList<>(hStore1.getStorefiles()).get(0).getFileInfo(); |
| 337 | + StoreFileInfo storeFileInfo2 = new ArrayList<>(hStore2.getStorefiles()).get(0).getFileInfo(); |
| 338 | + Field field = StoreFileInfo.class.getDeclaredField("reference"); |
| 339 | + field.setAccessible(true); |
| 340 | + field.set(storeFileInfo1, Mockito.mock(Reference.class)); |
| 341 | + field.set(storeFileInfo2, Mockito.mock(Reference.class)); |
| 342 | + hStore1.triggerMajorCompaction(); |
| 343 | + hStore2.triggerMajorCompaction(); |
| 344 | + |
| 345 | + compactionContext = hStore1.requestCompaction(); |
| 346 | + assertTrue(compactionContext.isPresent()); |
| 347 | + // since we set a mock reference on one of the store files, we get isAfterSplit=true and |
| 348 | + // the highest priority for hStore1's compactionContext |
| 349 | + assertTrue(compactionContext.get().getRequest().isAfterSplit()); |
| 350 | + assertEquals(compactionContext.get().getRequest().getPriority(), Integer.MIN_VALUE + 1000); |
| 351 | + |
| 352 | + compactionContext = |
| 353 | + hStore2.requestCompaction(Integer.MIN_VALUE + 10, CompactionLifeCycleTracker.DUMMY, null); |
| 354 | + assertTrue(compactionContext.isPresent()); |
| 355 | + // the compaction request carries a higher priority than the default priority of a daughter |
| 356 | + // region compaction (Integer.MIN_VALUE + 1000), hence we expect the requested priority to |
| 357 | + // be accepted. |
| 358 | + assertTrue(compactionContext.get().getRequest().isAfterSplit()); |
| 359 | + assertEquals(compactionContext.get().getRequest().getPriority(), Integer.MIN_VALUE + 10); |
| 360 | + } |
| 361 | + |
283 | 362 | public static class FailingSplitMasterObserver implements MasterCoprocessor, MasterObserver { |
284 | 363 | volatile CountDownLatch latch; |
285 | 364 |
|
@@ -641,18 +720,21 @@ public void testSplitWithRegionReplicas() throws Exception { |
641 | 720 | } |
642 | 721 | } |
643 | 722 |
|
644 | | - private void insertData(final TableName tableName, Admin admin, Table t) throws IOException, |
645 | | - InterruptedException { |
646 | | - Put p = new Put(Bytes.toBytes("row1")); |
| 723 | + private void insertData(final TableName tableName, Admin admin, Table t) throws IOException { |
| 724 | + insertData(tableName, admin, t, 1); |
| 725 | + } |
| 726 | + |
| 727 | + private void insertData(TableName tableName, Admin admin, Table t, int i) throws IOException { |
| 728 | + Put p = new Put(Bytes.toBytes("row" + i)); |
647 | 729 | p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("1")); |
648 | 730 | t.put(p); |
649 | | - p = new Put(Bytes.toBytes("row2")); |
| 731 | + p = new Put(Bytes.toBytes("row" + (i + 1))); |
650 | 732 | p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("2")); |
651 | 733 | t.put(p); |
652 | | - p = new Put(Bytes.toBytes("row3")); |
| 734 | + p = new Put(Bytes.toBytes("row" + (i + 2))); |
653 | 735 | p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("3")); |
654 | 736 | t.put(p); |
655 | | - p = new Put(Bytes.toBytes("row4")); |
| 737 | + p = new Put(Bytes.toBytes("row" + (i + 3))); |
656 | 738 | p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("4")); |
657 | 739 | t.put(p); |
658 | 740 | admin.flush(tableName); |
|
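For context on the priority values asserted in testSplitCompactWithPriority, the following is a minimal, self-contained Java sketch of the priority scheme the test exercises. It is not the actual HBase implementation; the class and constant names, the default blocking-file count of 16, and the min-wins merge of requested and store priorities are assumptions made for illustration. Lower values mean more urgent compactions: an ordinary store rates itself at blockingFileCount minus its store file count (16 - 3 = 13 above), a daughter store that still holds reference files after a split rates itself at Integer.MIN_VALUE + 1000, and an explicitly requested priority is kept when it is more urgent than the store's own.

// Sketch only: lower numeric values mean more urgent compactions. The names and
// defaults below are illustrative assumptions, not real HBase classes or constants.
public final class CompactionPrioritySketch {

  /** Priority assumed for stores that still carry reference files after a split. */
  static final int AFTER_SPLIT_PRIORITY = Integer.MIN_VALUE + 1000;

  /** Sentinel meaning "no explicit priority was requested". */
  static final int NO_PRIORITY = Integer.MIN_VALUE;

  private final int blockingFileCount; // analogous to hbase.hstore.blockingStoreFiles (assumed 16)
  private final int storeFileCount;    // current number of store files
  private final boolean hasReferences; // true for a daughter region right after a split

  CompactionPrioritySketch(int blockingFileCount, int storeFileCount, boolean hasReferences) {
    this.blockingFileCount = blockingFileCount;
    this.storeFileCount = storeFileCount;
    this.hasReferences = hasReferences;
  }

  /** Priority the store itself would assign to a compaction. */
  int storePriority() {
    // Post-split stores with reference files should compact as soon as possible.
    return hasReferences ? AFTER_SPLIT_PRIORITY : blockingFileCount - storeFileCount;
  }

  /** Effective priority of a request: the more urgent of the caller's and the store's. */
  int requestPriority(int requestedPriority) {
    return requestedPriority == NO_PRIORITY
      ? storePriority()
      : Math.min(requestedPriority, storePriority());
  }

  public static void main(String[] args) {
    // Parent store, three flushed files, no references: 16 - 3 = 13, as asserted in the test.
    System.out.println(new CompactionPrioritySketch(16, 3, false).storePriority());
    // Daughter store still holding a reference file: Integer.MIN_VALUE + 1000.
    System.out.println(new CompactionPrioritySketch(16, 1, true).storePriority());
    // Explicit request at Integer.MIN_VALUE + 10 is more urgent than the store's own, so it is kept.
    System.out.println(new CompactionPrioritySketch(16, 1, true).requestPriority(Integer.MIN_VALUE + 10));
  }
}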