|
19 | 19 |
|
20 | 20 | import static org.junit.Assert.assertEquals; |
21 | 21 | import static org.junit.Assert.assertFalse; |
| 22 | +import static org.junit.Assert.assertNotEquals; |
22 | 23 | import static org.junit.Assert.assertNotNull; |
23 | 24 | import static org.junit.Assert.assertNotSame; |
24 | 25 | import static org.junit.Assert.assertNull; |
25 | 26 | import static org.junit.Assert.assertTrue; |
26 | 27 | import static org.junit.Assert.fail; |
27 | 28 |
|
28 | 29 | import java.io.IOException; |
| 30 | +import java.lang.reflect.Field; |
| 31 | +import java.util.ArrayList; |
29 | 32 | import java.util.Collection; |
30 | 33 | import java.util.List; |
31 | 34 | import java.util.Map; |
|
73 | 76 | import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; |
74 | 77 | import org.apache.hadoop.hbase.coprocessor.MasterObserver; |
75 | 78 | import org.apache.hadoop.hbase.coprocessor.ObserverContext; |
| 79 | +import org.apache.hadoop.hbase.io.Reference; |
76 | 80 | import org.apache.hadoop.hbase.master.HMaster; |
77 | 81 | import org.apache.hadoop.hbase.master.LoadBalancer; |
78 | 82 | import org.apache.hadoop.hbase.master.MasterRpcServices; |
|
84 | 88 | import org.apache.hadoop.hbase.master.assignment.RegionStates; |
85 | 89 | import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; |
86 | 90 | import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; |
| 91 | +import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; |
87 | 92 | import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; |
88 | 93 | import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; |
89 | 94 | import org.apache.hadoop.hbase.testclassification.LargeTests; |
|
106 | 111 | import org.junit.Test; |
107 | 112 | import org.junit.experimental.categories.Category; |
108 | 113 | import org.junit.rules.TestName; |
| 114 | +import org.mockito.Mockito; |
109 | 115 | import org.slf4j.Logger; |
110 | 116 | import org.slf4j.LoggerFactory; |
111 | 117 |
|
@@ -275,6 +281,79 @@ public void testSplitFailedCompactionAndSplit() throws Exception { |
275 | 281 | assertEquals(2, cluster.getRegions(tableName).size()); |
276 | 282 | } |
277 | 283 |
|
| 284 | + @Test |
| 285 | + public void testSplitCompactWithPriority() throws Exception { |
| 286 | + final TableName tableName = TableName.valueOf(name.getMethodName()); |
| 287 | + // Create table then get the single region for our new table. |
| 288 | + byte[] cf = Bytes.toBytes("cf"); |
| 289 | + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) |
| 290 | + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf)).build(); |
| 291 | + admin.createTable(htd); |
| 292 | + |
| 293 | + assertNotEquals("Unable to retrieve regions of the table", -1, |
| 294 | + TESTING_UTIL.waitFor(10000, () -> cluster.getRegions(tableName).size() == 1)); |
| 295 | + |
| 296 | + HRegion region = cluster.getRegions(tableName).get(0); |
| 297 | + HStore store = region.getStore(cf); |
| 298 | + int regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName()); |
| 299 | + HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); |
| 300 | + |
| 301 | + Table table = TESTING_UTIL.getConnection().getTable(tableName); |
| 302 | + // insert data |
| 303 | + insertData(tableName, admin, table); |
| 304 | + insertData(tableName, admin, table, 20); |
| 305 | + insertData(tableName, admin, table, 40); |
| 306 | + |
| 307 | + // Compaction Request |
| 308 | + store.triggerMajorCompaction(); |
| 309 | + Optional<CompactionContext> compactionContext = store.requestCompaction(); |
| 310 | + assertTrue(compactionContext.isPresent()); |
| 311 | + assertFalse(compactionContext.get().getRequest().isAfterSplit()); |
| 312 | + assertEquals(13, compactionContext.get().getRequest().getPriority()); |
| 313 | + |
| 314 | + // Split |
| 315 | + long procId = |
| 316 | + cluster.getMaster().splitRegion(region.getRegionInfo(), Bytes.toBytes("row4"), 0, 0); |
| 317 | + |
| 318 | + // wait for the split to complete or get interrupted. If the split completes successfully, |
| 319 | + // the procedure will return true; if the split fails, the procedure will throw an exception. |
| 320 | + ProcedureTestingUtility.waitProcedure(cluster.getMaster().getMasterProcedureExecutor(), |
| 321 | + procId); |
| 322 | + |
| 323 | + assertEquals(2, cluster.getRegions(tableName).size()); |
| 324 | + // we have 2 daughter regions |
| 325 | + HRegion hRegion1 = cluster.getRegions(tableName).get(0); |
| 326 | + HRegion hRegion2 = cluster.getRegions(tableName).get(1); |
| 327 | + HStore hStore1 = hRegion1.getStore(cf); |
| 328 | + HStore hStore2 = hRegion2.getStore(cf); |
| 329 | + |
| 330 | + // For hStore1 and hStore2, set a mock reference on one of the storeFiles |
| 331 | + StoreFileInfo storeFileInfo1 = new ArrayList<>(hStore1.getStorefiles()).get(0).getFileInfo(); |
| 332 | + StoreFileInfo storeFileInfo2 = new ArrayList<>(hStore2.getStorefiles()).get(0).getFileInfo(); |
| 333 | + Field field = StoreFileInfo.class.getDeclaredField("reference"); |
| 334 | + field.setAccessible(true); |
| 335 | + field.set(storeFileInfo1, Mockito.mock(Reference.class)); |
| 336 | + field.set(storeFileInfo2, Mockito.mock(Reference.class)); |
| 337 | + hStore1.triggerMajorCompaction(); |
| 338 | + hStore2.triggerMajorCompaction(); |
| 339 | + |
| 340 | + compactionContext = hStore1.requestCompaction(); |
| 341 | + assertTrue(compactionContext.isPresent()); |
| 342 | + // since we set a mock reference on one of the storeFiles, we get isAfterSplit=true and the |
| 343 | + // highest priority for hStore1's compactionContext |
| 344 | + assertTrue(compactionContext.get().getRequest().isAfterSplit()); |
| 345 | + assertEquals(Integer.MIN_VALUE + 1000, compactionContext.get().getRequest().getPriority()); |
| 346 | + |
| 347 | + compactionContext = |
| 348 | + hStore2.requestCompaction(Integer.MIN_VALUE + 10, CompactionLifeCycleTracker.DUMMY, null); |
| 349 | + assertTrue(compactionContext.isPresent()); |
| 350 | + // the compaction request carries a higher priority than the default priority of a daughter |
| 351 | + // region compaction (Integer.MIN_VALUE + 1000), hence we expect the requested priority to |
| 352 | + // be accepted. |
| 353 | + assertTrue(compactionContext.get().getRequest().isAfterSplit()); |
| 354 | + assertEquals(Integer.MIN_VALUE + 10, compactionContext.get().getRequest().getPriority()); |
| 355 | + } |
| 356 | + |
278 | 357 | public static class FailingSplitMasterObserver implements MasterCoprocessor, MasterObserver { |
279 | 358 | volatile CountDownLatch latch; |
280 | 359 |
|
@@ -634,18 +713,21 @@ public void testSplitWithRegionReplicas() throws Exception { |
634 | 713 | } |
635 | 714 | } |
636 | 715 |
|
637 | | - private void insertData(final TableName tableName, Admin admin, Table t) throws IOException, |
638 | | - InterruptedException { |
639 | | - Put p = new Put(Bytes.toBytes("row1")); |
| 716 | + private void insertData(final TableName tableName, Admin admin, Table t) throws IOException { |
| 717 | + insertData(tableName, admin, t, 1); |
| 718 | + } |
| 719 | + |
| 720 | + private void insertData(TableName tableName, Admin admin, Table t, int i) throws IOException { |
| 721 | + Put p = new Put(Bytes.toBytes("row" + i)); |
640 | 722 | p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("1")); |
641 | 723 | t.put(p); |
642 | | - p = new Put(Bytes.toBytes("row2")); |
| 724 | + p = new Put(Bytes.toBytes("row" + (i + 1))); |
643 | 725 | p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("2")); |
644 | 726 | t.put(p); |
645 | | - p = new Put(Bytes.toBytes("row3")); |
| 727 | + p = new Put(Bytes.toBytes("row" + (i + 2))); |
646 | 728 | p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("3")); |
647 | 729 | t.put(p); |
648 | | - p = new Put(Bytes.toBytes("row4")); |
| 730 | + p = new Put(Bytes.toBytes("row" + (i + 3))); |
649 | 731 | p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("4")); |
650 | 732 | t.put(p); |
651 | 733 | admin.flush(tableName); |
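
Note on the two priority values asserted in testSplitCompactWithPriority above. The sketch below is a minimal illustration of the rule being exercised, not the actual HStore implementation: the class name, the constant name, and the default blocking-store-file count of 16 are assumptions. Under those assumptions, a store's compaction priority is the blocking-file threshold minus the current store-file count, unless the store still holds reference files left over from a split, in which case it gets a near-highest priority so the daughter regions are compacted first.

// Minimal sketch (assumed names and defaults) of why the test expects 13 for the
// parent store and Integer.MIN_VALUE + 1000 for the freshly split daughter stores.
final class CompactPrioritySketch {
  // Assumed stand-in for the split-region compaction priority asserted above.
  static final int SPLIT_REGION_COMPACTION_PRIORITY = Integer.MIN_VALUE + 1000;

  static int compactPriority(int storefileCount, boolean hasReferences, int blockingStoreFiles) {
    if (hasReferences) {
      // A daughter region still carrying reference files jumps the compaction queue.
      return SPLIT_REGION_COMPACTION_PRIORITY;
    }
    // Otherwise the priority shrinks (becomes more urgent) as the store approaches
    // the blocking-file threshold.
    return blockingStoreFiles - storefileCount;
  }

  public static void main(String[] args) {
    // Parent store: 3 flushed files, assumed default threshold of 16 -> prints 13.
    System.out.println(compactPriority(3, false, 16));
    // Daughter store right after the split, still holding reference files.
    System.out.println(compactPriority(1, true, 16));
  }
}

A lower number means a more urgent request, so the explicitly requested Integer.MIN_VALUE + 10 for hStore2 is more urgent than the daughter-region default, which is why the test expects that requested value to be kept.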
|