Skip to content

Commit

Permalink
HBASE-21736 Remove the server from online servers before scheduling S…
Browse files Browse the repository at this point in the history
…CP for it in hbck

Signed-off-by: Peter Somogyi <psomogyi@apache.org>
  • Loading branch information
Apache9 committed Mar 9, 2019
1 parent 92e0d2c commit 3ff2847
Show file tree
Hide file tree
Showing 2 changed files with 44 additions and 43 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -2477,9 +2477,10 @@ public MasterProtos.ScheduleServerCrashProcedureResponse scheduleServerCrashProc
for (HBaseProtos.ServerName serverName : serverNames) {
ServerName server = ProtobufUtil.toServerName(serverName);
if (shouldSubmitSCP(server)) {
master.getServerManager().moveFromOnlineToDeadServers(server);
ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
pids.add(procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(),
server, true, containMetaWals(server))));
server, true, containMetaWals(server))));
} else {
pids.add(-1L);
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/*
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
Expand All @@ -15,12 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.client;

import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
Expand All @@ -39,8 +39,8 @@
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
Expand All @@ -59,16 +59,14 @@
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

/**
* Class to test HBaseHbck.
* Spins up the minicluster once at test start and then takes it down afterward.
* Add any testing of HBaseHbck functionality here.
* Class to test HBaseHbck. Spins up the minicluster once at test start and then takes it down
* afterward. Add any testing of HBaseHbck functionality here.
*/
@RunWith(Parameterized.class)
@Category({LargeTests.class, ClientTests.class})
@Category({ LargeTests.class, ClientTests.class })
public class TestHbck {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHbck.class);
public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHbck.class);

private static final Logger LOG = LoggerFactory.getLogger(TestHbck.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
Expand Down Expand Up @@ -112,15 +110,20 @@ public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}

// Runs before each test: block until the mini cluster has at least 3 live
// region servers, restarting servers if needed. NOTE(review): presumably this
// guards against earlier tests in the parameterized run having crashed a
// server (e.g. via a scheduled ServerCrashProcedure) — confirm against
// testScheduleSCP, which targets a live region server.
@Before
public void setUp() throws IOException {
TEST_UTIL.ensureSomeRegionServersAvailable(3);
}

public static class SuspendProcedure extends
ProcedureTestingUtility.NoopProcedure<MasterProcedureEnv> implements TableProcedureInterface {
public SuspendProcedure() {
super();
}

@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
protected Procedure[] execute(final MasterProcedureEnv env)
throws ProcedureSuspendedException {
protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
// Always suspend the procedure
throw new ProcedureSuspendedException();
}
Expand All @@ -143,8 +146,8 @@ public void testBypassProcedure() throws Exception {
long procId = procExec.submitProcedure(proc);
Thread.sleep(500);

//bypass the procedure
List<Long> pids = Arrays.<Long>asList(procId);
// bypass the procedure
List<Long> pids = Arrays.<Long> asList(procId);
List<Boolean> results = getHbck().bypassProcedure(pids, 30000, false, false);
assertTrue("Failed to by pass procedure!", results.get(0));
TEST_UTIL.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
Expand All @@ -159,43 +162,43 @@ public void testSetTableStateInMeta() throws Exception {
// Method {@link Hbck#setTableStateInMeta()} returns previous state, which in this case
// will be DISABLED
TableState prevState =
hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED));
hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED));
assertTrue("Incorrect previous state! expeced=DISABLED, found=" + prevState.getState(),
prevState.isDisabled());
prevState.isDisabled());
}

@Test
public void testAssigns() throws Exception {
Hbck hbck = getHbck();
try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
for (RegionInfo ri: regions) {
RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
getRegionStates().getRegionState(ri.getEncodedName());
for (RegionInfo ri : regions) {
RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
.getRegionStates().getRegionState(ri.getEncodedName());
LOG.info("RS: {}", rs.toString());
}
List<Long> pids = hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).
collect(Collectors.toList()));
List<Long> pids =
hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
waitOnPids(pids);
for (RegionInfo ri: regions) {
RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
getRegionStates().getRegionState(ri.getEncodedName());
for (RegionInfo ri : regions) {
RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
.getRegionStates().getRegionState(ri.getEncodedName());
LOG.info("RS: {}", rs.toString());
assertTrue(rs.toString(), rs.isClosed());
}
pids = hbck.assigns(regions.stream().map(r -> r.getEncodedName()).
collect(Collectors.toList()));
pids =
hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
waitOnPids(pids);
for (RegionInfo ri: regions) {
RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
getRegionStates().getRegionState(ri.getEncodedName());
for (RegionInfo ri : regions) {
RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
.getRegionStates().getRegionState(ri.getEncodedName());
LOG.info("RS: {}", rs.toString());
assertTrue(rs.toString(), rs.isOpened());
}
// What happens if crappy region list passed?
pids = hbck.assigns(Arrays.stream(new String [] {"a", "some rubbish name"}).
collect(Collectors.toList()));
for (long pid: pids) {
pids = hbck.assigns(
Arrays.stream(new String[] { "a", "some rubbish name" }).collect(Collectors.toList()));
for (long pid : pids) {
assertEquals(org.apache.hadoop.hbase.procedure2.Procedure.NO_PROC_ID, pid);
}
}
Expand All @@ -209,21 +212,18 @@ public void testScheduleSCP() throws Exception {
ServerName serverName = testRs.getServerName();
Hbck hbck = getHbck();
List<Long> pids =
hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
assertTrue(pids.get(0) > 0);
LOG.info("pid is {}", pids.get(0));

pids = hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
assertTrue(pids.get(0) == -1);
LOG.info("pid is {}", pids.get(0));
List<Long> newPids =
hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
assertTrue(newPids.get(0) < 0);
LOG.info("pid is {}", newPids.get(0));
waitOnPids(pids);
}

private void waitOnPids(List<Long> pids) {
for (Long pid: pids) {
while (!TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor().
isFinished(pid)) {
Threads.sleep(100);
}
}
TEST_UTIL.waitFor(60000, () -> pids.stream().allMatch(procExec::isFinished));
}
}

0 comments on commit 3ff2847

Please sign in to comment.