Commit 5c69f3f

HDFS-16968. Recover two replicas when 2-replication write pipeline fails

1 parent 26a5f38 commit 5c69f3f

2 files changed: 55 additions, 3 deletions

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
(1 addition, 1 deletion)

@@ -42,7 +42,7 @@ public class ReplaceDatanodeOnFailure {
       public boolean satisfy(final short replication,
           final DatanodeInfo[] existings, final int n, final boolean isAppend,
           final boolean isHflushed) {
-        return replication >= 3 &&
+        return replication >= 2 &&
            (n <= (replication / 2) || isAppend || isHflushed);
       }
     };
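
The one-line change above widens the DEFAULT replace-datanode-on-failure condition from replication >= 3 to replication >= 2, so a 2-replication pipeline that loses a datanode now qualifies for a replacement. Below is a minimal, self-contained sketch (not part of the patch; the class and method names are illustrative only) that evaluates the predicate before and after the change for a 2-replication file with one datanode left in the pipeline:

// Sketch only: reproduces the DEFAULT policy predicate from the diff above.
// satisfyBefore/satisfyAfter are illustrative names, not Hadoop APIs.
public class ReplaceDatanodePredicateSketch {

  // Old condition: never satisfied for files with replication < 3.
  static boolean satisfyBefore(short replication, int n,
      boolean isAppend, boolean isHflushed) {
    return replication >= 3 &&
        (n <= (replication / 2) || isAppend || isHflushed);
  }

  // New condition: also satisfied for 2-replication files.
  static boolean satisfyAfter(short replication, int n,
      boolean isAppend, boolean isHflushed) {
    return replication >= 2 &&
        (n <= (replication / 2) || isAppend || isHflushed);
  }

  public static void main(String[] args) {
    short replication = 2; // 2-replication file
    int n = 1;             // one datanode left in the write pipeline
    // Old predicate short-circuits to false at replication >= 3.
    System.out.println("before: " + satisfyBefore(replication, n, false, false)); // false
    // New predicate is satisfied because n <= replication / 2 (1 <= 1).
    System.out.println("after:  " + satisfyAfter(replication, n, false, false));  // true
  }
}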

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
(54 additions, 2 deletions)

@@ -86,12 +86,12 @@ public void testDefaultPolicy() throws Exception {
             final int half = replication/2;
             final boolean enoughReplica = replication <= nExistings;
             final boolean noReplica = nExistings == 0;
-            final boolean replicationL3 = replication < 3;
+            final boolean replicationL2 = replication < 2;
             final boolean existingsLEhalf = nExistings <= half;
             final boolean isAH = isAppend[i] || isHflushed[j];

             final boolean expected;
-            if (enoughReplica || noReplica || replicationL3) {
+            if (enoughReplica || noReplica || replicationL2) {
               expected = false;
             } else {
               expected = isAH || existingsLEhalf;
@@ -114,6 +114,50 @@ public void testDefaultPolicy() throws Exception {
     }
   }

+  /** Test replace datanode on failure with 2-replication file. */
+  @Test
+  public void testReplaceDatanodeOnFailureWith2Replications() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    // do not consider load factor when selecting a data node
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        false);
+    //set policy to DEFAULT
+    ReplaceDatanodeOnFailure.write(Policy.DEFAULT, false, conf);
+
+    final int repNum = 2;
+    final String[] racks = new String[repNum];
+    Arrays.fill(racks, RACK0);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
+        ).racks(racks).numDataNodes(repNum).build();
+
+    try {
+      cluster.waitActive();
+      final DistributedFileSystem fs = cluster.getFileSystem();
+      final Path dir = new Path(DIR);
+      final SlowWriter[] slowwriter = new SlowWriter[1];
+      slowwriter[0] = new SlowWriter(fs, new Path(dir, "file-rep2"), 200L, (short) 2);
+      slowwriter[0].start();
+
+      //start new datanodes
+      cluster.startDataNodes(conf, 1, true, null, new String[]{RACK1});
+      cluster.waitActive();
+      // wait for first block reports for up to 10 seconds
+      cluster.waitFirstBRCompleted(0, 10000);
+
+      //stop an old datanode
+      MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(
+          AppendTestUtil.nextInt(repNum));
+
+      sleepSeconds(3);
+      Assert.assertEquals(repNum, slowwriter[0].out.getCurrentBlockReplication());
+
+      slowwriter[0].interruptRunning();
+      slowwriter[0].joinAndClose();
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+
   /** Test replace datanode on failure. */
   @Test
   public void testReplaceDatanodeOnFailure() throws Exception {
@@ -236,6 +280,14 @@ static class SlowWriter extends Thread {
       this.sleepms = sleepms;
     }

+    SlowWriter(DistributedFileSystem fs, Path filepath, final long sleepms,
+        short replication) throws IOException {
+      super(SlowWriter.class.getSimpleName() + ":" + filepath);
+      this.filepath = filepath;
+      this.out = (HdfsDataOutputStream)fs.create(filepath, replication);
+      this.sleepms = sleepms;
+    }
+
     @Override
     public void run() {
       int i = 0;
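
The new test drives the client policy through the ReplaceDatanodeOnFailure.write(Policy.DEFAULT, false, conf) helper. For reference, here is a hedged sketch of the equivalent client-side settings, assuming the standard dfs.client.block.write.replace-datanode-on-failure.* configuration keys documented in hdfs-default.xml (the key names are not part of this patch):

// Sketch only: sets the replace-datanode-on-failure client policy by key,
// which is what the test helper writes into the Configuration.
import org.apache.hadoop.conf.Configuration;

public class ReplaceDatanodePolicyConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Enable the replace-datanode-on-failure feature.
    conf.setBoolean(
        "dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT policy: with this commit it also applies to 2-replication
    // files once the pipeline shrinks to replication/2 datanodes or fewer.
    conf.set(
        "dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // best-effort = false: fail the write if no replacement datanode is found.
    conf.setBoolean(
        "dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
    System.out.println("replace-datanode-on-failure policy configured");
  }
}

The test then exercises exactly that path: it writes a 2-replication file, adds a fresh datanode, stops one of the original two, and asserts via getCurrentBlockReplication() that the pipeline is recovered back to two replicas.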
