Commit 474552f

HDFS-530. Refactor TestFileAppend* to remove code duplication. Contributed by Konstantin Boudnik
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@801736 13f79535-47bb-0310-9956-ffa450edef68
1 parent 5c6be25 commit 474552f

5 files changed, +130 -141 lines changed


CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -86,6 +86,9 @@ Trunk (unreleased changes)
     HDFS-529. Use BlockInfo instead of Block to avoid redundant block searches
     in BlockManager. (shv)
 
+    HDFS-530. Refactor TestFileAppend* to remove code duplication.
+    (Konstantin Boudnik via szetszwo)
+
   BUG FIXES
 
     HDFS-76. Better error message to users when commands fail because of

src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java

Lines changed: 52 additions & 4 deletions
@@ -23,13 +23,12 @@
 import java.util.Random;
 
 import junit.framework.TestCase;
+import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -61,7 +60,11 @@ protected Random initialValue() {
       return r;
     }
   };
-
+  static final int BLOCK_SIZE = 1024;
+  static final int NUM_BLOCKS = 10;
+  static final int FILE_SIZE = NUM_BLOCKS * BLOCK_SIZE + 1;
+  static long seed = -1;
+
   static int nextInt() {return RANDOM.get().nextInt();}
   static int nextInt(int n) {return RANDOM.get().nextInt(n);}
   static int nextLong() {return RANDOM.get().nextInt();}
@@ -116,4 +119,49 @@ static void check(FileSystem fs, Path p, long length) throws IOException {
       throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
     }
   }
+
+  /**
+   * create a buffer that contains the entire test file data.
+   */
+  static byte[] initBuffer(int size) {
+    if (seed == -1)
+      seed = nextLong();
+    return randomBytes(seed, size);
+  }
+
+  /**
+   * Creates a file but does not close it
+   * Make sure to call close() on the returned stream
+   * @throws IOException an exception might be thrown
+   */
+  static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
+      throws IOException {
+    return fileSys.create(name, true,
+        fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        (short) repl, (long) BLOCK_SIZE);
+  }
+
+  /**
+   * Compare the content of a file created from FileSystem and Path with
+   * the specified byte[] buffer's content
+   * @throws IOException an exception might be thrown
+   */
+  static void checkFullFile(FileSystem fs, Path name, int len,
+      final byte[] compareContent, String message) throws IOException {
+    FSDataInputStream stm = fs.open(name);
+    byte[] actual = new byte[len];
+    stm.readFully(0, actual);
+    checkData(actual, 0, compareContent, message);
+    stm.close();
+  }
+
+  private static void checkData(final byte[] actual, int from,
+      final byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      Assert.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+          expected[from+idx]+" actual "+actual[idx],
+          expected[from+idx], actual[idx]);
+      actual[idx] = 0;
+    }
+  }
 }
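
Taken together, these helpers centralize the buffer seeding, file creation, and byte-for-byte verification that each TestFileAppend* test previously carried as private copies. As a rough sketch of how a test in the same package can now be written against them (the class name and file path below are illustrative only, not part of this commit):

package org.apache.hadoop.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import junit.framework.TestCase;

public class ExampleAppendUsage extends TestCase {
  public void testSharedHelpers() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    try {
      // one shared buffer, seeded once per JVM by AppendTestUtil
      byte[] contents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
      Path file = new Path("/exampleUsage.dat");
      // replication 1, shared BLOCK_SIZE; the caller must close the stream
      FSDataOutputStream out = AppendTestUtil.createFile(fs, file, 1);
      out.write(contents);
      out.close();
      // byte-for-byte comparison against the generating buffer
      AppendTestUtil.checkFullFile(fs, file, AppendTestUtil.FILE_SIZE,
          contents, "Example read");
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
}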

src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java

Lines changed: 32 additions & 73 deletions
@@ -26,7 +26,6 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,38 +42,15 @@
  * support HDFS appends.
  */
 public class TestFileAppend extends TestCase {
-  static final int blockSize = 1024;
-  static final int numBlocks = 10;
-  static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
 
-  private long seed;
-  private byte[] fileContents = null;
-
-  //
-  // create a buffer that contains the entire test file data.
-  //
-  private void initBuffer(int size) {
-    seed = AppendTestUtil.nextLong();
-    fileContents = AppendTestUtil.randomBytes(seed, size);
-  }
-
-  /*
-   * creates a file but does not close it
-   */
-  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
-      throws IOException {
-    FSDataOutputStream stm = fileSys.create(name, true,
-        fileSys.getConf().getInt("io.file.buffer.size", 4096),
-        (short)repl, (long)blockSize);
-    return stm;
-  }
+  private static byte[] fileContents = null;
 
   //
   // writes to file but does not close it
   //
   private void writeFile(FSDataOutputStream stm) throws IOException {
-    byte[] buffer = AppendTestUtil.randomBytes(seed, fileSize);
+    byte[] buffer = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     stm.write(buffer);
   }
 
@@ -89,60 +65,41 @@ private void checkFile(FileSystem fileSys, Path name, int repl)
     while (!done) {
       try {
         Thread.sleep(1000);
-      } catch (InterruptedException e) {}
+      } catch (InterruptedException e) {;}
       done = true;
       BlockLocation[] locations = fileSys.getFileBlockLocations(
-          fileSys.getFileStatus(name), 0, fileSize);
-      if (locations.length < numBlocks) {
+          fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
+      if (locations.length < AppendTestUtil.NUM_BLOCKS) {
         System.out.println("Number of blocks found " + locations.length);
         done = false;
         continue;
       }
-      for (int idx = 0; idx < numBlocks; idx++) {
+      for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
        if (locations[idx].getHosts().length < repl) {
          System.out.println("Block index " + idx + " not yet replciated.");
          done = false;
          break;
        }
      }
    }
-    FSDataInputStream stm = fileSys.open(name);
-    byte[] expected = new byte[numBlocks * blockSize];
+    byte[] expected =
+        new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
    if (simulatedStorage) {
      for (int i= 0; i < expected.length; i++) {
        expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
      }
    } else {
-      for (int i= 0; i < expected.length; i++) {
-        expected[i] = fileContents[i];
-      }
+      System.arraycopy(fileContents, 0, expected, 0, expected.length);
    }
    // do a sanity check. Read the file
-    byte[] actual = new byte[numBlocks * blockSize];
-    stm.readFully(0, actual);
-    checkData(actual, 0, expected, "Read 1");
-  }
-
-  private void checkFullFile(FileSystem fs, Path name) throws IOException {
-    FSDataInputStream stm = fs.open(name);
-    byte[] actual = new byte[fileSize];
-    stm.readFully(0, actual);
-    checkData(actual, 0, fileContents, "Read 2");
-    stm.close();
+    AppendTestUtil.checkFullFile(fileSys, name,
+        AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
+        expected, "Read 1");
  }
 
-  private void checkData(byte[] actual, int from, byte[] expected, String message) {
-    for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-          expected[from+idx]+" actual "+actual[idx],
-          expected[from+idx], actual[idx]);
-      actual[idx] = 0;
-    }
-  }
-
   /**
    * Test that copy on write for blocks works correctly
+   * @throws IOException an exception might be thrown
    */
   public void testCopyOnWrite() throws IOException {
     Configuration conf = new Configuration();
@@ -159,7 +116,7 @@ public void testCopyOnWrite() throws IOException {
       // create a new file, write to it and close it.
       //
       Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       writeFile(stm);
       stm.close();
 
@@ -178,11 +135,9 @@ public void testCopyOnWrite() throws IOException {
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
         Block b = blocks.get(i).getBlock();
-        FSDataset fsd = dataset;
-        File f = fsd.getFile(b);
+        File f = dataset.getFile(b);
         File link = new File(f.toString() + ".link");
-        System.out.println("Creating hardlink for File " + f +
-                           " to " + link);
+        System.out.println("Creating hardlink for File " + f + " to " + link);
         HardLink.createHardLink(f, link);
       }
 
@@ -193,7 +148,7 @@ public void testCopyOnWrite() throws IOException {
         Block b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned true",
-            dataset.detachBlock(b, 1) == true);
+            dataset.detachBlock(b, 1));
       }
 
       // Since the blocks were already detached earlier, these calls should
@@ -203,7 +158,7 @@ public void testCopyOnWrite() throws IOException {
         Block b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned false",
-            dataset.detachBlock(b, 1) == false);
+            !dataset.detachBlock(b, 1));
       }
 
     } finally {
@@ -214,30 +169,31 @@ public void testCopyOnWrite() throws IOException {
 
   /**
    * Test a simple flush on a simple HDFS file.
+   * @throws IOException an exception might be thrown
    */
   public void testSimpleFlush() throws IOException {
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     try {
 
       // create a new file.
       Path file1 = new Path("/simpleFlush.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       System.out.println("Created file simpleFlush.dat");
 
       // write to file
-      int mid = fileSize/2;
+      int mid = AppendTestUtil.FILE_SIZE /2;
       stm.write(fileContents, 0, mid);
       stm.sync();
       System.out.println("Wrote and Flushed first part of file.");
 
       // write the remainder of the file
-      stm.write(fileContents, mid, fileSize - mid);
+      stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
       System.out.println("Written second part of file");
       stm.sync();
       stm.sync();
@@ -250,7 +206,8 @@ public void testSimpleFlush() throws IOException {
       System.out.println("Closed file.");
 
       // verify that entire file is good
-      checkFullFile(fs, file1);
+      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+          fileContents, "Read 2");
 
     } catch (IOException e) {
       System.out.println("Exception :" + e);
@@ -267,36 +224,38 @@ public void testSimpleFlush() throws IOException {
 
   /**
    * Test that file data can be flushed.
+   * @throws IOException an exception might be thrown
    */
   public void testComplexFlush() throws IOException {
     Configuration conf = new Configuration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    initBuffer(fileSize);
+    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     try {
 
       // create a new file.
       Path file1 = new Path("/complexFlush.dat");
-      FSDataOutputStream stm = createFile(fs, file1, 1);
+      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
       System.out.println("Created file complexFlush.dat");
 
       int start = 0;
-      for (start = 0; (start + 29) < fileSize; ) {
+      for (start = 0; (start + 29) < AppendTestUtil.FILE_SIZE; ) {
         stm.write(fileContents, start, 29);
         stm.sync();
         start += 29;
       }
-      stm.write(fileContents, start, fileSize-start);
+      stm.write(fileContents, start, AppendTestUtil.FILE_SIZE -start);
 
       // verify that full blocks are sane
       checkFile(fs, file1, 1);
       stm.close();
 
       // verify that entire file is good
-      checkFullFile(fs, file1);
+      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
+          fileContents, "Read 2");
     } catch (IOException e) {
       System.out.println("Exception :" + e);
       throw e;
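
A note on the shared state these tests now rely on: AppendTestUtil.initBuffer caches the seed on first use, so every later call regenerates the identical byte sequence. That is what lets writeFile() call initBuffer on its own and still produce data that the later checkFullFile comparison against fileContents agrees with. A minimal illustration, assuming the utility class shown above:

  byte[] first = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  byte[] second = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  // same cached seed, so randomBytes(seed, size) yields identical contents
  junit.framework.Assert.assertTrue(java.util.Arrays.equals(first, second));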
