18 | 18 | package org.apache.hadoop.hdfs;
19 | 19 |
20 | 20 | import java.io.DataOutputStream;
   | 21 | +import java.io.File;
21 | 22 | import java.io.IOException;
22 | 23 | import java.lang.reflect.Field;
   | 24 | +import java.lang.reflect.InvocationTargetException;
23 | 25 | import java.lang.reflect.Method;
24 | 26 | import java.util.ArrayList;
25 | 27 | import java.util.EnumSet;

41 | 43 | import org.apache.hadoop.hdfs.protocol.HdfsConstants;
42 | 44 | import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
43 | 45 | import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
   | 46 | +import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
44 | 47 | import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
45 | 48 | import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
46 | 49 | import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
47 | 50 | import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
   | 51 | +import org.apache.hadoop.test.GenericTestUtils;
   | 52 | +import org.apache.hadoop.test.PathUtils;
48 | 53 | import org.apache.htrace.core.SpanId;
49 | 54 | import org.junit.AfterClass;
50 | 55 | import org.junit.Assert;

64 | 69 | import static org.mockito.Mockito.spy;
65 | 70 | import static org.mockito.Mockito.when;
66 | 71 |
   | 72 | +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
   | 73 | +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
   | 74 | +
67 | 75 | public class TestDFSOutputStream {
68 | 76 |   static MiniDFSCluster cluster;
69 | 77 |
@@ -133,6 +141,124 @@ public void testComputePacketChunkSize() throws Exception {
133 | 141 |     Assert.assertTrue((Integer) field.get(dos) + 257 < packetSize);
134 | 142 |   }
135 | 143 |
| 144 | +  /**
| 145 | +   * This tests preventing overflows of packet size and bodySize.
| 146 | +   * <p>
| 147 | +   * See also https://issues.apache.org/jira/browse/HDFS-11608.
| 148 | +   * </p>
| 149 | +   * @throws IOException
| 150 | +   * @throws SecurityException
| 151 | +   * @throws NoSuchFieldException
| 152 | +   * @throws InvocationTargetException
| 153 | +   * @throws IllegalArgumentException
| 154 | +   * @throws IllegalAccessException
| 155 | +   * @throws NoSuchMethodException
| 156 | +   */
| 157 | +  @Test(timeout=60000)
| 158 | +  public void testPreventOverflow() throws IOException, NoSuchFieldException,
| 159 | +      SecurityException, IllegalAccessException, IllegalArgumentException,
| 160 | +      InvocationTargetException, NoSuchMethodException {
| 161 | +
| 162 | +    final int defaultWritePacketSize = DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
| 163 | +    int configuredWritePacketSize = defaultWritePacketSize;
| 164 | +    int finalWritePacketSize = defaultWritePacketSize;
| 165 | +
| 166 | +    /* test default WritePacketSize, e.g. 64*1024 */
| 167 | +    runAdjustChunkBoundary(configuredWritePacketSize, finalWritePacketSize);
| 168 | +
| 169 | +    /* test large WritePacketSize, e.g. 1G */
| 170 | +    configuredWritePacketSize = 1000 * 1024 * 1024;
| 171 | +    finalWritePacketSize = PacketReceiver.MAX_PACKET_SIZE;
| 172 | +    runAdjustChunkBoundary(configuredWritePacketSize, finalWritePacketSize);
| 173 | +  }
| 174 | +
| 175 | +  /**
| 176 | +   * @param configuredWritePacketSize the configured WritePacketSize.
| 177 | +   * @param finalWritePacketSize the final WritePacketSize picked by
| 178 | +   * {@link DFSOutputStream#adjustChunkBoundary}.
| 179 | +   */
| 180 | +  private void runAdjustChunkBoundary(
| 181 | +      final int configuredWritePacketSize,
| 182 | +      final int finalWritePacketSize) throws IOException, NoSuchFieldException,
| 183 | +      SecurityException, IllegalAccessException, IllegalArgumentException,
| 184 | +      InvocationTargetException, NoSuchMethodException {
| 185 | +
| 186 | +    final boolean appendChunk = false;
| 187 | +    final long blockSize = 3221225500L;
| 188 | +    final long bytesCurBlock = 1073741824L;
| 189 | +    final int bytesPerChecksum = 512;
| 190 | +    final int checksumSize = 4;
| 191 | +    final int chunkSize = bytesPerChecksum + checksumSize;
| 192 | +    final int packetMaxHeaderLength = 33;
| 193 | +
| 194 | +    MiniDFSCluster dfsCluster = null;
| 195 | +    final File baseDir = new File(PathUtils.getTestDir(getClass()),
| 196 | +        GenericTestUtils.getMethodName());
| 197 | +
| 198 | +    try {
| 199 | +      final Configuration dfsConf = new Configuration();
| 200 | +      dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
| 201 | +          baseDir.getAbsolutePath());
| 202 | +      dfsConf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
| 203 | +          configuredWritePacketSize);
| 204 | +      dfsCluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(1).build();
| 205 | +      dfsCluster.waitActive();
| 206 | +
| 207 | +      final FSDataOutputStream os = dfsCluster.getFileSystem()
| 208 | +          .create(new Path(baseDir.getAbsolutePath(), "testPreventOverflow"));
| 209 | +      final DFSOutputStream dos = (DFSOutputStream) Whitebox
| 210 | +          .getInternalState(os, "wrappedStream");
| 211 | +
| 212 | +      /* set appendChunk */
| 213 | +      final Method setAppendChunkMethod = dos.getClass()
| 214 | +          .getDeclaredMethod("setAppendChunk", boolean.class);
| 215 | +      setAppendChunkMethod.setAccessible(true);
| 216 | +      setAppendChunkMethod.invoke(dos, appendChunk);
| 217 | +
| 218 | +      /* set bytesCurBlock */
| 219 | +      final Method setBytesCurBlockMethod = dos.getClass()
| 220 | +          .getDeclaredMethod("setBytesCurBlock", long.class);
| 221 | +      setBytesCurBlockMethod.setAccessible(true);
| 222 | +      setBytesCurBlockMethod.invoke(dos, bytesCurBlock);
| 223 | +
| 224 | +      /* set blockSize */
| 225 | +      final Field blockSizeField = dos.getClass().getDeclaredField("blockSize");
| 226 | +      blockSizeField.setAccessible(true);
| 227 | +      blockSizeField.setLong(dos, blockSize);
| 228 | +
| 229 | +      /* call adjustChunkBoundary */
| 230 | +      final Method method = dos.getClass()
| 231 | +          .getDeclaredMethod("adjustChunkBoundary");
| 232 | +      method.setAccessible(true);
| 233 | +      method.invoke(dos);
| 234 | +
| 235 | +      /* get and verify writePacketSize */
| 236 | +      final Field writePacketSizeField = dos.getClass()
| 237 | +          .getDeclaredField("writePacketSize");
| 238 | +      writePacketSizeField.setAccessible(true);
| 239 | +      Assert.assertEquals(finalWritePacketSize,
| 240 | +          writePacketSizeField.getInt(dos));
| 241 | +
| 242 | +      /* get and verify chunksPerPacket */
| 243 | +      final Field chunksPerPacketField = dos.getClass()
| 244 | +          .getDeclaredField("chunksPerPacket");
| 245 | +      chunksPerPacketField.setAccessible(true);
| 246 | +      Assert.assertEquals((finalWritePacketSize - packetMaxHeaderLength)
| 247 | +          / chunkSize, chunksPerPacketField.getInt(dos));
| 248 | +
| 249 | +      /* get and verify packetSize */
| 250 | +      final Field packetSizeField = dos.getClass()
| 251 | +          .getDeclaredField("packetSize");
| 252 | +      packetSizeField.setAccessible(true);
| 253 | +      Assert.assertEquals(chunksPerPacketField.getInt(dos) * chunkSize,
| 254 | +          packetSizeField.getInt(dos));
| 255 | +    } finally {
| 256 | +      if (dfsCluster != null) {
| 257 | +        dfsCluster.shutdown();
| 258 | +      }
| 259 | +    }
| 260 | +  }
| 261 | +
136 | 262 |   @Test
137 | 263 |   public void testCongestionBackoff() throws IOException {
138 | 264 |     DfsClientConf dfsClientConf = mock(DfsClientConf.class);
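For context on what the new test pins down: with blockSize = 3221225500L and bytesCurBlock = 1073741824L, the remaining space in the block is 2147483676 bytes, 29 bytes past Integer.MAX_VALUE, and a configured write packet size of 1000 * 1024 * 1024 is far above what a DataNode will accept. So adjustChunkBoundary must keep the subtraction in long arithmetic and clamp writePacketSize to PacketReceiver.MAX_PACKET_SIZE before deriving chunksPerPacket and packetSize. The sketch below reproduces only the arithmetic the assertions check; the 16 MB value of MAX_PACKET_SIZE and the clamping order are assumptions on my part, and the real control flow inside DFSOutputStream#adjustChunkBoundary may differ.

// A self-contained sketch of the clamping arithmetic asserted above.
// Assumption: MAX_PACKET_SIZE mirrors PacketReceiver.MAX_PACKET_SIZE
// (16 MB in the Hadoop tree); this is not the literal Hadoop code.
public class PacketSizeClampSketch {

  static final int MAX_PACKET_SIZE = 16 * 1024 * 1024; // PacketReceiver cap
  static final int PKT_MAX_HEADER_LEN = 33; // header bound used by the test
  static final int CHUNK_SIZE = 512 + 4;    // bytesPerChecksum + checksumSize

  public static void main(String[] args) {
    final long blockSize = 3221225500L;        // ~3 GB block, as in the test
    final long bytesCurBlock = 1073741824L;    // 1 GB already written
    final int configured = 1000 * 1024 * 1024; // dfs.client-write-packet-size

    // blockSize - bytesCurBlock = 2147483676 exceeds Integer.MAX_VALUE,
    // so the difference must stay a long and be clamped before any
    // narrowing cast to int.
    final long spaceLeftInBlock = blockSize - bytesCurBlock;
    final int writePacketSize = (int) Math.min(
        Math.min(spaceLeftInBlock, (long) configured), MAX_PACKET_SIZE);

    final int chunksPerPacket =
        (writePacketSize - PKT_MAX_HEADER_LEN) / CHUNK_SIZE;
    final int packetSize = chunksPerPacket * CHUNK_SIZE;

    System.out.println(writePacketSize); // 16777216
    System.out.println(chunksPerPacket); // (16777216 - 33) / 516 = 32513
    System.out.println(packetSize);      // 32513 * 516 = 16776708
  }
}

Running the sketch prints 16777216, 32513 and 16776708, matching the assertions in runAdjustChunkBoundary for the 1G case; with the default 64 KB packet size the clamp is a no-op, which is the test's first case.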