
Commit d0b6988

PR Feedback
* Separate validation of onDiskSizeWithHeader as an input parameter from validation of the value as read from a block header.
* Simplify branching around fetching and populating onDiskSizeWithHeader.
* Inline retrieval of nextOnDiskBlockSize; add basic validation.
* Use matchers in tests for improved failure output.
1 parent 9ef0b2f commit d0b6988
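
A note on the first bullet: callers of readBlockData may pass -1 as the on-disk size to mean "size unknown, read it from the block header", so -1 is only acceptable when the value comes from the caller; a -1 actually read out of a header can only indicate corruption. After this change the renamed long overload owns the sentinel handling, while the int overload rejects all non-positive values. An illustrative sketch only (both methods are private to the reader implementation in HFileBlock.java; names per the diff below):

    // Caller-provided size: -1 is an expected sentinel meaning "size unknown,
    // read it from the block header"; anything else must fit in an int and
    // pass the same range checks as a header-read value.
    checkCallerProvidedOnDiskSizeWithHeader(-1L);            // true: the magic value
    checkCallerProvidedOnDiskSizeWithHeader(Long.MAX_VALUE); // false: exceeds int range
    // Header-read size: -1 here means a corrupt or garbage header.
    checkOnDiskSizeWithHeader(-1);                           // false: negative size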

File tree

3 files changed: +61 -61 lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java

Lines changed: 42 additions & 42 deletions
@@ -1568,14 +1568,11 @@ public HFileBlock readBlockData(long offset, long onDiskSizeWithHeaderL, boolean
     }
 
     /**
-     * Check that {@code value} value seems reasonable, within a large margin of error.
+     * Check that {@code value} read from a block header seems reasonable, within a large margin of
+     * error.
      * @return {@code true} if the value is safe to proceed, {@code false} otherwise.
      */
     private boolean checkOnDiskSizeWithHeader(int value) {
-      if (value == -1) {
-        // a magic value we expect to see.
-        return true;
-      }
       if (value < 0) {
         if (LOG.isTraceEnabled()) {
           LOG.trace(
@@ -1596,18 +1593,23 @@ private boolean checkOnDiskSizeWithHeader(int value) {
     }
 
     /**
-     * Check that {@code value} value seems reasonable, within a large margin of error.
+     * Check that {@code value} provided by the calling context seems reasonable, within a large
+     * margin of error.
      * @return {@code true} if the value is safe to proceed, {@code false} otherwise.
      */
-    private boolean checkOnDiskSizeWithHeader(long value) {
+    private boolean checkCallerProvidedOnDiskSizeWithHeader(long value) {
       // same validation logic as is used by Math.toIntExact(long)
       int intValue = (int) value;
       if (intValue != value) {
         if (LOG.isTraceEnabled()) {
-          LOG.trace("onDiskSizeWithHeader={}; value exceeds int size limits.", value);
+          LOG.trace("onDiskSizeWithHeaderL={}; value exceeds int size limits.", value);
         }
         return false;
       }
+      if (intValue == -1) {
+        // a magic value we expect to see.
+        return true;
+      }
       return checkOnDiskSizeWithHeader(intValue);
     }
 
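The cast-and-compare idiom referenced by the "same validation logic as is used by Math.toIntExact(long)" comment is worth spelling out: narrowing a long to an int keeps only the low 32 bits, so casting back and comparing detects any value outside int range, without the ArithmeticException that Math.toIntExact(long) itself would throw. A minimal standalone illustration (class and method names here are hypothetical):

    public class IntRangeCheck {
      // Same validation logic as Math.toIntExact(long), but returning a
      // boolean instead of throwing ArithmeticException on overflow.
      static boolean fitsInInt(long value) {
        int intValue = (int) value; // narrowing cast keeps the low 32 bits
        return intValue == value;   // equal only if no bits were lost
      }

      public static void main(String[] args) {
        System.out.println(fitsInInt(42L));                    // true
        System.out.println(fitsInInt(-1L));                    // true: -1 fits in an int
        System.out.println(fitsInInt(Integer.MAX_VALUE + 1L)); // false: overflows int
      }
    }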
@@ -1646,14 +1648,9 @@ private void invalidateNextBlockHeader() {
       prefetchedHeader.set(new PrefetchedHeader());
     }
 
-    private int getNextBlockOnDiskSize(boolean readNextHeader, ByteBuff onDiskBlock,
-      int onDiskSizeWithHeader) {
-      int nextBlockOnDiskSize = -1;
-      if (readNextHeader) {
-        nextBlockOnDiskSize =
-          onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH) + hdrSize;
-      }
-      return nextBlockOnDiskSize;
+    private int getNextBlockOnDiskSize(ByteBuff onDiskBlock, int onDiskSizeWithHeader) {
+      return onDiskBlock.getIntAfterPosition(onDiskSizeWithHeader + BlockType.MAGIC_LENGTH)
+        + hdrSize;
     }
 
     private ByteBuff allocate(int size, boolean intoHeap) {
@@ -1679,16 +1676,15 @@ private ByteBuff allocate(int size, boolean intoHeap) {
     protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
       long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics,
       boolean intoHeap) throws IOException {
-      if (offset < 0) {
-        throw new IOException("Invalid offset=" + offset + " trying to read " + "block (onDiskSize="
-          + onDiskSizeWithHeaderL + ")");
-      }
-
       final Span span = Span.current();
       final AttributesBuilder attributesBuilder = Attributes.builder();
       Optional.of(Context.current()).map(val -> val.get(CONTEXT_KEY))
         .ifPresent(c -> c.accept(attributesBuilder));
-      if (!checkOnDiskSizeWithHeader(onDiskSizeWithHeaderL)) {
+      if (offset < 0) {
+        throw new IOException("Invalid offset=" + offset + " trying to read " + "block (onDiskSize="
+          + onDiskSizeWithHeaderL + ")");
+      }
+      if (!checkCallerProvidedOnDiskSizeWithHeader(onDiskSizeWithHeaderL)) {
         throw new IOException(
           "Caller provided invalid onDiskSizeWithHeaderL=" + onDiskSizeWithHeaderL);
       }
@@ -1722,28 +1718,24 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
           headerBuf = HEAP.allocate(hdrSize);
           readAtOffset(is, headerBuf, hdrSize, false, offset, pread);
           headerBuf.rewind();
+        }
+        onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
+      }
 
-          // The caller didn't provide an anticipated block size and headerBuf was null, this is
-          // probably the first time this HDFS block has been read. The value we just read has not
-          // had HBase checksum validation ; assume it was not protected by HDFS checksum either.
-          // Sanity check the value. If it doesn't seem right, either trigger fall-back to hdfs
-          // checksum or abort the read.
-          //
-          // TODO: Should we also check the value vs. some multiple of fileContext.getBlocksize() ?
-          onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-          if (!checkOnDiskSizeWithHeader(onDiskSizeWithHeader)) {
-            if (verifyChecksum) {
-              invalidateNextBlockHeader();
-              span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build());
-              return null;
-            } else {
-              throw new IOException("Read invalid onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-            }
-          }
+      // The common case is that onDiskSizeWithHeader was produced by a read without checksum
+      // validation, so give it a sanity check before trying to use it.
+      //
+      // TODO: Should we also check the value vs. some multiple of fileContext.getBlocksize() ?
+      if (!checkOnDiskSizeWithHeader(onDiskSizeWithHeader)) {
+        if (verifyChecksum) {
+          invalidateNextBlockHeader();
+          span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build());
+          return null;
         } else {
-          onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
+          throw new IOException("Invalid onDiskSizeWithHeader=" + onDiskSizeWithHeader);
         }
       }
+
       int preReadHeaderSize = headerBuf == null ? 0 : hdrSize;
       // Allocate enough space to fit the next block's header too; saves a seek next time through.
       // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header;
@@ -1760,8 +1752,16 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
       boolean readNextHeader = readAtOffset(is, onDiskBlock,
         onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread);
       onDiskBlock.rewind(); // in case of moving position when copying a cached header
-      int nextBlockOnDiskSize =
-        getNextBlockOnDiskSize(readNextHeader, onDiskBlock, onDiskSizeWithHeader);
+
+      // the call to validateChecksum for this block excludes the next block header over-read, so
+      // no reason to delay extracting this value.
+      int nextBlockOnDiskSize = -1;
+      if (readNextHeader) {
+        int parsedVal = getNextBlockOnDiskSize(onDiskBlock, onDiskSizeWithHeader);
+        if (checkOnDiskSizeWithHeader(parsedVal)) {
+          nextBlockOnDiskSize = parsedVal;
+        }
+      }
       if (headerBuf == null) {
         headerBuf = onDiskBlock.duplicate().position(0).limit(hdrSize);
       }
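
For readers unfamiliar with the over-read trick in this hunk: readAtOffset pulls in hdrSize extra bytes past the end of the block, so the next block's header is already sitting in onDiskBlock at offset onDiskSizeWithHeader. An HFile block header begins with the block-type magic bytes, immediately followed by a 4-byte onDiskSizeWithoutHeader, which is why getNextBlockOnDiskSize reads an int at onDiskSizeWithHeader + BlockType.MAGIC_LENGTH and adds hdrSize back. A simplified sketch, using a plain ByteBuffer in place of HBase's ByteBuff; the class and constant here are stand-ins, not the HBase API:

    import java.nio.ByteBuffer;

    public class NextBlockSizeSketch {
      static final int MAGIC_LENGTH = 8; // stand-in for BlockType.MAGIC_LENGTH

      // What the inlined getNextBlockOnDiskSize computes: the next block's
      // onDiskSizeWithoutHeader is stored right after its magic bytes, and the
      // header's own size must be added back to get the full on-disk size.
      static int nextBlockOnDiskSize(ByteBuffer onDiskBlock, int onDiskSizeWithHeader,
        int hdrSize) {
        int sizeWithoutHeader = onDiskBlock.getInt(onDiskSizeWithHeader + MAGIC_LENGTH);
        return sizeWithoutHeader + hdrSize;
      }
    }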

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java

Lines changed: 1 addition & 6 deletions
@@ -163,12 +163,7 @@ public void testReaderWithoutBlockCache() throws Exception {
     fillByteBuffAllocator(alloc, bufCount);
     // start write to store file.
     Path path = writeStoreFile();
-    try {
-      readStoreFile(path, conf, alloc);
-    } catch (Exception e) {
-      // fail test
-      assertTrue(false);
-    }
+    readStoreFile(path, conf, alloc);
     Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
     alloc.clean();
   }

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockHeaderCorruption.java

Lines changed: 18 additions & 13 deletions
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.startsWith;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -41,6 +45,7 @@
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.hamcrest.Matchers;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExternalResource;
@@ -81,8 +86,8 @@ public void testBlockMagicCorruptionFirstBlock() throws Exception {
       consumer.readFully();
       fail();
     } catch (Exception e) {
-      assertTrue(e instanceof IOException);
-      assertEquals("Invalid HFile block magic: _GARBAGE", e.getMessage());
+      assertThat(e, instanceOf(IOException.class));
+      assertThat(e.getMessage(), equalTo("Invalid HFile block magic: _GARBAGE"));
     }
     assertEquals(0, consumer.getItemsRead());
   }
@@ -107,8 +112,8 @@ public void testBlockMagicCorruptionSecondBlock() throws Exception {
       consumer.readFully();
       fail();
     } catch (Exception e) {
-      assertTrue(e instanceof IOException);
-      assertEquals("Invalid HFile block magic: _GARBAGE", e.getMessage());
+      assertThat(e, instanceOf(IOException.class));
+      assertThat(e.getMessage(), equalTo("Invalid HFile block magic: _GARBAGE"));
     }
     assertEquals(1, consumer.getItemsRead());
   }
@@ -137,8 +142,8 @@ public void testOnDiskSizeWithoutHeaderCorruptionFirstBlock() throws Exception {
       consumer.readFully();
       fail();
     } catch (Exception e) {
-      assertTrue(e instanceof IOException);
-      assertTrue(e.getMessage().startsWith("Read invalid onDiskSizeWithHeader="));
+      assertThat(e, instanceOf(IOException.class));
+      assertThat(e.getMessage(), startsWith("Invalid onDiskSizeWithHeader="));
     }
     assertEquals(0, consumer.getItemsRead());
   }
@@ -154,8 +159,8 @@ public void testOnDiskSizeWithoutHeaderCorruptionFirstBlock() throws Exception {
       consumer.readFully();
       fail();
     } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-      assertTrue(e.getMessage().startsWith("newLimit > capacity"));
+      assertThat(e, instanceOf(IllegalArgumentException.class));
+      assertThat(e.getMessage(), startsWith("newLimit > capacity"));
     }
     assertEquals(0, consumer.getItemsRead());
   }
@@ -171,8 +176,8 @@ public void testOnDiskSizeWithoutHeaderCorruptionFirstBlock() throws Exception {
       consumer.readFully();
       fail();
     } catch (Exception e) {
-      assertTrue(e instanceof IOException);
-      assertTrue(e.getMessage().startsWith("Read invalid onDiskSizeWithHeader="));
+      assertThat(e, instanceOf(IOException.class));
+      assertThat(e.getMessage(), Matchers.startsWith("Invalid onDiskSizeWithHeader="));
     }
     assertEquals(0, consumer.getItemsRead());
   }
@@ -208,8 +213,8 @@ public void testOnDiskSizeWithoutHeaderCorruptionSecondBlock() throws Exception
       consumer.readFully();
       fail();
     } catch (Exception e) {
-      assertTrue(e instanceof IOException);
-      assertTrue(e.getMessage().startsWith("Caller provided invalid onDiskSizeWithHeaderL="));
+      assertThat(e, instanceOf(IOException.class));
+      assertThat(e.getMessage(), Matchers.startsWith("Invalid onDiskSizeWithHeader="));
     }
     assertEquals(1, consumer.getItemsRead());
   }
@@ -243,7 +248,7 @@ public void testOnDiskSizeWithoutHeaderCorruptionSecondBlock() throws Exception
       fail();
     } catch (Exception e) {
       assertTrue(e instanceof IOException);
-      assertTrue(e.getMessage().startsWith("Caller provided invalid onDiskSizeWithHeaderL="));
+      assertTrue(e.getMessage().startsWith("Invalid onDiskSizeWithHeader="));
     }
     assertEquals(1, consumer.getItemsRead());
   }
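
The motivation for the matcher conversion (last bullet of the commit message): a failed assertTrue reports only a bare AssertionError, while a failed Hamcrest assertThat reports the expectation alongside the actual value. A small demonstration; the class name is hypothetical and the failure text in the comment is approximately what Hamcrest prints:

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.startsWith;

    public class MatcherFailureDemo {
      public static void main(String[] args) {
        String msg = "Invalid HFile block magic: _GARBAGE";
        // assertTrue(msg.startsWith("Invalid onDiskSizeWithHeader=")) would fail
        // with just "java.lang.AssertionError" and no further context.
        // assertThat fails with a descriptive message, roughly:
        //   Expected: a string starting with "Invalid onDiskSizeWithHeader="
        //        but: was "Invalid HFile block magic: _GARBAGE"
        assertThat(msg, startsWith("Invalid onDiskSizeWithHeader="));
      }
    }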
