Revert "Introduce large dictionary mode in SliceDictionarySelectiveReader"

This reverts commit 219c7d7.
kewang1024 authored and rongrong committed Oct 8, 2020
1 parent 4cd2141 commit b8e72e8
Showing 2 changed files with 24 additions and 70 deletions.
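
For context on what is being reverted: the large dictionary mode taught SliceDictionarySelectiveReader to materialize small selections into a flat VariableWidthBlock instead of always wrapping the shared dictionary in a DictionaryBlock. Below is a minimal sketch of the removed decision rule, using only the constants and the condition visible in the removed lines of this diff; the class and method names in the sketch are illustrative, not part of the reader.

final class MaterializationHeuristicSketch
{
    // Each row group has roughly 10K rows and each batch reads 1K rows, so about 10 batches per row group.
    static final int BATCHES_PER_ROWGROUP = 10;
    // Kept >= 1.0f to compensate for the extra CPU spent copying bytes while materializing.
    static final float MATERIALIZATION_RATIO = 2.0f;

    // Materialize the selected values only when doing so for every batch in the row group
    // would still be smaller than the dictionary that a DictionaryBlock would keep alive.
    static boolean shouldMaterialize(long blockSizeInBytes, long dictionarySizeInBytes)
    {
        return blockSizeInBytes * BATCHES_PER_ROWGROUP < dictionarySizeInBytes / MATERIALIZATION_RATIO;
    }
}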
@@ -36,6 +36,7 @@
import org.openjdk.jol.info.ClassLayout;

import javax.annotation.Nullable;
import javax.validation.constraints.NotNull;

import java.io.IOException;
import java.util.Arrays;
@@ -79,11 +80,6 @@ public class SliceDictionarySelectiveReader
// add one extra entry for null after stripe/rowGroup dictionary
private static final int[] EMPTY_DICTIONARY_OFFSETS = new int[2];

// Each rowgroup has roughly 10K rows, and each batch reads 1K rows. So there're about 10 batches in a rowgroup.
private static final int BATCHES_PER_ROWGROUP = 10;
// MATERIALIZATION_RATIO should be greater than or equal to 1.0f to compensate the extra CPU to materialize blocks.
private static final float MATERIALIZATION_RATIO = 2.0f;

private final TupleDomainFilter filter;
private final boolean nonDeterministicFilter;
private final boolean nullsAllowed;
@@ -131,7 +127,6 @@ public class SliceDictionarySelectiveReader
private OrcLocalMemoryContext systemMemoryContext;

private int[] values;
private int nullsCount;
private boolean allNulls;
private int[] outputPositions;
private int outputPositionCount;
@@ -163,7 +158,6 @@ public int read(int offset, int[] positions, int positionCount)
openRowGroup();
}

nullsCount = 0;
allNulls = false;

if (outputRequired) {
@@ -209,7 +203,6 @@ private int readNoFilter(int[] positions, int positionCount)

if (presentStream != null && !presentStream.nextBit()) {
values[i] = currentDictionarySize - 1;
nullsCount++;
}
else {
boolean isInRowDictionary = inDictionaryStream != null && !inDictionaryStream.nextBit();
@@ -237,7 +230,6 @@ private int readWithFilter(int[] positions, int positionCount)
if ((nonDeterministicFilter && filter.testNull()) || nullsAllowed) {
if (outputRequired) {
values[outputPositionCount] = currentDictionarySize - 1;
nullsCount++;
}
outputPositions[outputPositionCount] = position;
outputPositionCount++;
@@ -388,45 +380,36 @@ public Block getBlock(int[] positions, int positionCount)
checkState(positionCount <= outputPositionCount, "Not enough values");
checkState(!valuesInUse, "BlockLease hasn't been closed yet");

if (allNulls || nullsCount == outputPositionCount) {
if (allNulls) {
return new RunLengthEncodedBlock(outputType.createBlockBuilder(null, 1).appendNull().build(), positionCount);
}

// compact values(ids) array, and calculate 1) the slice sizeInBytes if materialized, and 2) number of nulls
long blockSizeInBytes = 0;
int nullsCount = 0; // the nulls count for selected positions
int i = 0;
int j = 0;
while (i < positionCount && j < outputPositionCount) {
if (positions[i] != outputPositions[j]) {
j++;
continue;
}

int id = this.values[j];
values[i] = id;
wrapDictionaryIfNecessary();

blockSizeInBytes += dictionaryOffsetVector[id + 1] - dictionaryOffsetVector[id];
nullsCount += (id == currentDictionarySize - 1 ? 1 : 0);
if (positionCount == outputPositionCount) {
DictionaryBlock block = new DictionaryBlock(positionCount, dictionary, values);

i++;
j++;
values = null;
return block;
}

// If all selected positions are null, just return RLE block.
if (nullsCount == outputPositionCount) {
return new RunLengthEncodedBlock(outputType.createBlockBuilder(null, 1).appendNull().build(), positionCount);
}
int[] valuesCopy = new int[positionCount];

// If the expected materialized size of the output block is smaller than a certain ratio of the dictionary size, we will materialize the values
int dictionarySizeInBytes = dictionaryOffsetVector[currentDictionarySize - 1];
if (blockSizeInBytes * BATCHES_PER_ROWGROUP < dictionarySizeInBytes / MATERIALIZATION_RATIO) {
return getMaterializedBlock(positionCount, blockSizeInBytes, nullsCount);
int positionIndex = 0;
int nextPosition = positions[positionIndex];
for (int i = 0; i < outputPositionCount; i++) {
if (outputPositions[i] < nextPosition) {
continue;
}
assert outputPositions[i] == nextPosition;
valuesCopy[positionIndex] = this.values[i];
positionIndex++;
if (positionIndex >= positionCount) {
break;
}
nextPosition = positions[positionIndex];
}

wrapDictionaryIfNecessary();

int[] valuesCopy = Arrays.copyOf(values, positionCount);
return new DictionaryBlock(positionCount, dictionary, valuesCopy);
}
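
The removed and restored lines of getBlock are interleaved in the rendering above. For readability, here is a standalone sketch of the restored compaction step, assuming, as the restored loop itself does, that positions and outputPositions are sorted ascending and that positions is a subset of outputPositions; the method signature and the zero-count guard are illustrative additions.

static int[] compactIds(int[] positions, int positionCount, int[] outputPositions, int outputPositionCount, int[] values)
{
    int[] valuesCopy = new int[positionCount];
    if (positionCount == 0) {
        return valuesCopy; // guard added only for this standalone sketch
    }
    int positionIndex = 0;
    int nextPosition = positions[positionIndex];
    for (int i = 0; i < outputPositionCount; i++) {
        if (outputPositions[i] < nextPosition) {
            continue; // this output position was not requested by the caller
        }
        assert outputPositions[i] == nextPosition;
        valuesCopy[positionIndex] = values[i]; // keep the dictionary id for the requested position
        positionIndex++;
        if (positionIndex >= positionCount) {
            break; // all requested positions resolved
        }
        nextPosition = positions[positionIndex];
    }
    return valuesCopy;
}

The resulting id array is then wrapped in a DictionaryBlock over the shared dictionary, so only the ids, not the string bytes, are copied per batch.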

@@ -708,33 +691,4 @@ private BlockLease newLease(Block block)
valuesInUse = true;
return ClosingBlockLease.newLease(block, () -> valuesInUse = false);
}

private Block getMaterializedBlock(int positionCount, long blockSizeInBytes, int nullsCount)
{
byte[] sliceData = new byte[toIntExact(blockSizeInBytes)];
int[] offsetVector = new int[positionCount + 1];
int currentOffset = 0;
for (int k = 0; k < positionCount; k++) {
int id = values[k];
int offset = dictionaryOffsetVector[id];
int length = dictionaryOffsetVector[id + 1] - offset;
System.arraycopy(dictionaryData, offset, sliceData, currentOffset, length);

currentOffset += length;
offsetVector[k + 1] = currentOffset;
}

if (nullsCount > 0) {
boolean[] isNullVector = new boolean[positionCount];
for (int k = 0; k < positionCount; k++) {
if (values[k] == currentDictionarySize - 1) {
isNullVector[k] = true;
}
}
return new VariableWidthBlock(positionCount, wrappedBuffer(sliceData), offsetVector, Optional.of(isNullVector));
}
else {
return new VariableWidthBlock(positionCount, wrappedBuffer(sliceData), offsetVector, Optional.empty());
}
}
}
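
As a worked example of the removed threshold, with illustrative numbers and reusing the sketch class from the commit header above: a 1 MB row-group dictionary and 40 KB of selected string data give 40 KB * 10 = 400 KB, which is below 1 MB / 2.0 = 512 KB, so the reverted code would have copied the 40 KB into a VariableWidthBlock via getMaterializedBlock rather than returning a DictionaryBlock that retains the full 1 MB dictionary.

// Illustrative numbers only.
long selectedBytes = 40L * 1024;      // bytes of selected strings in this batch
long dictionaryBytes = 1024L * 1024;  // bytes in the row-group dictionary
boolean materialize = MaterializationHeuristicSketch.shouldMaterialize(selectedBytes, dictionaryBytes); // true: 400 KB < 512 KB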
@@ -763,11 +763,11 @@ public void testVarchars()
1, stringIn(true, "10", "11"),
2, stringIn(true, "def", "abc"))));

// direct and dictionary
// dictionary
tester.testRoundTrip(VARCHAR, newArrayList(limit(cycle(ImmutableList.of("apple", "apple pie", "apple\uD835\uDC03", "apple\uFFFD")), NUM_ROWS)),
stringIn(false, "apple", "apple pie"));

// direct and dictionary materialized
// direct
tester.testRoundTrip(VARCHAR,
intsBetween(0, NUM_ROWS).stream().map(Object::toString).collect(toList()),
stringIn(false, "10", "11"),
@@ -812,7 +812,7 @@ public void testVarchars()
.map(Object::toString)
.collect(toList()));

// presentStream is null in some row groups & dictionary materialized
// presentStream is null in some row groups
Function<Integer, String> randomStrings = i -> String.valueOf(random.nextInt(NUM_ROWS));
tester.testRoundTripTypes(
ImmutableList.of(INTEGER, VARCHAR),
