Add position delete table
szehon-ho committed Jan 20, 2023
1 parent 35151fe commit 93d07ef
Showing 20 changed files with 925 additions and 156 deletions.
5 changes: 5 additions & 0 deletions api/src/main/java/org/apache/iceberg/ContentScanTask.java
@@ -38,6 +38,11 @@ default StructLike partition() {
return file().partition();
}

@Override
default long sizeBytes() {
return length();
}

/**
* The starting position of this scan range in the file.
*
7 changes: 5 additions & 2 deletions core/src/main/java/org/apache/iceberg/BaseMetadataTable.java
@@ -73,9 +73,12 @@ protected BaseMetadataTable(Table table, String name) {
*/
static PartitionSpec transformSpec(Schema metadataTableSchema, PartitionSpec spec) {
PartitionSpec.Builder identitySpecBuilder =
- PartitionSpec.builderFor(metadataTableSchema).checkConflicts(false);
+ PartitionSpec.builderFor(metadataTableSchema)
+     .withSpecId(spec.specId())
+     .checkConflicts(false);
for (PartitionField field : spec.fields()) {
- identitySpecBuilder.add(field.fieldId(), field.name(), Transforms.identity());
+ identitySpecBuilder.add(
+     field.fieldId(), field.fieldId(), field.name(), Transforms.identity());
}
return identitySpecBuilder.build();
}
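Note on the change above: `withSpecId` carries the base table's spec ID over to the metadata-table spec, and the four-argument `add` pins the partition field ID instead of letting the builder assign a fresh one, so field IDs stay stable between the base spec and the metadata-table spec. A minimal sketch of the resulting builder usage; the schema, IDs, and spec ID are illustrative, and the sketch sits in `org.apache.iceberg` because some of these builder methods are package-private:

```java
package org.apache.iceberg;

import org.apache.iceberg.transforms.Transforms;
import org.apache.iceberg.types.Types;

class TransformSpecSketch {
  static PartitionSpec identitySpec() {
    // a metadata-table schema exposing one partition column, "data" (field ID 1000)
    Schema metadataTableSchema =
        new Schema(Types.NestedField.optional(1000, "data", Types.StringType.get()));
    return PartitionSpec.builderFor(metadataTableSchema)
        .withSpecId(1) // illustrative: reuse the base table's spec ID
        .checkConflicts(false)
        // sourceId, fieldId, name: pinning fieldId keeps IDs stable across specs
        .add(1000, 1000, "data", Transforms.identity())
        .build();
  }
}
```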
42 changes: 42 additions & 0 deletions core/src/main/java/org/apache/iceberg/BasePositionDeletesScanTask.java
@@ -0,0 +1,42 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;

import org.apache.iceberg.expressions.ResidualEvaluator;

/** Base implementation of {@link PositionDeletesScanTask} */
class BasePositionDeletesScanTask extends BaseContentScanTask<PositionDeletesScanTask, DeleteFile>
implements PositionDeletesScanTask, SplittableScanTask<PositionDeletesScanTask> {

BasePositionDeletesScanTask(
DeleteFile file, String schemaString, String specString, ResidualEvaluator evaluator) {
super(file, schemaString, specString, evaluator);
}

@Override
protected BasePositionDeletesScanTask self() {
return this;
}

@Override
protected PositionDeletesScanTask newSplitTask(
PositionDeletesScanTask parentTask, long offset, long length) {
return new SplitPositionDeletesScanTask(parentTask, offset, length);
}
}
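Because `BasePositionDeletesScanTask` implements `SplittableScanTask`, planning code can cut a large position delete file into byte ranges, each backed by the `SplitPositionDeletesScanTask` that `newSplitTask` creates (that class is added elsewhere in this commit). A hedged consumer-side sketch; the 128 MB target split size is illustrative:

```java
import org.apache.iceberg.PositionDeletesScanTask;
import org.apache.iceberg.SplittableScanTask;

class SplitSketch {
  static void printSplits(SplittableScanTask<PositionDeletesScanTask> task) {
    long targetSplitSize = 128L * 1024 * 1024; // illustrative target
    for (PositionDeletesScanTask split : task.split(targetSplitSize)) {
      // each split covers the sub-range [start, start + length) of the delete file
      System.out.printf("%s [%d, %d)%n",
          split.file().path(), split.start(), split.start() + split.length());
    }
  }
}
```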
24 changes: 23 additions & 1 deletion core/src/main/java/org/apache/iceberg/BaseScan.java
@@ -21,6 +21,7 @@
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import org.apache.iceberg.expressions.Binder;
@@ -59,6 +60,23 @@ abstract class BaseScan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>>
private static final List<String> SCAN_WITH_STATS_COLUMNS =
ImmutableList.<String>builder().addAll(SCAN_COLUMNS).addAll(STATS_COLUMNS).build();

protected static final List<String> DELETE_SCAN_COLUMNS =
ImmutableList.of(
"snapshot_id",
"content",
"file_path",
"file_ordinal",
"file_format",
"block_size_in_bytes",
"file_size_in_bytes",
"record_count",
"partition",
"key_metadata",
"split_offsets");

protected static final List<String> DELETE_SCAN_WITH_STATS_COLUMNS =
ImmutableList.<String>builder().addAll(DELETE_SCAN_COLUMNS).addAll(STATS_COLUMNS).build();

private static final boolean PLAN_SCANS_WITH_WORKER_POOL =
SystemProperties.getBoolean(SystemProperties.SCAN_THREAD_POOL_ENABLED, true);

@@ -84,7 +102,7 @@ protected TableOperations tableOps() {
return null;
}

- protected Table table() {
+ public Table table() {
return table;
}

@@ -96,6 +114,10 @@ protected TableScanContext context() {
return context;
}

protected Map<String, String> options() {
return context().options();
}

protected List<String> scanColumns() {
return context.returnColumnStats() ? SCAN_WITH_STATS_COLUMNS : SCAN_COLUMNS;
}
135 changes: 1 addition & 134 deletions core/src/main/java/org/apache/iceberg/BaseTableScan.java
@@ -18,63 +18,17 @@
*/
package org.apache.iceberg;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.iceberg.events.Listeners;
import org.apache.iceberg.events.ScanEvent;
import org.apache.iceberg.expressions.ExpressionUtil;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.metrics.DefaultMetricsContext;
import org.apache.iceberg.metrics.ImmutableScanReport;
import org.apache.iceberg.metrics.ScanMetrics;
import org.apache.iceberg.metrics.ScanMetricsResult;
import org.apache.iceberg.metrics.ScanReport;
import org.apache.iceberg.metrics.Timer;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.util.DateTimeUtil;
import org.apache.iceberg.util.SnapshotUtil;
import org.apache.iceberg.util.TableScanUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Base class for {@link TableScan} implementations. */
- abstract class BaseTableScan extends BaseScan<TableScan, FileScanTask, CombinedScanTask>
+ abstract class BaseTableScan extends SnapshotScan<TableScan, FileScanTask, CombinedScanTask>
implements TableScan {
private static final Logger LOG = LoggerFactory.getLogger(BaseTableScan.class);
private ScanMetrics scanMetrics;

protected BaseTableScan(Table table, Schema schema, TableScanContext context) {
super(table, schema, context);
}

protected Long snapshotId() {
return context().snapshotId();
}

protected Map<String, String> options() {
return context().options();
}

protected abstract CloseableIterable<FileScanTask> doPlanFiles();

protected ScanMetrics scanMetrics() {
if (scanMetrics == null) {
this.scanMetrics = ScanMetrics.of(new DefaultMetricsContext());
}

return scanMetrics;
}

@Override
public Table table() {
return super.table();
}

@Override
public TableScan appendsBetween(long fromSnapshotId, long toSnapshotId) {
throw new UnsupportedOperationException("Incremental scan is not supported");
@@ -85,77 +39,6 @@ public TableScan appendsAfter(long fromSnapshotId) {
throw new UnsupportedOperationException("Incremental scan is not supported");
}

@Override
public TableScan useSnapshot(long scanSnapshotId) {
Preconditions.checkArgument(
snapshotId() == null, "Cannot override snapshot, already set snapshot id=%s", snapshotId());
Preconditions.checkArgument(
table().snapshot(scanSnapshotId) != null,
"Cannot find snapshot with ID %s",
scanSnapshotId);
return newRefinedScan(table(), tableSchema(), context().useSnapshotId(scanSnapshotId));
}

@Override
public TableScan useRef(String name) {
Preconditions.checkArgument(
snapshotId() == null, "Cannot override ref, already set snapshot id=%s", snapshotId());
Snapshot snapshot = table().snapshot(name);
Preconditions.checkArgument(snapshot != null, "Cannot find ref %s", name);
return newRefinedScan(table(), tableSchema(), context().useSnapshotId(snapshot.snapshotId()));
}

@Override
public TableScan asOfTime(long timestampMillis) {
Preconditions.checkArgument(
snapshotId() == null, "Cannot override snapshot, already set snapshot id=%s", snapshotId());

return useSnapshot(SnapshotUtil.snapshotIdAsOfTime(table(), timestampMillis));
}

@Override
public CloseableIterable<FileScanTask> planFiles() {
Snapshot snapshot = snapshot();
if (snapshot != null) {
LOG.info(
"Scanning table {} snapshot {} created at {} with filter {}",
table(),
snapshot.snapshotId(),
DateTimeUtil.formatTimestampMillis(snapshot.timestampMillis()),
ExpressionUtil.toSanitizedString(filter()));

Listeners.notifyAll(new ScanEvent(table().name(), snapshot.snapshotId(), filter(), schema()));
List<Integer> projectedFieldIds = Lists.newArrayList(TypeUtil.getProjectedIds(schema()));
List<String> projectedFieldNames =
projectedFieldIds.stream().map(schema()::findColumnName).collect(Collectors.toList());

Timer.Timed planningDuration = scanMetrics().totalPlanningDuration().start();

return CloseableIterable.whenComplete(
doPlanFiles(),
() -> {
planningDuration.stop();
Map<String, String> metadata = Maps.newHashMap(context().options());
metadata.putAll(EnvironmentContext.get());
ScanReport scanReport =
ImmutableScanReport.builder()
.schemaId(schema().schemaId())
.projectedFieldIds(projectedFieldIds)
.projectedFieldNames(projectedFieldNames)
.tableName(table().name())
.snapshotId(snapshot.snapshotId())
.filter(ExpressionUtil.sanitize(filter()))
.scanMetrics(ScanMetricsResult.fromScanMetrics(scanMetrics()))
.metadata(metadata)
.build();
context().metricsReporter().report(scanReport);
});
} else {
LOG.info("Scanning empty table {}", table());
return CloseableIterable.empty();
}
}

@Override
public CloseableIterable<CombinedScanTask> planTasks() {
CloseableIterable<FileScanTask> fileScanTasks = planFiles();
@@ -164,20 +47,4 @@ public CloseableIterable<CombinedScanTask> planTasks() {
return TableScanUtil.planTasks(
splitFiles, targetSplitSize(), splitLookback(), splitOpenFileCost());
}

@Override
public Snapshot snapshot() {
return snapshotId() != null ? table().snapshot(snapshotId()) : table().currentSnapshot();
}

@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("table", table())
.add("projection", schema().asStruct())
.add("filter", filter())
.add("ignoreResiduals", shouldIgnoreResiduals())
.add("caseSensitive", isCaseSensitive())
.toString();
}
}
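Nearly all of the 134 deleted lines above (snapshot selection via `useSnapshot`/`useRef`/`asOfTime`, `planFiles()` with scan metrics and `ScanReport`, `snapshot()`, `toString()`) are moved rather than dropped: per the new `extends SnapshotScan<...>` clause, they land in a `SnapshotScan` base class introduced elsewhere in this commit, so snapshot-based scans other than table scans can reuse them. A rough sketch of the implied hierarchy; the `SnapshotScan` body is assumed, since it is not shown in this excerpt:

```java
// assumed shape of the new intermediate class (defined in another file of this commit)
abstract class SnapshotScan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>>
    extends BaseScan<ThisT, T, G> {
  // hosts the members removed from BaseTableScan: snapshotId(), scanMetrics(),
  // useSnapshot()/useRef()/asOfTime(), snapshot(), planFiles() with metrics
  // reporting, and toString()
}

abstract class BaseTableScan extends SnapshotScan<TableScan, FileScanTask, CombinedScanTask>
    implements TableScan {
  // keeps only table-scan specifics such as planTasks()
}
```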
14 changes: 6 additions & 8 deletions core/src/main/java/org/apache/iceberg/MetadataColumns.java
@@ -30,12 +30,11 @@ public class MetadataColumns {
private MetadataColumns() {}

// IDs Integer.MAX_VALUE - (1-100) are used for metadata columns
+ public static final int FILE_PATH_COLUMN_ID = Integer.MAX_VALUE - 1;
+ public static final String FILE_PATH_COLUMN_DOC = "Path of the file in which a row is stored";
public static final NestedField FILE_PATH =
NestedField.required(
- Integer.MAX_VALUE - 1,
- "_file",
- Types.StringType.get(),
- "Path of the file in which a row is stored");
+ FILE_PATH_COLUMN_ID, "_file", Types.StringType.get(), FILE_PATH_COLUMN_DOC);
public static final NestedField ROW_POSITION =
NestedField.required(
Integer.MAX_VALUE - 2,
@@ -48,12 +47,11 @@ private MetadataColumns() {}
"_deleted",
Types.BooleanType.get(),
"Whether the row has been deleted");
+ public static final int SPEC_ID_COLUMN_ID = Integer.MAX_VALUE - 4;
+ public static final String SPEC_ID_COLUMN_DOC = "Spec ID used to track the file containing a row";
public static final NestedField SPEC_ID =
NestedField.required(
- Integer.MAX_VALUE - 4,
- "_spec_id",
- Types.IntegerType.get(),
- "Spec ID used to track the file containing a row");
+ SPEC_ID_COLUMN_ID, "_spec_id", Types.IntegerType.get(), SPEC_ID_COLUMN_DOC);
// the partition column type is not static and depends on all specs in the table
public static final int PARTITION_COLUMN_ID = Integer.MAX_VALUE - 5;
public static final String PARTITION_COLUMN_NAME = "_partition";
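Extracting the column IDs and doc strings into named constants lets other classes reference them instead of duplicating the literals. A sketch of such reuse; the unprefixed field name "spec_id" is illustrative:

```java
import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.types.Types;

class ConstantsSketch {
  // reuse the shared ID and doc when declaring a matching field in another schema
  static final Types.NestedField SPEC_ID_FIELD =
      Types.NestedField.required(
          MetadataColumns.SPEC_ID_COLUMN_ID,
          "spec_id", // illustrative; MetadataColumns itself uses "_spec_id"
          Types.IntegerType.get(),
          MetadataColumns.SPEC_ID_COLUMN_DOC);
}
```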
3 changes: 2 additions & 1 deletion core/src/main/java/org/apache/iceberg/MetadataTableType.java
@@ -35,7 +35,8 @@ public enum MetadataTableType {
ALL_DELETE_FILES,
ALL_FILES,
ALL_MANIFESTS,
- ALL_ENTRIES;
+ ALL_ENTRIES,
+ POSITION_DELETES;

public static MetadataTableType from(String name) {
try {
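The new enum value resolves through the existing `from` lookup, which matches names case-insensitively. A quick sketch:

```java
import org.apache.iceberg.MetadataTableType;

class TypeLookupSketch {
  public static void main(String[] args) {
    // case-insensitive resolution of the new table type
    MetadataTableType type = MetadataTableType.from("position_deletes");
    System.out.println(type); // POSITION_DELETES
  }
}
```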
2 changes: 2 additions & 0 deletions core/src/main/java/org/apache/iceberg/MetadataTableUtils.java
@@ -77,6 +77,8 @@ private static Table createMetadataTableInstance(
return new AllManifestsTable(baseTable, metadataTableName);
case ALL_ENTRIES:
return new AllEntriesTable(baseTable, metadataTableName);
case POSITION_DELETES:
return new PositionDeletesTable(baseTable, metadataTableName);
default:
throw new NoSuchTableException(
"Unknown metadata table type: %s for %s", type, metadataTableName);
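With the new switch case, the position-deletes table loads like any other metadata table. A usage sketch, assuming the public `createMetadataTableInstance(Table, MetadataTableType)` overload that delegates to the private method shown above:

```java
import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.MetadataTableUtils;
import org.apache.iceberg.Table;

class LoadSketch {
  static Table positionDeletesTable(Table baseTable) {
    // resolves to new PositionDeletesTable(...) via the switch above
    return MetadataTableUtils.createMetadataTableInstance(
        baseTable, MetadataTableType.POSITION_DELETES);
  }
}
```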
22 changes: 22 additions & 0 deletions core/src/main/java/org/apache/iceberg/PositionDeletesScanTask.java
@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;

/** A {@link ScanTask} for position delete files */
public interface PositionDeletesScanTask extends ContentScanTask<DeleteFile> {}
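This is the task type consumers receive when planning a scan of the new metadata table. An end-to-end sketch; that the table's batch scan yields `PositionDeletesScanTask`s is an assumption based on the other files in this commit:

```java
import java.io.IOException;
import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.MetadataTableUtils;
import org.apache.iceberg.PositionDeletesScanTask;
import org.apache.iceberg.ScanTask;
import org.apache.iceberg.Table;
import org.apache.iceberg.io.CloseableIterable;

class PlanSketch {
  static void listPositionDeleteFiles(Table baseTable) throws IOException {
    Table posDeletes =
        MetadataTableUtils.createMetadataTableInstance(
            baseTable, MetadataTableType.POSITION_DELETES);
    try (CloseableIterable<ScanTask> tasks = posDeletes.newBatchScan().planFiles()) {
      for (ScanTask task : tasks) {
        PositionDeletesScanTask deletes = (PositionDeletesScanTask) task;
        System.out.println(deletes.file().path()); // one position delete file per task
      }
    }
  }
}
```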