Add position delete table
szehon-ho committed Jan 10, 2023
1 parent 457a1f1 commit e053f26
Showing 15 changed files with 935 additions and 153 deletions.
core/src/main/java/org/apache/iceberg/BaseMetadataTable.java (7 changes: 5 additions & 2 deletions)
@@ -64,9 +64,12 @@ protected BaseMetadataTable(TableOperations ops, Table table, String name) {
    */
   static PartitionSpec transformSpec(Schema metadataTableSchema, PartitionSpec spec) {
     PartitionSpec.Builder identitySpecBuilder =
-        PartitionSpec.builderFor(metadataTableSchema).checkConflicts(false);
+        PartitionSpec.builderFor(metadataTableSchema)
+            .withSpecId(spec.specId())
+            .checkConflicts(false);
     for (PartitionField field : spec.fields()) {
-      identitySpecBuilder.add(field.fieldId(), field.name(), Transforms.identity());
+      identitySpecBuilder.add(
+          field.fieldId(), field.fieldId(), field.name(), Transforms.identity());
     }
     return identitySpecBuilder.build();
   }
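Note: transformSpec projects each field of a table's partition spec onto the metadata table schema as an identity transform; the change above additionally preserves the original spec ID and field IDs so rows in the metadata table can be traced back to the spec that wrote them. A minimal sketch of the kind of spec this remapping targets; the schema and spec below are illustrative assumptions, not code from this commit:

import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Types;

public class TransformSpecSketch {
  public static void main(String[] args) {
    // A base table bucketed by id: its partition field uses bucket(16), not identity.
    Schema schema =
        new Schema(
            Types.NestedField.required(1, "id", Types.LongType.get()),
            Types.NestedField.required(2, "data", Types.StringType.get()));
    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("id", 16).build();

    // transformSpec would rewrite this field as identity over the metadata table's
    // partition struct, reusing field.fieldId() and, after this change, spec.specId().
    System.out.println(spec);
  }
}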
core/src/main/java/org/apache/iceberg/BaseScan.java (24 changes: 23 additions & 1 deletion)
@@ -21,6 +21,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import org.apache.iceberg.expressions.Binder;
@@ -59,6 +60,23 @@ abstract class BaseScan<ThisT, T extends ScanTask, G extends ScanTaskGroup<T>>
   private static final List<String> SCAN_WITH_STATS_COLUMNS =
       ImmutableList.<String>builder().addAll(SCAN_COLUMNS).addAll(STATS_COLUMNS).build();

+  protected static final List<String> DELETE_SCAN_COLUMNS =
+      ImmutableList.of(
+          "snapshot_id",
+          "content",
+          "file_path",
+          "file_ordinal",
+          "file_format",
+          "block_size_in_bytes",
+          "file_size_in_bytes",
+          "record_count",
+          "partition",
+          "key_metadata",
+          "split_offsets");
+
+  protected static final List<String> DELETE_SCAN_WITH_STATS_COLUMNS =
+      ImmutableList.<String>builder().addAll(DELETE_SCAN_COLUMNS).addAll(STATS_COLUMNS).build();
+
   private static final boolean PLAN_SCANS_WITH_WORKER_POOL =
       SystemProperties.getBoolean(SystemProperties.SCAN_THREAD_POOL_ENABLED, true);

@@ -78,7 +96,7 @@ protected TableOperations tableOps() {
     return ops;
   }

-  protected Table table() {
+  public Table table() {
     return table;
   }

@@ -90,6 +108,10 @@ protected TableScanContext context() {
     return context;
   }

+  protected Map<String, String> options() {
+    return context().options();
+  }
+
   protected List<String> scanColumns() {
     return context.returnColumnStats() ? SCAN_WITH_STATS_COLUMNS : SCAN_COLUMNS;
   }
core/src/main/java/org/apache/iceberg/BaseTableScan.java (139 changes: 1 addition & 138 deletions)
@@ -18,35 +18,12 @@
  */
 package org.apache.iceberg;

-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import org.apache.iceberg.events.Listeners;
-import org.apache.iceberg.events.ScanEvent;
-import org.apache.iceberg.expressions.ExpressionUtil;
 import org.apache.iceberg.io.CloseableIterable;
-import org.apache.iceberg.metrics.DefaultMetricsContext;
-import org.apache.iceberg.metrics.ImmutableScanReport;
-import org.apache.iceberg.metrics.ScanMetrics;
-import org.apache.iceberg.metrics.ScanMetricsResult;
-import org.apache.iceberg.metrics.ScanReport;
-import org.apache.iceberg.metrics.Timer;
-import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
-import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
-import org.apache.iceberg.relocated.com.google.common.collect.Lists;
-import org.apache.iceberg.relocated.com.google.common.collect.Maps;
-import org.apache.iceberg.types.TypeUtil;
-import org.apache.iceberg.util.DateTimeUtil;
-import org.apache.iceberg.util.SnapshotUtil;
 import org.apache.iceberg.util.TableScanUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 /** Base class for {@link TableScan} implementations. */
-abstract class BaseTableScan extends BaseScan<TableScan, FileScanTask, CombinedScanTask>
+abstract class BaseTableScan extends SnapshotScan<TableScan, FileScanTask, CombinedScanTask>
     implements TableScan {
-  private static final Logger LOG = LoggerFactory.getLogger(BaseTableScan.class);
-  private ScanMetrics scanMetrics;

   protected BaseTableScan(TableOperations ops, Table table, Schema schema) {
     this(ops, table, schema, new TableScanContext());
@@ -57,29 +34,6 @@ protected BaseTableScan(
     super(ops, table, schema, context);
   }

-  protected Long snapshotId() {
-    return context().snapshotId();
-  }
-
-  protected Map<String, String> options() {
-    return context().options();
-  }
-
-  protected abstract CloseableIterable<FileScanTask> doPlanFiles();
-
-  protected ScanMetrics scanMetrics() {
-    if (scanMetrics == null) {
-      this.scanMetrics = ScanMetrics.of(new DefaultMetricsContext());
-    }
-
-    return scanMetrics;
-  }
-
-  @Override
-  public Table table() {
-    return super.table();
-  }
-
   @Override
   public TableScan appendsBetween(long fromSnapshotId, long toSnapshotId) {
     throw new UnsupportedOperationException("Incremental scan is not supported");
@@ -90,79 +44,6 @@ public TableScan appendsAfter(long fromSnapshotId) {
     throw new UnsupportedOperationException("Incremental scan is not supported");
   }

-  @Override
-  public TableScan useSnapshot(long scanSnapshotId) {
-    Preconditions.checkArgument(
-        snapshotId() == null, "Cannot override snapshot, already set snapshot id=%s", snapshotId());
-    Preconditions.checkArgument(
-        tableOps().current().snapshot(scanSnapshotId) != null,
-        "Cannot find snapshot with ID %s",
-        scanSnapshotId);
-    return newRefinedScan(
-        tableOps(), table(), tableSchema(), context().useSnapshotId(scanSnapshotId));
-  }
-
-  @Override
-  public TableScan useRef(String name) {
-    Preconditions.checkArgument(
-        snapshotId() == null, "Cannot override ref, already set snapshot id=%s", snapshotId());
-    Snapshot snapshot = table().snapshot(name);
-    Preconditions.checkArgument(snapshot != null, "Cannot find ref %s", name);
-    return newRefinedScan(
-        tableOps(), table(), tableSchema(), context().useSnapshotId(snapshot.snapshotId()));
-  }
-
-  @Override
-  public TableScan asOfTime(long timestampMillis) {
-    Preconditions.checkArgument(
-        snapshotId() == null, "Cannot override snapshot, already set snapshot id=%s", snapshotId());
-
-    return useSnapshot(SnapshotUtil.snapshotIdAsOfTime(table(), timestampMillis));
-  }
-
-  @Override
-  public CloseableIterable<FileScanTask> planFiles() {
-    Snapshot snapshot = snapshot();
-    if (snapshot != null) {
-      LOG.info(
-          "Scanning table {} snapshot {} created at {} with filter {}",
-          table(),
-          snapshot.snapshotId(),
-          DateTimeUtil.formatTimestampMillis(snapshot.timestampMillis()),
-          ExpressionUtil.toSanitizedString(filter()));
-
-      Listeners.notifyAll(new ScanEvent(table().name(), snapshot.snapshotId(), filter(), schema()));
-      List<Integer> projectedFieldIds = Lists.newArrayList(TypeUtil.getProjectedIds(schema()));
-      List<String> projectedFieldNames =
-          projectedFieldIds.stream().map(schema()::findColumnName).collect(Collectors.toList());
-
-      Timer.Timed planningDuration = scanMetrics().totalPlanningDuration().start();
-
-      return CloseableIterable.whenComplete(
-          doPlanFiles(),
-          () -> {
-            planningDuration.stop();
-            Map<String, String> metadata = Maps.newHashMap(context().options());
-            metadata.putAll(EnvironmentContext.get());
-            ScanReport scanReport =
-                ImmutableScanReport.builder()
-                    .schemaId(schema().schemaId())
-                    .projectedFieldIds(projectedFieldIds)
-                    .projectedFieldNames(projectedFieldNames)
-                    .tableName(table().name())
-                    .snapshotId(snapshot.snapshotId())
-                    .filter(ExpressionUtil.sanitize(filter()))
-                    .scanMetrics(ScanMetricsResult.fromScanMetrics(scanMetrics()))
-                    .metadata(metadata)
-                    .build();
-            context().metricsReporter().report(scanReport);
-          });
-    } else {
-      LOG.info("Scanning empty table {}", table());
-      return CloseableIterable.empty();
-    }
-  }
-
   @Override
   public CloseableIterable<CombinedScanTask> planTasks() {
     CloseableIterable<FileScanTask> fileScanTasks = planFiles();
@@ -171,22 +52,4 @@ public CloseableIterable<CombinedScanTask> planTasks() {
     return TableScanUtil.planTasks(
         splitFiles, targetSplitSize(), splitLookback(), splitOpenFileCost());
   }
-
-  @Override
-  public Snapshot snapshot() {
-    return snapshotId() != null
-        ? tableOps().current().snapshot(snapshotId())
-        : tableOps().current().currentSnapshot();
-  }
-
-  @Override
-  public String toString() {
-    return MoreObjects.toStringHelper(this)
-        .add("table", table())
-        .add("projection", schema().asStruct())
-        .add("filter", filter())
-        .add("ignoreResiduals", shouldIgnoreResiduals())
-        .add("caseSensitive", isCaseSensitive())
-        .toString();
-  }
 }
core/src/main/java/org/apache/iceberg/MetadataColumns.java (4 changes: 4 additions & 0 deletions)
@@ -94,6 +94,10 @@ private MetadataColumns() {}
           Types.LongType.get(),
           "Commit snapshot ID");

+  public static final int POSITION_DELETE_TABLE_PARTITION_FIELD_ID = Integer.MAX_VALUE - 107;
+  public static final int POSITION_DELETE_TABLE_SPEC_ID = Integer.MAX_VALUE - 108;
+  public static final int POSITION_DELETE_TABLE_FILE_PATH_ID = Integer.MAX_VALUE - 109;
+
   private static final Map<String, NestedField> META_COLUMNS =
       ImmutableMap.of(
           FILE_PATH.name(), FILE_PATH,
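The three constants above follow Iceberg's convention of reserving metadata-column field IDs counting down from Integer.MAX_VALUE, which keeps them from ever colliding with user data columns. A hedged sketch of how one of these reserved IDs can be attached to a metadata-table column; the column name and class here are assumptions for illustration, not code from this commit:

import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.types.Types;

public class ReservedIdSketch {
  // Hypothetical spec_id column keyed by the reserved ID, in the style a
  // position deletes metadata table schema could use.
  static final Types.NestedField SPEC_ID =
      Types.NestedField.required(
          MetadataColumns.POSITION_DELETE_TABLE_SPEC_ID, "spec_id", Types.IntegerType.get());
}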
core/src/main/java/org/apache/iceberg/MetadataTableType.java (3 changes: 2 additions & 1 deletion)
@@ -35,7 +35,8 @@ public enum MetadataTableType {
   ALL_DELETE_FILES,
   ALL_FILES,
   ALL_MANIFESTS,
-  ALL_ENTRIES;
+  ALL_ENTRIES,
+  POSITION_DELETES;

   public static MetadataTableType from(String name) {
     try {
core/src/main/java/org/apache/iceberg/MetadataTableUtils.java (2 changes: 2 additions & 0 deletions)
@@ -78,6 +78,8 @@ private static Table createMetadataTableInstance(
         return new AllManifestsTable(ops, baseTable, metadataTableName);
       case ALL_ENTRIES:
         return new AllEntriesTable(ops, baseTable, metadataTableName);
+      case POSITION_DELETES:
+        return new PositionDeletesTable(ops, baseTable, metadataTableName);
       default:
         throw new NoSuchTableException(
             "Unknown metadata table type: %s for %s", type, metadataTableName);
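With the enum value and this factory case in place, the new table resolves like any other metadata table. A short usage sketch; it assumes baseTable is an Iceberg Table loaded elsewhere (for example from a catalog) and uses the existing public createMetadataTableInstance(Table, MetadataTableType) overload:

import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.MetadataTableUtils;
import org.apache.iceberg.Table;

public class PositionDeletesSketch {
  static Table positionDeletes(Table baseTable) {
    // from(String) parses case-insensitively, so "position_deletes" maps to POSITION_DELETES.
    MetadataTableType type = MetadataTableType.from("position_deletes");
    return MetadataTableUtils.createMetadataTableInstance(baseTable, type);
  }
}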
(Diffs for the remaining changed files are not shown here.)
