Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 61 additions & 0 deletions api/src/main/java/org/apache/iceberg/PartitionStatistics.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;

/**
 * Statistics of a single table partition.
 *
 * <p>Accessors returning boxed {@code Long} ({@link #totalRecords()}, {@link #lastUpdatedAt()},
 * {@link #lastUpdatedSnapshotId()}) may return {@code null} when the value is unknown.
 */
public interface PartitionStatistics extends StructLike {

  /** Returns the partition of these partition statistics */
  StructLike partition();

  /** Returns the spec ID of the partition of these partition statistics */
  int specId();

  /** Returns the number of data records in the partition */
  long dataRecordCount();

  /** Returns the number of data files in the partition */
  int dataFileCount();

  /** Returns the total size of data files in bytes in the partition */
  long totalDataFileSizeInBytes();

  /** Returns the number of positional delete records in the partition */
  long positionDeleteRecordCount();

  /** Returns the number of positional delete files in the partition */
  int positionDeleteFileCount();

  /** Returns the number of equality delete records in the partition */
  long equalityDeleteRecordCount();

  /** Returns the number of equality delete files in the partition */
  int equalityDeleteFileCount();

  /** Returns the total number of records in the partition, or null if unknown */
  Long totalRecords();

  /** Returns the timestamp in milliseconds when the partition was last updated, or null if unknown */
  Long lastUpdatedAt();

  /** Returns the ID of the snapshot that last updated this partition, or null if unknown */
  Long lastUpdatedSnapshotId();

  /** Returns the number of delete vectors in the partition */
  int dvCount();
}
58 changes: 58 additions & 0 deletions api/src/main/java/org/apache/iceberg/PartitionStatisticsScan.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;

import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.io.CloseableIterable;

/**
 * API for configuring and running a scan over a table's partition statistics.
 *
 * <p>Instances are obtained from {@link Table#newPartitionStatisticsScan()}.
 */
public interface PartitionStatisticsScan {

  /**
   * Create a new scan from this scan's configuration that will use the given snapshot by ID.
   *
   * @param snapshotId a snapshot ID
   * @return a new scan based on this with the given snapshot ID
   * @throws IllegalArgumentException if the snapshot cannot be found
   */
  PartitionStatisticsScan useSnapshot(long snapshotId);

  /**
   * Create a new scan from the results of this, where partitions are filtered by the {@link
   * Expression}.
   *
   * @param filter a filter expression
   * @return a new scan based on this with results filtered by the expression
   */
  PartitionStatisticsScan filter(Expression filter);

  /**
   * Create a new scan from this with the schema as its projection.
   *
   * <p>NOTE(review): the behavior for {@link PartitionStatistics} attributes that are not part of
   * the projected schema is not yet specified here — presumably they are left unset in the
   * returned statistics; confirm with the implementation and document it.
   *
   * @param schema a projection schema
   * @return a new scan based on this with the given projection
   */
  PartitionStatisticsScan project(Schema schema);

  /**
   * Scans a partition statistics file belonging to a particular snapshot
   *
   * @return an Iterable of partition statistics
   */
  CloseableIterable<PartitionStatistics> scan();
}
12 changes: 12 additions & 0 deletions api/src/main/java/org/apache/iceberg/Table.java
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,18 @@ default IncrementalChangelogScan newIncrementalChangelogScan() {
throw new UnsupportedOperationException("Incremental changelog scan is not supported");
}

/**
 * Create a new {@link PartitionStatisticsScan} for this table.
 *
 * <p>Once a partition statistics scan is created, it can be refined to project columns and filter
 * data.
 *
 * @return a partition statistics scan for this table
 * @throws UnsupportedOperationException if this table implementation does not support partition
 *     statistics scans (the default behavior)
 */
default PartitionStatisticsScan newPartitionStatisticsScan() {
  throw new UnsupportedOperationException("Partition statistics scan is not supported");
}

/**
* Return the {@link Schema schema} for this table.
*
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;

import java.util.List;
import java.util.Optional;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.types.Types;

public class BasePartitionStatisticsScan implements PartitionStatisticsScan {

private final Table table;
private Long snapshotId;

public BasePartitionStatisticsScan(Table table) {
this.table = table;
}

@Override
public PartitionStatisticsScan useSnapshot(long newSnapshotId) {
Preconditions.checkArgument(
table.snapshot(newSnapshotId) != null, "Cannot find snapshot with ID %s", newSnapshotId);

this.snapshotId = newSnapshotId;
return this;
}

@Override
public PartitionStatisticsScan filter(Expression newFilter) {
throw new UnsupportedOperationException("Filtering is not supported");
}

@Override
public PartitionStatisticsScan project(Schema newSchema) {
throw new UnsupportedOperationException("Projection is not supported");
}

@Override
public CloseableIterable<PartitionStatistics> scan() {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we have tests for this?

if (snapshotId == null) {
if (table.currentSnapshot() == null) {
return CloseableIterable.of(List.of());
}

snapshotId = table.currentSnapshot().snapshotId();
}

Optional<PartitionStatisticsFile> statsFile =
table.partitionStatisticsFiles().stream()
.filter(f -> f.snapshotId() == snapshotId)
.findFirst();

if (statsFile.isEmpty()) {
return CloseableIterable.of(List.of());
}

Types.StructType partitionType = Partitioning.partitionType(table);
Schema schema = PartitionStatsHandler.schema(partitionType, TableUtil.formatVersion(table));

FileFormat fileFormat = FileFormat.fromFileName(statsFile.get().path());
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I still think that getting the file format from the name is brittle. Maybe not in this PR, but I would love to have this fixed

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I can take a look at this in a separate PR. Currently, PartitionStatisticsFile doesn't know the file format. We can extend that and force the internal writer to populate such field, however, since this is already released there might be tables where the additional info for file format isn't written, so we'd still need to keep the old logic there. Otherwise we'd break the stats for those tables.
Also, this would require a spec change too. Not sure, but would this mean that such an addition could only be part of V4?

Preconditions.checkNotNull(
fileFormat != null, "Unable to determine format of file: %s", statsFile.get().path());

CloseableIterable<StructLike> records =
InternalData.read(fileFormat, table.io().newInputFile(statsFile.get().path()))
.project(schema)
.build();

return CloseableIterable.transform(records, PartitionStatsHandler::recordToPartitionStats);
}
}
5 changes: 5 additions & 0 deletions core/src/main/java/org/apache/iceberg/BaseTable.java
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,11 @@ public IncrementalChangelogScan newIncrementalChangelogScan() {
return new BaseIncrementalChangelogScan(this);
}

@Override
public PartitionStatisticsScan newPartitionStatisticsScan() {
  // Delegate to the core scan implementation for this table.
  return new BasePartitionStatisticsScan(this);
}

@Override
public Schema schema() {
return ops.current().schema();
Expand Down
52 changes: 37 additions & 15 deletions core/src/main/java/org/apache/iceberg/PartitionStats.java
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@

import org.apache.iceberg.relocated.com.google.common.base.Preconditions;

public class PartitionStats implements StructLike {
public class PartitionStats implements PartitionStatistics {

private static final int STATS_COUNT = 13;

Expand All @@ -43,54 +43,67 @@ public PartitionStats(StructLike partition, int specId) {
this.specId = specId;
}

// Accessors implementing PartitionStatistics; values are backed by this class's mutable fields,
// which are updated by the mutators defined later in the class.
@Override
public StructLike partition() {
  return partition;
}

@Override
public int specId() {
  return specId;
}

@Override
public long dataRecordCount() {
  return dataRecordCount;
}

@Override
public int dataFileCount() {
  return dataFileCount;
}

@Override
public long totalDataFileSizeInBytes() {
  return totalDataFileSizeInBytes;
}

@Override
public long positionDeleteRecordCount() {
  return positionDeleteRecordCount;
}

@Override
public int positionDeleteFileCount() {
  return positionDeleteFileCount;
}

@Override
public long equalityDeleteRecordCount() {
  return equalityDeleteRecordCount;
}

@Override
public int equalityDeleteFileCount() {
  return equalityDeleteFileCount;
}

@Override
public Long totalRecords() {
  return totalRecordCount;
}

@Override
public Long lastUpdatedAt() {
  return lastUpdatedAt;
}

@Override
public Long lastUpdatedSnapshotId() {
  return lastUpdatedSnapshotId;
}

@Override
public int dvCount() {
  return dvCount;
}
Expand Down Expand Up @@ -187,31 +200,40 @@ void deletedEntryForIncrementalCompute(ContentFile<?> file, Snapshot snapshot) {
*
* @param entry the entry from which statistics will be sourced.
*/
void appendStats(PartitionStatistics entry) {
  // Stats can only be merged within the same partition spec.
  Preconditions.checkArgument(specId == entry.specId(), "Spec IDs must match");

  // Accumulate the additive counters from the other entry.
  this.dataRecordCount += entry.dataRecordCount();
  this.dataFileCount += entry.dataFileCount();
  this.totalDataFileSizeInBytes += entry.totalDataFileSizeInBytes();
  this.positionDeleteRecordCount += entry.positionDeleteRecordCount();
  this.positionDeleteFileCount += entry.positionDeleteFileCount();
  this.equalityDeleteRecordCount += entry.equalityDeleteRecordCount();
  this.equalityDeleteFileCount += entry.equalityDeleteFileCount();
  this.dvCount += entry.dvCount();

  // totalRecordCount is nullable: only merge when the other entry carries a value, and
  // initialize rather than add when this entry has none yet.
  if (entry.totalRecords() != null) {
    if (totalRecordCount == null) {
      this.totalRecordCount = entry.totalRecords();
    } else {
      this.totalRecordCount += entry.totalRecords();
    }
  }

  // Keep track of the most recent snapshot that updated this partition.
  if (entry.lastUpdatedAt() != null) {
    updateSnapshotInfo(entry.lastUpdatedSnapshotId(), entry.lastUpdatedAt());
  }
}

/**
 * Appends statistics from the given entry.
 *
 * @param entry the entry from which statistics will be sourced
 * @deprecated will be removed in 1.12.0. Use {@link #appendStats(PartitionStatistics)} instead
 */
@Deprecated
void appendStats(PartitionStats entry) {
  // The cast selects the PartitionStatistics overload; no recursion since overload resolution is
  // static.
  appendStats((PartitionStatistics) entry);
}

private void updateSnapshotInfo(long snapshotId, long updatedAt) {
if (lastUpdatedAt == null || lastUpdatedAt < updatedAt) {
this.lastUpdatedAt = updatedAt;
Expand Down
Loading