Skip to content

Commit

Permalink
[opt](split) add session variable to control the list partition style
Browse files Browse the repository at this point in the history
  • Loading branch information
AshinGau committed Jun 26, 2024
1 parent d768a40 commit d60593c
Show file tree
Hide file tree
Showing 4 changed files with 51 additions and 6 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,21 @@

@Data
public class TablePartitionValues {
public enum PartitionOrdering {
    NATURAL,
    REVERSE,
    SHUFFLE;

    /**
     * Case-insensitive lookup of an ordering by its constant name.
     *
     * @param ordering the textual name (any casing); may be null
     * @return the matching constant, or {@code null} when the input is null
     *         or matches no constant
     */
    public static PartitionOrdering parse(String ordering) {
        PartitionOrdering matched = null;
        for (PartitionOrdering candidate : values()) {
            if (candidate.name().equalsIgnoreCase(ordering)) {
                matched = candidate;
                break;
            }
        }
        return matched;
    }
}

public static final String HIVE_DEFAULT_PARTITION = "__HIVE_DEFAULT_PARTITION__";

private final ReadWriteLock readWriteLock;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
import org.apache.doris.common.util.Util;
import org.apache.doris.datasource.FileQueryScanNode;
import org.apache.doris.datasource.FileSplit;
import org.apache.doris.datasource.TablePartitionValues.PartitionOrdering;
import org.apache.doris.datasource.hive.HMSExternalCatalog;
import org.apache.doris.datasource.hive.HMSExternalTable;
import org.apache.doris.datasource.hive.HiveMetaStoreCache;
Expand All @@ -57,6 +58,7 @@
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import lombok.Setter;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
Expand Down Expand Up @@ -137,6 +139,17 @@ protected void doInitialize() throws UserException {
}
}

/**
 * Reorders the pruned partitions according to the session variable
 * {@code partition_ordering} before splits are generated.
 *
 * @param partitions the pruned partitions, in their natural arrival order
 * @return a re-ordered copy for REVERSE/SHUFFLE, or the input list itself
 *         for NATURAL, an unrecognized value, or when no session is attached
 */
protected List<HivePartition> orderingPartitions(List<HivePartition> partitions) {
    ConnectContext ctx = ConnectContext.get();
    if (ctx == null) {
        // No connect context on this thread (e.g. internal scheduling);
        // fall back to the natural order instead of throwing NPE.
        return partitions;
    }
    PartitionOrdering ordering = PartitionOrdering.parse(ctx.getSessionVariable().getPartitionOrdering());
    if (ordering == PartitionOrdering.REVERSE) {
        return Ordering.natural().onResultOf(HivePartition::getPath).reverse().sortedCopy(partitions);
    }
    if (ordering == PartitionOrdering.SHUFFLE) {
        // NOTE: Ordering.arbitrary() orders by identity hash code, so this is a
        // stable-within-JVM pseudo-random order, not a fresh shuffle per call.
        return Ordering.arbitrary().onResultOf(HivePartition::getPath).sortedCopy(partitions);
    }
    // NATURAL, unset, or unrecognized value: keep the original order.
    return partitions;
}

protected List<HivePartition> getPartitions() throws AnalysisException {
List<HivePartition> resPartitions = Lists.newArrayList();
long start = System.currentTimeMillis();
Expand Down Expand Up @@ -211,7 +224,7 @@ public List<Split> getSplits() throws UserException {
long start = System.currentTimeMillis();
try {
if (!partitionInit) {
prunedPartitions = getPartitions();
prunedPartitions = orderingPartitions(getPartitions());
partitionInit = true;
}
HiveMetaStoreCache cache = Env.getCurrentEnv().getExtMetaCacheMgr()
Expand Down Expand Up @@ -289,7 +302,7 @@ public void startSplit() {
public boolean isBatchMode() {
if (!partitionInit) {
try {
prunedPartitions = getPartitions();
prunedPartitions = orderingPartitions(getPartitions());
} catch (Exception e) {
return false;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -386,9 +386,9 @@ public List<Split> getSplits() throws UserException {
return getIncrementalSplits();
}
if (!partitionInit) {
prunedPartitions = HiveMetaStoreClientHelper.ugiDoAs(
prunedPartitions = orderingPartitions(HiveMetaStoreClientHelper.ugiDoAs(
HiveMetaStoreClientHelper.getConfiguration(hmsTable),
() -> getPrunedPartitions(hudiClient, snapshotTimestamp));
() -> getPrunedPartitions(hudiClient, snapshotTimestamp)));
partitionInit = true;
}
List<Split> splits = Collections.synchronizedList(new ArrayList<>());
Expand Down Expand Up @@ -448,9 +448,9 @@ public boolean isBatchMode() {
}
if (!partitionInit) {
// Non partition table will get one dummy partition
prunedPartitions = HiveMetaStoreClientHelper.ugiDoAs(
prunedPartitions = orderingPartitions(HiveMetaStoreClientHelper.ugiDoAs(
HiveMetaStoreClientHelper.getConfiguration(hmsTable),
() -> getPrunedPartitions(hudiClient, snapshotTimestamp));
() -> getPrunedPartitions(hudiClient, snapshotTimestamp)));
partitionInit = true;
}
int numPartitions = ConnectContext.get().getSessionVariable().getNumPartitionsInBatchMode();
Expand Down
17 changes: 17 additions & 0 deletions fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
Original file line number Diff line number Diff line change
Expand Up @@ -419,6 +419,8 @@ public class SessionVariable implements Serializable, Writable {

public static final String FETCH_SPLITS_MAX_WAIT_TIME = "fetch_splits_max_wait_time";

public static final String PARTITION_ORDERING = "partition_ordering";

/**
* use insert stmt as the unified backend for all loads
*/
Expand Down Expand Up @@ -1470,6 +1472,13 @@ public void setEnableLeftZigZag(boolean enableLeftZigZag) {
needForward = true)
public long fetchSplitsMaxWaitTime = 4000;

@VariableMgr.VarAttr(
name = PARTITION_ORDERING,
description = {"list partition的排序方式",
"Ordering style of list partition."},
needForward = true)
public String partitionOrdering = "natural";

@VariableMgr.VarAttr(
name = ENABLE_PARQUET_LAZY_MAT,
description = {"控制 parquet reader 是否启用延迟物化技术。默认为 true。",
Expand Down Expand Up @@ -2730,6 +2739,14 @@ public void setFetchSplitsMaxWaitTime(long fetchSplitsMaxWaitTime) {
this.fetchSplitsMaxWaitTime = fetchSplitsMaxWaitTime;
}

/**
 * Returns the raw value of the {@code partition_ordering} session variable
 * (defaults to "natural"); consumers parse it case-insensitively into
 * {@code TablePartitionValues.PartitionOrdering}.
 */
public String getPartitionOrdering() {
    return partitionOrdering;
}

/**
 * Sets the {@code partition_ordering} session variable.
 * The value is stored as-is; an unrecognized string is treated as
 * natural ordering at the point of use rather than rejected here.
 */
public void setPartitionOrdering(String partitionOrdering) {
    this.partitionOrdering = partitionOrdering;
}

public boolean isEnableParquetLazyMat() {
return enableParquetLazyMat;
}
Expand Down

0 comments on commit d60593c

Please sign in to comment.