From daf014a642cbf2f0a493ec34a63d4a2f3d547bf1 Mon Sep 17 00:00:00 2001
From: manuzhang
Date: Wed, 27 Nov 2024 11:55:48 +0800
Subject: [PATCH] Core, Spark3.5: Fix test failures due to timeout

---
 .../org/apache/iceberg/hadoop/TestHadoopCommits.java   | 10 +++++++++-
 .../apache/iceberg/spark/extensions/TestDelete.java    |  3 +++
 .../org/apache/iceberg/spark/extensions/TestMerge.java |  3 +++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCommits.java b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCommits.java
index a8139180ca7d..0142c032704a 100644
--- a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCommits.java
+++ b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCommits.java
@@ -18,6 +18,8 @@
  */
 package org.apache.iceberg.hadoop;
 
+import static org.apache.iceberg.CatalogProperties.LOCK_ACQUIRE_TIMEOUT_MS;
+import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
 import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;
 import static org.apache.iceberg.types.Types.NestedField.optional;
 import static org.apache.iceberg.types.Types.NestedField.required;
@@ -421,7 +423,13 @@ public void testConcurrentFastAppends(@TempDir File dir) throws Exception {
         TABLES.create(
             SCHEMA,
             SPEC,
-            ImmutableMap.of(COMMIT_NUM_RETRIES, String.valueOf(threadsCount)),
+            ImmutableMap.of(
+                COMMIT_NUM_RETRIES,
+                String.valueOf(threadsCount),
+                COMMIT_MIN_RETRY_WAIT_MS,
+                "10",
+                LOCK_ACQUIRE_TIMEOUT_MS,
+                "0"),
             dir.toURI().toString());
 
     String fileName = UUID.randomUUID().toString();
diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestDelete.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestDelete.java
index 42eb2af774e9..490b1d00a163 100644
--- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestDelete.java
+++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestDelete.java
@@ -21,6 +21,8 @@
 import static org.apache.iceberg.DataOperations.DELETE;
 import static org.apache.iceberg.RowLevelOperationMode.COPY_ON_WRITE;
 import static org.apache.iceberg.SnapshotSummary.ADD_POS_DELETE_FILES_PROP;
+import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
+import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;
 import static org.apache.iceberg.TableProperties.DELETE_DISTRIBUTION_MODE;
 import static org.apache.iceberg.TableProperties.DELETE_ISOLATION_LEVEL;
 import static org.apache.iceberg.TableProperties.DELETE_MODE;
@@ -1144,6 +1146,7 @@ public synchronized void testDeleteWithSnapshotIsolation()
     sql(
         "ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')",
         tableName, DELETE_ISOLATION_LEVEL, "snapshot");
+    sql("ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')", tableName, COMMIT_MIN_RETRY_WAIT_MS, "10");
 
     sql("INSERT INTO TABLE %s VALUES (1, 'hr')", tableName);
     createBranchIfNeeded();
diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestMerge.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestMerge.java
index a26707ef38aa..382773f798ce 100644
--- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestMerge.java
+++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestMerge.java
@@ -19,6 +19,8 @@
 package org.apache.iceberg.spark.extensions;
 
 import static org.apache.iceberg.RowLevelOperationMode.COPY_ON_WRITE;
+import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
+import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;
 import static org.apache.iceberg.TableProperties.MERGE_DISTRIBUTION_MODE;
 import static org.apache.iceberg.TableProperties.MERGE_ISOLATION_LEVEL;
 import static org.apache.iceberg.TableProperties.MERGE_MODE;
@@ -1611,6 +1613,7 @@ public synchronized void testMergeWithSnapshotIsolation()
     sql(
         "ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')",
         tableName, MERGE_ISOLATION_LEVEL, "snapshot");
+    sql("ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')", tableName, COMMIT_MIN_RETRY_WAIT_MS, "10");
 
     sql("INSERT INTO TABLE %s VALUES (1, 'hr')", tableName);
     createBranchIfNeeded();