
Commit 7961140
Core, Spark3.5: Fix test failures due to timeout
manuzhang committed Dec 17, 2024
1 parent eddf9a1 commit 7961140
Showing 3 changed files with 21 additions and 1 deletion.

@@ -18,6 +18,9 @@
  */
 package org.apache.iceberg.hadoop;
 
+import static org.apache.iceberg.CatalogProperties.LOCK_ACQUIRE_TIMEOUT_MS;
+import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
+import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
 import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;
 import static org.apache.iceberg.types.Types.NestedField.optional;
 import static org.apache.iceberg.types.Types.NestedField.required;

@@ -421,7 +424,16 @@ public void testConcurrentFastAppends(@TempDir File dir) throws Exception {
 TABLES.create(
     SCHEMA,
     SPEC,
-    ImmutableMap.of(COMMIT_NUM_RETRIES, String.valueOf(threadsCount)),
+    ImmutableMap.of(
+        COMMIT_NUM_RETRIES,
+        String.valueOf(threadsCount),
+        COMMIT_MIN_RETRY_WAIT_MS,
+        "10",
+        COMMIT_MAX_RETRY_WAIT_MS,
+        "1000",
+        // Disable extra retry on lock acquire failure since commit will fail anyway.
+        LOCK_ACQUIRE_TIMEOUT_MS,
+        "0"),
     dir.toURI().toString());
 
 String fileName = UUID.randomUUID().toString();
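
Note: the Hadoop test above shortens the commit retry backoff and disables the extra lock-acquire retry so that concurrent fast appends finish within the test timeout. For context, the same properties can be set on any table created through HadoopTables; the sketch below is illustrative only, and the schema, retry count, and warehouse location are assumptions rather than part of the commit.

// Illustrative sketch (not from the commit): create a Hadoop table whose
// commit retries back off quickly, mirroring the properties set above.
import static org.apache.iceberg.CatalogProperties.LOCK_ACQUIRE_TIMEOUT_MS;
import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES;

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.hadoop.HadoopTables;
import org.apache.iceberg.types.Types;

public class FastRetryTableSketch {
  public static void main(String[] args) {
    Schema schema = new Schema(Types.NestedField.required(1, "id", Types.LongType.get()));
    Map<String, String> props =
        Map.of(
            COMMIT_NUM_RETRIES, "4",         // assumed retry count; the test uses threadsCount
            COMMIT_MIN_RETRY_WAIT_MS, "10",  // values taken from the commit
            COMMIT_MAX_RETRY_WAIT_MS, "1000",
            // Disable the extra retry on lock acquire failure, as in the test above.
            LOCK_ACQUIRE_TIMEOUT_MS, "0");
    Table table =
        new HadoopTables(new Configuration())
            .create(schema, PartitionSpec.unpartitioned(), props, "file:///tmp/fast_retry_table");
    System.out.println("Created " + table.location());
  }
}
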
@@ -21,6 +21,8 @@
 import static org.apache.iceberg.DataOperations.DELETE;
 import static org.apache.iceberg.RowLevelOperationMode.COPY_ON_WRITE;
 import static org.apache.iceberg.SnapshotSummary.ADD_POS_DELETE_FILES_PROP;
+import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
+import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
 import static org.apache.iceberg.TableProperties.DELETE_DISTRIBUTION_MODE;
 import static org.apache.iceberg.TableProperties.DELETE_ISOLATION_LEVEL;
 import static org.apache.iceberg.TableProperties.DELETE_MODE;

@@ -1144,6 +1146,8 @@ public synchronized void testDeleteWithSnapshotIsolation()
 sql(
     "ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')",
     tableName, DELETE_ISOLATION_LEVEL, "snapshot");
+sql("ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')", tableName, COMMIT_MIN_RETRY_WAIT_MS, "10");
+sql("ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')", tableName, COMMIT_MAX_RETRY_WAIT_MS, "1000");
 
 sql("INSERT INTO TABLE %s VALUES (1, 'hr')", tableName);
 createBranchIfNeeded();
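
Note: the Spark DELETE snapshot-isolation test applies the same tuning through ALTER TABLE ... SET TBLPROPERTIES. The equivalent change can also be made through the Table API; the class and method below are an illustrative sketch, not part of the commit.

// Illustrative sketch (not from the commit): the programmatic equivalent of the
// ALTER TABLE ... SET TBLPROPERTIES statements above.
import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;

import org.apache.iceberg.Table;

public final class RetryWaitTuning {
  private RetryWaitTuning() {}

  // Shorten the commit retry backoff on an already-loaded table.
  public static void shortenRetryWait(Table table) {
    table
        .updateProperties()
        .set(COMMIT_MIN_RETRY_WAIT_MS, "10")
        .set(COMMIT_MAX_RETRY_WAIT_MS, "1000")
        .commit();
  }
}
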
@@ -19,6 +19,8 @@
 package org.apache.iceberg.spark.extensions;
 
 import static org.apache.iceberg.RowLevelOperationMode.COPY_ON_WRITE;
+import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
+import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
 import static org.apache.iceberg.TableProperties.MERGE_DISTRIBUTION_MODE;
 import static org.apache.iceberg.TableProperties.MERGE_ISOLATION_LEVEL;
 import static org.apache.iceberg.TableProperties.MERGE_MODE;

@@ -1611,6 +1613,8 @@ public synchronized void testMergeWithSnapshotIsolation()
 sql(
     "ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')",
     tableName, MERGE_ISOLATION_LEVEL, "snapshot");
+sql("ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')", tableName, COMMIT_MIN_RETRY_WAIT_MS, "10");
+sql("ALTER TABLE %s SET TBLPROPERTIES('%s' '%s')", tableName, COMMIT_MAX_RETRY_WAIT_MS, "1000");
 
 sql("INSERT INTO TABLE %s VALUES (1, 'hr')", tableName);
 createBranchIfNeeded();
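
Note: these two properties bound the exponentially growing delay Iceberg applies between commit retries, so lowering them caps how long a contended DELETE or MERGE test can stall. The sketch below only approximates that behavior with a fixed 2x growth factor, no jitter, and an assumed retry count; it is not Iceberg's actual retry code.

// Illustrative sketch (not Iceberg's implementation): approximate the worst-case
// retry delay for a given min/max wait, assuming 2x growth and no jitter.
public class RetryWaitEstimate {
  public static void main(String[] args) {
    long minWaitMs = 10;    // COMMIT_MIN_RETRY_WAIT_MS as set in the tests above
    long maxWaitMs = 1000;  // COMMIT_MAX_RETRY_WAIT_MS as set in the tests above
    int retries = 4;        // assumed retry count for the example
    long totalMs = 0;
    for (int attempt = 0; attempt < retries; attempt++) {
      long waitMs = Math.min(maxWaitMs, (long) (minWaitMs * Math.pow(2, attempt)));
      totalMs += waitMs;
      System.out.printf("retry %d: wait ~%d ms%n", attempt + 1, waitMs);
    }
    System.out.printf("approximate total wait across %d retries: %d ms%n", retries, totalMs);
  }
}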
