diff --git a/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseMergeTreeWriteOnHDFSSuite.scala b/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseMergeTreeWriteOnHDFSSuite.scala
index c9ea8772438e1..c2b71e928209c 100644
--- a/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseMergeTreeWriteOnHDFSSuite.scala
+++ b/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseMergeTreeWriteOnHDFSSuite.scala
@@ -48,15 +48,15 @@ class GlutenClickHouseMergeTreeWriteOnHDFSSuite
     val fs = FileSystem.get(conf)
     fs.delete(new org.apache.hadoop.fs.Path("/test"), true)
     FileUtils.deleteDirectory(new File(HDFS_METADATA_PATH))
-    FileUtils.deleteDirectory(new File(HDFS_CACHE_PATH))
+//    FileUtils.deleteDirectory(new File(HDFS_CACHE_PATH))
     FileUtils.forceMkdir(new File(HDFS_METADATA_PATH))
-    FileUtils.forceMkdir(new File(HDFS_CACHE_PATH))
+//    FileUtils.forceMkdir(new File(HDFS_CACHE_PATH))
   }
 
   override protected def afterEach(): Unit = {
     super.afterEach()
     FileUtils.deleteDirectory(new File(HDFS_METADATA_PATH))
-    FileUtils.deleteDirectory(new File(HDFS_CACHE_PATH))
+//    FileUtils.deleteDirectory(new File(HDFS_CACHE_PATH))
   }
 
   test("test mergetree table write") {
diff --git a/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseMergeTreeWriteOnObjectStorageAbstractSuite.scala b/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseMergeTreeWriteOnObjectStorageAbstractSuite.scala
index d30462b695a6e..085e726714ba2 100644
--- a/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseMergeTreeWriteOnObjectStorageAbstractSuite.scala
+++ b/backends-clickhouse/src/test/scala/io/glutenproject/execution/GlutenClickHouseMergeTreeWriteOnObjectStorageAbstractSuite.scala
@@ -133,6 +133,12 @@ class GlutenClickHouseMergeTreeWriteOnObjectStorageAbstractSuite
         "spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.policies.__hdfs_main.volumes.main.disk",
         "hdfs_cache")
       .set("spark.gluten.sql.columnar.backend.ch.shuffle.hash.algorithm", "sparkMurmurHash3_32")
+      .set(
+        "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.dfs_client_read_shortcircuit",
+        "false")
+      .set(
+        "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.dfs_default_replica",
+        "1")
   }
 
   override protected def createTPCHNotNullTables(): Unit = {
     createNotNullTPCHTablesInParquet(tablesPath)
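
Note (not part of the patch): the two runtime_config keys added in the second hunk are ordinary SparkConf string entries, so they can be reproduced outside the test suite. Below is a minimal Scala sketch assuming only a stock SparkConf; the key names are copied verbatim from the diff, while the object name and the verification printout are illustrative.

import org.apache.spark.SparkConf

object HdfsRuntimeConfSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      // From the diff: disable HDFS short-circuit local reads in the
      // ClickHouse backend's HDFS client.
      .set(
        "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.dfs_client_read_shortcircuit",
        "false")
      // From the diff: write a single replica, which is sufficient for a
      // single-node test HDFS.
      .set(
        "spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.dfs_default_replica",
        "1")
    // Print the resulting entries to verify both keys are set as expected.
    conf.getAll.foreach { case (k, v) => println(s"$k=$v") }
  }
}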