Use location as the table path
Fokko committed Jul 4, 2024
1 parent 9743983 commit 2c6775c
Showing 4 changed files with 12 additions and 16 deletions.
7 changes: 2 additions & 5 deletions examples/scala/src/main/scala/example/IcebergCompatV2.scala
@@ -27,19 +27,16 @@ import org.apache.spark.sql.SparkSession
* A standalone HMS can be created using the following docker command.
* ************************************************************
* docker run -d -p 9083:9083 --env SERVICE_NAME=metastore \
- *   --name metastore-standalone apache/hive:4.0.0-beta-1
+ *   --name metastore-standalone apache/hive:4.0.0
* ************************************************************
* The URL of this standalone HMS is thrift://localhost:9083
- *
- * By default this hms will use `/opt/hive/data/warehouse` as warehouse path.
- * Please make sure this path exists or change it prior to running the example.
*/
object IcebergCompatV2 {

def main(args: Array[String]): Unit = {
// Update this according to the metastore config
val port = 9083
val warehousePath = "/opt/hive/data/warehouse/"
val warehousePath = Utils.createTempDir("IcebergCompatV2")

if (!UniForm.hmsReady(port)) {
print("HMS not available. Exit.")
7 changes: 2 additions & 5 deletions examples/scala/src/main/scala/example/UniForm.scala
@@ -28,19 +28,16 @@ import org.apache.spark.sql.SparkSession
* A standalone HMS can be created using the following docker command.
* ************************************************************
* docker run -d -p 9083:9083 --env SERVICE_NAME=metastore \
- *   --name metastore-standalone apache/hive:4.0.0-beta-1
+ *   --name metastore-standalone apache/hive:4.0.0
* ************************************************************
* The URL of this standalone HMS is thrift://localhost:9083
- *
- * By default this hms will use `/opt/hive/data/warehouse` as warehouse path.
- * Please make sure this path exists or change it prior to running the example.
*/
object UniForm {

def main(args: Array[String]): Unit = {
// Update this according to the metastore config
val port = 9083
val warehousePath = "/opt/hive/data/warehouse/"
val warehousePath = Utils.createTempDir("Uniform")

if (!hmsReady(port)) {
print("HMS not available. Exit.")
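Both example programs now create a throwaway warehouse directory instead of assuming `/opt/hive/data/warehouse` exists, so no path outside the JVM's temp space has to be prepared in advance. A minimal sketch of what a helper like the `Utils.createTempDir` they call could look like (a hypothetical stand-in; the repository's actual `Utils` may differ):

```scala
import java.nio.file.Files

object Utils {
  // Hypothetical stand-in for the helper the examples import.
  // Creates a scratch directory usable as an HMS warehouse path.
  def createTempDir(prefix: String): String = {
    val dir = Files.createTempDirectory(prefix).toFile
    dir.deleteOnExit() // best-effort cleanup on JVM exit
    dir.getCanonicalPath
  }
}
```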
1 change: 1 addition & 0 deletions IcebergConversionTransaction.scala
@@ -400,6 +400,7 @@ class IcebergConversionTransaction(
hiveCatalog
.buildTable(icebergTableId, icebergSchema)
.withPartitionSpec(partitionSpec)
+      .withLocation(this.tablePath.toString)
.withProperties(properties.asJava)
}

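The one-line addition above is the heart of the commit: the Iceberg table is registered with an explicit location (the Delta table path) instead of letting the Hive catalog derive a path under its warehouse directory. A minimal sketch of the Iceberg `TableBuilder` API being used here, with a placeholder catalog, identifier, schema, and path rather than the values the transaction actually passes:

```scala
import org.apache.iceberg.{PartitionSpec, Schema}
import org.apache.iceberg.catalog.{Catalog, TableIdentifier}
import org.apache.iceberg.types.Types
import scala.collection.JavaConverters._

// Placeholder values for illustration only.
def createPinnedTable(catalog: Catalog): Unit = {
  val schema = new Schema(
    Types.NestedField.required(1, "col1", Types.IntegerType.get()))
  catalog
    .buildTable(TableIdentifier.of("default", "deltatable"), schema)
    .withPartitionSpec(PartitionSpec.unpartitioned())
    // Pin the Iceberg table location to the Delta table path, rather
    // than a path derived from the catalog's warehouse directory.
    .withLocation("/path/to/delta/table")
    .withProperties(Map("format-version" -> "2").asJava)
    .create()
}
```

Pinning the location keeps the Iceberg and Delta metadata rooted at the same path, which is what lets the test suite below move its warehouse into a temp directory.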
13 changes: 7 additions & 6 deletions ConvertToIcebergSuite.scala
@@ -36,12 +36,9 @@ import org.apache.spark.util.Utils
* A standalone HMS can be created using the following docker command.
* ************************************************************
* docker run -d -p 9083:9083 --env SERVICE_NAME=metastore \
- *   --name metastore-standalone apache/hive:4.0.0-beta-1
+ *   --name metastore-standalone apache/hive:4.0.0
* ************************************************************
* The URL of this standalone HMS is thrift://localhost:9083
- *
- * By default this hms will use `/opt/hive/data/warehouse` as warehouse path.
- * Please make sure this path exists prior to running the suite.
*/
class ConvertToIcebergSuite extends QueryTest with Eventually {

@@ -50,10 +47,10 @@ class ConvertToIcebergSuite extends QueryTest with Eventually {
private var _sparkSessionWithIceberg: SparkSession = null

private val PORT = 9083
private val WAREHOUSE_PATH = "/opt/hive/data/warehouse/"
private val WAREHOUSE_PATH = Utils.createTempDir(this.suiteName)

private val testTableName: String = "deltatable"
-  private var testTablePath: String = s"$WAREHOUSE_PATH$testTableName"
+  private val testTablePath: String = s"$WAREHOUSE_PATH/$testTableName"

override def spark: SparkSession = _sparkSession

@@ -108,7 +105,9 @@ class ConvertToIcebergSuite extends QueryTest with Eventually {
if (hmsReady(PORT)) {
runDeltaSql(
s"""CREATE TABLE `${testTableName}` (col1 INT) USING DELTA
+         |LOCATION '$testTablePath'
|TBLPROPERTIES (
+         |  'delta.enableIcebergCompatV2' = 'true',
| 'delta.columnMapping.mode' = 'name',
| 'delta.universalFormat.enabledFormats' = 'iceberg'
|)""".stripMargin)
@@ -123,13 +122,15 @@ class ConvertToIcebergSuite extends QueryTest with Eventually {
withDefaultTablePropsInSQLConf {
deltaSpark.range(10).write.format("delta")
.option("path", testTablePath)
.option("delta.enableIcebergCompatV2", "true")
.saveAsTable(testTableName)
}
}
withDeltaSparkSession { deltaSpark =>
deltaSpark.range(10, 20, 1)
.write.format("delta").mode("append")
.option("path", testTablePath)
.option("delta.enableIcebergCompatV2", "true")
.saveAsTable(testTableName)
}
verifyReadWithIceberg(testTableName, 0 to 19 map (Row(_)))
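The suite writes through Delta with IcebergCompatV2 enabled, then reads the rows back through an Iceberg-configured session. A sketch of what that read-back could look like, assuming a second SparkSession whose session catalog is Iceberg's `SparkSessionCatalog` backed by the same HMS (`verifyReadWithIceberg` is the suite's own helper, and its actual implementation and session wiring may differ):

```scala
import org.apache.spark.sql.SparkSession

// Hypothetical sketch of the Iceberg-side verification.
def readBackWithIceberg(): Unit = {
  val icebergSpark = SparkSession.builder()
    .appName("iceberg-read")
    // Route the default catalog through Iceberg, backed by the same HMS.
    .config("spark.sql.catalog.spark_catalog",
      "org.apache.iceberg.spark.SparkSessionCatalog")
    .config("spark.sql.catalog.spark_catalog.type", "hive")
    .config("spark.hadoop.hive.metastore.uris", "thrift://localhost:9083")
    .getOrCreate()

  val actual = icebergSpark.table("deltatable")
    .collect().map(_.getLong(0)).sorted
  assert(actual.sameElements(0L to 19L)) // rows written by the two Delta appends
}
```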
