diff --git a/src/main/java/liquibase/ext/databricks/change/createTable/CreateTableChangeDatabricks.java b/src/main/java/liquibase/ext/databricks/change/createTable/CreateTableChangeDatabricks.java
index e2a086e2..87797ac0 100644
--- a/src/main/java/liquibase/ext/databricks/change/createTable/CreateTableChangeDatabricks.java
+++ b/src/main/java/liquibase/ext/databricks/change/createTable/CreateTableChangeDatabricks.java
@@ -4,7 +4,11 @@
 import liquibase.change.DatabaseChange;
 import liquibase.change.DatabaseChangeProperty;
 import liquibase.change.core.CreateTableChange;
+import liquibase.database.Database;
+import liquibase.exception.ValidationErrors;
+import liquibase.ext.databricks.database.DatabricksDatabase;
 import liquibase.statement.core.CreateTableStatement;
+import liquibase.util.ObjectUtil;
 
 
 @DatabaseChange(name = "createTable", description = "Create Table", priority = ChangeMetaData.PRIORITY_DATABASE +500)
@@ -12,8 +16,21 @@ public class CreateTableChangeDatabricks extends CreateTableChange {
 
     private String tableFormat;
     private String tableLocation;
     private String clusterColumns;
+    private String partitionColumns;
+
+
+    @Override
+    public ValidationErrors validate(Database database) {
+        ValidationErrors validationErrors = new ValidationErrors();
+        validationErrors.addAll(super.validate(database));
+
+        if (partitionColumns != null && clusterColumns != null) {
+            validationErrors.addError("Databricks does not support CLUSTER BY and PARTITIONED BY columns on the same table; please supply only one of the two.");
+        }
+        return validationErrors;
+    }
 
     @DatabaseChangeProperty
     public String getTableFormat() {return tableFormat;}
@@ -30,6 +46,9 @@ public String getClusterColumns() {
         return clusterColumns;
     }
 
+    @DatabaseChangeProperty
+    public String getPartitionColumns() {return partitionColumns; }
+
     public void setTableLocation(String tableLocation) {this.tableLocation = tableLocation;}
 
     @DatabaseChangeProperty
@@ -37,6 +56,8 @@ public void setClusterColumns(String clusterColumns) {
         this.clusterColumns = clusterColumns;
     }
 
+    @DatabaseChangeProperty
+    public void setPartitionColumns(String partitionColumns) { this.partitionColumns = partitionColumns; }
 
     @Override
     protected CreateTableStatement generateCreateTableStatement() {
@@ -46,6 +67,7 @@ protected CreateTableStatement generateCreateTableStatement() {
         ctas.setTableFormat(this.getTableFormat());
         ctas.setTableLocation(this.getTableLocation());
         ctas.setClusterColumns(this.getClusterColumns());
+        ctas.setPartitionColumns(this.getPartitionColumns());
 
         return ctas;
     }
diff --git a/src/main/java/liquibase/ext/databricks/change/createTable/CreateTableStatementDatabricks.java b/src/main/java/liquibase/ext/databricks/change/createTable/CreateTableStatementDatabricks.java
index 77d061ce..949040df 100644
--- a/src/main/java/liquibase/ext/databricks/change/createTable/CreateTableStatementDatabricks.java
+++ b/src/main/java/liquibase/ext/databricks/change/createTable/CreateTableStatementDatabricks.java
@@ -14,6 +14,8 @@ public class CreateTableStatementDatabricks extends CreateTableStatement {
 
     private ArrayList clusterColumns;
 
+    private ArrayList partitionColumns;
+
     public CreateTableStatementDatabricks(String catalogName, String schemaName, String tableName) {
         super(catalogName, schemaName, tableName);
@@ -30,6 +32,21 @@ public ArrayList getClusterColumns () {
         return clusterColumns;
     }
 
+
+    public ArrayList getPartitionColumns () {
+        return partitionColumns;
+    }
+
+
+    public void setPartitionColumns (String partitionColumns) {
+        if (partitionColumns == null) {
+            this.partitionColumns = new ArrayList<>();
+            return;
+        }
+        this.partitionColumns = new ArrayList<>(Arrays.asList(partitionColumns.split("\\s*,\\s*")));
+    }
+
+
     public void setClusterColumns (String clusterColumns) {
         if (clusterColumns == null) {
             this.clusterColumns = new ArrayList<>();
diff --git a/src/main/java/liquibase/ext/databricks/sqlgenerator/CreateTableGeneratorDatabricks.java b/src/main/java/liquibase/ext/databricks/sqlgenerator/CreateTableGeneratorDatabricks.java
index 3377e5d5..553e2682 100644
--- a/src/main/java/liquibase/ext/databricks/sqlgenerator/CreateTableGeneratorDatabricks.java
+++ b/src/main/java/liquibase/ext/databricks/sqlgenerator/CreateTableGeneratorDatabricks.java
@@ -1,6 +1,7 @@
 package liquibase.ext.databricks.sqlgenerator;
 
+import liquibase.exception.ValidationErrors;
 import liquibase.ext.databricks.change.createTable.CreateTableStatementDatabricks;
 import liquibase.ext.databricks.database.DatabricksDatabase;
 import liquibase.sqlgenerator.core.CreateTableGenerator;
@@ -28,6 +29,15 @@ public boolean supports(CreateTableStatement statement, Database database) {
         return super.supports(statement, database) && (database instanceof DatabricksDatabase);
     }
 
+    public ValidationErrors validate(CreateTableStatementDatabricks createStatement, Database database, SqlGeneratorChain sqlGeneratorChain) {
+        ValidationErrors validationErrors = new ValidationErrors();
+        if (!(createStatement.getPartitionColumns().isEmpty()) && !(createStatement.getClusterColumns().isEmpty())) {
+            validationErrors.addError("Databricks does not support creating tables with both PARTITIONED BY and CLUSTER BY columns; please supply only one option.");
+        }
+        return validationErrors;
+    }
+
+
     @Override
     public Sql[] generateSql(CreateTableStatement statement, Database database, SqlGeneratorChain sqlGeneratorChain) {
@@ -49,8 +59,11 @@ public Sql[] generateSql(CreateTableStatement statement, Database database, SqlGeneratorChain sqlGeneratorChain) {
         }
 
         ArrayList clusterCols = thisStatement.getClusterColumns();
+        ArrayList partitionCols = thisStatement.getPartitionColumns();
+
         // If there are any cluster columns, add the clause
+        // Only when there are no cluster columns, fall back to PARTITIONED BY; never emit both.
         if (clusterCols.size() >= 1 ) {
             finalsql += " CLUSTER BY (";
 
@@ -67,6 +80,21 @@ public Sql[] generateSql(CreateTableStatement statement, Database database, SqlGeneratorChain sqlGeneratorChain) {
                     finalsql += ")";
                 }
             }
+        } else if (partitionCols.size() >= 1) {
+            finalsql += " PARTITIONED BY (";
+
+            int val = 0;
+            while (partitionCols.size() > val) {
+                finalsql += partitionCols.get(val);
+
+                val += 1;
+                if (partitionCols.size() > val) {
+                    finalsql += ", ";
+                }
+                else {
+                    finalsql += ")";
+                }
+            }
         }
diff --git a/src/test/resources/liquibase/harness/change/changelogs/databricks/createPartitionedTable.xml b/src/test/resources/liquibase/harness/change/changelogs/databricks/createPartitionedTable.xml
new file mode 100644
index 00000000..66ecf107
--- /dev/null
+++ b/src/test/resources/liquibase/harness/change/changelogs/databricks/createPartitionedTable.xml
@@ -0,0 +1,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/test/resources/liquibase/harness/change/expectedSnapshot/databricks/createPartitionedTable.json b/src/test/resources/liquibase/harness/change/expectedSnapshot/databricks/createPartitionedTable.json
new file mode 100644
index 00000000..06db8b4e
--- /dev/null
+++ b/src/test/resources/liquibase/harness/change/expectedSnapshot/databricks/createPartitionedTable.json
@@ -0,0 +1,30 @@
+{
+  "snapshot": {
+    "objects": {
+      "liquibase.structure.core.Table": [
+        {
+          "table": {
+            "name": "test_table_partitioned"
+          }
+        }
+      ],
+      "liquibase.structure.core.Column": [
+        {
+          "column": {
+            "name": "test_id"
+          }
+        },
+        {
+          "column": {
+            "name": "test_column"
+          }
+        },
+        {
+          "column": {
+            "name": "partition_column"
+          }
+        }
+      ]
+    }
+  }
+}
\ No newline at end of file
diff --git a/src/test/resources/liquibase/harness/change/expectedSql/databricks/createPartitionedTable.sql b/src/test/resources/liquibase/harness/change/expectedSql/databricks/createPartitionedTable.sql
new file mode 100644
index 00000000..da63f56a
--- /dev/null
+++ b/src/test/resources/liquibase/harness/change/expectedSql/databricks/createPartitionedTable.sql
@@ -0,0 +1 @@
+CREATE TABLE main.liquibase_harness_test_ds.test_table_partitioned (test_id INT NOT NULL, test_column VARCHAR(50) NOT NULL, partition_column STRING NOT NULL, CONSTRAINT PK_TEST_TABLE_PARTITIONED PRIMARY KEY (test_id)) USING delta TBLPROPERTIES('delta.feature.allowColumnDefaults' = 'supported', 'delta.columnMapping.mode' = 'name') PARTITIONED BY (partition_column)
\ No newline at end of file
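
The body of createPartitionedTable.xml is not readable above, so here is a sketch of what a harness changelog exercising the new partitionColumns attribute could look like, inferred from the expectedSql and expectedSnapshot files in this diff. The changeSet id/author and the schema-location boilerplate are placeholders, not the actual file contents:

    <?xml version="1.0" encoding="UTF-8"?>
    <databaseChangeLog
            xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
            xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog
                http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-latest.xsd">
        <!-- id and author are placeholders; the real harness file may differ -->
        <changeSet id="1" author="placeholder">
            <!-- partitionColumns is the attribute added by this change; use a comma-separated list for multiple columns -->
            <createTable tableName="test_table_partitioned" partitionColumns="partition_column">
                <column name="test_id" type="int">
                    <constraints primaryKey="true" primaryKeyName="PK_TEST_TABLE_PARTITIONED" nullable="false"/>
                </column>
                <column name="test_column" type="varchar(50)">
                    <constraints nullable="false"/>
                </column>
                <column name="partition_column" type="string">
                    <constraints nullable="false"/>
                </column>
            </createTable>
        </changeSet>
    </databaseChangeLog>

Run through CreateTableGeneratorDatabricks, a changelog along these lines should produce the PARTITIONED BY (partition_column) clause shown in expectedSql/databricks/createPartitionedTable.sql. Supplying both partitionColumns and clusterColumns on the same createTable is rejected by CreateTableChangeDatabricks.validate(), since Databricks allows only one of the two layouts per table.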