Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Databricks Support #81

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 23 additions & 1 deletion dbt2looker/generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,19 +163,41 @@
'BYTE': 'number',
'SHORT': 'number',
'INTEGER': 'number',
'INT': 'number',
'TINYINT': 'number',
'SMALLINT': 'number',
'BIGINT': 'number',
'LONG': 'number',
'FLOAT': 'number',
'DOUBLE': 'number',
'REAL': 'number',
'DECIMAL': 'number',
'DEC': 'number',
'NUMERIC': 'number',
'STRING': 'string',
'VARCHAR': 'string',
'CHAR': 'string',
'BINARY': 'string',
'BOOLEAN': 'yesno',
'TIMESTAMP': 'timestamp',
'DATE': 'datetime',
# ARRAY not supported
# STRUCT not supported
# INTERVAL <timeframe> not supported
# MAP not supported
}
}

# Databricks is built on the Spark connector and uses the same datatypes
# NOTE(review): this is an alias, not a copy — both keys point at the same
# dict object, so any later mutation of one adapter's mapping affects the
# other. Intentional here (they share types), but use dict(...) if the two
# mappings ever need to diverge.
LOOKER_DTYPE_MAP['databricks'] = LOOKER_DTYPE_MAP['spark']


# Adapters whose reported column types must pass through
# normalise_spark_types() before being looked up in LOOKER_DTYPE_MAP
# (see map_adapter_type_to_looker below).
spark_like_adapters = [
    models.SupportedDbtAdapters.databricks.value,
    models.SupportedDbtAdapters.spark.value
]


# Groupings of Looker types produced by the mapping above — presumably used
# downstream to decide dimension_group vs plain dimension emission; verify
# against the dimension-generation code, which is outside this view.
looker_date_time_types = ['datetime', 'timestamp']
looker_date_types = ['date']
looker_scalar_types = ['number', 'yesno', 'string']
Expand All @@ -196,7 +218,7 @@ def normalise_spark_types(column_type: str) -> str:


def map_adapter_type_to_looker(adapter_type: models.SupportedDbtAdapters, column_type: str):
normalised_column_type = (normalise_spark_types(column_type) if adapter_type == models.SupportedDbtAdapters.spark.value else column_type).upper()
normalised_column_type = (normalise_spark_types(column_type) if adapter_type in spark_like_adapters else column_type).upper()
looker_type = LOOKER_DTYPE_MAP[adapter_type].get(normalised_column_type)
if (column_type is not None) and (looker_type is None):
logging.warning(f'Column type {column_type} not supported for conversion from {adapter_type} to looker. No dimension will be created.')
Expand Down
1 change: 1 addition & 0 deletions dbt2looker/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ class SupportedDbtAdapters(str, Enum):
redshift = 'redshift'
snowflake = 'snowflake'
spark = 'spark'
databricks = 'databricks'


# Lookml types
Expand Down