Commit

Set Python script's column mappings from schema API

anero committed Jun 14, 2022
1 parent 08cfe54 commit 561df69

Showing 3 changed files with 114 additions and 125 deletions.
33 changes: 28 additions & 5 deletions glue_job.tf
@@ -4,6 +4,8 @@ resource "aws_glue_catalog_database" "catalog_db" {

locals {
signatures_s3_path = "s3://agra-data-exports-${var.controlshift_environment}/${var.controlshift_organization_slug}/full/signatures"
signatures_table_index = index(local.parsed_bulk_data_schemas.tables.*.table.name, "signatures")
signatures_table_columns = local.parsed_bulk_data_schemas.tables[local.signatures_table_index].table.columns
}
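
For readers less familiar with the splat/index pattern above, here is a minimal Python sketch of what those two locals compute. The payload shape and the column names are assumptions inferred from how the locals index into parsed_bulk_data_schemas (which is defined elsewhere in the module from the bulk-data schema API response); they are illustrative only.

# Hypothetical payload shape assumed by the locals above; not part of this commit.
parsed_bulk_data_schemas = {
    "tables": [
        {"table": {"name": "petitions", "columns": {"id": {"sql_type": "integer"}}}},
        {"table": {"name": "signatures", "columns": {
            "id": {"sql_type": "integer"},
            "created_at": {"sql_type": "timestamp without time zone"},
        }}},
    ]
}

# Equivalent of index(local.parsed_bulk_data_schemas.tables.*.table.name, "signatures")
table_names = [entry["table"]["name"] for entry in parsed_bulk_data_schemas["tables"]]
signatures_table_index = table_names.index("signatures")

# Equivalent of local.parsed_bulk_data_schemas.tables[signatures_table_index].table.columns
signatures_table_columns = parsed_bulk_data_schemas["tables"][signatures_table_index]["table"]["columns"]
print(signatures_table_columns["created_at"]["sql_type"])  # "timestamp without time zone"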

resource "aws_glue_crawler" "signatures_crawler" {
@@ -35,22 +37,43 @@ resource "aws_s3_bucket_server_side_encryption_configuration" "glue_resources" {
}
}

data "template_file" "signatures_script" {
template = file("${path.module}/templates/signatures_job.py.tpl")
vars = {
locals {
# Unsupported column types read from CSV files: all of these will be read as 'string'
unsupported_input_column_types = [
"boolean",
"character varying.*",
"decimal.*",
"hstore",
"jsonb",
"numeric.*",
"timestamp"
]

# Unsupported column types for Redshift: these will be replaced by the mapped type
unsupported_output_column_types = {
"hstore" = "string"
"jsonb" = "string"
"numeric\\(3,2\\)" = "decimal(3,2)"
"timestamp without time zone" = "timestamp"
}

signatures_script = templatefile("${path.module}/templates/signatures_job.py.tftpl", {
catalog_database_name = aws_glue_catalog_database.catalog_db.name
unsupported_input_column_types = local.unsupported_input_column_types
unsupported_output_column_types = local.unsupported_output_column_types
redshift_database_name = var.redshift_database_name
redshift_schema = var.redshift_schema
redshift_connection_name = aws_glue_connection.redshift_connection.name
}
signatures_table_columns = local.signatures_table_columns
})
}
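
The two maps above drive the column mappings rendered into the Glue script (Step 3 of the template below). As a rough sketch of the per-column decision the template directives encode, here is an approximate Python equivalent; Python's re module stands in for Terraform's regexall, and the column/type values are illustrative assumptions.

import re

unsupported_input_column_types = [
    "boolean", "character varying.*", "decimal.*", "hstore",
    "jsonb", "numeric.*", "timestamp",
]
unsupported_output_column_types = {
    "hstore": "string",
    "jsonb": "string",
    "numeric\\(3,2\\)": "decimal(3,2)",
    "timestamp without time zone": "timestamp",
}

def mapping_for(column, sql_type):
    # Input side: columns whose SQL type cannot be read from CSV keep arriving as strings.
    input_pattern = "|".join(unsupported_input_column_types)
    source_type = "string" if re.search(input_pattern, sql_type) else sql_type
    # Output side: types the Redshift writer cannot take are swapped for their mapped replacement.
    target_type = sql_type
    for pattern, replacement in unsupported_output_column_types.items():
        if re.search(pattern, sql_type):
            target_type = replacement
            break
    return (column, source_type, column, target_type)

print(mapping_for("created_at", "timestamp without time zone"))
# -> ('created_at', 'string', 'created_at', 'timestamp')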

resource "aws_s3_object" "signatures_script" {
bucket = aws_s3_bucket.glue_resources.id
key = "${var.controlshift_environment}/signatures_job.py"
acl = "private"

content = data.template_file.signatures_script.rendered
content = local.signatures_script
}

resource "aws_iam_role" "glue_service_role" {
86 changes: 86 additions & 0 deletions templates/signatures_job.py.tftpl
@@ -0,0 +1,86 @@
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
import pyspark.sql.functions as func

## @params: [TempDir, JOB_NAME]
args = getResolvedOptions(sys.argv, ['TempDir','JOB_NAME'])

sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)

# Step 1: Read from the table in the data catalog
## @type: DataSource
## @args: [database = "${catalog_database_name}", table_name = "signatures", transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "${catalog_database_name}", table_name = "signatures", transformation_ctx = "datasource0")

# Step 2: Identify the latest partition in the data catalog.
# This will correspond to the latest full export, stamped with the date.
# Create a new DynamicFrame to read only that partition from the catalog.
## @type: DataSource
## @args: [database = "${catalog_database_name}", table_name = "signatures", push_down_predicate= f"(partition_0 == {latestpartition})", transformation_ctx = "datasource1"]
## @return: datasource1
## @inputs: []
latestpartition = datasource0.toDF().agg(func.max("partition_0").alias("last_partition")).collect()[0]["last_partition"]
datasource1 = glueContext.create_dynamic_frame.from_catalog(
database = "${catalog_database_name}",
table_name = "signatures",
push_down_predicate = f"(partition_0 == {latestpartition})",
transformation_ctx = "datasource1")

# Step 3: Map the columns in the data catalog / S3 bucket to the columns we want in Redshift
## @type: ApplyMapping
## @args: [mapping = [dynamically generated from schema read from the API], transformation_ctx = "applymapping1"]
## @return: applymapping1
## @inputs: [frame = datasource1]
applymapping1 = ApplyMapping.apply(
frame = datasource1,
mappings = [
%{ for column in keys(signatures_table_columns) }
(
"${column}",
%{ if length(regexall(join("|", unsupported_input_column_types), signatures_table_columns[column].sql_type)) > 0 }
"string",
%{ else }
"${signatures_table_columns[column].sql_type}",
%{ endif }
"${column}",
%{ if length(regexall(join("|", keys(unsupported_output_column_types)), signatures_table_columns[column].sql_type)) > 0 }
"${[ for k,v in unsupported_output_column_types : "${v}" if length(regexall(k, signatures_table_columns[column].sql_type)) > 0 ][0]}",
%{ else }
"${signatures_table_columns[column].sql_type}"
%{ endif }
),
%{ endfor }
],
transformation_ctx = "applymapping1")

# Step 4: Deal with column types that aren't consistent
## @type: ResolveChoice
## @args: [choice = "make_cols", transformation_ctx = "resolvechoice2"]
## @return: resolvechoice2
## @inputs: [frame = applymapping1]
resolvechoice2 = ResolveChoice.apply(frame = applymapping1, choice = "make_cols", transformation_ctx = "resolvechoice2")

# Step 5: Write the transformed data into Redshift, replacing whatever data was in the redshift table previously
## @type: DataSink
## @args: [catalog_connection = "${redshift_connection_name}", connection_options = {"dbtable": "signatures", "database": "${redshift_database_name}"}, redshift_tmp_dir = TempDir, transformation_ctx = "datasink4"]
## @return: datasink4
## @inputs: [frame = resolvechoice2]
datasink4 = glueContext.write_dynamic_frame.from_jdbc_conf(
frame = resolvechoice2,
catalog_connection = "${redshift_connection_name}",
connection_options = {"preactions": "truncate table ${redshift_schema}.signatures;",
"dbtable": "${redshift_schema}.signatures",
"database": "${redshift_database_name}"},
redshift_tmp_dir = args["TempDir"], transformation_ctx = "datasink4")

job.commit()
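
For illustration only, a sketch of roughly what the templated Step 3 could render to once the schema API columns are substituted in. The column names and types here are hypothetical; the real mapping list is generated at plan time from signatures_table_columns.

applymapping1 = ApplyMapping.apply(
    frame = datasource1,
    mappings = [
        ("id", "bigint", "id", "bigint"),                                 # supported type passes through unchanged
        ("additional_fields", "string", "additional_fields", "string"),   # hstore: read as string, written as string
        ("created_at", "string", "created_at", "timestamp"),              # timestamp without time zone -> timestamp
    ],
    transformation_ctx = "applymapping1")
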
120 changes: 0 additions & 120 deletions templates/signatures_job.py.tpl

This file was deleted.
