diff --git a/README.md b/README.md
index 0d93e9f..a3a27dd 100644
--- a/README.md
+++ b/README.md
@@ -528,9 +528,9 @@ module "firehose" {
   s3_bucket_arn                     = ""
   enable_lambda_transform           = true
   transform_lambda_arn              = ""
-  transform_lambda_buffer_size      = 3
-  transform_lambda_buffer_interval  = 60
-  transform_lambda_number_retries   = 3
+  transform_lambda_buffer_size      = 2  # Don't configure this parameter if you want to use the default value (1)
+  transform_lambda_buffer_interval  = 90 # Don't configure this parameter if you want to use the default value (60)
+  transform_lambda_number_retries   = 4  # Don't configure this parameter if you want to use the default value (3)
 }
 ```
 
@@ -805,6 +805,7 @@ The destination variable configured in module is mapped to firehose valid destin
 ## Examples
 
 - [Direct Put](https://github.com/fdmsantos/terraform-aws-kinesis-firehose/tree/main/examples/s3/direct-put-to-s3) - Creates an encrypted Kinesis firehose stream with Direct Put as source and S3 as destination.
+- [Direct Put With Lambda](https://github.com/fdmsantos/terraform-aws-kinesis-firehose/tree/main/examples/s3/direct-put-to-s3-with-lambda) - Creates a Kinesis Firehose stream with Direct Put as source and S3 as destination, using a transformation Lambda.
 - [Kinesis Data Stream Source](https://github.com/fdmsantos/terraform-aws-kinesis-firehose/tree/main/examples/s3/kinesis-to-s3-basic) - Creates a basic Kinesis Firehose stream with Kinesis data stream as source and s3 as destination.
 - [WAF Source](https://github.com/fdmsantos/terraform-aws-kinesis-firehose/tree/main/examples/s3/waf-to-s3) - Creates a Kinesis Firehose Stream with AWS Web WAF as source and S3 as destination.
 - [MSK Source](https://github.com/fdmsantos/terraform-aws-kinesis-firehose/tree/main/examples/s3/msk-to-s3) - Creates a Kinesis Firehose Stream with MSK Cluster as source and S3 as destination.
@@ -1077,9 +1078,9 @@ No modules.
 | [sumologic\_deployment\_name](#input\_sumologic\_deployment\_name) | Deployment Name to use in Sumo Logic destination | `string` | `null` | no |
 | [tags](#input\_tags) | A map of tags to assign to resources. | `map(string)` | `{}` | no |
 | [transform\_lambda\_arn](#input\_transform\_lambda\_arn) | Lambda ARN to Transform source records | `string` | `null` | no |
-| [transform\_lambda\_buffer\_interval](#input\_transform\_lambda\_buffer\_interval) | The period of time during which Kinesis Data Firehose buffers incoming data before invoking the AWS Lambda function. The AWS Lambda function is invoked once the value of the buffer size or the buffer interval is reached. | `number` | `60` | no |
-| [transform\_lambda\_buffer\_size](#input\_transform\_lambda\_buffer\_size) | The AWS Lambda function has a 6 MB invocation payload quota. Your data can expand in size after it's processed by the AWS Lambda function. A smaller buffer size allows for more room should the data expand after processing. | `number` | `3` | no |
-| [transform\_lambda\_number\_retries](#input\_transform\_lambda\_number\_retries) | Number of retries for AWS Transformation lambda | `number` | `3` | no |
+| [transform\_lambda\_buffer\_interval](#input\_transform\_lambda\_buffer\_interval) | The period of time during which Kinesis Data Firehose buffers incoming data before invoking the AWS Lambda function. The AWS Lambda function is invoked once the value of the buffer size or the buffer interval is reached. | `number` | `null` | no |
+| [transform\_lambda\_buffer\_size](#input\_transform\_lambda\_buffer\_size) | The AWS Lambda function has a 6 MB invocation payload quota. Your data can expand in size after it's processed by the AWS Lambda function. A smaller buffer size allows for more room should the data expand after processing. | `number` | `null` | no |
+| [transform\_lambda\_number\_retries](#input\_transform\_lambda\_number\_retries) | Number of retries for AWS Transformation lambda | `number` | `null` | no |
 | [transform\_lambda\_role\_arn](#input\_transform\_lambda\_role\_arn) | The ARN of the role to execute the transform lambda. If null use the Firehose Stream role | `string` | `null` | no |
 | [vpc\_create\_destination\_security\_group](#input\_vpc\_create\_destination\_security\_group) | Indicates if want create destination security group to associate to firehose destinations | `bool` | `false` | no |
 | [vpc\_create\_security\_group](#input\_vpc\_create\_security\_group) | Indicates if want create security group to associate to kinesis firehose | `bool` | `false` | no |
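The inline comments above describe the opt-out behaviour this PR introduces: when the three `transform_lambda_*` inputs are left unset, the module omits the corresponding processor parameters entirely and Firehose falls back to its service-side defaults. A minimal sketch of a caller that relies on those defaults; the module path and the two referenced resources are illustrative placeholders, not taken from the repo:

```hcl
module "firehose" {
  source = "../../../" # path-style source, as used in the repo's examples

  name                    = "demo-delivery-stream"
  input_source            = "direct-put"
  destination             = "s3"
  s3_bucket_arn           = aws_s3_bucket.demo.arn       # hypothetical bucket
  enable_lambda_transform = true
  transform_lambda_arn    = aws_lambda_function.demo.arn # hypothetical function

  # transform_lambda_buffer_size, transform_lambda_buffer_interval and
  # transform_lambda_number_retries are deliberately omitted: with the new
  # null defaults, no buffering/retry processor parameter is sent to AWS.
}
```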
diff --git a/examples/s3/direct-put-to-s3-with-lambda/README.md b/examples/s3/direct-put-to-s3-with-lambda/README.md
new file mode 100644
index 0000000..22d165d
--- /dev/null
+++ b/examples/s3/direct-put-to-s3-with-lambda/README.md
@@ -0,0 +1,60 @@
+# Kinesis Firehose: Direct Put To S3 With Lambda
+
+Configuration in this directory creates a Kinesis Firehose stream with Direct Put as source and an S3 bucket as destination, with a transformation Lambda.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | ~> 5.0 |
+| [random](#requirement\_random) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | ~> 5.0 |
+| [random](#provider\_random) | >= 2.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [firehose](#module\_firehose) | ../../../ | n/a |
+| [lambda\_function](#module\_lambda\_function) | terraform-aws-modules/lambda/aws | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_s3_bucket.s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource |
+| [random_pet.this](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [name\_prefix](#input\_name\_prefix) | Name prefix to use in resources | `string` | `"direct-put-to-s3"` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [kinesis\_firehose\_arn](#output\_kinesis\_firehose\_arn) | The ARN of the Kinesis Firehose Stream |
+| [kinesis\_firehose\_destination\_id](#output\_kinesis\_firehose\_destination\_id) | The Destination id of the Kinesis Firehose Stream |
+| [kinesis\_firehose\_role\_arn](#output\_kinesis\_firehose\_role\_arn) | The ARN of the IAM role created for Kinesis Firehose Stream |
+
diff --git a/examples/s3/direct-put-to-s3-with-lambda/lambda/index.py b/examples/s3/direct-put-to-s3-with-lambda/lambda/index.py
new file mode 100644
index 0000000..396c505
--- /dev/null
+++ b/examples/s3/direct-put-to-s3-with-lambda/lambda/index.py
@@ -0,0 +1,4 @@
+def lambda_handler(event, context):
+    # Firehose expects every record back with a `result` status; pass the records through unchanged.
+    return {"records": [{"recordId": r["recordId"], "result": "Ok", "data": r["data"]}
+                        for r in event["records"]]}
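One nit on the example wiring: the `terraform-aws-modules/lambda/aws` call in `main.tf` (further below) is unpinned, which is why the Modules table above reports its version as `n/a`. A hedged sketch of the same call with a version constraint; the `~> 7.0` pin is an assumed example, not a tested value:

```hcl
module "lambda_function" {
  source  = "terraform-aws-modules/lambda/aws"
  version = "~> 7.0" # assumed constraint; pick a real release before use

  function_name = "lambda"
  description   = "My awesome lambda function"
  handler       = "index.lambda_handler"
  runtime       = "python3.10"
  source_path   = "lambda"
}
```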
diff --git a/examples/s3/direct-put-to-s3-with-lambda/lambda/requirements.txt b/examples/s3/direct-put-to-s3-with-lambda/lambda/requirements.txt
new file mode 100644
index 0000000..7169fdc
--- /dev/null
+++ b/examples/s3/direct-put-to-s3-with-lambda/lambda/requirements.txt
@@ -0,0 +1 @@
+colorful
diff --git a/examples/s3/direct-put-to-s3-with-lambda/main.tf b/examples/s3/direct-put-to-s3-with-lambda/main.tf
new file mode 100644
index 0000000..61a6346
--- /dev/null
+++ b/examples/s3/direct-put-to-s3-with-lambda/main.tf
@@ -0,0 +1,27 @@
+resource "random_pet" "this" {
+  length = 2
+}
+
+resource "aws_s3_bucket" "s3" {
+  bucket        = "${var.name_prefix}-destination-bucket-${random_pet.this.id}"
+  force_destroy = true
+}
+
+module "lambda_function" {
+  source        = "terraform-aws-modules/lambda/aws"
+  function_name = "lambda"
+  description   = "My awesome lambda function"
+  handler       = "index.lambda_handler"
+  runtime       = "python3.10"
+  source_path   = "lambda"
+}
+
+module "firehose" {
+  source                  = "../../../"
+  name                    = "${var.name_prefix}-delivery-stream"
+  input_source            = "direct-put"
+  destination             = "s3"
+  s3_bucket_arn           = aws_s3_bucket.s3.arn
+  enable_lambda_transform = true
+  transform_lambda_arn    = module.lambda_function.lambda_function_arn
+}
diff --git a/examples/s3/direct-put-to-s3-with-lambda/outputs.tf b/examples/s3/direct-put-to-s3-with-lambda/outputs.tf
new file mode 100644
index 0000000..e3bd560
--- /dev/null
+++ b/examples/s3/direct-put-to-s3-with-lambda/outputs.tf
@@ -0,0 +1,14 @@
+output "kinesis_firehose_arn" {
+  description = "The ARN of the Kinesis Firehose Stream"
+  value       = module.firehose.kinesis_firehose_arn
+}
+
+output "kinesis_firehose_destination_id" {
+  description = "The Destination id of the Kinesis Firehose Stream"
+  value       = module.firehose.kinesis_firehose_destination_id
+}
+
+output "kinesis_firehose_role_arn" {
+  description = "The ARN of the IAM role created for Kinesis Firehose Stream"
+  value       = module.firehose.kinesis_firehose_role_arn
+}
diff --git a/examples/s3/direct-put-to-s3-with-lambda/variables.tf b/examples/s3/direct-put-to-s3-with-lambda/variables.tf
new file mode 100644
index 0000000..7b58969
--- /dev/null
+++ b/examples/s3/direct-put-to-s3-with-lambda/variables.tf
@@ -0,0 +1,5 @@
+variable "name_prefix" {
+  description = "Name prefix to use in resources"
+  type        = string
+  default     = "direct-put-to-s3"
+}
diff --git a/examples/s3/direct-put-to-s3-with-lambda/versions.tf b/examples/s3/direct-put-to-s3-with-lambda/versions.tf
new file mode 100644
index 0000000..bee969b
--- /dev/null
+++ b/examples/s3/direct-put-to-s3-with-lambda/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_version = ">= 0.13.1"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = "~> 5.0"
+    }
+    random = {
+      source  = "hashicorp/random"
+      version = ">= 2.0"
+    }
+  }
+}
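The `locals.tf` change below assembles the Lambda processor parameters with a build-then-filter pattern: optional entries evaluate to `null` when their input is unset, and a `for` expression drops them before the list is used. A standalone sketch of the idiom, with hypothetical names:

```hcl
variable "optional_value" {
  type    = string
  default = null
}

locals {
  # Optional entries collapse to null when their input is unset...
  candidate_parameters = [
    { name = "AlwaysPresent", value = "fixed" },
    var.optional_value != null ? { name = "Optional", value = var.optional_value } : null,
  ]
  # ...and the filter keeps only the entries that were actually populated.
  parameters = [for p in local.candidate_parameters : p if p != null]
}
```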
diff --git a/locals.tf b/locals.tf
index 302ef29..3bca99f 100644
--- a/locals.tf
+++ b/locals.tf
@@ -30,30 +30,35 @@ locals {
 
   # Data Transformation
   enable_processing = var.enable_lambda_transform || var.enable_dynamic_partitioning || var.enable_cloudwatch_logs_decompression || var.dynamic_partition_append_delimiter_to_record || var.append_delimiter_to_record
+  lambda_processor_parameters = [
+    {
+      name  = "LambdaArn"
+      value = var.transform_lambda_arn
+    },
+    var.transform_lambda_buffer_size != null ?
+    {
+      name  = "BufferSizeInMBs"
+      value = var.transform_lambda_buffer_size
+    } : null,
+    var.transform_lambda_buffer_interval != null ?
+    {
+      name  = "BufferIntervalInSeconds"
+      value = var.transform_lambda_buffer_interval
+    } : null,
+    var.transform_lambda_number_retries != null ?
+    {
+      name  = "NumberOfRetries"
+      value = var.transform_lambda_number_retries
+    } : null,
+    var.transform_lambda_role_arn != null ?
+    {
+      name  = "RoleArn"
+      value = var.transform_lambda_role_arn
+    } : null,
+  ]
   lambda_processor = var.enable_lambda_transform ? {
-    type = "Lambda"
-    parameters = [
-      {
-        name  = "LambdaArn"
-        value = var.transform_lambda_arn
-      },
-      {
-        name  = "BufferSizeInMBs"
-        value = var.transform_lambda_buffer_size
-      },
-      {
-        name  = "BufferIntervalInSeconds"
-        value = var.transform_lambda_buffer_interval
-      },
-      {
-        name  = "NumberOfRetries"
-        value = var.transform_lambda_number_retries
-      },
-      {
-        name  = "RoleArn"
-        value = var.transform_lambda_role_arn != null ? var.transform_lambda_role_arn : local.firehose_role_arn
-      },
-    ]
+    type       = "Lambda"
+    parameters = [for parameter in local.lambda_processor_parameters : parameter if parameter != null]
   } : null
   metadata_extractor_processor = var.enable_dynamic_partitioning && var.dynamic_partition_metadata_extractor_query != null ? {
     type = "MetadataExtraction"
diff --git a/variables.tf b/variables.tf
index c9ee9cc..db3474e 100644
--- a/variables.tf
+++ b/variables.tf
@@ -83,30 +83,30 @@ variable "transform_lambda_role_arn" {
 variable "transform_lambda_buffer_size" {
   description = "The AWS Lambda function has a 6 MB invocation payload quota. Your data can expand in size after it's processed by the AWS Lambda function. A smaller buffer size allows for more room should the data expand after processing."
   type        = number
-  default     = 3
+  default     = null
   validation {
     error_message = "Valid Values: minimum: 1 MB, maximum: 3 MB."
-    condition     = var.transform_lambda_buffer_size >= 1 && var.transform_lambda_buffer_size <= 3
+    condition     = var.transform_lambda_buffer_size == null || (coalesce(var.transform_lambda_buffer_size, 1) >= 1 && coalesce(var.transform_lambda_buffer_size, 1) <= 3)
   }
 }
 
 variable "transform_lambda_buffer_interval" {
   description = "The period of time during which Kinesis Data Firehose buffers incoming data before invoking the AWS Lambda function. The AWS Lambda function is invoked once the value of the buffer size or the buffer interval is reached."
   type        = number
-  default     = 60
+  default     = null
   validation {
     error_message = "Valid Values: minimum: 60 seconds, maximum: 900 seconds."
-    condition     = var.transform_lambda_buffer_interval >= 60 && var.transform_lambda_buffer_interval <= 900
+    condition     = var.transform_lambda_buffer_interval == null || (coalesce(var.transform_lambda_buffer_interval, 60) >= 60 && coalesce(var.transform_lambda_buffer_interval, 60) <= 900)
   }
 }
 
 variable "transform_lambda_number_retries" {
   description = "Number of retries for AWS Transformation lambda"
   type        = number
-  default     = 3
+  default     = null
   validation {
     error_message = "Number of retries for lambda must be between 0 and 300."
-    condition     = var.transform_lambda_number_retries >= 0 && var.transform_lambda_number_retries <= 300
+    condition     = var.transform_lambda_number_retries == null || (coalesce(var.transform_lambda_number_retries, 3) >= 0 && coalesce(var.transform_lambda_number_retries, 3) <= 300)
   }
 }
 
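A closing note on the validation rewrite above: Terraform's `&&` and `||` operators do not short-circuit, so a bare `var.x == null || var.x >= 1` would still evaluate the comparison against `null` and fail. The `coalesce()` calls keep both comparison operands non-null regardless of which side of the `||` applies. A standalone sketch of the idiom with a hypothetical variable name:

```hcl
variable "buffer_size_mb" { # hypothetical name, mirroring transform_lambda_buffer_size
  type    = number
  default = null

  validation {
    error_message = "Valid Values: minimum: 1 MB, maximum: 3 MB."
    # coalesce() substitutes an in-range dummy when the value is null, so the
    # range checks never see null even though || evaluates both operands.
    condition = var.buffer_size_mb == null || (coalesce(var.buffer_size_mb, 1) >= 1 && coalesce(var.buffer_size_mb, 1) <= 3)
  }
}
```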