diff --git a/README.md b/README.md
index af7c2b1..eeed1a4 100644
--- a/README.md
+++ b/README.md
@@ -886,27 +886,27 @@ No modules.
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
-| [application\_role\_description](#input\_application\_role\_description) | Description of IAM Application role to use for Kinesis Firehose Stream Source | `string` | `null` | no |
-| [application\_role\_force\_detach\_policies](#input\_application\_role\_force\_detach\_policies) | Specifies to force detaching any policies the IAM Application role has before destroying it | `bool` | `true` | no |
-| [application\_role\_name](#input\_application\_role\_name) | Name of IAM Application role to use for Kinesis Firehose Stream Source | `string` | `null` | no |
-| [application\_role\_path](#input\_application\_role\_path) | Path of IAM Application role to use for Kinesis Firehose Stream Source | `string` | `null` | no |
-| [application\_role\_permissions\_boundary](#input\_application\_role\_permissions\_boundary) | The ARN of the policy that is used to set the permissions boundary for the IAM Application role used by Kinesis Firehose Stream Source | `string` | `null` | no |
-| [application\_role\_policy\_actions](#input\_application\_role\_policy\_actions) | List of Actions to Application Role Policy | `list(string)` | <pre>[<br>  "firehose:PutRecord",<br>  "firehose:PutRecordBatch"<br>]</pre> | no |
-| [application\_role\_service\_principal](#input\_application\_role\_service\_principal) | AWS Service Principal to assume application role | `string` | `null` | no |
-| [application\_role\_tags](#input\_application\_role\_tags) | A map of tags to assign to IAM Application role | `map(string)` | `{}` | no |
-| [associate\_role\_to\_redshift\_cluster](#input\_associate\_role\_to\_redshift\_cluster) | Set it to false if don't want the module associate the role to redshift cluster | `bool` | `true` | no |
-| [buffering\_interval](#input\_buffering\_interval) | Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination | `number` | `300` | no |
+| [application\_role\_description](#input\_application\_role\_description) | Description of IAM Application role to use for Kinesis Firehose Stream Source. | `string` | `null` | no |
+| [application\_role\_force\_detach\_policies](#input\_application\_role\_force\_detach\_policies) | Specifies to force detaching any policies the IAM Application role has before destroying it. | `bool` | `true` | no |
+| [application\_role\_name](#input\_application\_role\_name) | Name of IAM Application role to use for Kinesis Firehose Stream Source. | `string` | `null` | no |
+| [application\_role\_path](#input\_application\_role\_path) | Path of IAM Application role to use for Kinesis Firehose Stream Source. | `string` | `null` | no |
+| [application\_role\_permissions\_boundary](#input\_application\_role\_permissions\_boundary) | The ARN of the policy that is used to set the permissions boundary for the IAM Application role used by Kinesis Firehose Stream Source. | `string` | `null` | no |
+| [application\_role\_policy\_actions](#input\_application\_role\_policy\_actions) | List of Actions for the Application Role Policy. | `list(string)` | <pre>[<br>  "firehose:PutRecord",<br>  "firehose:PutRecordBatch"<br>]</pre> | no |
+| [application\_role\_service\_principal](#input\_application\_role\_service\_principal) | AWS Service Principal to assume application role. | `string` | `null` | no |
+| [application\_role\_tags](#input\_application\_role\_tags) | A map of tags to assign to IAM Application role. | `map(string)` | `{}` | no |
+| [associate\_role\_to\_redshift\_cluster](#input\_associate\_role\_to\_redshift\_cluster) | Set it to false if you don't want the module to associate the role to the Redshift cluster. | `bool` | `true` | no |
+| [buffering\_interval](#input\_buffering\_interval) | Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. | `number` | `300` | no |
 | [buffering\_size](#input\_buffering\_size) | Buffer incoming data to the specified size, in MBs, before delivering it to the destination. | `number` | `5` | no |
-| [configure\_existing\_application\_role](#input\_configure\_existing\_application\_role) | Set it to True if want use existing application role to add the firehose Policy | `bool` | `false` | no |
-| [coralogix\_endpoint\_location](#input\_coralogix\_endpoint\_location) | Endpoint Location to coralogix destination | `string` | `"ireland"` | no |
-| [coralogix\_parameter\_application\_name](#input\_coralogix\_parameter\_application\_name) | By default, your delivery stream arn will be used as applicationName | `string` | `null` | no |
-| [coralogix\_parameter\_subsystem\_name](#input\_coralogix\_parameter\_subsystem\_name) | By default, your delivery stream name will be used as subsystemName | `string` | `null` | no |
-| [coralogix\_parameter\_use\_dynamic\_values](#input\_coralogix\_parameter\_use\_dynamic\_values) | To use dynamic values for applicationName and subsystemName | `bool` | `false` | no |
-| [create](#input\_create) | Controls if kinesis firehose should be created (it affects almost all resources) | `bool` | `true` | no |
-| [create\_application\_role](#input\_create\_application\_role) | Set it to true to create role to be used by the source | `bool` | `false` | no |
-| [create\_application\_role\_policy](#input\_create\_application\_role\_policy) | Set it to true to create policy to the role used by the source | `bool` | `false` | no |
-| [create\_destination\_cw\_log\_group](#input\_create\_destination\_cw\_log\_group) | Enables or disables the cloudwatch log group creation to destination | `bool` | `true` | no |
-| [create\_role](#input\_create\_role) | Controls whether IAM role for Kinesis Firehose Stream should be created | `bool` | `true` | no |
+| [configure\_existing\_application\_role](#input\_configure\_existing\_application\_role) | Set it to true if you want to use an existing application role and attach the Firehose policy to it. | `bool` | `false` | no |
+| [coralogix\_endpoint\_location](#input\_coralogix\_endpoint\_location) | Endpoint Location for the Coralogix destination. | `string` | `"ireland"` | no |
+| [coralogix\_parameter\_application\_name](#input\_coralogix\_parameter\_application\_name) | By default, your delivery stream ARN will be used as applicationName. | `string` | `null` | no |
+| [coralogix\_parameter\_subsystem\_name](#input\_coralogix\_parameter\_subsystem\_name) | By default, your delivery stream name will be used as subsystemName. | `string` | `null` | no |
+| [coralogix\_parameter\_use\_dynamic\_values](#input\_coralogix\_parameter\_use\_dynamic\_values) | To use dynamic values for applicationName and subsystemName. | `bool` | `false` | no |
+| [create](#input\_create) | Controls if kinesis firehose should be created (it affects almost all resources). | `bool` | `true` | no |
+| [create\_application\_role](#input\_create\_application\_role) | Set it to true to create the role to be used by the source. | `bool` | `false` | no |
+| [create\_application\_role\_policy](#input\_create\_application\_role\_policy) | Set it to true to create the policy for the role used by the source. | `bool` | `false` | no |
+| [create\_destination\_cw\_log\_group](#input\_create\_destination\_cw\_log\_group) | Enables or disables the CloudWatch log group creation for the destination. | `bool` | `true` | no |
+| [create\_role](#input\_create\_role) | Controls whether IAM role for Kinesis Firehose Stream should be created. | `bool` | `true` | no |
 | [cw\_log\_retention\_in\_days](#input\_cw\_log\_retention\_in\_days) | Specifies the number of days you want to retain log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653. | `number` | `null` | no |
 | [cw\_tags](#input\_cw\_tags) | A map of tags to assign to the resource. | `map(string)` | `{}` | no |
 | [data\_format\_conversion\_block\_size](#input\_data\_format\_conversion\_block\_size) | The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The Value is in Bytes. | `number` | `268435456` | no |
@@ -914,11 +914,11 @@
 | [data\_format\_conversion\_glue\_database](#input\_data\_format\_conversion\_glue\_database) | Name of the AWS Glue database that contains the schema for the output data. | `string` | `null` | no |
 | [data\_format\_conversion\_glue\_region](#input\_data\_format\_conversion\_glue\_region) | If you don't specify an AWS Region, the default is the current region. | `string` | `null` | no |
 | [data\_format\_conversion\_glue\_role\_arn](#input\_data\_format\_conversion\_glue\_role\_arn) | The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed. | `string` | `null` | no |
-| [data\_format\_conversion\_glue\_table\_name](#input\_data\_format\_conversion\_glue\_table\_name) | Specifies the AWS Glue table that contains the column information that constitutes your data schema | `string` | `null` | no |
+| [data\_format\_conversion\_glue\_table\_name](#input\_data\_format\_conversion\_glue\_table\_name) | Specifies the AWS Glue table that contains the column information that constitutes your data schema. | `string` | `null` | no |
 | [data\_format\_conversion\_glue\_use\_existing\_role](#input\_data\_format\_conversion\_glue\_use\_existing\_role) | Indicates whether to use the Kinesis Firehose role for Glue access. | `bool` | `true` | no |
 | [data\_format\_conversion\_glue\_version\_id](#input\_data\_format\_conversion\_glue\_version\_id) | Specifies the table version for the output data schema. | `string` | `"LATEST"` | no |
 | [data\_format\_conversion\_hive\_timestamps](#input\_data\_format\_conversion\_hive\_timestamps) | A list of how you want Kinesis Data Firehose to parse the date and time stamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. | `list(string)` | `[]` | no |
-| [data\_format\_conversion\_input\_format](#input\_data\_format\_conversion\_input\_format) | Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe | `string` | `"OpenX"` | no |
+| [data\_format\_conversion\_input\_format](#input\_data\_format\_conversion\_input\_format) | Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. | `string` | `"OpenX"` | no |
 | [data\_format\_conversion\_openx\_case\_insensitive](#input\_data\_format\_conversion\_openx\_case\_insensitive) | When set to true, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them. | `bool` | `true` | no |
 | [data\_format\_conversion\_openx\_column\_to\_json\_key\_mappings](#input\_data\_format\_conversion\_openx\_column\_to\_json\_key\_mappings) | A map of column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. | `map(string)` | `null` | no |
 | [data\_format\_conversion\_openx\_convert\_dots\_to\_underscores](#input\_data\_format\_conversion\_openx\_convert\_dots\_to\_underscores) | Specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. | `bool` | `false` | no |
@@ -931,132 +931,132 @@
 | [data\_format\_conversion\_orc\_padding\_tolerance](#input\_data\_format\_conversion\_orc\_padding\_tolerance) | A float between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. | `number` | `0.05` | no |
 | [data\_format\_conversion\_orc\_row\_index\_stripe](#input\_data\_format\_conversion\_orc\_row\_index\_stripe) | The number of rows between index entries. | `number` | `10000` | no |
 | [data\_format\_conversion\_orc\_stripe\_size](#input\_data\_format\_conversion\_orc\_stripe\_size) | The number of bytes in each stripe. | `number` | `67108864` | no |
-| [data\_format\_conversion\_output\_format](#input\_data\_format\_conversion\_output\_format) | Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe | `string` | `"PARQUET"` | no |
+| [data\_format\_conversion\_output\_format](#input\_data\_format\_conversion\_output\_format) | Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. | `string` | `"PARQUET"` | no |
 | [data\_format\_conversion\_parquet\_compression](#input\_data\_format\_conversion\_parquet\_compression) | The compression code to use over data blocks. | `string` | `"SNAPPY"` | no |
 | [data\_format\_conversion\_parquet\_dict\_compression](#input\_data\_format\_conversion\_parquet\_dict\_compression) | Indicates whether to enable dictionary compression. | `bool` | `false` | no |
-| [data\_format\_conversion\_parquet\_max\_padding](#input\_data\_format\_conversion\_parquet\_max\_padding) | The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The value is in bytes | `number` | `0` | no |
-| [data\_format\_conversion\_parquet\_page\_size](#input\_data\_format\_conversion\_parquet\_page\_size) | Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The value is in bytes | `number` | `1048576` | no |
+| [data\_format\_conversion\_parquet\_max\_padding](#input\_data\_format\_conversion\_parquet\_max\_padding) | The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The value is in bytes. | `number` | `0` | no |
+| [data\_format\_conversion\_parquet\_page\_size](#input\_data\_format\_conversion\_parquet\_page\_size) | Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The value is in bytes. | `number` | `1048576` | no |
 | [data\_format\_conversion\_parquet\_writer\_version](#input\_data\_format\_conversion\_parquet\_writer\_version) | Indicates the version of row format to output. | `string` | `"V1"` | no |
-| [datadog\_endpoint\_type](#input\_datadog\_endpoint\_type) | Endpoint type to datadog destination | `string` | `"logs_eu"` | no |
-| [destination](#input\_destination) | This is the destination to where the data is delivered | `string` | n/a | yes |
-| [destination\_cross\_account](#input\_destination\_cross\_account) | Indicates if destination is in a different account. Only supported to Elasticsearch and OpenSearch | `bool` | `false` | no |
-| [destination\_log\_group\_name](#input\_destination\_log\_group\_name) | The CloudWatch group name for destination logs | `string` | `null` | no |
-| [destination\_log\_stream\_name](#input\_destination\_log\_stream\_name) | The CloudWatch log stream name for destination logs | `string` | `null` | no |
+| [datadog\_endpoint\_type](#input\_datadog\_endpoint\_type) | Endpoint type for the Datadog destination. | `string` | `"logs_eu"` | no |
+| [destination](#input\_destination) | This is the destination to where the data is delivered. | `string` | n/a | yes |
+| [destination\_cross\_account](#input\_destination\_cross\_account) | Indicates if destination is in a different account. Only supported for Elasticsearch and OpenSearch. | `bool` | `false` | no |
+| [destination\_log\_group\_name](#input\_destination\_log\_group\_name) | The CloudWatch group name for destination logs. | `string` | `null` | no |
+| [destination\_log\_stream\_name](#input\_destination\_log\_stream\_name) | The CloudWatch log stream name for destination logs. | `string` | `null` | no |
 | [dynamic\_partition\_append\_delimiter\_to\_record](#input\_dynamic\_partition\_append\_delimiter\_to\_record) | To configure your delivery stream to add a new line delimiter between records in objects that are delivered to Amazon S3. | `bool` | `false` | no |
-| [dynamic\_partition\_enable\_record\_deaggregation](#input\_dynamic\_partition\_enable\_record\_deaggregation) | Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter | `bool` | `false` | no |
+| [dynamic\_partition\_enable\_record\_deaggregation](#input\_dynamic\_partition\_enable\_record\_deaggregation) | Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter. | `bool` | `false` | no |
 | [dynamic\_partition\_metadata\_extractor\_query](#input\_dynamic\_partition\_metadata\_extractor\_query) | Dynamic Partition JQ query. | `string` | `null` | no |
-| [dynamic\_partition\_record\_deaggregation\_delimiter](#input\_dynamic\_partition\_record\_deaggregation\_delimiter) | Specifies the delimiter to be used for parsing through the records in the delivery stream and deaggregating them | `string` | `null` | no |
-| [dynamic\_partition\_record\_deaggregation\_type](#input\_dynamic\_partition\_record\_deaggregation\_type) | Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter | `string` | `"JSON"` | no |
-| [dynamic\_partitioning\_retry\_duration](#input\_dynamic\_partitioning\_retry\_duration) | Total amount of seconds Firehose spends on retries | `number` | `300` | no |
-| [dynatrace\_api\_url](#input\_dynatrace\_api\_url) | API URL to Dynatrace destination | `string` | `null` | no |
-| [dynatrace\_endpoint\_location](#input\_dynatrace\_endpoint\_location) | Endpoint Location to Dynatrace destination | `string` | `"eu"` | no |
-| [elasticsearch\_domain\_arn](#input\_elasticsearch\_domain\_arn) | The ARN of the Amazon ES domain. The pattern needs to be arn:.* | `string` | `null` | no |
-| [elasticsearch\_index\_name](#input\_elasticsearch\_index\_name) | The Elasticsearch index name | `string` | `null` | no |
-| [elasticsearch\_index\_rotation\_period](#input\_elasticsearch\_index\_rotation\_period) | The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data | `string` | `"OneDay"` | no |
-| [elasticsearch\_retry\_duration](#input\_elasticsearch\_retry\_duration) | The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt | `string` | `300` | no |
-| [elasticsearch\_type\_name](#input\_elasticsearch\_type\_name) | The Elasticsearch type name with maximum length of 100 characters | `string` | `null` | no |
+| [dynamic\_partition\_record\_deaggregation\_delimiter](#input\_dynamic\_partition\_record\_deaggregation\_delimiter) | Specifies the delimiter to be used for parsing through the records in the delivery stream and deaggregating them. | `string` | `null` | no |
+| [dynamic\_partition\_record\_deaggregation\_type](#input\_dynamic\_partition\_record\_deaggregation\_type) | Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter. | `string` | `"JSON"` | no |
+| [dynamic\_partitioning\_retry\_duration](#input\_dynamic\_partitioning\_retry\_duration) | Total amount of seconds Firehose spends on retries. | `number` | `300` | no |
+| [dynatrace\_api\_url](#input\_dynatrace\_api\_url) | API URL for the Dynatrace destination. | `string` | `null` | no |
+| [dynatrace\_endpoint\_location](#input\_dynatrace\_endpoint\_location) | Endpoint Location for the Dynatrace destination. | `string` | `"eu"` | no |
+| [elasticsearch\_domain\_arn](#input\_elasticsearch\_domain\_arn) | The ARN of the Amazon ES domain. The pattern needs to be arn:.*. | `string` | `null` | no |
+| [elasticsearch\_index\_name](#input\_elasticsearch\_index\_name) | The Elasticsearch index name. | `string` | `null` | no |
+| [elasticsearch\_index\_rotation\_period](#input\_elasticsearch\_index\_rotation\_period) | The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. | `string` | `"OneDay"` | no |
+| [elasticsearch\_retry\_duration](#input\_elasticsearch\_retry\_duration) | The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. | `string` | `300` | no |
+| [elasticsearch\_type\_name](#input\_elasticsearch\_type\_name) | The Elasticsearch type name with maximum length of 100 characters. | `string` | `null` | no |
 | [enable\_data\_format\_conversion](#input\_enable\_data\_format\_conversion) | Set it to true if you want to enable format conversion. | `bool` | `false` | no |
-| [enable\_destination\_log](#input\_enable\_destination\_log) | The CloudWatch Logging Options for the delivery stream | `bool` | `true` | no |
-| [enable\_dynamic\_partitioning](#input\_enable\_dynamic\_partitioning) | Enables or disables dynamic partitioning | `bool` | `false` | no |
-| [enable\_lambda\_transform](#input\_enable\_lambda\_transform) | Set it to true to enable data transformation with lambda | `bool` | `false` | no |
-| [enable\_s3\_backup](#input\_enable\_s3\_backup) | The Amazon S3 backup mode | `bool` | `false` | no |
+| [enable\_destination\_log](#input\_enable\_destination\_log) | The CloudWatch Logging Options for the delivery stream. | `bool` | `true` | no |
+| [enable\_dynamic\_partitioning](#input\_enable\_dynamic\_partitioning) | Enables or disables dynamic partitioning. | `bool` | `false` | no |
+| [enable\_lambda\_transform](#input\_enable\_lambda\_transform) | Set it to true to enable data transformation with lambda. | `bool` | `false` | no |
+| [enable\_s3\_backup](#input\_enable\_s3\_backup) | The Amazon S3 backup mode. | `bool` | `false` | no |
 | [enable\_s3\_encryption](#input\_enable\_s3\_encryption) | Indicates whether to use encryption in the S3 bucket. | `bool` | `false` | no |
-| [enable\_sse](#input\_enable\_sse) | Whether to enable encryption at rest. Only makes sense when source is Direct Put | `bool` | `false` | no |
+| [enable\_sse](#input\_enable\_sse) | Whether to enable encryption at rest. Only makes sense when source is Direct Put. | `bool` | `false` | no |
 | [enable\_vpc](#input\_enable\_vpc) | Indicates if destination is configured in VPC. Supports Elasticsearch and Opensearch destinations. | `bool` | `false` | no |
 | [firehose\_role](#input\_firehose\_role) | IAM role ARN attached to the Kinesis Firehose Stream. | `string` | `null` | no |
-| [honeycomb\_api\_host](#input\_honeycomb\_api\_host) | If you use a Secure Tenancy or other proxy, put its schema://host[:port] here | `string` | `"https://api.honeycomb.io"` | no |
-| [honeycomb\_dataset\_name](#input\_honeycomb\_dataset\_name) | Your Honeycomb dataset name to Honeycomb destination | `string` | `null` | no |
-| [http\_endpoint\_access\_key](#input\_http\_endpoint\_access\_key) | The access key required for Kinesis Firehose to authenticate with the HTTP endpoint selected as the destination | `string` | `null` | no |
-| [http\_endpoint\_enable\_request\_configuration](#input\_http\_endpoint\_enable\_request\_configuration) | The request configuration | `bool` | `false` | no |
-| [http\_endpoint\_name](#input\_http\_endpoint\_name) | The HTTP endpoint name | `string` | `null` | no |
+| [honeycomb\_api\_host](#input\_honeycomb\_api\_host) | If you use a Secure Tenancy or other proxy, put its schema://host[:port] here. | `string` | `"https://api.honeycomb.io"` | no |
+| [honeycomb\_dataset\_name](#input\_honeycomb\_dataset\_name) | Your Honeycomb dataset name for the Honeycomb destination. | `string` | `null` | no |
+| [http\_endpoint\_access\_key](#input\_http\_endpoint\_access\_key) | The access key required for Kinesis Firehose to authenticate with the HTTP endpoint selected as the destination. | `string` | `null` | no |
+| [http\_endpoint\_enable\_request\_configuration](#input\_http\_endpoint\_enable\_request\_configuration) | The request configuration. | `bool` | `false` | no |
+| [http\_endpoint\_name](#input\_http\_endpoint\_name) | The HTTP endpoint name. | `string` | `null` | no |
 | [http\_endpoint\_request\_configuration\_common\_attributes](#input\_http\_endpoint\_request\_configuration\_common\_attributes) | Describes the metadata sent to the HTTP endpoint destination. The variable is a list. Each element is a map with two keys, name and value, that correspond to the common attribute name and value. | `list(map(string))` | `[]` | no |
-| [http\_endpoint\_request\_configuration\_content\_encoding](#input\_http\_endpoint\_request\_configuration\_content\_encoding) | Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination | `string` | `"GZIP"` | no |
-| [http\_endpoint\_retry\_duration](#input\_http\_endpoint\_retry\_duration) | Total amount of seconds Firehose spends on retries. This duration starts after the initial attempt fails, It does not include the time periods during which Firehose waits for acknowledgment from the specified destination after each attempt | `number` | `300` | no |
-| [http\_endpoint\_url](#input\_http\_endpoint\_url) | The HTTP endpoint URL to which Kinesis Firehose sends your data | `string` | `null` | no |
-| [input\_source](#input\_input\_source) | This is the kinesis firehose source | `string` | `"direct-put"` | no |
-| [kinesis\_source\_is\_encrypted](#input\_kinesis\_source\_is\_encrypted) | Indicates if Kinesis data stream source is encrypted | `bool` | `false` | no |
-| [kinesis\_source\_kms\_arn](#input\_kinesis\_source\_kms\_arn) | Kinesis Source KMS Key to add Firehose role to decrypt the records | `string` | `null` | no |
-| [kinesis\_source\_role\_arn](#input\_kinesis\_source\_role\_arn) | The ARN of the role that provides access to the source Kinesis stream | `string` | `null` | no |
-| [kinesis\_source\_stream\_arn](#input\_kinesis\_source\_stream\_arn) | The kinesis stream used as the source of the firehose delivery stream | `string` | `null` | no |
+| [http\_endpoint\_request\_configuration\_content\_encoding](#input\_http\_endpoint\_request\_configuration\_content\_encoding) | Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. | `string` | `"GZIP"` | no |
+| [http\_endpoint\_retry\_duration](#input\_http\_endpoint\_retry\_duration) | Total amount of seconds Firehose spends on retries. This duration starts after the initial attempt fails. It does not include the time periods during which Firehose waits for acknowledgment from the specified destination after each attempt. | `number` | `300` | no |
+| [http\_endpoint\_url](#input\_http\_endpoint\_url) | The HTTP endpoint URL to which Kinesis Firehose sends your data. | `string` | `null` | no |
+| [input\_source](#input\_input\_source) | This is the kinesis firehose source. | `string` | `"direct-put"` | no |
+| [kinesis\_source\_is\_encrypted](#input\_kinesis\_source\_is\_encrypted) | Indicates if Kinesis data stream source is encrypted. | `bool` | `false` | no |
+| [kinesis\_source\_kms\_arn](#input\_kinesis\_source\_kms\_arn) | Kinesis Source KMS Key to allow the Firehose role to decrypt the records. | `string` | `null` | no |
+| [kinesis\_source\_role\_arn](#input\_kinesis\_source\_role\_arn) | The ARN of the role that provides access to the source Kinesis stream. | `string` | `null` | no |
+| [kinesis\_source\_stream\_arn](#input\_kinesis\_source\_stream\_arn) | The kinesis stream used as the source of the firehose delivery stream. | `string` | `null` | no |
 | [kinesis\_source\_use\_existing\_role](#input\_kinesis\_source\_use\_existing\_role) | Indicates whether to use the Kinesis Firehose role for Kinesis data stream access. | `bool` | `true` | no |
-| [logicmonitor\_account](#input\_logicmonitor\_account) | Account to use in Logic Monitor destination | `string` | `null` | no |
-| [mongodb\_realm\_webhook\_url](#input\_mongodb\_realm\_webhook\_url) | Realm Webhook URL to use in MongoDB destination | `string` | `null` | no |
-| [name](#input\_name) | A name to identify the stream. This is unique to the AWS account and region the Stream is created in | `string` | n/a | yes |
-| [newrelic\_endpoint\_type](#input\_newrelic\_endpoint\_type) | Endpoint type to New Relic destination | `string` | `"logs_eu"` | no |
+| [logicmonitor\_account](#input\_logicmonitor\_account) | Account to use in Logic Monitor destination. | `string` | `null` | no |
+| [mongodb\_realm\_webhook\_url](#input\_mongodb\_realm\_webhook\_url) | Realm Webhook URL to use in MongoDB destination. | `string` | `null` | no |
+| [name](#input\_name) | A name to identify the stream. This is unique to the AWS account and region the Stream is created in. | `string` | n/a | yes |
+| [newrelic\_endpoint\_type](#input\_newrelic\_endpoint\_type) | Endpoint type for the New Relic destination. | `string` | `"logs_eu"` | no |
 | [opensearch\_document\_id\_options](#input\_opensearch\_document\_id\_options) | The method for setting up document ID. | `string` | `"FIREHOSE_DEFAULT"` | no |
 | [opensearch\_domain\_arn](#input\_opensearch\_domain\_arn) | The ARN of the Amazon Opensearch domain. The pattern needs to be arn:.*. Conflicts with cluster\_endpoint. | `string` | `null` | no |
 | [opensearch\_index\_name](#input\_opensearch\_index\_name) | The Opensearch (And OpenSearch Serverless) index name. | `string` | `null` | no |
-| [opensearch\_index\_rotation\_period](#input\_opensearch\_index\_rotation\_period) | The Opensearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data | `string` | `"OneDay"` | no |
+| [opensearch\_index\_rotation\_period](#input\_opensearch\_index\_rotation\_period) | The Opensearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. | `string` | `"OneDay"` | no |
 | [opensearch\_retry\_duration](#input\_opensearch\_retry\_duration) | After an initial failure to deliver to Amazon OpenSearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0. | `string` | `300` | no |
 | [opensearch\_type\_name](#input\_opensearch\_type\_name) | The opensearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch\_1.1. TypeName must be empty. | `string` | `null` | no |
 | [opensearch\_vpc\_create\_service\_linked\_role](#input\_opensearch\_vpc\_create\_service\_linked\_role) | Set it to true if you want to create the Opensearch Service Linked Role to access the VPC. | `bool` | `false` | no |
 | [opensearchserverless\_collection\_arn](#input\_opensearchserverless\_collection\_arn) | The ARN of the Amazon Opensearch Serverless Collection. The pattern needs to be arn:.*. | `string` | `null` | no |
 | [opensearchserverless\_collection\_endpoint](#input\_opensearchserverless\_collection\_endpoint) | The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service. | `string` | `null` | no |
-| [policy\_path](#input\_policy\_path) | Path of policies to that should be added to IAM role for Kinesis Firehose Stream | `string` | `null` | no |
-| [redshift\_cluster\_endpoint](#input\_redshift\_cluster\_endpoint) | The redshift endpoint | `string` | `null` | no |
-| [redshift\_cluster\_identifier](#input\_redshift\_cluster\_identifier) | Redshift Cluster identifier. Necessary to associate the iam role to cluster | `string` | `null` | no |
-| [redshift\_copy\_options](#input\_redshift\_copy\_options) | Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter | `string` | `null` | no |
-| [redshift\_data\_table\_columns](#input\_redshift\_data\_table\_columns) | The data table columns that will be targeted by the copy command | `string` | `null` | no |
-| [redshift\_database\_name](#input\_redshift\_database\_name) | The redshift database name | `string` | `null` | no |
-| [redshift\_password](#input\_redshift\_password) | The password for the redshift username above | `string` | `null` | no |
-| [redshift\_retry\_duration](#input\_redshift\_retry\_duration) | The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt | `string` | `3600` | no |
-| [redshift\_table\_name](#input\_redshift\_table\_name) | The name of the table in the redshift cluster that the s3 bucket will copy to | `string` | `null` | no |
-| [redshift\_username](#input\_redshift\_username) | The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions | `string` | `null` | no |
-| [role\_description](#input\_role\_description) | Description of IAM role to use for Kinesis Firehose Stream | `string` | `null` | no |
-| [role\_force\_detach\_policies](#input\_role\_force\_detach\_policies) | Specifies to force detaching any policies the IAM role has before destroying it | `bool` | `true` | no |
-| [role\_name](#input\_role\_name) | Name of IAM role to use for Kinesis Firehose Stream | `string` | `null` | no |
-| [role\_path](#input\_role\_path) | Path of IAM role to use for Kinesis Firehose Stream | `string` | `null` | no |
-| [role\_permissions\_boundary](#input\_role\_permissions\_boundary) | The ARN of the policy that is used to set the permissions boundary for the IAM role used by Kinesis Firehose Stream | `string` | `null` | no |
-| [role\_tags](#input\_role\_tags) | A map of tags to assign to IAM role | `map(string)` | `{}` | no |
-| [s3\_backup\_bucket\_arn](#input\_s3\_backup\_bucket\_arn) | The ARN of the S3 backup bucket | `string` | `null` | no |
+| [policy\_path](#input\_policy\_path) | Path of policies that should be added to the IAM role for Kinesis Firehose Stream. | `string` | `null` | no |
+| [redshift\_cluster\_endpoint](#input\_redshift\_cluster\_endpoint) | The Redshift endpoint. | `string` | `null` | no |
+| [redshift\_cluster\_identifier](#input\_redshift\_cluster\_identifier) | Redshift Cluster identifier. Necessary to associate the IAM role to the cluster. | `string` | `null` | no |
+| [redshift\_copy\_options](#input\_redshift\_copy\_options) | Copy options for copying the data from the s3 intermediate bucket into Redshift, for example to change the default delimiter. | `string` | `null` | no |
+| [redshift\_data\_table\_columns](#input\_redshift\_data\_table\_columns) | The data table columns that will be targeted by the copy command. | `string` | `null` | no |
+| [redshift\_database\_name](#input\_redshift\_database\_name) | The Redshift database name. | `string` | `null` | no |
+| [redshift\_password](#input\_redshift\_password) | The password for the Redshift username above. | `string` | `null` | no |
+| [redshift\_retry\_duration](#input\_redshift\_retry\_duration) | The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. | `string` | `3600` | no |
+| [redshift\_table\_name](#input\_redshift\_table\_name) | The name of the table in the Redshift cluster that the s3 bucket will copy to. | `string` | `null` | no |
+| [redshift\_username](#input\_redshift\_username) | The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. | `string` | `null` | no |
+| [role\_description](#input\_role\_description) | Description of IAM role to use for Kinesis Firehose Stream. | `string` | `null` | no |
+| [role\_force\_detach\_policies](#input\_role\_force\_detach\_policies) | Specifies to force detaching any policies the IAM role has before destroying it. | `bool` | `true` | no |
+| [role\_name](#input\_role\_name) | Name of IAM role to use for Kinesis Firehose Stream. | `string` | `null` | no |
+| [role\_path](#input\_role\_path) | Path of IAM role to use for Kinesis Firehose Stream. | `string` | `null` | no |
+| [role\_permissions\_boundary](#input\_role\_permissions\_boundary) | The ARN of the policy that is used to set the permissions boundary for the IAM role used by Kinesis Firehose Stream. | `string` | `null` | no |
+| [role\_tags](#input\_role\_tags) | A map of tags to assign to IAM role. | `map(string)` | `{}` | no |
+| [s3\_backup\_bucket\_arn](#input\_s3\_backup\_bucket\_arn) | The ARN of the S3 backup bucket. | `string` | `null` | no |
 | [s3\_backup\_buffering\_interval](#input\_s3\_backup\_buffering\_interval) | Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. | `number` | `300` | no |
 | [s3\_backup\_buffering\_size](#input\_s3\_backup\_buffering\_size) | Buffer incoming data to the specified size, in MBs, before delivering it to the destination. | `number` | `5` | no |
-| [s3\_backup\_compression](#input\_s3\_backup\_compression) | The compression format | `string` | `"UNCOMPRESSED"` | no |
-| [s3\_backup\_create\_cw\_log\_group](#input\_s3\_backup\_create\_cw\_log\_group) | Enables or disables the cloudwatch log group creation | `bool` | `true` | no |
+| [s3\_backup\_compression](#input\_s3\_backup\_compression) | The compression format. | `string` | `"UNCOMPRESSED"` | no |
+| [s3\_backup\_create\_cw\_log\_group](#input\_s3\_backup\_create\_cw\_log\_group) | Enables or disables the CloudWatch log group creation. | `bool` | `true` | no |
 | [s3\_backup\_enable\_encryption](#input\_s3\_backup\_enable\_encryption) | Indicates whether to enable KMS Encryption in the S3 Backup Bucket. | `bool` | `false` | no |
-| [s3\_backup\_enable\_log](#input\_s3\_backup\_enable\_log) | Enables or disables the logging | `bool` | `true` | no |
-| [s3\_backup\_error\_output\_prefix](#input\_s3\_backup\_error\_output\_prefix) | Prefix added to failed records before writing them to S3 | `string` | `null` | no |
+| [s3\_backup\_enable\_log](#input\_s3\_backup\_enable\_log) | Enables or disables the logging. | `bool` | `true` | no |
+| [s3\_backup\_error\_output\_prefix](#input\_s3\_backup\_error\_output\_prefix) | Prefix added to failed records before writing them to S3. | `string` | `null` | no |
 | [s3\_backup\_kms\_key\_arn](#input\_s3\_backup\_kms\_key\_arn) | Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used. | `string` | `null` | no |
-| [s3\_backup\_log\_group\_name](#input\_s3\_backup\_log\_group\_name) | he CloudWatch group name for logging | `string` | `null` | no |
-| [s3\_backup\_log\_stream\_name](#input\_s3\_backup\_log\_stream\_name) | The CloudWatch log stream name for logging | `string` | `null` | no |
-| [s3\_backup\_mode](#input\_s3\_backup\_mode) | Defines how documents should be delivered to Amazon S3. Used to elasticsearch, opensearch, splunk, http configurations. For S3 and Redshift use enable\_s3\_backup | `string` | `"FailedOnly"` | no |
-| [s3\_backup\_prefix](#input\_s3\_backup\_prefix) | The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket | `string` | `null` | no |
+| [s3\_backup\_log\_group\_name](#input\_s3\_backup\_log\_group\_name) | The CloudWatch group name for logging. | `string` | `null` | no |
+| [s3\_backup\_log\_stream\_name](#input\_s3\_backup\_log\_stream\_name) | The CloudWatch log stream name for logging. | `string` | `null` | no |
+| [s3\_backup\_mode](#input\_s3\_backup\_mode) | Defines how documents should be delivered to Amazon S3. Used for elasticsearch, opensearch, splunk, and http configurations. For S3 and Redshift use enable\_s3\_backup. | `string` | `"FailedOnly"` | no |
+| [s3\_backup\_prefix](#input\_s3\_backup\_prefix) | The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. | `string` | `null` | no |
 | [s3\_backup\_role\_arn](#input\_s3\_backup\_role\_arn) | The role that Kinesis Data Firehose can use to access S3 Backup. | `string` | `null` | no |
 | [s3\_backup\_use\_existing\_role](#input\_s3\_backup\_use\_existing\_role) | Indicates whether to use the Kinesis Firehose role for S3 backup bucket access. | `bool` | `true` | no |
-| [s3\_bucket\_arn](#input\_s3\_bucket\_arn) | The ARN of the S3 destination bucket | `string` | `null` | no |
-| [s3\_compression\_format](#input\_s3\_compression\_format) | The compression format | `string` | `"UNCOMPRESSED"` | no |
+| [s3\_bucket\_arn](#input\_s3\_bucket\_arn) | The ARN of the S3 destination bucket. | `string` | `null` | no |
+| [s3\_compression\_format](#input\_s3\_compression\_format) | The compression format. | `string` | `"UNCOMPRESSED"` | no |
 | [s3\_configuration\_buffering\_interval](#input\_s3\_configuration\_buffering\_interval) | Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. | `number` | `300` | no |
 | [s3\_configuration\_buffering\_size](#input\_s3\_configuration\_buffering\_size) | Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher. | `number` | `5` | no |
-| [s3\_cross\_account](#input\_s3\_cross\_account) | Indicates if S3 bucket destination is in a different account | `bool` | `false` | no |
+| [s3\_cross\_account](#input\_s3\_cross\_account) | Indicates if S3 bucket destination is in a different account. | `bool` | `false` | no |
 | [s3\_error\_output\_prefix](#input\_s3\_error\_output\_prefix) | Prefix added to failed records before writing them to S3. This prefix appears immediately following the bucket name. | `string` | `null` | no |
-| [s3\_kms\_key\_arn](#input\_s3\_kms\_key\_arn) | Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used | `string` | `null` | no |
-| [s3\_own\_bucket](#input\_s3\_own\_bucket) | Indicates if you own the bucket. If not, will be configure permissions to grants the bucket owner full access to the objects delivered by Kinesis Data Firehose | `bool` | `true` | no |
-| [s3\_prefix](#input\_s3\_prefix) | The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket | `string` | `null` | no |
-| [splunk\_hec\_acknowledgment\_timeout](#input\_splunk\_hec\_acknowledgment\_timeout) | The amount of time, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends it data | `number` | `600` | no |
-| [splunk\_hec\_endpoint](#input\_splunk\_hec\_endpoint) | The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data | `string` | `null` | no |
+| [s3\_kms\_key\_arn](#input\_s3\_kms\_key\_arn) | Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used. | `string` | `null` | no |
+| [s3\_own\_bucket](#input\_s3\_own\_bucket) | Indicates if you own the bucket. If not, permissions will be configured to grant the bucket owner full access to the objects delivered by Kinesis Data Firehose. | `bool` | `true` | no |
+| [s3\_prefix](#input\_s3\_prefix) | The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket. | `string` | `null` | no |
+| [splunk\_hec\_acknowledgment\_timeout](#input\_splunk\_hec\_acknowledgment\_timeout) | The amount of time that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends it data. | `number` | `600` | no |
+| [splunk\_hec\_endpoint](#input\_splunk\_hec\_endpoint) | The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data. | `string` | `null` | no |
 | [splunk\_hec\_endpoint\_type](#input\_splunk\_hec\_endpoint\_type) | The HEC endpoint type | `string` | `"Raw"` | no |
-| [splunk\_hec\_token](#input\_splunk\_hec\_token) | The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint | `string` | `null` | no |
-| [splunk\_retry\_duration](#input\_splunk\_retry\_duration) | After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt) | `number` | `300` | no |
-| [sse\_kms\_key\_arn](#input\_sse\_kms\_key\_arn) | Amazon Resource Name (ARN) of the encryption key | `string` | `null` | no |
+| [splunk\_hec\_token](#input\_splunk\_hec\_token) | The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint. | `string` | `null` | no |
+| [splunk\_retry\_duration](#input\_splunk\_retry\_duration) | After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). | `number` | `300` | no |
+| [sse\_kms\_key\_arn](#input\_sse\_kms\_key\_arn) | Amazon Resource Name (ARN) of the encryption key. | `string` | `null` | no |
 | [sse\_kms\_key\_type](#input\_sse\_kms\_key\_type) | Type of encryption key. | `string` | `"AWS_OWNED_CMK"` | no |
-| [sumologic\_data\_type](#input\_sumologic\_data\_type) | Data Type to use in Sumo Logic destination | `string` | `"log"` | no |
-| [sumologic\_deployment\_name](#input\_sumologic\_deployment\_name) | Deployment Name to use in Sumo Logic destination | `string` | `null` | no |
+| [sumologic\_data\_type](#input\_sumologic\_data\_type) | Data Type to use in Sumo Logic destination. | `string` | `"log"` | no |
+| [sumologic\_deployment\_name](#input\_sumologic\_deployment\_name) | Deployment Name to use in Sumo Logic destination. | `string` | `null` | no |
 | [tags](#input\_tags) | A map of tags to assign to resources. | `map(string)` | `{}` | no |
-| [transform\_lambda\_arn](#input\_transform\_lambda\_arn) | Lambda ARN to Transform source records | `string` | `null` | no |
+| [transform\_lambda\_arn](#input\_transform\_lambda\_arn) | Lambda ARN to Transform source records. | `string` | `null` | no |
 | [transform\_lambda\_buffer\_interval](#input\_transform\_lambda\_buffer\_interval) | The period of time during which Kinesis Data Firehose buffers incoming data before invoking the AWS Lambda function. The AWS Lambda function is invoked once the value of the buffer size or the buffer interval is reached. | `number` | `60` | no |
 | [transform\_lambda\_buffer\_size](#input\_transform\_lambda\_buffer\_size) | The AWS Lambda function has a 6 MB invocation payload quota. Your data can expand in size after it's processed by the AWS Lambda function. A smaller buffer size allows for more room should the data expand after processing. | `number` | `3` | no |
-| [transform\_lambda\_number\_retries](#input\_transform\_lambda\_number\_retries) | Number of retries for AWS Transformation lambda | `number` | `3` | no |
-| [transform\_lambda\_role\_arn](#input\_transform\_lambda\_role\_arn) | The ARN of the role to execute the transform lambda. If null use the Firehose Stream role | `string` | `null` | no |
-| [vpc\_create\_destination\_security\_group](#input\_vpc\_create\_destination\_security\_group) | Indicates if want create destination security group to associate to firehose destinations | `bool` | `false` | no |
-| [vpc\_create\_security\_group](#input\_vpc\_create\_security\_group) | Indicates if want create security group to associate to kinesis firehose | `bool` | `false` | no |
+| [transform\_lambda\_number\_retries](#input\_transform\_lambda\_number\_retries) | Number of retries for AWS Transformation lambda. | `number` | `3` | no |
+| [transform\_lambda\_role\_arn](#input\_transform\_lambda\_role\_arn) | The ARN of the role to execute the transform lambda. If null use the Firehose Stream role. | `string` | `null` | no |
+| [vpc\_create\_destination\_security\_group](#input\_vpc\_create\_destination\_security\_group) | Indicates whether to create a destination security group to associate with Firehose destinations. | `bool` | `false` | no |
+| [vpc\_create\_security\_group](#input\_vpc\_create\_security\_group) | Indicates whether to create a security group to associate with Kinesis Firehose. | `bool` | `false` | no |
 | [vpc\_role\_arn](#input\_vpc\_role\_arn) | The ARN of the IAM role to be assumed by Firehose for calling the Amazon EC2 configuration API and for creating network interfaces. Supports Elasticsearch and Opensearch destinations. | `string` | `null` | no |
-| [vpc\_security\_group\_destination\_configure\_existing](#input\_vpc\_security\_group\_destination\_configure\_existing) | Indicates if want configure an existing destination security group with the necessary rules | `bool` | `false` | no |
-| [vpc\_security\_group\_destination\_ids](#input\_vpc\_security\_group\_destination\_ids) | A list of security group IDs associated to destinations to allow firehose traffic | `list(string)` | `null` | no |
-| [vpc\_security\_group\_destination\_vpc\_id](#input\_vpc\_security\_group\_destination\_vpc\_id) | VPC ID to create the destination security group. Only supported to Redshift and splunk destinations | `string` | `null` | no |
-| [vpc\_security\_group\_firehose\_configure\_existing](#input\_vpc\_security\_group\_firehose\_configure\_existing) | Indicates if want configure an existing firehose security group with the necessary rules | `bool` | `false` | no |
+| [vpc\_security\_group\_destination\_configure\_existing](#input\_vpc\_security\_group\_destination\_configure\_existing) | Indicates whether to configure an existing destination security group with the necessary rules. | `bool` | `false` | no |
+| [vpc\_security\_group\_destination\_ids](#input\_vpc\_security\_group\_destination\_ids) | A list of security group IDs associated with destinations to allow Firehose traffic. | `list(string)` | `null` | no |
+| [vpc\_security\_group\_destination\_vpc\_id](#input\_vpc\_security\_group\_destination\_vpc\_id) | VPC ID to create the destination security group. Only supported for Redshift and Splunk destinations. | `string` | `null` | no |
+| [vpc\_security\_group\_firehose\_configure\_existing](#input\_vpc\_security\_group\_firehose\_configure\_existing) | Indicates whether to configure an existing Firehose security group with the necessary rules. | `bool` | `false` | no |
 | [vpc\_security\_group\_firehose\_ids](#input\_vpc\_security\_group\_firehose\_ids) | A list of security group IDs to associate with Kinesis Firehose. | `list(string)` | `null` | no |
 | [vpc\_security\_group\_same\_as\_destination](#input\_vpc\_security\_group\_same\_as\_destination) | Indicates if the firehose security group is the same as destination. | `bool` | `true` | no |
-| [vpc\_security\_group\_tags](#input\_vpc\_security\_group\_tags) | A map of tags to assign to security group | `map(string)` | `{}` | no |
+| [vpc\_security\_group\_tags](#input\_vpc\_security\_group\_tags) | A map of tags to assign to security group. | `map(string)` | `{}` | no |
 | [vpc\_subnet\_ids](#input\_vpc\_subnet\_ids) | A list of subnet IDs to associate with Kinesis Firehose. Supports Elasticsearch and Opensearch destinations. | `list(string)` | `null` | no |
 | [vpc\_use\_existing\_role](#input\_vpc\_use\_existing\_role) | Indicates whether to use the Kinesis Firehose role for VPC access. Supports Elasticsearch and Opensearch destinations. | `bool` | `true` | no |
diff --git a/variables.tf b/variables.tf
index 0702971..6a95401 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,16 +1,16 @@
 variable "create" {
-  description = "Controls if kinesis firehose should be created (it affects almost all resources)"
+  description = "Controls if kinesis firehose should be created (it affects almost all resources)."
   type        = bool
   default     = true
 }
 
 variable "name" {
-  description = "A name to identify the stream. This is unique to the AWS account and region the Stream is created in"
+  description = "A name to identify the stream. This is unique to the AWS account and region the Stream is created in."
   type        = string
 }
 
 variable "input_source" {
-  description = "This is the kinesis firehose source"
+  description = "This is the kinesis firehose source."
   type        = string
   default     = "direct-put"
   validation {
@@ -20,7 +20,7 @@ variable "input_source" {
 }
 
 variable "destination" {
-  description = "This is the destination to where the data is delivered"
+  description = "This is the destination to where the data is delivered."
   type        = string
   validation {
     error_message = "Please use a valid destination!"
@@ -29,7 +29,7 @@ variable "destination" { } variable "create_role" { - description = "Controls whether IAM role for Kinesis Firehose Stream should be created" + description = "Controls whether IAM role for Kinesis Firehose Stream should be created." type = bool default = true } @@ -53,7 +53,7 @@ variable "buffering_size" { } variable "buffering_interval" { - description = "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination" + description = "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination." type = number default = 300 validation { @@ -63,19 +63,19 @@ variable "buffering_interval" { } variable "enable_lambda_transform" { - description = "Set it to true to enable data transformation with lambda" + description = "Set it to true to enable data transformation with lambda." type = bool default = false } variable "transform_lambda_arn" { - description = "Lambda ARN to Transform source records" + description = "Lambda ARN to Transform source records." type = string default = null } variable "transform_lambda_role_arn" { - description = "The ARN of the role to execute the transform lambda. If null use the Firehose Stream role" + description = "The ARN of the role to execute the transform lambda. If null use the Firehose Stream role." type = string default = null } @@ -101,7 +101,7 @@ variable "transform_lambda_buffer_interval" { } variable "transform_lambda_number_retries" { - description = "Number of retries for AWS Transformation lambda" + description = "Number of retries for AWS Transformation lambda." type = number default = 3 validation { @@ -131,19 +131,19 @@ variable "s3_configuration_buffering_interval" { } variable "enable_s3_backup" { - description = "The Amazon S3 backup mode" + description = "The Amazon S3 backup mode." type = bool default = false } variable "s3_backup_bucket_arn" { - description = "The ARN of the S3 backup bucket" + description = "The ARN of the S3 backup bucket." type = string default = null } variable "s3_backup_prefix" { - description = "The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket" + description = "The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket." type = string default = null } @@ -169,7 +169,7 @@ variable "s3_backup_buffering_interval" { } variable "s3_backup_compression" { - description = "The compression format" + description = "The compression format." type = string default = "UNCOMPRESSED" validation { @@ -179,7 +179,7 @@ variable "s3_backup_compression" { } variable "s3_backup_error_output_prefix" { - description = "Prefix added to failed records before writing them to S3" + description = "Prefix added to failed records before writing them to S3." type = string default = null } @@ -209,31 +209,31 @@ variable "s3_backup_role_arn" { } variable "s3_backup_enable_log" { - description = "Enables or disables the logging" + description = "Enables or disables the logging." 
variable "s3_backup_create_cw_log_group" {
-  description = "Enables or disables the cloudwatch log group creation"
+  description = "Enables or disables the CloudWatch log group creation."
  type = bool
  default = true
}

variable "s3_backup_log_group_name" {
-  description = "he CloudWatch group name for logging"
+  description = "The CloudWatch group name for logging."
  type = string
  default = null
}

variable "s3_backup_log_stream_name" {
-  description = "The CloudWatch log stream name for logging"
+  description = "The CloudWatch log stream name for logging."
  type = string
  default = null
}

variable "s3_backup_mode" {
-  description = "Defines how documents should be delivered to Amazon S3. Used to elasticsearch, opensearch, splunk, http configurations. For S3 and Redshift use enable_s3_backup"
+  description = "Defines how documents should be delivered to Amazon S3. Used for elasticsearch, opensearch, splunk, and http configurations. For S3 and Redshift, use enable_s3_backup."
  type = string
  default = "FailedOnly"
  validation {
@@ -243,25 +243,25 @@ variable "s3_backup_mode" {
}

variable "enable_destination_log" {
-  description = "The CloudWatch Logging Options for the delivery stream"
+  description = "Enables or disables CloudWatch logging for the delivery stream."
  type = bool
  default = true
}

variable "create_destination_cw_log_group" {
-  description = "Enables or disables the cloudwatch log group creation to destination"
+  description = "Enables or disables the CloudWatch log group creation for the destination."
  type = bool
  default = true
}

variable "destination_log_group_name" {
-  description = "The CloudWatch group name for destination logs"
+  description = "The CloudWatch group name for destination logs."
  type = string
  default = null
}

variable "destination_log_stream_name" {
-  description = "The CloudWatch log stream name for destination logs"
+  description = "The CloudWatch log stream name for destination logs."
  type = string
  default = null
}

@@ -279,13 +279,13 @@ variable "cw_tags" {
}

variable "s3_bucket_arn" {
-  description = "The ARN of the S3 destination bucket"
+  description = "The ARN of the S3 destination bucket."
  type = string
  default = null
}

variable "s3_prefix" {
-  description = "The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket"
+  description = "The YYYY/MM/DD/HH time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket."
  type = string
  default = null
}

@@ -303,13 +303,13 @@ variable "enable_s3_encryption" {
}

variable "s3_kms_key_arn" {
-  description = "Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used"
+  description = "Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will be used."
  type = string
  default = null
}

variable "s3_compression_format" {
-  description = "The compression format"
+  description = "The compression format."
  type = string
  default = "UNCOMPRESSED"
  validation {
@@ -319,19 +319,19 @@ variable "s3_compression_format" {
}
variable "s3_own_bucket" {
-  description = "Indicates if you own the bucket. If not, will be configure permissions to grants the bucket owner full access to the objects delivered by Kinesis Data Firehose"
+  description = "Indicates if you own the bucket. If not, permissions will be configured to grant the bucket owner full access to the objects delivered by Kinesis Data Firehose."
  type = bool
  default = true
}

variable "s3_cross_account" {
-  description = "Indicates if S3 bucket destination is in a different account"
+  description = "Indicates if the S3 bucket destination is in a different account."
  type = bool
  default = false
}

variable "destination_cross_account" {
-  description = "Indicates if destination is in a different account. Only supported to Elasticsearch and OpenSearch"
+  description = "Indicates if the destination is in a different account. Only supported for Elasticsearch and OpenSearch."
  type = bool
  default = false
}

@@ -340,7 +340,7 @@ variable "destination_cross_account" {
# Kinesis Source
######
variable "enable_sse" {
-  description = "Whether to enable encryption at rest. Only makes sense when source is Direct Put"
+  description = "Whether to enable encryption at rest. Only makes sense when the source is Direct Put."
  type = bool
  default = false
}

@@ -356,19 +356,19 @@ variable "sse_kms_key_type" {
}

variable "sse_kms_key_arn" {
-  description = "Amazon Resource Name (ARN) of the encryption key"
+  description = "Amazon Resource Name (ARN) of the encryption key."
  type = string
  default = null
}

variable "kinesis_source_stream_arn" {
-  description = "The kinesis stream used as the source of the firehose delivery stream"
+  description = "The Kinesis stream used as the source of the Firehose delivery stream."
  type = string
  default = null
}

variable "kinesis_source_role_arn" {
-  description = "The ARN of the role that provides access to the source Kinesis stream"
+  description = "The ARN of the role that provides access to the source Kinesis stream."
  type = string
  default = null
}

@@ -380,13 +380,13 @@ variable "kinesis_source_use_existing_role" {
}

variable "kinesis_source_is_encrypted" {
-  description = "Indicates if Kinesis data stream source is encrypted"
+  description = "Indicates if the Kinesis data stream source is encrypted."
  type = bool
  default = false
}

variable "kinesis_source_kms_arn" {
-  description = "Kinesis Source KMS Key to add Firehose role to decrypt the records"
+  description = "KMS key of the Kinesis source stream; the Firehose role is granted permission to decrypt the records with it."
  type = string
  default = null
}

@@ -395,13 +395,13 @@ variable "kinesis_source_kms_arn" {
# S3 Destination Configurations
######
variable "enable_dynamic_partitioning" {
-  description = "Enables or disables dynamic partitioning"
+  description = "Enables or disables dynamic partitioning."
  type = bool
  default = false
}

variable "dynamic_partitioning_retry_duration" {
-  description = "Total amount of seconds Firehose spends on retries"
+  description = "Total amount of seconds Firehose spends on retries."
  type = number
  default = 300
  validation {
@@ -423,13 +423,13 @@ variable "dynamic_partition_metadata_extractor_query" {
}

variable "dynamic_partition_enable_record_deaggregation" {
-  description = "Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter"
+  description = "Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter."
  type = bool
  default = false
}
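The dynamic partitioning inputs above work together; a minimal sketch, assuming JSON records carrying a `customer_id` field (module `source`, the bucket reference, and the JQ query are placeholders):

```hcl
module "firehose" {
  source = "path/to/this/module" # placeholder

  name          = "partitioned-stream"
  destination   = "s3" # assumed destination value
  s3_bucket_arn = aws_s3_bucket.destination.arn

  # Extract a partition key from each record with a JQ-style query.
  enable_dynamic_partitioning                = true
  dynamic_partitioning_retry_duration        = 300
  dynamic_partition_metadata_extractor_query = "{customer_id: .customer_id}" # example query

  # Split batched records apart before extracting keys.
  dynamic_partition_enable_record_deaggregation = true

  # The extracted key can then be referenced in the delivery prefix.
  s3_prefix = "data/customer_id=!{partitionKeyFromQuery:customer_id}/"
}
```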
variable "dynamic_partition_record_deaggregation_type" {
-  description = "Data deaggregation is the process of parsing through the records in a delivery stream and separating the records based either on valid JSON or on the specified delimiter"
+  description = "The type of record deaggregation: records are separated based either on valid JSON or on the specified delimiter."
  type = string
  default = "JSON"
  validation {
@@ -439,7 +439,7 @@ variable "dynamic_partition_record_deaggregation_type" {
}

variable "dynamic_partition_record_deaggregation_delimiter" {
-  description = "Specifies the delimiter to be used for parsing through the records in the delivery stream and deaggregating them"
+  description = "Specifies the delimiter to be used for parsing through the records in the delivery stream and deaggregating them."
  type = string
  default = null
}

@@ -469,7 +469,7 @@ variable "data_format_conversion_glue_role_arn" {
}

variable "data_format_conversion_glue_table_name" {
-  description = "Specifies the AWS Glue table that contains the column information that constitutes your data schema"
+  description = "Specifies the AWS Glue table that contains the column information that constitutes your data schema."
  type = string
  default = null
}

@@ -493,7 +493,7 @@ variable "data_format_conversion_glue_version_id" {
}

variable "data_format_conversion_input_format" {
-  description = "Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe"
+  description = "Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe."
  type = string
  default = "OpenX"
  validation {
@@ -527,7 +527,7 @@ variable "data_format_conversion_hive_timestamps" {
}

variable "data_format_conversion_output_format" {
-  description = "Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe"
+  description = "Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe."
  type = string
  default = "PARQUET"
  validation {
@@ -563,13 +563,13 @@ variable "data_format_conversion_parquet_dict_compression" {
}

variable "data_format_conversion_parquet_max_padding" {
-  description = "The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The value is in bytes"
+  description = "The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The value is in bytes."
  type = number
  default = 0
}

variable "data_format_conversion_parquet_page_size" {
-  description = "Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The value is in bytes"
+  description = "Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The value is in bytes."
  type = number
  default = 1048576
  validation {
@@ -674,51 +674,51 @@ variable "data_format_conversion_orc_stripe_size" {
# Redshift Destination Variables
######
variable "redshift_cluster_endpoint" {
-  description = "The redshift endpoint"
+  description = "The Redshift endpoint."
  type = string
  default = null
}
variable "redshift_username" {
-  description = "The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions"
+  description = "The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided are used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted to Amazon Redshift INSERT permissions."
  type = string
  default = null
  sensitive = true
}

variable "redshift_password" {
-  description = "The password for the redshift username above"
+  description = "The password for the Redshift username above."
  type = string
  default = null
  sensitive = true
}

variable "redshift_database_name" {
-  description = "The redshift database name"
+  description = "The Redshift database name."
  type = string
  default = null
}

variable "redshift_table_name" {
-  description = "The name of the table in the redshift cluster that the s3 bucket will copy to"
+  description = "The name of the table in the Redshift cluster that the S3 bucket data will be copied to."
  type = string
  default = null
}

variable "redshift_copy_options" {
-  description = "Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter"
+  description = "Copy options for copying the data from the S3 intermediate bucket into Redshift, for example to change the default delimiter."
  type = string
  default = null
}

variable "redshift_data_table_columns" {
-  description = "The data table columns that will be targeted by the copy command"
+  description = "The data table columns that will be targeted by the copy command."
  type = string
  default = null
}

variable "redshift_retry_duration" {
-  description = "The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt"
+  description = "The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt."
  type = string
  default = 3600
  validation {
@@ -728,13 +728,13 @@ variable "redshift_retry_duration" {
}

variable "redshift_cluster_identifier" {
-  description = "Redshift Cluster identifier. Necessary to associate the iam role to cluster"
+  description = "Redshift cluster identifier. Necessary to associate the IAM role with the cluster."
  type = string
  default = null
}

variable "associate_role_to_redshift_cluster" {
-  description = "Set it to false if don't want the module associate the role to redshift cluster"
+  description = "Set it to false if you don't want the module to associate the role with the Redshift cluster."
  type = bool
  default = true
}
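A hedged sketch of how the Redshift inputs above combine. The cluster and bucket references are placeholders, and the S3 bucket is assumed to act as the intermediate bucket for the `COPY`:

```hcl
module "firehose" {
  source = "path/to/this/module" # placeholder

  name          = "redshift-stream"
  destination   = "redshift" # assumed destination value
  s3_bucket_arn = aws_s3_bucket.staging.arn # intermediate bucket for the COPY command

  redshift_cluster_endpoint   = aws_redshift_cluster.this.endpoint
  redshift_cluster_identifier = aws_redshift_cluster.this.cluster_identifier
  redshift_database_name      = "analytics"
  redshift_table_name         = "firehose_events"
  redshift_username           = var.redshift_username
  redshift_password           = var.redshift_password
  redshift_copy_options       = "json 'auto'" # example COPY option

  # Let the module attach the IAM role to the cluster (default behaviour).
  associate_role_to_redshift_cluster = true
}
```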
@@ -743,19 +743,19 @@
# Elasticsearch Destination Variables
######
variable "elasticsearch_domain_arn" {
-  description = "The ARN of the Amazon ES domain. The pattern needs to be arn:.*"
+  description = "The ARN of the Amazon ES domain. The pattern needs to be arn:.*."
  type = string
  default = null
}

variable "elasticsearch_index_name" {
-  description = "The Elasticsearch index name"
+  description = "The Elasticsearch index name."
  type = string
  default = null
}

variable "elasticsearch_index_rotation_period" {
-  description = "The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data"
+  description = "The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data."
  type = string
  default = "OneDay"
  validation {
@@ -765,13 +765,13 @@ variable "elasticsearch_index_rotation_period" {
}

variable "elasticsearch_type_name" {
-  description = "The Elasticsearch type name with maximum length of 100 characters"
+  description = "The Elasticsearch type name with maximum length of 100 characters."
  type = string
  default = null
}

variable "elasticsearch_retry_duration" {
-  description = "The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt"
+  description = "The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt."
  type = string
  default = 300
  validation {
@@ -796,7 +796,7 @@ variable "opensearch_index_name" {
}

variable "opensearch_index_rotation_period" {
-  description = "The Opensearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data"
+  description = "The OpenSearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data."
  type = string
  default = "OneDay"
  validation {
@@ -888,43 +888,43 @@ variable "vpc_security_group_firehose_ids" {
}

variable "vpc_create_security_group" {
-  description = "Indicates if want create security group to associate to kinesis firehose"
+  description = "Indicates whether to create a security group to associate with Kinesis Firehose."
  type = bool
  default = false
}

variable "vpc_security_group_firehose_configure_existing" {
-  description = "Indicates if want configure an existing firehose security group with the necessary rules"
+  description = "Indicates whether to configure an existing Firehose security group with the necessary rules."
  type = bool
  default = false
}

variable "vpc_security_group_tags" {
-  description = "A map of tags to assign to security group"
+  description = "A map of tags to assign to the security group."
  type = map(string)
  default = {}
}

variable "vpc_security_group_destination_ids" {
-  description = "A list of security group IDs associated to destinations to allow firehose traffic"
+  description = "A list of security group IDs associated with destinations to allow Firehose traffic."
  type = list(string)
  default = null
}

variable "vpc_create_destination_security_group" {
-  description = "Indicates if want create destination security group to associate to firehose destinations"
+  description = "Indicates whether to create a destination security group to associate with Firehose destinations."
  type = bool
  default = false
}

variable "vpc_security_group_destination_configure_existing" {
-  description = "Indicates if want configure an existing destination security group with the necessary rules"
+  description = "Indicates whether to configure an existing destination security group with the necessary rules."
  type = bool
  default = false
}

variable "vpc_security_group_destination_vpc_id" {
-  description = "VPC ID to create the destination security group. Only supported to Redshift and splunk destinations"
+  description = "VPC ID in which to create the destination security group. Only supported for Redshift and Splunk destinations."
  type = string
  default = null
}
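The VPC inputs above split into a Firehose-side and a destination-side security group; a sketch for a destination reached through a VPC, with placeholder IDs and an assumed `redshift` destination value:

```hcl
module "firehose" {
  source = "path/to/this/module" # placeholder

  name          = "redshift-in-vpc-stream"
  destination   = "redshift" # assumed destination value
  s3_bucket_arn = aws_s3_bucket.staging.arn

  # Either let the module create and wire the destination security group...
  vpc_create_destination_security_group = true
  vpc_security_group_destination_vpc_id = "vpc-0123456789abcdef0" # placeholder VPC ID

  # ...or reuse existing groups and only add the needed rules:
  # vpc_security_group_destination_configure_existing = true
  # vpc_security_group_destination_ids                = ["sg-0123456789abcdef0"] # placeholder

  vpc_security_group_tags = { ManagedBy = "terraform" }
}
```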
@@ -932,20 +932,20 @@
# Splunk Destination Variables
######
variable "splunk_hec_endpoint" {
-  description = "The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data"
+  description = "The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data."
  type = string
  default = null
}

variable "splunk_hec_token" {
-  description = "The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint"
+  description = "The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint."
  type = string
  default = null
  sensitive = true
}

variable "splunk_hec_acknowledgment_timeout" {
-  description = "The amount of time, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends it data"
+  description = "The amount of time that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends the data."
  type = number
  default = 600
  validation {
@@ -965,7 +965,7 @@
}

variable "splunk_retry_duration" {
-  description = "After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt)"
+  description = "After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 and 7200, during which Firehose re-attempts delivery (including the first attempt)."
  type = number
  default = 300
  validation {
@@ -978,26 +978,26 @@
# Http Endpoint Destination Variables
######
variable "http_endpoint_url" {
-  description = "The HTTP endpoint URL to which Kinesis Firehose sends your data"
+  description = "The HTTP endpoint URL to which Kinesis Firehose sends your data."
  type = string
  default = null
}

variable "http_endpoint_name" {
-  description = "The HTTP endpoint name"
+  description = "The HTTP endpoint name."
  type = string
  default = null
}

variable "http_endpoint_access_key" {
-  description = "The access key required for Kinesis Firehose to authenticate with the HTTP endpoint selected as the destination"
+  description = "The access key required for Kinesis Firehose to authenticate with the HTTP endpoint selected as the destination."
  type = string
  default = null
  sensitive = true
}

variable "http_endpoint_retry_duration" {
-  description = "Total amount of seconds Firehose spends on retries. This duration starts after the initial attempt fails, It does not include the time periods during which Firehose waits for acknowledgment from the specified destination after each attempt"
+  description = "Total amount of seconds Firehose spends on retries. This duration starts after the initial attempt fails. It does not include the time periods during which Firehose waits for acknowledgment from the specified destination after each attempt."
  type = number
  default = 300
  validation {
@@ -1007,13 +1007,13 @@ variable "http_endpoint_retry_duration" {
}

variable "http_endpoint_enable_request_configuration" {
-  description = "The request configuration"
+  description = "Enables or disables the request configuration."
  type = bool
  default = false
}
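A corresponding sketch for the Splunk inputs above; the HEC URL is a placeholder, and the token is expected to come from a sensitive variable:

```hcl
module "firehose" {
  source = "path/to/this/module" # placeholder

  name        = "splunk-stream"
  destination = "splunk" # assumed destination value

  splunk_hec_endpoint               = "https://hec.example.com:8088" # placeholder HEC URL
  splunk_hec_token                  = var.splunk_hec_token
  splunk_hec_acknowledgment_timeout = 600
  splunk_retry_duration             = 300
}
```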
variable "http_endpoint_request_configuration_content_encoding" {
-  description = "Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination"
+  description = "Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination."
  type = string
  default = "GZIP"
  validation {
@@ -1032,7 +1032,7 @@ variable "http_endpoint_request_configuration_common_attributes" {
# Datadog Destination Variables
######
variable "datadog_endpoint_type" {
-  description = "Endpoint type to datadog destination"
+  description = "Endpoint type for the Datadog destination."
  type = string
  default = "logs_eu"
  validation {
@@ -1045,7 +1045,7 @@ variable "datadog_endpoint_type" {
# New Relic Destination Variables
######
variable "newrelic_endpoint_type" {
-  description = "Endpoint type to New Relic destination"
+  description = "Endpoint type for the New Relic destination."
  type = string
  default = "logs_eu"
  validation {
@@ -1058,7 +1058,7 @@ variable "newrelic_endpoint_type" {
# Dynatrace Destination Variables
######
variable "dynatrace_endpoint_location" {
-  description = "Endpoint Location to Dynatrace destination"
+  description = "Endpoint location for the Dynatrace destination."
  type = string
  default = "eu"
  validation {
@@ -1068,7 +1068,7 @@ variable "dynatrace_endpoint_location" {
}

variable "dynatrace_api_url" {
-  description = "API URL to Dynatrace destination"
+  description = "API URL for the Dynatrace destination."
  type = string
  default = null
}

@@ -1079,12 +1079,12 @@ variable "dynatrace_api_url" {
variable "honeycomb_api_host" {
  type = string
  default = "https://api.honeycomb.io"
-  description = "If you use a Secure Tenancy or other proxy, put its schema://host[:port] here"
+  description = "If you use a Secure Tenancy or other proxy, put its schema://host[:port] here."
}

variable "honeycomb_dataset_name" {
  type = string
-  description = "Your Honeycomb dataset name to Honeycomb destination"
+  description = "Your Honeycomb dataset name for the Honeycomb destination."
  default = null
}

@@ -1093,7 +1093,7 @@ variable "honeycomb_dataset_name" {
######
variable "logicmonitor_account" {
  type = string
-  description = "Account to use in Logic Monitor destination"
+  description = "Account to use in the Logic Monitor destination."
  default = null
}

@@ -1102,7 +1102,7 @@ variable "logicmonitor_account" {
######
variable "mongodb_realm_webhook_url" {
  type = string
-  description = "Realm Webhook URL to use in MongoDB destination"
+  description = "Realm Webhook URL to use in the MongoDB destination."
  default = null
}

@@ -1111,13 +1111,13 @@ variable "mongodb_realm_webhook_url" {
######
variable "sumologic_deployment_name" {
  type = string
-  description = "Deployment Name to use in Sumo Logic destination"
+  description = "Deployment name to use in the Sumo Logic destination."
  default = null
}

variable "sumologic_data_type" {
  type = string
-  description = "Data Type to use in Sumo Logic destination"
+  description = "Data type to use in the Sumo Logic destination."
  default = "log"
  validation {
    error_message = "Please use a valid data type!"
@@ -1129,7 +1129,7 @@ variable "sumologic_data_type" {
# Coralogix Destination Variables
######
variable "coralogix_endpoint_location" {
-  description = "Endpoint Location to coralogix destination"
+  description = "Endpoint location for the Coralogix destination."
  type = string
  default = "ireland"
  validation {
@@ -1139,19 +1139,19 @@ variable "coralogix_endpoint_location" {
}

variable "coralogix_parameter_application_name" {
-  description = "By default, your delivery stream arn will be used as applicationName"
+  description = "By default, your delivery stream ARN will be used as the applicationName."
  type = string
  default = null
}

variable "coralogix_parameter_subsystem_name" {
-  description = "By default, your delivery stream name will be used as subsystemName"
+  description = "By default, your delivery stream name will be used as the subsystemName."
  type = string
  default = null
}

variable "coralogix_parameter_use_dynamic_values" {
-  description = "To use dynamic values for applicationName and subsystemName"
+  description = "To use dynamic values for applicationName and subsystemName."
  type = bool
  default = false
}

@@ -1166,109 +1166,109 @@ variable "firehose_role" {
}

variable "role_name" {
-  description = "Name of IAM role to use for Kinesis Firehose Stream"
+  description = "Name of IAM role to use for Kinesis Firehose Stream."
  type = string
  default = null
}

variable "role_description" {
-  description = "Description of IAM role to use for Kinesis Firehose Stream"
+  description = "Description of IAM role to use for Kinesis Firehose Stream."
  type = string
  default = null
}

variable "role_path" {
-  description = "Path of IAM role to use for Kinesis Firehose Stream"
+  description = "Path of IAM role to use for Kinesis Firehose Stream."
  type = string
  default = null
}

variable "role_force_detach_policies" {
-  description = "Specifies to force detaching any policies the IAM role has before destroying it"
+  description = "Specifies to force detaching any policies the IAM role has before destroying it."
  type = bool
  default = true
}

variable "role_permissions_boundary" {
-  description = "The ARN of the policy that is used to set the permissions boundary for the IAM role used by Kinesis Firehose Stream"
+  description = "The ARN of the policy that is used to set the permissions boundary for the IAM role used by Kinesis Firehose Stream."
  type = string
  default = null
}

variable "role_tags" {
-  description = "A map of tags to assign to IAM role"
+  description = "A map of tags to assign to the IAM role."
  type = map(string)
  default = {}
}

variable "policy_path" {
-  description = "Path of policies to that should be added to IAM role for Kinesis Firehose Stream"
+  description = "Path of policies that should be added to the IAM role for Kinesis Firehose Stream."
  type = string
  default = null
}

variable "create_application_role" {
-  description = "Set it to true to create role to be used by the source"
+  description = "Set it to true to create the role to be used by the source."
  default = false
  type = bool
}

variable "application_role_name" {
-  description = "Name of IAM Application role to use for Kinesis Firehose Stream Source"
+  description = "Name of IAM Application role to use for Kinesis Firehose Stream Source."
  type = string
  default = null
}

variable "application_role_description" {
-  description = "Description of IAM Application role to use for Kinesis Firehose Stream Source"
+  description = "Description of IAM Application role to use for Kinesis Firehose Stream Source."
  type = string
  default = null
}

variable "application_role_path" {
-  description = "Path of IAM Application role to use for Kinesis Firehose Stream Source"
+  description = "Path of IAM Application role to use for Kinesis Firehose Stream Source."
  type = string
  default = null
}
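The role inputs above control how the module names and constrains the stream role; a sketch with placeholder names and ARNs:

```hcl
module "firehose" {
  source = "path/to/this/module" # placeholder

  name          = "named-role-stream"
  destination   = "s3" # assumed destination value
  s3_bucket_arn = aws_s3_bucket.destination.arn

  # Create the stream role with explicit naming and a permissions boundary.
  create_role               = true
  role_name                 = "firehose-delivery-role"
  role_path                 = "/service/"
  role_permissions_boundary = "arn:aws:iam::111122223333:policy/boundary" # placeholder ARN
  role_tags                 = { ManagedBy = "terraform" }
}
```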
variable "application_role_force_detach_policies" {
-  description = "Specifies to force detaching any policies the IAM Application role has before destroying it"
+  description = "Specifies to force detaching any policies the IAM Application role has before destroying it."
  type = bool
  default = true
}

variable "application_role_permissions_boundary" {
-  description = "The ARN of the policy that is used to set the permissions boundary for the IAM Application role used by Kinesis Firehose Stream Source"
+  description = "The ARN of the policy that is used to set the permissions boundary for the IAM Application role used by Kinesis Firehose Stream Source."
  type = string
  default = null
}

variable "application_role_tags" {
-  description = "A map of tags to assign to IAM Application role"
+  description = "A map of tags to assign to the IAM Application role."
  type = map(string)
  default = {}
}

variable "configure_existing_application_role" {
-  description = "Set it to True if want use existing application role to add the firehose Policy"
+  description = "Set it to true if you want to use an existing application role to attach the Firehose policy."
  default = false
  type = bool
}

variable "create_application_role_policy" {
-  description = "Set it to true to create policy to the role used by the source"
+  description = "Set it to true to create the policy for the role used by the source."
  default = false
  type = bool
}

variable "application_role_service_principal" {
-  description = "AWS Service Principal to assume application role"
+  description = "AWS Service Principal allowed to assume the application role."
  type = string
  default = null
}

variable "application_role_policy_actions" {
-  description = "List of Actions to Application Role Policy"
+  description = "List of actions for the Application Role policy."
  type = list(string)
  default = [
    "firehose:PutRecord",
"firehose:PutRecord",
"firehose:PutRecordBatch"
]
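Finally, the application role inputs above create a role that producers assume to write into the stream. A sketch; the service principal is only an example, so pick the one matching your producer:

```hcl
module "firehose" {
  source = "path/to/this/module" # placeholder

  name          = "direct-put-stream"
  destination   = "s3" # assumed destination value
  s3_bucket_arn = aws_s3_bucket.destination.arn

  # Role the producer application assumes to call PutRecord / PutRecordBatch.
  create_application_role            = true
  create_application_role_policy     = true
  application_role_name              = "firehose-producer"
  application_role_service_principal = "ec2.amazonaws.com" # example principal

  application_role_policy_actions = [
    "firehose:PutRecord",
    "firehose:PutRecordBatch"
  ]
}
```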