diff --git a/digitalocean/database/resource_database_logsink.go b/digitalocean/database/resource_database_logsink.go
new file mode 100644
index 000000000..2732bd64a
--- /dev/null
+++ b/digitalocean/database/resource_database_logsink.go
@@ -0,0 +1,508 @@
+package database
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"strings"
+
+	"github.com/digitalocean/godo"
+	"github.com/digitalocean/terraform-provider-digitalocean/digitalocean/config"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+)
+
+func ResourceDigitalOceanDatabaseLogsink() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: resourceDigitalOceanDatabaseLogsinkCreate,
+		ReadContext:   resourceDigitalOceanDatabaseLogsinkRead,
+		UpdateContext: resourceDigitalOceanDatabaseLogsinkUpdate,
+		DeleteContext: resourceDigitalOceanDatabaseLogsinkDelete,
+		Importer: &schema.ResourceImporter{
+			State: resourceDigitalOceanDatabaseLogsinkImport,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"cluster_id": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validation.NoZeroValues,
+			},
+			"sink_id": {
+				Type:     schema.TypeString,
+				ForceNew: true,
+				Computed: true,
+			},
+			"sink_name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"sink_type": {
+				Type:     schema.TypeString,
+				Required: true,
+				// The update endpoint only accepts a new config; the sink type
+				// itself cannot change after creation, so force replacement.
+				ForceNew: true,
+				ValidateFunc: validation.StringInSlice([]string{
+					"rsyslog",
+					"elasticsearch",
+					"opensearch",
+				}, false),
+			},
+			"rsyslog_config": {
+				Type:     schema.TypeList,
+				Optional: true,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"server": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "DNS name or IPv4 address of the rsyslog server",
+						},
+						"port": {
+							Type:        schema.TypeInt,
+							Required:    true,
+							Description: "The internal port on which the rsyslog server is listening",
+						},
+						"tls": {
+							Type:        schema.TypeBool,
+							Required:    true,
+							Description: "Use TLS (as the messages are not filtered and may contain sensitive information, it is highly recommended to set this to true if the remote server supports it)",
+						},
+						"format": {
+							Type:     schema.TypeString,
+							Required: true,
+							ValidateFunc: validation.StringInSlice([]string{
+								"rfc5424",
+								"rfc3164",
+								"custom",
+							}, false),
+							Description: "Message format used by the server; this can be either rfc3164 (the old BSD style message format), rfc5424 (current syslog message format), or custom",
+						},
+						"logline": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "Conditional (required if format == custom). Syslog log line template for a custom format, supporting limited rsyslog style templating (using %tag%). Supported tags are: HOSTNAME, app-name, msg, msgid, pri, procid, structured-data, timestamp and timestamp:::date-rfc3339.",
+						},
+						"sd": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "Content of the structured data block of rfc5424 message",
+						},
+						"ca": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "PEM encoded CA certificate",
+						},
+						"key": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "(PEM format) client key if the server requires client authentication",
+						},
+						"cert": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "(PEM format) client cert to use",
+						},
+					},
+				},
+			},
+			"elasticsearch_config": {
+				Type:     schema.TypeList,
+				Optional: true,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"url": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Connection URL. Required for Elasticsearch",
+						},
+						"index_prefix": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Index prefix. Required for Elasticsearch",
+						},
+						"index_days_max": {
+							Type:        schema.TypeInt,
+							Optional:    true,
+							Description: "Maximum number of days of logs to keep (default 7)",
+						},
+						"timeout": {
+							Type:         schema.TypeFloat,
+							Required:     true,
+							Description:  "Elasticsearch request timeout limit, in seconds (between 10 and 120). Required for Elasticsearch",
+							ValidateFunc: validation.FloatBetween(10, 120),
+						},
+						"ca": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "PEM encoded CA certificate",
+						},
+					},
+				},
+			},
+			"opensearch_config": {
+				Type:     schema.TypeList,
+				Optional: true,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"url": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Connection URL. Required for OpenSearch",
+						},
+						"index_prefix": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Index prefix. Required for OpenSearch",
+						},
+						"index_days_max": {
+							Type:        schema.TypeInt,
+							Optional:    true,
+							Description: "Maximum number of days of logs to keep (default 7)",
+						},
+						"timeout": {
+							Type:         schema.TypeFloat,
+							Optional:     true,
+							Description:  "OpenSearch request timeout limit, in seconds (between 10 and 120; default 10)",
+							ValidateFunc: validation.FloatBetween(10, 120),
+						},
+						"ca": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "PEM encoded CA certificate",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// expandLogsinkRsyslogConfig maps a rsyslog_config block onto a godo.DatabaseLogsinkConfig.
+func expandLogsinkRsyslogConfig(config []interface{}) *godo.DatabaseLogsinkConfig {
+	logsinkConfigOpts := &godo.DatabaseLogsinkConfig{}
+	if len(config) == 0 || config[0] == nil {
+		return logsinkConfigOpts
+	}
+	configMap := config[0].(map[string]interface{})
+	if v, ok := configMap["server"]; ok {
+		logsinkConfigOpts.Server = v.(string)
+	}
+	if v, ok := configMap["port"]; ok {
+		logsinkConfigOpts.Port = v.(int)
+	}
+	if v, ok := configMap["tls"]; ok {
+		logsinkConfigOpts.TLS = v.(bool)
+	}
+	if v, ok := configMap["format"]; ok {
+		logsinkConfigOpts.Format = v.(string)
+	}
+	if v, ok := configMap["logline"]; ok {
+		logsinkConfigOpts.Logline = v.(string)
+	}
+	if v, ok := configMap["sd"]; ok {
+		logsinkConfigOpts.SD = v.(string)
+	}
+	if v, ok := configMap["ca"]; ok {
+		logsinkConfigOpts.CA = v.(string)
+	}
+	if v, ok := configMap["key"]; ok {
+		logsinkConfigOpts.Key = v.(string)
+	}
+	if v, ok := configMap["cert"]; ok {
+		logsinkConfigOpts.Cert = v.(string)
+	}
+
+	return logsinkConfigOpts
+}
+
+// expandLogsinkElasticsearchConfig maps an elasticsearch_config block onto a godo.DatabaseLogsinkConfig.
+func expandLogsinkElasticsearchConfig(config []interface{}) *godo.DatabaseLogsinkConfig {
+	logsinkConfigOpts := &godo.DatabaseLogsinkConfig{}
+	if len(config) == 0 || config[0] == nil {
+		return logsinkConfigOpts
+	}
+	configMap := config[0].(map[string]interface{})
+	if v, ok := configMap["url"]; ok {
+		logsinkConfigOpts.URL = v.(string)
+	}
+	if v, ok := configMap["index_prefix"]; ok {
+		logsinkConfigOpts.IndexPrefix = v.(string)
+	}
+	if v, ok := configMap["index_days_max"]; ok {
+		logsinkConfigOpts.IndexDaysMax = v.(int)
+	}
+	if v, ok := configMap["timeout"]; ok {
+		// Only narrow to float32 when the value fits; both bounds must hold.
+		if v.(float64) > float64(math.SmallestNonzeroFloat32) && v.(float64) < float64(math.MaxFloat32) {
+			logsinkConfigOpts.Timeout = float32(v.(float64))
+		}
+	}
+	if v, ok := configMap["ca"]; ok {
+		logsinkConfigOpts.CA = v.(string)
+	}
+
+	return logsinkConfigOpts
+}
+
+// expandLogsinkOpensearchConfig maps an opensearch_config block onto a godo.DatabaseLogsinkConfig.
+func expandLogsinkOpensearchConfig(config []interface{}) *godo.DatabaseLogsinkConfig {
+	logsinkConfigOpts := &godo.DatabaseLogsinkConfig{}
+	if len(config) == 0 || config[0] == nil {
+		return logsinkConfigOpts
+	}
+	configMap := config[0].(map[string]interface{})
+	if v, ok := configMap["url"]; ok {
+		logsinkConfigOpts.URL = v.(string)
+	}
+	if v, ok := configMap["index_prefix"]; ok {
+		logsinkConfigOpts.IndexPrefix = v.(string)
+	}
+	if v, ok := configMap["index_days_max"]; ok {
+		logsinkConfigOpts.IndexDaysMax = v.(int)
+	}
+	if v, ok := configMap["timeout"]; ok {
+		// Only narrow to float32 when the value fits; both bounds must hold.
+		if v.(float64) > float64(math.SmallestNonzeroFloat32) && v.(float64) < float64(math.MaxFloat32) {
+			logsinkConfigOpts.Timeout = float32(v.(float64))
+		}
+	}
+	if v, ok := configMap["ca"]; ok {
+		logsinkConfigOpts.CA = v.(string)
+	}
+
+	return logsinkConfigOpts
+}
+
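+// resourceDigitalOceanDatabaseLogsinkCreate creates a logsink on the cluster,
+// requiring the *_config block that matches sink_type.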
+func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	client := meta.(*config.CombinedConfig).GodoClient()
+	clusterID := d.Get("cluster_id").(string)
+	sinkType := d.Get("sink_type").(string)
+	opts := &godo.DatabaseCreateLogsinkRequest{
+		Name: d.Get("sink_name").(string),
+		Type: sinkType,
+	}
+
+	var iCfg *godo.DatabaseLogsinkConfig
+
+	switch sinkType {
+	case "rsyslog":
+		if v, ok := d.GetOk("rsyslog_config"); ok {
+			iCfg = expandLogsinkRsyslogConfig(v.([]interface{}))
+		} else {
+			return diag.Errorf("Error creating database logsink: rsyslog_config is required when type is rsyslog")
+		}
+	case "elasticsearch":
+		if v, ok := d.GetOk("elasticsearch_config"); ok {
+			iCfg = expandLogsinkElasticsearchConfig(v.([]interface{}))
+		} else {
+			return diag.Errorf("Error creating database logsink: elasticsearch_config is required when type is elasticsearch")
+		}
+	case "opensearch":
+		if v, ok := d.GetOk("opensearch_config"); ok {
+			iCfg = expandLogsinkOpensearchConfig(v.([]interface{}))
+		} else {
+			return diag.Errorf("Error creating database logsink: opensearch_config is required when type is opensearch")
+		}
+	}
+
+	opts.Config = iCfg
+	if opts.Config == nil {
+		return diag.Errorf("Error creating database logsink: config is required")
+	}
+
+	logsink, _, err := client.Databases.CreateLogsink(ctx, clusterID, opts)
+	if err != nil {
+		return diag.Errorf("Error creating database logsink: %s", err)
+	}
+
+	logsinkIDFormat := makeDatabaseLogsinkID(clusterID, logsink.ID)
+	d.SetId(logsinkIDFormat)
+	d.Set("sink_id", logsink.ID)
+
+	return resourceDigitalOceanDatabaseLogsinkRead(ctx, d, meta)
+}
+
+// resourceDigitalOceanDatabaseLogsinkUpdate pushes the current *_config block to
+// the API; only the config of an existing sink can be updated.
+func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	client := meta.(*config.CombinedConfig).GodoClient()
+	clusterID := d.Get("cluster_id").(string)
+	sinkID := d.Get("sink_id").(string)
+	opts := &godo.DatabaseUpdateLogsinkRequest{}
+
+	sinkType := d.Get("sink_type").(string)
+
+	var iCfg *godo.DatabaseLogsinkConfig
+
+	switch sinkType {
+	case "rsyslog":
+		if v, ok := d.GetOk("rsyslog_config"); ok {
+			iCfg = expandLogsinkRsyslogConfig(v.([]interface{}))
+		} else {
+			return diag.Errorf("Error updating database logsink: rsyslog_config is required when type is rsyslog")
+		}
+	case "elasticsearch":
+		if v, ok := d.GetOk("elasticsearch_config"); ok {
+			iCfg = expandLogsinkElasticsearchConfig(v.([]interface{}))
+		} else {
+			return diag.Errorf("Error updating database logsink: elasticsearch_config is required when type is elasticsearch")
+		}
+	case "opensearch":
+		if v, ok := d.GetOk("opensearch_config"); ok {
+			iCfg = expandLogsinkOpensearchConfig(v.([]interface{}))
+		} else {
+			return diag.Errorf("Error updating database logsink: opensearch_config is required when type is opensearch")
+		}
+	}
+
+	opts.Config = iCfg
+
+	_, err := client.Databases.UpdateLogsink(ctx, clusterID, sinkID, opts)
+	if err != nil {
+		return diag.Errorf("Error updating database logsink: %s", err)
+	}
+
+	return resourceDigitalOceanDatabaseLogsinkRead(ctx, d, meta)
+}
+
+func resourceDigitalOceanDatabaseLogsinkDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	client := meta.(*config.CombinedConfig).GodoClient()
+	clusterID := d.Get("cluster_id").(string)
+	logsinkID := d.Get("sink_id").(string)
+
+	_, err := client.Databases.DeleteLogsink(ctx, clusterID, logsinkID)
+	if err != nil {
+		return diag.Errorf("Error deleting database logsink: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
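+// resourceDigitalOceanDatabaseLogsinkRead refreshes local state from the API and
+// flattens the sink's config back into the matching *_config block.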
+func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	client := meta.(*config.CombinedConfig).GodoClient()
+	clusterID := d.Get("cluster_id").(string)
+	logsinkID := d.Get("sink_id").(string)
+
+	logsink, resp, err := client.Databases.GetLogsink(ctx, clusterID, logsinkID)
+	if err != nil {
+		// If the logsink is somehow already destroyed, mark as
+		// successfully gone
+		if resp != nil && resp.StatusCode == 404 {
+			d.SetId("")
+			return nil
+		}
+
+		return diag.Errorf("Error retrieving logsink: %s", err)
+	}
+
+	d.Set("sink_name", logsink.Name)
+	d.Set("sink_type", logsink.Type)
+
+	switch logsink.Type {
+	case "rsyslog":
+		// Round-trip through JSON to deep-copy the config before flattening.
+		jsonData, err := json.Marshal(*logsink.Config)
+		if err != nil {
+			return diag.Errorf("Error marshaling rsyslog logsink config: %s", err)
+		}
+		var cfg *godo.DatabaseLogsinkConfig
+		if err = json.Unmarshal(jsonData, &cfg); err != nil {
+			return diag.Errorf("Error unmarshaling rsyslog logsink config: %s", err)
+		}
+		if err := d.Set("rsyslog_config", flattenLogsinkRsyslogConfig(cfg)); err != nil {
+			return diag.Errorf("Error setting rsyslog logsink config: %s", err)
+		}
+	case "elasticsearch":
+		jsonData, err := json.Marshal(*logsink.Config)
+		if err != nil {
+			return diag.Errorf("Error marshaling elasticsearch logsink config: %s", err)
+		}
+		var cfg *godo.DatabaseLogsinkConfig
+		if err = json.Unmarshal(jsonData, &cfg); err != nil {
+			return diag.Errorf("Error unmarshaling elasticsearch logsink config: %s", err)
+		}
+		if err := d.Set("elasticsearch_config", flattenLogsinkElasticsearchConfig(cfg)); err != nil {
+			return diag.Errorf("Error setting elasticsearch logsink config: %s", err)
+		}
+	case "opensearch":
+		jsonData, err := json.Marshal(*logsink.Config)
+		if err != nil {
+			return diag.Errorf("Error marshaling opensearch logsink config: %s", err)
+		}
+		var cfg *godo.DatabaseLogsinkConfig
+		if err = json.Unmarshal(jsonData, &cfg); err != nil {
+			return diag.Errorf("Error unmarshaling opensearch logsink config: %s", err)
+		}
+		if err := d.Set("opensearch_config", flattenLogsinkOpensearchConfig(cfg)); err != nil {
+			return diag.Errorf("Error setting opensearch logsink config: %s", err)
+		}
+	}
+
+	return nil
+}
+
+func resourceDigitalOceanDatabaseLogsinkImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	if strings.Contains(d.Id(), ",") {
+		s := strings.Split(d.Id(), ",")
+		d.SetId(makeDatabaseLogsinkID(s[0], s[1]))
+		d.Set("cluster_id", s[0])
+		d.Set("sink_id", s[1])
+	} else {
+		return nil, errors.New("must use the ID of the source cluster and the logsink ID joined with a comma (e.g. `cluster_id,sink_id`)")
+	}
+
+	return []*schema.ResourceData{d}, nil
+}
+
+// makeDatabaseLogsinkID builds the composite resource ID in the form
+// "<cluster_id>/logsink/<sink_id>".
+func makeDatabaseLogsinkID(clusterID string, logsinkID string) string {
+	return fmt.Sprintf("%s/logsink/%s", clusterID, logsinkID) // TODO: consider moving this helper into godo
+}
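+
+// The flattenLogsink*Config helpers convert a godo.DatabaseLogsinkConfig back
+// into the single-item list that Terraform stores for each *_config block.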
+func flattenLogsinkRsyslogConfig(config *godo.DatabaseLogsinkConfig) []interface{} {
+	result := make([]interface{}, 0)
+	if config != nil {
+		r := make(map[string]interface{})
+		r["server"] = config.Server
+		r["port"] = config.Port
+		r["tls"] = config.TLS
+		r["format"] = config.Format
+		r["logline"] = config.Logline
+		r["sd"] = config.SD
+		r["ca"] = config.CA
+		r["key"] = config.Key
+		r["cert"] = config.Cert
+		result = append(result, r)
+	}
+
+	return result
+}
+
+func flattenLogsinkElasticsearchConfig(config *godo.DatabaseLogsinkConfig) []interface{} {
+	result := make([]interface{}, 0)
+	if config != nil {
+		r := make(map[string]interface{})
+		r["ca"] = config.CA
+		r["url"] = config.URL
+		r["index_prefix"] = config.IndexPrefix
+		r["index_days_max"] = config.IndexDaysMax
+		r["timeout"] = config.Timeout
+		result = append(result, r)
+	}
+
+	return result
+}
+
+func flattenLogsinkOpensearchConfig(config *godo.DatabaseLogsinkConfig) []interface{} {
+	result := make([]interface{}, 0)
+	if config != nil {
+		r := make(map[string]interface{})
+		r["ca"] = config.CA
+		r["url"] = config.URL
+		r["index_prefix"] = config.IndexPrefix
+		r["index_days_max"] = config.IndexDaysMax
+		r["timeout"] = config.Timeout
+		result = append(result, r)
+	}
+
+	return result
+}
diff --git a/digitalocean/database/resource_database_logsink_test.go b/digitalocean/database/resource_database_logsink_test.go
new file mode 100644
index 000000000..19cf0eebc
--- /dev/null
+++ b/digitalocean/database/resource_database_logsink_test.go
@@ -0,0 +1,55 @@
+package database_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/digitalocean/terraform-provider-digitalocean/digitalocean/acceptance"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+func TestAccDigitalOceanDatabaseLogsink_Basic(t *testing.T) {
+	name := acceptance.RandomTestName()
+	dbConfig := fmt.Sprintf(testAccCheckDigitalOceanDatabaseClusterPostgreSQL, name, "15")
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:          func() { acceptance.TestAccPreCheck(t) },
+		ProviderFactories: acceptance.TestAccProviderFactories,
+		CheckDestroy:      testAccCheckDigitalOceanDatabaseClusterDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseLogsinkBasic, dbConfig, "lname", "opensearch", "https://user:passwd@192.168.0.1:25060", "logs", 5),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "opensearch_config.0.url", "https://user:passwd@192.168.0.1:25060"),
+					resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "opensearch_config.0.index_prefix", "logs"),
+					resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "opensearch_config.0.index_days_max", "5"),
+					resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "sink_type", "opensearch"),
+					resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "sink_name", "lname"),
+				),
+			},
+			{
+				Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseLogsinkBasic, dbConfig, "new-lname", "opensearch", "https://user:passwd@192.168.0.1:25060", "logs", 4),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "sink_name", "new-lname"),
+					resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "opensearch_config.0.index_days_max", "4"),
+				),
+			},
+		},
+	})
+}
+
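+// The config template takes, in order: the cluster config, sink name, sink type,
+// connection URL, index prefix, and index_days_max.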
+const testAccCheckDigitalOceanDatabaseLogsinkBasic = `
+%s
+
+resource "digitalocean_database_logsink" "logsink" {
+  cluster_id = digitalocean_database_cluster.foobar.id
+  sink_name  = "%s"
+  sink_type  = "%s"
+
+  opensearch_config {
+    url            = "%s"
+    index_prefix   = "%s"
+    index_days_max = %d
+    timeout        = 10
+  }
+}`
diff --git a/digitalocean/provider.go b/digitalocean/provider.go
index f0d682d5c..5eb63f392 100644
--- a/digitalocean/provider.go
+++ b/digitalocean/provider.go
@@ -159,6 +159,7 @@ func Provider() *schema.Provider {
 			"digitalocean_database_kafka_config":      database.ResourceDigitalOceanDatabaseKafkaConfig(),
 			"digitalocean_database_opensearch_config": database.ResourceDigitalOceanDatabaseOpensearchConfig(),
 			"digitalocean_database_kafka_topic":       database.ResourceDigitalOceanDatabaseKafkaTopic(),
+			"digitalocean_database_logsink":           database.ResourceDigitalOceanDatabaseLogsink(),
 			"digitalocean_domain":                     domain.ResourceDigitalOceanDomain(),
 			"digitalocean_droplet":                    droplet.ResourceDigitalOceanDroplet(),
 			"digitalocean_droplet_snapshot":           snapshot.ResourceDigitalOceanDropletSnapshot(),
diff --git a/docs/resources/database_logsink.md b/docs/resources/database_logsink.md
new file mode 100644
index 000000000..65ccd49e5
--- /dev/null
+++ b/docs/resources/database_logsink.md
@@ -0,0 +1,104 @@
+---
+page_title: "DigitalOcean: digitalocean_database_logsink"
+---
+
+# digitalocean\_database\_logsink
+
+Provides a DigitalOcean database logsink resource. A logsink forwards cluster logs to rsyslog, Elasticsearch, or OpenSearch.
+
+## Example Usage
+
+```hcl
+resource "digitalocean_database_logsink" "logsink-01" {
+  cluster_id = digitalocean_database_cluster.doby.id
+  sink_name  = "sinkexample1"
+  sink_type  = "opensearch"
+
+  opensearch_config {
+    url            = "https://user:passwd@192.168.0.1:25060"
+    index_prefix   = "opensearch-logs"
+    index_days_max = 5
+  }
+}
+
+resource "digitalocean_database_logsink" "logsink-02" {
+  cluster_id = digitalocean_database_cluster.doby.id
+  sink_name  = "sinkexample2"
+  sink_type  = "rsyslog"
+
+  rsyslog_config {
+    server  = "192.168.10.2"
+    port    = 514
+    tls     = true
+    format  = "rfc5424"
+    logline = "msg"
+    sd      = "test-2"
+  }
+}
+
+resource "digitalocean_database_logsink" "logsink-03" {
+  cluster_id = digitalocean_database_cluster.doby.id
+  sink_name  = "sinkexample3"
+  sink_type  = "elasticsearch"
+
+  elasticsearch_config {
+    url            = "https://user:passwd@192.168.0.1:25060"
+    index_prefix   = "elasticsearch-logs"
+    index_days_max = 3
+    timeout        = 10
+  }
+}
+
+resource "digitalocean_database_cluster" "doby" {
+  name       = "dobydb"
+  engine     = "pg"
+  version    = "15"
+  size       = "db-s-1vcpu-2gb"
+  region     = "nyc1"
+  node_count = 1
+  tags       = ["production"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported. See the [DigitalOcean API documentation](https://docs.digitalocean.com/reference/api/api-reference/#operation/databases_create_logsink)
+for additional details on each option.
+
+* `cluster_id` - (Required) The ID of the target database cluster.
+* `sink_name` - (Required) The name of the logsink.
+* `sink_type` - (Required) The sink type. Enum: `rsyslog`, `elasticsearch`, `opensearch`.
+* `rsyslog_config` - (Optional) Configuration block, required when `sink_type` is `rsyslog`:
+  - `server` - (Required) The DNS name or IPv4 address of the rsyslog server.
+  - `port` - (Required) An integer of the internal port on which the rsyslog server is listening.
+  - `tls` - (Required) A boolean to use TLS (as the messages are not filtered and may contain sensitive information, it is highly recommended to set this to true if the remote server supports it).
+  - `format` - (Required) The message format used by the server; this can be either rfc3164 (the old BSD style message format), rfc5424 (current syslog message format) or custom. Enum: `rfc5424`, `rfc3164`, or `custom`.
+  - `logline` - (Optional) Only required if `format` is `custom`. Syslog log line template for a custom format, supporting limited rsyslog style templating (using `%tag%`). Supported tags are: HOSTNAME, app-name, msg, msgid, pri, procid, structured-data, timestamp and timestamp:::date-rfc3339.
+  - `sd` - (Optional) Content of the structured data block of rfc5424 message.
+  - `ca` - (Optional) PEM encoded CA certificate.
+  - `key` - (Optional) (PEM format) client key if the server requires client authentication.
+  - `cert` - (Optional) (PEM format) client cert to use.
+* `elasticsearch_config` - (Optional) Configuration block, required when `sink_type` is `elasticsearch`:
+  - `url` - (Required) Elasticsearch connection URL.
+  - `index_prefix` - (Required) Elasticsearch index prefix.
+  - `index_days_max` - (Optional) Maximum number of days of logs to keep (default 7).
+  - `timeout` - (Required) Elasticsearch request timeout limit, in seconds (between 10 and 120).
+  - `ca` - (Optional) PEM encoded CA certificate.
+* `opensearch_config` - (Optional) Configuration block, required when `sink_type` is `opensearch`:
+  - `url` - (Required) OpenSearch connection URL.
+  - `index_prefix` - (Required) OpenSearch index prefix.
+  - `index_days_max` - (Optional) Maximum number of days of logs to keep (default 7).
+  - `timeout` - (Optional) OpenSearch request timeout limit, in seconds (between 10 and 120; default 10).
+  - `ca` - (Optional) PEM encoded CA certificate.
+
+## Attributes Reference
+
+In addition to the arguments above, the `sink_id` of the logsink is exported. If an attribute was set outside of Terraform, it will be computed.
+
+## Import
+
+A database logsink can be imported using the `id` of the parent cluster and the `sink_id` of the logsink joined with a comma, e.g.
+
+```
+terraform import digitalocean_database_logsink.example 4b62829a-9c42-465b-aaa3-84051048e712,logsink-id
+```
diff --git a/examples/logsink/main.tf b/examples/logsink/main.tf
new file mode 100644
index 000000000..7740de60a
--- /dev/null
+++ b/examples/logsink/main.tf
@@ -0,0 +1,36 @@
+terraform {
+  required_providers {
+    digitalocean = {
+      source  = "digitalocean/digitalocean"
+      version = ">= 2.8.0"
+    }
+  }
+}
+
+provider "digitalocean" {
+  # Set the token in your environment, e.g. in your .bashrc:
+  # export DIGITALOCEAN_TOKEN="Your API TOKEN"
+}
+
+resource "digitalocean_database_logsink" "logsink-01" {
+  cluster_id = digitalocean_database_cluster.doby.id
+  sink_name  = "fox2"
+  sink_type  = "opensearch"
+
+  opensearch_config {
+    url            = "https://user:passwd@192.168.0.1:25060"
+    index_prefix   = "opensearch-logs"
+    index_days_max = 5
+  }
+}
+
+resource "digitalocean_database_cluster" "doby" {
+  name       = "dobydb"
+  engine     = "pg"
+  version    = "15"
+  size       = "db-s-1vcpu-2gb"
+  region     = "nyc1"
+  node_count = 1
+  tags       = ["production"]
+}
diff --git a/examples/tutorial/main.tf b/examples/tutorial/main.tf
new file mode 100644
index 000000000..306d75bc0
--- /dev/null
+++ b/examples/tutorial/main.tf
@@ -0,0 +1,22 @@
+terraform {
+  required_providers {
+    digitalocean = {
+      source  = "digitalocean/digitalocean"
+      version = ">= 2.8.0"
+    }
+  }
+}
+
+provider "digitalocean" {
+  # Set the token in your environment, e.g. in your .bashrc:
+  # export DIGITALOCEAN_TOKEN="Your API TOKEN"
+}
+
+resource "digitalocean_droplet" "web" {
+  image  = "ubuntu-20-04-x64"
+  name   = "web2"
+  region = "nyc2"
+  size   = "s-1vcpu-1gb"
+}
diff --git a/go.mod b/go.mod
index cef6e2feb..afc3b5e9e 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/digitalocean/terraform-provider-digitalocean
 
 require (
 	github.com/aws/aws-sdk-go v1.42.18
-	github.com/digitalocean/godo v1.126.1-0.20241004175533-dfe74ef3d8bb
+	github.com/digitalocean/godo v1.126.1-0.20241015182733-2c2e059f2ea4
 	github.com/hashicorp/awspolicyequivalence v1.5.0
 	github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320
 	github.com/hashicorp/go-uuid v1.0.3
diff --git a/go.sum b/go.sum
index 07b7e1430..832505af3 100644
--- a/go.sum
+++ b/go.sum
@@ -25,8 +25,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/digitalocean/godo v1.126.1-0.20241004175533-dfe74ef3d8bb h1:D6/pJ6+5wMkpwaF2HMJqvhlPlxWdlbpKx6bOpy8sbBg=
-github.com/digitalocean/godo v1.126.1-0.20241004175533-dfe74ef3d8bb/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
+github.com/digitalocean/godo v1.126.1-0.20241015182733-2c2e059f2ea4 h1:7CBSVopixDaCNKGsRZjSvs9vPa/3IZ9gTwAIV4gWpO4=
+github.com/digitalocean/godo v1.126.1-0.20241015182733-2c2e059f2ea4/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
 github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
 github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go
index 276fb4a6b..bc38c25e1 100644
--- a/vendor/github.com/digitalocean/godo/databases.go
+++ b/vendor/github.com/digitalocean/godo/databases.go
@@ -350,14 +350,6 @@ type DatabaseTopic struct {
 	Config *TopicConfig `json:"config,omitempty"`
 }
 
-// DatabaseLogsink represents a logsink
-type DatabaseLogsink struct {
-	ID     string                 `json:"sink_id"`
-	Name   string                 `json:"sink_name,omitempty"`
-	Type   string                 `json:"sink_type,omitempty"`
-	Config *DatabaseLogsinkConfig `json:"config,omitempty"`
-}
-
 // TopicPartition represents the state of a Kafka topic partition
 type TopicPartition struct {
 	EarliestOffset uint64 `json:"earliest_offset,omitempty"`
@@ -507,14 +499,22 @@ type DatabaseFirewallRule struct {
 	CreatedAt time.Time `json:"created_at"`
 }
 
-// DatabaseCreateLogsinkRequest is used to create logsink for a database cluster
+// DatabaseLogsink represents a logsink.
+type DatabaseLogsink struct {
+	ID     string                 `json:"sink_id"`
+	Name   string                 `json:"sink_name,required"`
+	Type   string                 `json:"sink_type,required"`
+	Config *DatabaseLogsinkConfig `json:"config,required"`
+}
+
+// DatabaseCreateLogsinkRequest is used to create logsink for a database cluster.
 type DatabaseCreateLogsinkRequest struct {
 	Name   string                 `json:"sink_name"`
 	Type   string                 `json:"sink_type"`
 	Config *DatabaseLogsinkConfig `json:"config"`
 }
 
-// DatabaseUpdateLogsinkRequest is used to update logsink for a database cluster
+// DatabaseUpdateLogsinkRequest is used to update logsink for a database cluster.
 type DatabaseUpdateLogsinkRequest struct {
 	Config *DatabaseLogsinkConfig `json:"config"`
 }
@@ -828,6 +828,10 @@ type databaseTopicsRoot struct {
 	Topics []DatabaseTopic `json:"topics"`
 }
 
+type databaseLogsinkRoot struct {
+	Sink DatabaseLogsink `json:"sink"`
+}
+
 type databaseLogsinksRoot struct {
 	Sinks []DatabaseLogsink `json:"sinks"`
 }
@@ -1878,7 +1882,7 @@ func (svc *DatabasesServiceOp) DeleteIndex(ctx context.Context, databaseID, name
 	return resp, nil
 }
 
-// CreateLogsink creates a new logsink for a database
+// CreateLogsink creates a new logsink for a database cluster.
 func (svc *DatabasesServiceOp) CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) {
 	path := fmt.Sprintf(databaseLogsinksPath, databaseID)
 	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createLogsink)
@@ -1886,15 +1890,16 @@ func (svc *DatabasesServiceOp) CreateLogsink(ctx context.Context, databaseID str
 		return nil, nil, err
 	}
 
-	root := new(DatabaseLogsink)
+	root := new(databaseLogsinkRoot)
 	resp, err := svc.client.Do(ctx, req, root)
 	if err != nil {
 		return nil, resp, err
 	}
-	return root, resp, nil
+
+	return &root.Sink, resp, nil
 }
 
-// GetLogsink gets a logsink for a database
+// GetLogsink gets a logsink for a database cluster.
 func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) {
 	path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID)
 	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
@@ -1910,7 +1915,7 @@ func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string
 	return root, resp, nil
 }
 
-// ListTopics returns all topics for a given kafka cluster
+// ListLogsinks returns all logsinks for a given database cluster.
 func (svc *DatabasesServiceOp) ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error) {
 	path := fmt.Sprintf(databaseLogsinksPath, databaseID)
 	path, err := addOptions(path, opts)
@@ -1929,7 +1934,7 @@ func (svc *DatabasesServiceOp) ListLogsinks(ctx context.Context, databaseID stri
 	return root.Sinks, resp, nil
 }
 
-// UpdateLogsink updates a logsink for a database cluster
+// UpdateLogsink updates a logsink for a database cluster.
 func (svc *DatabasesServiceOp) UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error) {
 	path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID)
 	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updateLogsink)
@@ -1944,7 +1949,7 @@ func (svc *DatabasesServiceOp) UpdateLogsink(ctx context.Context, databaseID str
 	return resp, nil
 }
 
-// DeleteLogsink deletes a logsink for a database cluster
+// DeleteLogsink deletes a logsink for a database cluster.
 func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) {
 	path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID)
 	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4be2307e9..457392592 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -58,7 +58,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
 github.com/davecgh/go-spew/spew
-# github.com/digitalocean/godo v1.126.1-0.20241004175533-dfe74ef3d8bb
+# github.com/digitalocean/godo v1.126.1-0.20241015182733-2c2e059f2ea4
 ## explicit; go 1.22
 github.com/digitalocean/godo
 github.com/digitalocean/godo/metrics