From 420ba38dea7a98bc3215df79d7f2e6588b602105 Mon Sep 17 00:00:00 2001 From: MinGyum Kim Date: Thu, 15 Jun 2023 19:07:51 +0900 Subject: [PATCH] feat: Support NKS OpenAPI Features (#273) * update: support nks update * update: nks docs * fix: ip_acl empty entries error * fix: ip_acl empty entries error * update: docs & log * update: nks_server_products example * update: nks_server_products example * update: nks ip-acl exception for fin site * update: nks UserAgent config * update: remove named result parameter * update: nks config * update: nks_cluster ip_acl_default_action validation --------- Co-authored-by: Mingyum Kim --- .goreleaser.yml | 2 +- docs/data-sources/nks_cluster.md | 15 +- docs/data-sources/nks_node_pool.md | 7 +- docs/data-sources/nks_server_images.md | 33 ++ docs/data-sources/nks_server_products.md | 56 +++ docs/resources/nks_cluster.md | 18 +- docs/resources/nks_node_pool.md | 5 +- examples/nks/main.tf | 15 +- go.mod | 2 +- go.sum | 21 +- ncloud/config.go | 6 +- ncloud/data_source_ncloud_nks_cluster.go | 93 ++++- ncloud/data_source_ncloud_nks_node_pool.go | 17 +- .../data_source_ncloud_nks_server_images.go | 90 ++++ ...ta_source_ncloud_nks_server_images_test.go | 57 +++ .../data_source_ncloud_nks_server_products.go | 156 +++++++ ..._source_ncloud_nks_server_products_test.go | 38 ++ ncloud/data_source_ncloud_nks_versions.go | 2 +- ncloud/resource_ncloud_nks_cluster.go | 326 ++++++++++++++- ncloud/resource_ncloud_nks_cluster_test.go | 384 +++++++++++++++++- ncloud/resource_ncloud_nks_node_pool.go | 104 ++++- ncloud/resource_ncloud_nks_node_pool_test.go | 45 +- ncloud/structures.go | 152 ++++++- ncloud/structures_test.go | 145 ++++++- 24 files changed, 1713 insertions(+), 76 deletions(-) create mode 100644 docs/data-sources/nks_server_images.md create mode 100644 docs/data-sources/nks_server_products.md create mode 100644 ncloud/data_source_ncloud_nks_server_images.go create mode 100644 ncloud/data_source_ncloud_nks_server_images_test.go create mode 100644 ncloud/data_source_ncloud_nks_server_products.go create mode 100644 ncloud/data_source_ncloud_nks_server_products_test.go diff --git a/.goreleaser.yml b/.goreleaser.yml index a7bcbacac..dfd52d3b7 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -10,7 +10,7 @@ builds: flags: - -trimpath ldflags: - - "-s -w -X main.version={{.Version}} -X main.commit={{.Commit}}" + - '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X {{ replace .GitURL "https://" "" }}/ncloud.version={{ .Version }}' goos: - freebsd - windows diff --git a/docs/data-sources/nks_cluster.md b/docs/data-sources/nks_cluster.md index 19e5cd5f3..ec7f2c209 100644 --- a/docs/data-sources/nks_cluster.md +++ b/docs/data-sources/nks_cluster.md @@ -27,7 +27,7 @@ In addition to all arguments above, the following attributes are exported: * `id` - Cluster uuid. * `endpoint` - Control Plane API address. * `lb_private_subnet_no` - Subnet No. for private loadbalancer only. -* `lb_public_subnet_no` - Subnet No. for public loadbalancer only. (Available only `SGN` region`) +* `lb_public_subnet_no` - Subnet No. for public loadbalancer only. (Available only `SGN`, `JPN` region) * `subnet_no_list` - Subnet No. list. * `public_network` - Public Subnet Network * `kube_network_plugin` - Kubernetes network plugin. @@ -41,3 +41,16 @@ In addition to all arguments above, the following attributes are exported: * `audit` - Audit log availability. * `k8s_version` - Kubenretes version. * `acg_no` - The ID of cluster ACG. +* `oidc` + * `issuer_url` - Issuer URL. 
+ * `client_id` - Client ID. + * `username_prefix` - Username prefix. + * `username_claim` - Username claim. + * `groups_prefix` - Groups prefix. + * `groups_claim` - Groups claim. + * `required_claim` - Required claim. +* `ip_acl_default_action` - IP ACL default action.(Available only `public`, `gov` site) +* `ip_acl` (Available only `public`, `gov` site) + * `action` - `allow`, `deny` + * `address` - CIDR + * `comment` - Comment \ No newline at end of file diff --git a/docs/data-sources/nks_node_pool.md b/docs/data-sources/nks_node_pool.md index df8dda633..e0d1794b2 100644 --- a/docs/data-sources/nks_node_pool.md +++ b/docs/data-sources/nks_node_pool.md @@ -31,11 +31,13 @@ In addition to all arguments above, the following attributes are exported: * `id` - The ID of nodepool.`CusterUuid:NodePoolName` * `node_count` - Number of nodes. * `product_code` - Product code. +* `software_code` - Server image code. * `autoscale` * `enable` - Auto scaling availability. * `max` - Maximum number of nodes available for auto scaling. * `min` - Minimum number of nodes available for auto scaling. -* `subnet_no` - Subnet No. +* `subnet_no` - Subnet No.(Deprecated) +* `subnet_no_list` - Subnet No List. * `instance_no` - Nodepool instance No. * `nodes`- Running nodes in nodepool. * `name` - The name of Server instance. @@ -45,4 +47,5 @@ In addition to all arguments above, the following attributes are exported: * `public_ip` - Public IP. * `node_status` - Node Status. * `container_version` - Container version of node. - * `kernel_version` - kernel version of node. \ No newline at end of file + * `kernel_version` - kernel version of node. +* `k8s_version` - Kubenretes version . \ No newline at end of file diff --git a/docs/data-sources/nks_server_images.md b/docs/data-sources/nks_server_images.md new file mode 100644 index 000000000..24873b35b --- /dev/null +++ b/docs/data-sources/nks_server_images.md @@ -0,0 +1,33 @@ +# Data Source: ncloud_nks_server_images + +Provides list of available Kubernetes Nodepool ServerImages. + +## Example Usage + +```hcl +data "ncloud_nks_server_images" "images" {} + +data "ncloud_nks_server_images" "ubuntu20" { + filter { + name = "label" + values = ["ubuntu-20.04-64-server"] + regex = true + } +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `filter` - (Optional) Custom filter block as described below. + * `name` - (Required) The name of the field to filter by. + * `values` - (Required) Set of values that are accepted for the given field. + * `regex` - (Optional) is `values` treated as a regular expression. + +## Attributes Reference + +* `images` - A list of ServerImages + * `label` - ServerImage name + * `value` - ServerImage code diff --git a/docs/data-sources/nks_server_products.md b/docs/data-sources/nks_server_products.md new file mode 100644 index 000000000..48613934b --- /dev/null +++ b/docs/data-sources/nks_server_products.md @@ -0,0 +1,56 @@ +# Data Source: ncloud_nks_server_products + +Provides list of available Kubernetes Nodepool ServerProducts. 
+ +## Example Usage + +```hcl +data "ncloud_nks_server_products" "products" {} + + +data "ncloud_nks_server_images" "images"{ + filter { + name = "label" + values = ["ubuntu-20.04-64-server"] + } +} + +data "ncloud_nks_server_products" "product" { + + software_code = data.ncloud_nks_server_images.images.images[0].value + zone = "KR-1" + + filter { + name = "label" + values = ["vCPU 2개, 메모리 16GB, [SSD]디스크 50GB" ] + } +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `software_code` - (Required) NKS ServerImage code. +* `zone` - (Required) zone Code. + +* `filter` - (Optional) Custom filter block as described below. + * `name` - (Required) The name of the field to filter by. + * `values` - (Required) Set of values that are accepted for the given field. + * `regex` - (Optional) is `values` treated as a regular expression. + +## Attributes Reference + +* `products` - A list of ServerProduct + * `label` - ServerProduct spec korean description + * `value` - ServerProduct code + * `detail` + * `cpu_count` - Number of cpu + * `gpu_count` - Number of gpu + * `gpu_memory_size` - Size of GPU memory(GB) + * `memory_size` - Size of memory(GB) + * `product_code` - ServerProduct code + * `product_english_desc` - ServerProduct spec english description + * `product_korean_desc` - ServerProduct spec korean description + * `product_type` - ServerProduct Type \ No newline at end of file diff --git a/docs/resources/nks_cluster.md b/docs/resources/nks_cluster.md index c35a6f29d..9d6496055 100644 --- a/docs/resources/nks_cluster.md +++ b/docs/resources/nks_cluster.md @@ -76,11 +76,23 @@ The following arguments are supported: * `subnet_no_list` - (Required) Subnet No. list. * `public_network` - (Optional) Public Subnet Network (`boolean`) * `lb_private_subnet_no` - (Required) Subnet No. for private loadbalancer only. -* `lb_public_subnet_no` - (Optional) Subnet No. for public loadbalancer only. (Available only `SGN` region) +* `lb_public_subnet_no` - (Optional) Subnet No. for public loadbalancer only. (Available only `SGN`, `JPN` region) * `log` - (Optional) * `audit` - (Required) Audit log availability. (`boolean`) -* `k8s_version` - (Optional) Kubenretes version . - +* `k8s_version` - (Optional) Kubenretes version. Only upgrade is supported. +* `oidc` - (Optional) + * `issuer_url` - (Required) Issuer URL. + * `client_id` - (Required) Client ID. + * `username_prefix` - (Optional) Username prefix. + * `username_claim` - (Optional) Username claim. + * `groups_prefix` - (Optional) Groups prefix. + * `groups_claim` - (Optional) Groups claim. + * `required_claim` - (Optional) Required claim. +* `ip_acl_default_action` - (Optional) IP ACL default action.`allow`(default), `deny` +* `ip_acl` (Optional) + * `action` - (Required) `allow`, `deny` + * `address` - (Required) CIDR + * `comment` - (Optional) Comment ## Attributes Reference In addition to all arguments above, the following attributes are exported: diff --git a/docs/resources/nks_node_pool.md b/docs/resources/nks_node_pool.md index 0e7ecde8a..468cc29d3 100644 --- a/docs/resources/nks_node_pool.md +++ b/docs/resources/nks_node_pool.md @@ -109,11 +109,14 @@ The following arguments are supported: * `cluster_uuid` - (Required) Cluster uuid. * `node_count` - (Required) Number of nodes. * `product_code` - (Required) Product code. +* `software_code` - (Optional) Server image code. * `autoscale`- (Optional) * `enable` - (Required) Auto scaling availability. * `max` - (Required) Maximum number of nodes available for auto scaling. 
* `min` - (Required) Minimum number of nodes available for auto scaling. -* `subnet_no` - (Optional) Subnet No. +* `subnet_no` - (Deprecated) Subnet No. +* `subnet_no_list` - Subnet no list. +* `k8s_version` - (Optional) Kubenretes version. Only upgrade is supported. ## Attributes Reference diff --git a/examples/nks/main.tf b/examples/nks/main.tf index 4489509f8..3eb227a12 100644 --- a/examples/nks/main.tf +++ b/examples/nks/main.tf @@ -60,15 +60,17 @@ resource "ncloud_nks_cluster" "cluster" { } } -data "ncloud_server_image" "image" { +data "ncloud_nks_server_images" "images"{ filter { - name = "product_name" - values = ["ubuntu-20.04"] + name = "label" + values = ["ubuntu-20.04-64-server"] } } -data "ncloud_server_product" "product" { - server_image_product_code = data.ncloud_server_image.image.product_code +data "ncloud_nks_server_products" "products" { + + software_code = data.ncloud_nks_server_images.images.images[0].value + zone = "KR-1" filter { name = "product_type" @@ -96,7 +98,8 @@ resource "ncloud_nks_node_pool" "node_pool" { cluster_uuid = ncloud_nks_cluster.cluster.uuid node_pool_name = "pool1" node_count = 1 - product_code = data.ncloud_server_product.product.product_code + product_code = data.ncloud_nks_server_products.products.products[0].value + software_code = data.ncloud_nks_server_images.images.images[0].value subnet_no = ncloud_subnet.node_subnet.id autoscale { enabled = true diff --git a/go.mod b/go.mod index 7e4df329a..7a662070b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-ncloud go 1.19 require ( - github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.2 + github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.4 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 gopkg.in/yaml.v3 v3.0.1 diff --git a/go.sum b/go.sum index a2ccf48da..125072438 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.2 h1:Hv2aCNGVVD4tKXYivtENi+9+/zgHL+H05uKMkqpr5Xk= -github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.2/go.mod h1:KWd9AT+YSM6qgsMzPnE23h2/r0bsPSIdJzZIg3BUcfI= +github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.4 h1:/PXqrDFlF3U/jjaGkrEpXYt6EKalENZjNaHaoHL6718= +github.com/NaverCloudPlatform/ncloud-sdk-go-v2 v1.6.4/go.mod h1:sDa6EITv6z/l6+d4VJk4OiRZnXuO0uG2Cm30qtqF4TU= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= @@ -181,6 +181,7 @@ github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37w github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty 
v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= @@ -192,12 +193,14 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167 h1:O8uGbHCqlTp2P6QJSLmCojM4mN6UemYv8K+dCnmHmu0= golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -206,12 +209,13 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -220,6 +224,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -231,21 +236,24 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -253,6 +261,9 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/ncloud/config.go b/ncloud/config.go index 2d9a7ffa3..8e1b5cfe3 100644 --- a/ncloud/config.go +++ b/ncloud/config.go @@ -1,6 +1,7 @@ package ncloud import ( + "fmt" "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vautoscaling" "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vcdss" "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vloadbalancer" @@ -35,6 +36,8 @@ const DefaultCreateTimeout = 1 * time.Hour const DefaultUpdateTimeout = 10 * time.Minute const DefaultStopTimeout = 5 * time.Minute +var version = "" + type Config struct { AccessKey string SecretKey string @@ -68,6 +71,7 @@ func (c *Config) Client() (*NcloudAPIClient, error) { AccessKey: c.AccessKey, SecretKey: c.SecretKey, } + return &NcloudAPIClient{ server: server.NewAPIClient(server.NewConfiguration(apiKey)), autoscaling: autoscaling.NewAPIClient(autoscaling.NewConfiguration(apiKey)), @@ -80,7 +84,7 @@ func (c *Config) Client() (*NcloudAPIClient, error) { vnas: vnas.NewAPIClient(vnas.NewConfiguration(apiKey)), vautoscaling: vautoscaling.NewAPIClient(vautoscaling.NewConfiguration(apiKey)), vloadbalancer: vloadbalancer.NewAPIClient(vloadbalancer.NewConfiguration(apiKey)), - vnks: vnks.NewAPIClient(vnks.NewConfiguration(c.Region, apiKey)), + vnks: vnks.NewAPIClient(vnks.NewConfigurationWithUserAgent(c.Region, fmt.Sprintf("Ncloud Terraform Provider/%s", version), apiKey)), sourcecommit: sourcecommit.NewAPIClient(sourcecommit.NewConfiguration(c.Region, apiKey)), sourcebuild: sourcebuild.NewAPIClient((sourcebuild.NewConfiguration(c.Region, apiKey))), sourcepipeline: sourcepipeline.NewAPIClient(sourcepipeline.NewConfiguration(c.Region, apiKey)), diff --git a/ncloud/data_source_ncloud_nks_cluster.go b/ncloud/data_source_ncloud_nks_cluster.go index 48ba204c6..61e654067 100644 --- a/ncloud/data_source_ncloud_nks_cluster.go +++ b/ncloud/data_source_ncloud_nks_cluster.go @@ -88,6 +88,69 @@ func dataSourceNcloudNKSCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "oidc": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issuer_url": { + Type: schema.TypeString, + Required: true, + }, + "client_id": { + Type: schema.TypeString, + Required: true, + }, + "username_prefix": { + Type: schema.TypeString, + Optional: true, + }, + "username_claim": { + Type: schema.TypeString, + Optional: true, + }, + "groups_prefix": { + Type: schema.TypeString, + Optional: true, + }, + "groups_cliam": { + Type: schema.TypeString, + Optional: true, + }, + "required_claim": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "ip_acl_default_action": { + Type: schema.TypeString, + Optional: true, + Default: "allow", + }, + "ip_acl": 
{ + Type: schema.TypeSet, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeString, + Required: true, + }, + "address": { + Type: schema.TypeString, + Required: true, + }, + "comment": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, }, } } @@ -104,6 +167,16 @@ func dataSourceNcloudNKSClusterRead(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } + oidcSpec, err := getOIDCSpec(ctx, config, uuid) + if err != nil { + return diag.FromErr(err) + } + + ipAcl, err := getIPAcl(ctx, config, uuid) + if err != nil { + return diag.FromErr(err) + } + if cluster == nil { d.SetId("") return nil @@ -121,17 +194,35 @@ func dataSourceNcloudNKSClusterRead(ctx context.Context, d *schema.ResourceData, d.Set("lb_private_subnet_no", strconv.Itoa(int(ncloud.Int32Value(cluster.SubnetLbNo)))) d.Set("kube_network_plugin", cluster.KubeNetworkPlugin) d.Set("acg_no", strconv.Itoa(int(ncloud.Int32Value(cluster.AcgNo)))) + if cluster.LbPublicSubnetNo != nil { d.Set("lb_public_subnet_no", strconv.Itoa(int(ncloud.Int32Value(cluster.LbPublicSubnetNo)))) } if cluster.PublicNetwork != nil { d.Set("public_network", cluster.PublicNetwork) } + if err := d.Set("log", flattenNKSClusterLogInput(cluster.Log)); err != nil { log.Printf("[WARN] Error setting cluster log for (%s): %s", d.Id(), err) } + if err := d.Set("subnet_no_list", flattenInt32ListToStringList(cluster.SubnetNoList)); err != nil { - log.Printf("[WARN] Error setting subet no list set for (%s): %s", d.Id(), err) + log.Printf("[WARN] Error setting subnet no list set for (%s): %s", d.Id(), err) + } + + if oidcSpec != nil { + if err := d.Set("oidc", flattenNKSClusterOIDCSpec(oidcSpec)); err != nil { + log.Printf("[WARN] Error setting OIDCSpec set for (%s): %s", d.Id(), err) + } + } + + if ipAcl != nil { + d.Set("ip_acl_default_action", ipAcl.DefaultAction) + + if err := d.Set("ip_acl", flattenNKSClusterIPAclEntries(ipAcl).List()); err != nil { + log.Printf("[WARN] Error setting ip_acl list set for (%s): %s", d.Id(), err) + } + } return nil diff --git a/ncloud/data_source_ncloud_nks_node_pool.go b/ncloud/data_source_ncloud_nks_node_pool.go index c7994a076..8ad91b74a 100644 --- a/ncloud/data_source_ncloud_nks_node_pool.go +++ b/ncloud/data_source_ncloud_nks_node_pool.go @@ -38,13 +38,23 @@ func dataSourceNcloudNKSNodePool() *schema.Resource { Computed: true, }, "subnet_no": { - Type: schema.TypeString, + Type: schema.TypeString, + Computed: true, + Deprecated: "use 'subnet_no_list' instead", + }, + "subnet_no_list": { + Type: schema.TypeList, Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "product_code": { Type: schema.TypeString, Computed: true, }, + "software_code": { + Type: schema.TypeString, + Computed: true, + }, "autoscale": { Type: schema.TypeList, Computed: true, @@ -135,10 +145,13 @@ func dataSourceNcloudNKSNodePoolRead(ctx context.Context, d *schema.ResourceData d.Set("instance_no", strconv.Itoa(int(ncloud.Int32Value(nodePool.InstanceNo)))) d.Set("node_pool_name", nodePool.Name) d.Set("product_code", nodePool.ProductCode) + d.Set("software_code", nodePool.SoftwareCode) d.Set("node_count", nodePool.NodeCount) d.Set("k8s_version", nodePool.K8sVersion) if len(nodePool.SubnetNoList) > 0 { - d.Set("subnet_no", strconv.Itoa(int(ncloud.Int32Value(nodePool.SubnetNoList[0])))) + if err := d.Set("subnet_no_list", flattenInt32ListToStringList(nodePool.SubnetNoList)); err != nil { + log.Printf("[WARN] Error 
setting subnet no list set for (%s): %s", d.Id(), err) + } } if err := d.Set("autoscale", flattenNKSNodePoolAutoScale(nodePool.Autoscale)); err != nil { diff --git a/ncloud/data_source_ncloud_nks_server_images.go b/ncloud/data_source_ncloud_nks_server_images.go new file mode 100644 index 000000000..7524c879a --- /dev/null +++ b/ncloud/data_source_ncloud_nks_server_images.go @@ -0,0 +1,90 @@ +package ncloud + +import ( + "context" + "fmt" + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func init() { + RegisterDataSource("ncloud_nks_server_images", dataSourceNcloudNKSServerImages()) +} + +func dataSourceNcloudNKSServerImages() *schema.Resource { + return &schema.Resource{ + Read: dataSourceNcloudNKSServerImagesRead, + + Schema: map[string]*schema.Schema{ + "filter": dataSourceFiltersSchema(), + "images": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceNcloudNKSServerImagesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*ProviderConfig) + if !config.SupportVPC { + return NotSupportClassic("datasource `ncloud_nks_node_pool_server_images`") + } + + resources, err := getNKSServerImages(config) + if err != nil { + return err + } + + if f, ok := d.GetOk("filter"); ok { + resources = ApplyFilters(f.(*schema.Set), resources, dataSourceNcloudNKSServerImages().Schema["images"].Elem.(*schema.Resource).Schema) + } + + d.SetId(time.Now().UTC().String()) + if err := d.Set("images", resources); err != nil { + return fmt.Errorf("Error setting Codes: %s", err) + } + + return nil + +} + +func getNKSServerImages(config *ProviderConfig) ([]map[string]interface{}, error) { + + logCommonRequest("GetNKSServerImages", "") + resp, err := config.Client.vnks.V2Api.OptionServerImageGet(context.Background()) + + if err != nil { + logErrorResponse("GetNKSServerImages", err, "") + return nil, err + } + + logResponse("GetNKSServerImages", resp) + + resources := []map[string]interface{}{} + + for _, r := range *resp { + instance := map[string]interface{}{ + "value": ncloud.StringValue(r.Value), + "label": ncloud.StringValue(r.Label), + } + + resources = append(resources, instance) + } + + return resources, nil +} diff --git a/ncloud/data_source_ncloud_nks_server_images_test.go b/ncloud/data_source_ncloud_nks_server_images_test.go new file mode 100644 index 000000000..be0b6c535 --- /dev/null +++ b/ncloud/data_source_ncloud_nks_server_images_test.go @@ -0,0 +1,57 @@ +package ncloud + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "testing" +) + +func TestAccDataSourceNcloudNKSServerImages(t *testing.T) { + dataName := "data.ncloud_nks_server_images.images" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: getTestAccProviders(true), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceNcloudNKSServerImagesConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckDataSourceID(dataName), + ), + }, + }, + }) +} + +func TestAccDataSourceNcloudNKSServerImagesFilter(t *testing.T) { + dataName := "data.ncloud_nks_server_images.filter" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: getTestAccProviders(true), + Steps: []resource.TestStep{ 
+ { + Config: testAccDataSourceNcloudNKSServerImagestWithFilterConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataSourceID(dataName), + resource.TestCheckResourceAttr(dataName, "images.0.value", "SW.VSVR.OS.LNX64.UBNTU.SVR2004.WRKND.B050"), + ), + }, + }, + }) +} + +var testAccDataSourceNcloudNKSServerImagesConfig = ` +data "ncloud_nks_server_images" "images" {} +` + +func testAccDataSourceNcloudNKSServerImagestWithFilterConfig() string { + return fmt.Sprintf(` +data "ncloud_nks_server_images" "filter"{ + filter { + name = "label" + values = ["ubuntu-20.04-64-server"] + } +} +`) +} diff --git a/ncloud/data_source_ncloud_nks_server_products.go b/ncloud/data_source_ncloud_nks_server_products.go new file mode 100644 index 000000000..81971b5c7 --- /dev/null +++ b/ncloud/data_source_ncloud_nks_server_products.go @@ -0,0 +1,156 @@ +package ncloud + +import ( + "context" + "fmt" + "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "strconv" + "time" +) + +func init() { + RegisterDataSource("ncloud_nks_server_products", dataSourceNcloudNKSServerProducts()) +} + +func dataSourceNcloudNKSServerProducts() *schema.Resource { + return &schema.Resource{ + Read: dataSourceNcloudNKSServerProductsRead, + Schema: map[string]*schema.Schema{ + "filter": dataSourceFiltersSchema(), + "products": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + "detail": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_count": { + Type: schema.TypeString, + Computed: true, + }, + "memory_size": { + Type: schema.TypeString, + Computed: true, + }, + "gpu_count": { + Type: schema.TypeString, + Computed: true, + }, + "gpu_memory_size": { + Type: schema.TypeString, + Computed: true, + }, + "product_type": { + Type: schema.TypeString, + Computed: true, + }, + "product_code": { + Type: schema.TypeString, + Computed: true, + }, + "product_korean_desc": { + Type: schema.TypeString, + Computed: true, + }, + "product_english_desc": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "software_code": { + Type: schema.TypeString, + Required: true, + }, + "zone": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceNcloudNKSServerProductsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*ProviderConfig) + if !config.SupportVPC { + return NotSupportClassic("datasource `ncloud_nks_server_products`") + } + + resources, err := getNKSServerProducts(config, d) + if err != nil { + logErrorResponse("GetNKSServerProducts", err, "") + return err + } + + if f, ok := d.GetOk("filter"); ok { + resources = ApplyFilters(f.(*schema.Set), resources, dataSourceNcloudNKSServerProducts().Schema["products"].Elem.(*schema.Resource).Schema) + } + + d.SetId(time.Now().UTC().String()) + if err := d.Set("products", resources); err != nil { + return fmt.Errorf("Error setting Codes: %s", err) + } + + return nil +} + +func getNKSServerProducts(config *ProviderConfig, d *schema.ResourceData) ([]map[string]interface{}, error) { + logCommonRequest("GetNKSServerProducts", "") + + softwareCode := StringPtrOrNil(d.GetOk("software_code")) + zoneCode := StringPtrOrNil(d.GetOk("zone")) + + opt := make(map[string]interface{}) + 
opt["zoneCode"] = zoneCode + resp, err := config.Client.vnks.V2Api.OptionServerProductCodeGet(context.Background(), softwareCode, opt) + + if err != nil { + logErrorResponse("GetNKSServerProducts", err, "") + return nil, err + } + + logResponse("GetNKSServerProducts", resp) + + resources := []map[string]interface{}{} + + for _, r := range *resp { + instance := map[string]interface{}{ + "label": ncloud.StringValue(r.Detail.ProductName), + "value": ncloud.StringValue(r.Detail.ProductCode), + "detail": []map[string]interface{}{ + { + "product_type": ncloud.StringValue(r.Detail.ProductType2Code), + "product_code": ncloud.StringValue(r.Detail.ProductCode), + "product_korean_desc": ncloud.StringValue(r.Detail.ProductKoreanDesc), + "product_english_desc": ncloud.StringValue(r.Detail.ProductEnglishDesc), + "cpu_count": strconv.Itoa(int(ncloud.Int32Value(r.Detail.CpuCount))), + "memory_size": strconv.Itoa(int(ncloud.Int32Value(r.Detail.MemorySizeGb))) + "GB", + "gpu_count": strconv.Itoa(int(ncloud.Int32Value(r.Detail.GpuCount))), + "gpu_memory_size": strconv.Itoa(int(ncloud.Int32Value(r.Detail.GpuMemorySizeGb))) + "GB", + }, + }, + } + + resources = append(resources, instance) + } + + return resources, nil +} diff --git a/ncloud/data_source_ncloud_nks_server_products_test.go b/ncloud/data_source_ncloud_nks_server_products_test.go new file mode 100644 index 000000000..840485295 --- /dev/null +++ b/ncloud/data_source_ncloud_nks_server_products_test.go @@ -0,0 +1,38 @@ +package ncloud + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "testing" +) + +func TestAccDataSourceNcloudNKSServerProductCodes(t *testing.T) { + dataName := "data.ncloud_nks_server_products.product_codes" + zone := "KR-1" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: getTestAccProviders(true), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceNcloudNKSServerProductConfig(zone), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataSourceID(dataName), + ), + }, + }, + }) +} + +func testAccDataSourceNcloudNKSServerProductConfig(zone string) string { + return fmt.Sprintf(` +data "ncloud_nks_server_images" "images"{ +} + +data "ncloud_nks_server_products" "product_codes" { + software_code = data.ncloud_nks_server_images.images.images[0].value + zone = "%[1]s" +} + +`, zone) +} diff --git a/ncloud/data_source_ncloud_nks_versions.go b/ncloud/data_source_ncloud_nks_versions.go index fe39edd5f..9968f5f37 100644 --- a/ncloud/data_source_ncloud_nks_versions.go +++ b/ncloud/data_source_ncloud_nks_versions.go @@ -66,7 +66,7 @@ func dataSourceNcloudVersionsRead(d *schema.ResourceData, meta interface{}) erro func getNKSVersion(config *ProviderConfig) ([]map[string]interface{}, error) { logCommonRequest("GetNKSVersion", "") - resp, err := config.Client.vnks.V2Api.OptionVersionGet(context.Background()) + resp, err := config.Client.vnks.V2Api.OptionVersionGet(context.Background(), map[string]interface{}{}) if err != nil { logErrorResponse("GetNKSVersion", err, "") diff --git a/ncloud/resource_ncloud_nks_cluster.go b/ncloud/resource_ncloud_nks_cluster.go index a2691dc7c..15ea6e474 100644 --- a/ncloud/resource_ncloud_nks_cluster.go +++ b/ncloud/resource_ncloud_nks_cluster.go @@ -6,11 +6,14 @@ import ( "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "log" + "os" "strconv" + "strings" "time" ) @@ -32,6 +35,7 @@ func resourceNcloudNKSCluster() *schema.Resource { CreateContext: resourceNcloudNKSClusterCreate, ReadContext: resourceNcloudNKSClusterRead, DeleteContext: resourceNcloudNKSClusterDelete, + UpdateContext: resourceNcloudNKSClusterUpdate, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -40,6 +44,12 @@ func resourceNcloudNKSCluster() *schema.Resource { Update: schema.DefaultTimeout(DefaultCreateTimeout), Delete: schema.DefaultTimeout(DefaultCreateTimeout), }, + CustomizeDiff: customdiff.All( + customdiff.ForceNewIfChange("subnet_no_list", func(ctx context.Context, old, new, meta any) bool { + _, removed := getSubnetDiff(old, new) + return len(removed) > 0 + }), + ), Schema: map[string]*schema.Schema{ "uuid": { Type: schema.TypeString, @@ -69,7 +79,6 @@ func resourceNcloudNKSCluster() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ForceNew: true, }, "zone": { Type: schema.TypeString, @@ -90,7 +99,8 @@ func resourceNcloudNKSCluster() *schema.Resource { "subnet_no_list": { Type: schema.TypeList, Required: true, - ForceNew: true, + MaxItems: 5, + MinItems: 1, Elem: &schema.Schema{Type: schema.TypeString}, }, "lb_private_subnet_no": { @@ -113,7 +123,6 @@ func resourceNcloudNKSCluster() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, - ForceNew: true, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -128,6 +137,70 @@ func resourceNcloudNKSCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "oidc": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issuer_url": { + Type: schema.TypeString, + Required: true, + }, + "client_id": { + Type: schema.TypeString, + Required: true, + }, + "username_prefix": { + Type: schema.TypeString, + Optional: true, + }, + "username_claim": { + Type: schema.TypeString, + Optional: true, + }, + "groups_prefix": { + Type: schema.TypeString, + Optional: true, + }, + "groups_claim": { + Type: schema.TypeString, + Optional: true, + }, + "required_claim": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "ip_acl_default_action": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: ToDiagFunc(validation.StringInSlice([]string{"allow", "deny"}, false)), + }, + "ip_acl": { + Type: schema.TypeSet, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeString, + Required: true, + }, + "address": { + Type: schema.TypeString, + Required: true, + }, + "comment": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, }, } } @@ -161,7 +234,20 @@ func resourceNcloudNKSClusterCreate(ctx context.Context, d *schema.ResourceData, } if log, ok := d.GetOk("log"); ok { - reqParams.Log = expandNKSClusterLogInput(log.([]interface{})) + reqParams.Log = expandNKSClusterLogInput(log.([]interface{}), reqParams.Log) + } + + var oidcReq *vnks.UpdateOidcDto + if oidc, ok := d.GetOk("oidc"); ok { + oidcReq = expandNKSClusterOIDCSpec(oidc.([]interface{})) + } + + ipAclReq := &vnks.IpAclsDto{ + DefaultAction: StringPtrOrNil(d.GetOk("ip_acl_default_action")), + Entries: 
[]*vnks.IpAclsEntriesDto{}, + } + if ipAcl, ok := d.GetOk("ip_acl"); ok { + ipAclReq.Entries = expandNKSClusterIPAcl(ipAcl) } logCommonRequest("resourceNcloudNKSClusterCreate", reqParams) @@ -177,6 +263,33 @@ func resourceNcloudNKSClusterCreate(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } d.SetId(uuid) + + if (ncloud.StringValue(ipAclReq.DefaultAction) != "allow" || len(ipAclReq.Entries) > 0) && !checkFinSite(config) { + _, err = config.Client.vnks.V2Api.ClustersUuidIpAclPatch(ctx, ipAclReq, resp.Uuid) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterCreate:ipAcl", err, ipAclReq) + return diag.FromErr(err) + } + } + + if oidcReq != nil { + + if err = waitForNKSClusterActive(ctx, d, config, uuid); err != nil { + return diag.FromErr(err) + } + + _, err = config.Client.vnks.V2Api.ClustersUuidOidcPatch(ctx, oidcReq, resp.Uuid) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterCreate:oidc", err, oidcReq) + return diag.FromErr(err) + } + + logResponse("resourceNcloudNKSClusterCreateoidc:oidc", oidcReq) + if err := waitForNKSClusterActive(ctx, d, config, uuid); err != nil { + return diag.FromErr(err) + } + } + return resourceNcloudNKSClusterRead(ctx, d, meta) } @@ -191,6 +304,16 @@ func resourceNcloudNKSClusterRead(ctx context.Context, d *schema.ResourceData, m return diag.FromErr(err) } + oidcSpec, err := getOIDCSpec(ctx, config, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + ipAcl, err := getIPAcl(ctx, config, d.Id()) + if err != nil { + return diag.FromErr(err) + } + if cluster == nil { d.SetId("") return nil @@ -208,6 +331,7 @@ func resourceNcloudNKSClusterRead(ctx context.Context, d *schema.ResourceData, m d.Set("lb_private_subnet_no", strconv.Itoa(int(ncloud.Int32Value(cluster.SubnetLbNo)))) d.Set("kube_network_plugin", cluster.KubeNetworkPlugin) d.Set("acg_no", strconv.Itoa(int(ncloud.Int32Value(cluster.AcgNo)))) + if cluster.LbPublicSubnetNo != nil { d.Set("lb_public_subnet_no", strconv.Itoa(int(ncloud.Int32Value(cluster.LbPublicSubnetNo)))) } @@ -223,9 +347,136 @@ func resourceNcloudNKSClusterRead(ctx context.Context, d *schema.ResourceData, m log.Printf("[WARN] Error setting subnet no list set for (%s): %s", d.Id(), err) } + if oidcSpec != nil { + oidc := flattenNKSClusterOIDCSpec(oidcSpec) + if err := d.Set("oidc", oidc); err != nil { + log.Printf("[WARN] Error setting OIDCSpec set for (%s): %s", d.Id(), err) + } + } + + if ipAcl != nil { + d.Set("ip_acl_default_action", ipAcl.DefaultAction) + + if err := d.Set("ip_acl", flattenNKSClusterIPAclEntries(ipAcl).List()); err != nil { + log.Printf("[WARN] Error setting ip_acl list set for (%s): %s", d.Id(), err) + } + + } + return nil } +func resourceNcloudNKSClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + config := meta.(*ProviderConfig) + if !config.SupportVPC { + return diag.FromErr(NotSupportClassic("resource `ncloud_nks_cluster`")) + } + + cluster, err := getNKSCluster(ctx, config, d.Id()) + if err != nil { + return diag.FromErr(err) + } + + if d.HasChanges("k8s_version") { + + if err = waitForNKSClusterActive(ctx, d, config, *cluster.Uuid); err != nil { + return diag.FromErr(err) + } + + // Cluster UPGRADE + newVersion := StringPtrOrNil(d.GetOk("k8s_version")) + _, err := config.Client.vnks.V2Api.ClustersUuidUpgradePatch(ctx, cluster.Uuid, newVersion, map[string]interface{}{}) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterUpgrade", err, newVersion) + return diag.FromErr(err) + } + + 
logResponse("resourceNcloudNKSClusterUpgrade", newVersion) + if err := waitForNKSClusterActive(ctx, d, config, *cluster.Uuid); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChanges("oidc") { + + if err = waitForNKSClusterActive(ctx, d, config, *cluster.Uuid); err != nil { + return diag.FromErr(err) + } + + var oidcSpec *vnks.UpdateOidcDto + oidc, _ := d.GetOk("oidc") + oidcSpec = expandNKSClusterOIDCSpec(oidc.([]interface{})) + + _, err = config.Client.vnks.V2Api.ClustersUuidOidcPatch(ctx, oidcSpec, cluster.Uuid) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterOIDCPatch", err, oidcSpec) + return diag.FromErr(err) + } + + logResponse("resourceNcloudNKSClusterOIDCPatch", oidcSpec) + if err := waitForNKSClusterActive(ctx, d, config, *cluster.Uuid); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChanges("ip_acl", "ip_acl_default_action") && !checkFinSite(config) { + + ipAclReq := &vnks.IpAclsDto{ + DefaultAction: StringPtrOrNil(d.GetOk("ip_acl_default_action")), + Entries: []*vnks.IpAclsEntriesDto{}, + } + if ipAcl, ok := d.GetOk("ip_acl"); ok { + ipAclReq.Entries = expandNKSClusterIPAcl(ipAcl) + } + + _, err = config.Client.vnks.V2Api.ClustersUuidIpAclPatch(ctx, ipAclReq, cluster.Uuid) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterIPAclPatch", err, ipAclReq) + return diag.FromErr(err) + } + } + + if d.HasChanges("log") { + + var logDto *vnks.AuditLogDto + if log, ok := d.GetOk("log"); ok { + logDto = expandNKSClusterLogInput(log.([]interface{}), logDto) + } else { + logDto.Audit = ncloud.Bool(false) + } + + _, err = config.Client.vnks.V2Api.ClustersUuidLogPatch(ctx, logDto, cluster.Uuid) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterLogPatch", err, logDto) + return diag.FromErr(err) + } + + } + + if d.HasChanges("subnet_no_list") { + + oldList, newList := d.GetChange("subnet_no_list") + added, _ := getSubnetDiff(oldList, newList) + + subnets := &vnks.AddSubnetDto{ + Subnets: []*vnks.SubnetDto{}, + } + + for _, subnetNo := range added { + subnets.Subnets = append(subnets.Subnets, &vnks.SubnetDto{Number: subnetNo}) + } + + _, err = config.Client.vnks.V2Api.ClustersUuidAddSubnetPatch(ctx, subnets, cluster.Uuid) + if err != nil { + logErrorResponse("resourceNcloudNKSClusterAddSubnetsPatch", err, subnets) + return diag.FromErr(err) + } + + } + + return resourceNcloudNKSClusterRead(ctx, d, config) +} + func resourceNcloudNKSClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { config := meta.(*ProviderConfig) if !config.SupportVPC { @@ -252,7 +503,7 @@ func resourceNcloudNKSClusterDelete(ctx context.Context, d *schema.ResourceData, func waitForNKSClusterDeletion(ctx context.Context, d *schema.ResourceData, config *ProviderConfig) error { stateConf := &resource.StateChangeConf{ Pending: []string{NKSStatusDeletingCode}, - Target: []string{NKSStatusNullCode}, + Target: []string{NKSStatusNullCode, NKSStatusRunningCode}, // ToDo: remove runnig status after external autoscaler callback removed. 
Refresh: func() (result interface{}, state string, err error) { cluster, err := getNKSClusterFromList(ctx, config, d.Id()) if err != nil { @@ -265,7 +516,7 @@ func waitForNKSClusterDeletion(ctx context.Context, d *schema.ResourceData, conf }, Timeout: d.Timeout(schema.TimeoutDelete), MinTimeout: 3 * time.Second, - Delay: 2 * time.Second, + Delay: 5 * time.Second, } if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for NKS Cluster (%s) to become terminating: %s", d.Id(), err) @@ -290,7 +541,7 @@ func waitForNKSClusterActive(ctx context.Context, d *schema.ResourceData, config }, Timeout: d.Timeout(schema.TimeoutCreate), MinTimeout: 3 * time.Second, - Delay: 2 * time.Second, + Delay: 5 * time.Second, } if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("error waiting for NKS Cluster (%s) to become activating: %s", uuid, err) @@ -307,6 +558,28 @@ func getNKSCluster(ctx context.Context, config *ProviderConfig, uuid string) (*v return resp.Cluster, nil } +func getOIDCSpec(ctx context.Context, config *ProviderConfig, uuid string) (*vnks.OidcRes, error) { + + resp, err := config.Client.vnks.V2Api.ClustersUuidOidcGet(ctx, &uuid) + if err != nil { + return nil, err + } + return resp, nil +} + +func getIPAcl(ctx context.Context, config *ProviderConfig, uuid string) (*vnks.IpAclsRes, error) { + + if checkFinSite(config) { + return &vnks.IpAclsRes{}, nil + } + + resp, err := config.Client.vnks.V2Api.ClustersUuidIpAclGet(ctx, &uuid) + if err != nil { + return nil, err + } + return resp, nil +} + func getNKSClusterFromList(ctx context.Context, config *ProviderConfig, uuid string) (*vnks.Cluster, error) { clusters, err := getNKSClusters(ctx, config) if err != nil { @@ -327,3 +600,42 @@ func getNKSClusters(ctx context.Context, config *ProviderConfig) ([]*vnks.Cluste } return resp.Clusters, nil } + +func getSubnetDiff(oldList interface{}, newList interface{}) (added []*int32, removed []*int32) { + oldMap := make(map[string]int) + newMap := make(map[string]int) + + for _, v := range expandStringInterfaceList(oldList.(([]interface{}))) { + oldMap[*v] += 1 + } + for _, v := range expandStringInterfaceList(newList.(([]interface{}))) { + newMap[*v] += 1 + } + + for subnet, _ := range oldMap { + if _, exist := newMap[subnet]; !exist { + intV, err := strconv.Atoi(subnet) + if err == nil { + removed = append(removed, ncloud.Int32(int32(intV))) + } + } + } + + for subnet, _ := range newMap { + if _, exist := oldMap[subnet]; !exist { + intV, err := strconv.Atoi(subnet) + if err == nil { + added = append(added, ncloud.Int32(int32(intV))) + } + } + } + return +} + +func checkFinSite(config *ProviderConfig) (result bool) { + ncloudApiGw := os.Getenv("NCLOUD_API_GW") + if config.Site == "fin" || strings.HasSuffix(ncloudApiGw, "apigw.fin-ntruss.com") { + result = true + } + return +} diff --git a/ncloud/resource_ncloud_nks_cluster_test.go b/ncloud/resource_ncloud_nks_cluster_test.go index 0823599ec..ed545c744 100644 --- a/ncloud/resource_ncloud_nks_cluster_test.go +++ b/ncloud/resource_ncloud_nks_cluster_test.go @@ -32,7 +32,7 @@ func TestAccResourceNcloudNKSCluster_basic(t *testing.T) { CheckDestroy: testAccCheckNKSClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region), + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), Check: resource.ComposeTestCheckFunc( 
testAccCheckNKSClusterExists(resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "name", name), @@ -41,6 +41,14 @@ func TestAccResourceNcloudNKSCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "login_key_name", TF_TEST_NKS_LOGIN_KEY), resource.TestCheckResourceAttr(resourceName, "zone", fmt.Sprintf("%s-1", region)), resource.TestMatchResourceAttr(resourceName, "vpc_no", regexp.MustCompile(`^\d+$`)), + resource.TestCheckResourceAttr(resourceName, "log.0.audit", "true"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.issuer_url", "https://keycloak.ncp.gimmetm.net/realms/nks"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.client_id", "nks-client"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.username_claim", "preferred_username"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.username_prefix", "oidc:"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_claim", "groups"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_prefix", "oidc:"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.required_claim", "iss=https://keycloak.ncp.gimmetm.net/realms/nks"), ), }, { @@ -99,7 +107,238 @@ func TestAccResourceNcloudNKSCluster_InvalidSubnet(t *testing.T) { }) } -func testAccResourceNcloudNKSClusterConfig(name string, clusterType string, k8sVersion string, loginKeyName string, region string) string { +func TestAccResourceNcloudNKSCluster_Update(t *testing.T) { + var cluster vnks.Cluster + name := "m3-" + getTestClusterName() + + region, clusterType, _, k8sVersion := getRegionAndNKSType() + resourceName := "ncloud_nks_cluster.cluster" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNKSClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "log.0.audit", "true"), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "log.0.audit", "false"), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSCluster_NoOIDCSpec(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "oidc.#", "0"), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSCluster_AddSubnet(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "subnet_no_list.#", "3"), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSCluster_AddSubnet(name, clusterType, "1.25.8-nks.1", TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "k8s_version", "1.25.8-nks.1"), + ), + }, + }, + }) +} + +func TestAccResourceNcloudNKSCluster_UpdateOnce(t *testing.T) { + var cluster vnks.Cluster + 
name := "m3-" + getTestClusterName() + + region, clusterType, _, k8sVersion := getRegionAndNKSType() + resourceName := "ncloud_nks_cluster.cluster" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNKSClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSCluster_AddSubnet(name, clusterType, "1.25.8-nks.1", TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "k8s_version", "1.25.8-nks.1"), + resource.TestCheckResourceAttr(resourceName, "subnet_no_list.#", "3"), + resource.TestCheckResourceAttr(resourceName, "oidc.#", "0"), + resource.TestCheckResourceAttr(resourceName, "log.0.audit", "false"), + ), + }, + }, + }) +} + +func TestAccResourceNcloudNKSCluster_VersionUpgrade(t *testing.T) { + var cluster vnks.Cluster + name := "m3-" + getTestClusterName() + + region, clusterType, _, k8sVersion := getRegionAndNKSType() + resourceName := "ncloud_nks_cluster.cluster" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNKSClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, "1.25.8-nks.1", TF_TEST_NKS_LOGIN_KEY, region, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "k8s_version", "1.25.8-nks.1"), + ), + }, + }, + }) +} + +func TestAccResourceNcloudNKSCluster_OIDCSpec(t *testing.T) { + var cluster vnks.Cluster + name := getTestClusterName() + + region, clusterType, _, k8sVersion := getRegionAndNKSType() + resourceName := "ncloud_nks_cluster.cluster" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNKSClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "oidc.0.issuer_url", "https://keycloak.ncp.gimmetm.net/realms/nks"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.client_id", "nks-client"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.username_claim", "preferred_username"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.username_prefix", "oidc:"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_claim", "groups"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_prefix", "oidc:"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.required_claim", "iss=https://keycloak.ncp.gimmetm.net/realms/nks"), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSCluster_NoOIDCSpec(name, 
clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "oidc.#", "0"), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "oidc.0.issuer_url", "https://keycloak.ncp.gimmetm.net/realms/nks"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.client_id", "nks-client"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.username_claim", "preferred_username"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.username_prefix", "oidc:"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_claim", "groups"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.groups_prefix", "oidc:"), + resource.TestCheckResourceAttr(resourceName, "oidc.0.required_claim", "iss=https://keycloak.ncp.gimmetm.net/realms/nks"), + ), + }, + }, + }) +} + +func TestAccResourceNcloudNKSCluster_AuditLog(t *testing.T) { + var cluster vnks.Cluster + name := getTestClusterName() + + region, clusterType, _, k8sVersion := getRegionAndNKSType() + resourceName := "ncloud_nks_cluster.cluster" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNKSClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "log.0.audit", "false"), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "log.0.audit", "true"), + ), + }, + }, + }) +} + +func TestAccResourceNcloudNKSCluster_AddSubnet(t *testing.T) { + var cluster vnks.Cluster + name := getTestClusterName() + + region, clusterType, _, k8sVersion := getRegionAndNKSType() + resourceName := "ncloud_nks_cluster.cluster" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNKSClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccResourceNcloudNKSClusterConfig(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSCluster_AddSubnet(name, clusterType, k8sVersion, TF_TEST_NKS_LOGIN_KEY, region, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "subnet_no_list.#", "3"), + ), + Destroy: false, + }, + }, + }) +} + +func testAccResourceNcloudNKSClusterConfig(name string, clusterType string, k8sVersion string, loginKeyName string, region string, auditLog bool) string { return fmt.Sprintf(` resource "ncloud_vpc" "vpc" { name = "%[1]s" @@ -149,8 +388,20 @@ resource "ncloud_nks_cluster" "cluster" { ] vpc_no = ncloud_vpc.vpc.vpc_no 
zone = "%[5]s-1" + log { + audit = %[6]t + } + oidc { + issuer_url = "https://keycloak.ncp.gimmetm.net/realms/nks" + client_id = "nks-client" + username_claim = "preferred_username" + username_prefix = "oidc:" + groups_claim = "groups" + groups_prefix = "oidc:" + required_claim = "iss=https://keycloak.ncp.gimmetm.net/realms/nks" + } } -`, name, clusterType, k8sVersion, loginKeyName, region) +`, name, clusterType, k8sVersion, loginKeyName, region, auditLog) } func testAccResourceNcloudNKSClusterPublicNetworkConfig(name string, clusterType string, k8sVersion string, loginKeyName string, region string) string { @@ -265,6 +516,131 @@ resource "ncloud_nks_cluster" "cluster" { `, name, clusterType, k8sVersion, loginKeyName, region) } +func testAccResourceNcloudNKSCluster_NoOIDCSpec(name string, clusterType string, k8sVersion string, loginKeyName string, region string, auditLog bool) string { + return fmt.Sprintf(` +resource "ncloud_vpc" "vpc" { + name = "%[1]s" + ipv4_cidr_block = "10.2.0.0/16" +} + +resource "ncloud_subnet" "subnet1" { + vpc_no = ncloud_vpc.vpc.vpc_no + name = "%[1]s-1" + subnet = "10.2.1.0/24" + zone = "%[5]s-1" + network_acl_no = ncloud_vpc.vpc.default_network_acl_no + subnet_type = "PRIVATE" + usage_type = "GEN" +} + +resource "ncloud_subnet" "subnet2" { + vpc_no = ncloud_vpc.vpc.vpc_no + name = "%[1]s-2" + subnet = "10.2.2.0/24" + zone = "%[5]s-1" + network_acl_no = ncloud_vpc.vpc.default_network_acl_no + subnet_type = "PRIVATE" + usage_type = "GEN" +} + +resource "ncloud_subnet" "subnet_lb" { + vpc_no = ncloud_vpc.vpc.vpc_no + name = "%[1]s-lb" + subnet = "10.2.100.0/24" + zone = "%[5]s-1" + network_acl_no = ncloud_vpc.vpc.default_network_acl_no + subnet_type = "PRIVATE" + usage_type = "LOADB" +} + +resource "ncloud_nks_cluster" "cluster" { + name = "%[1]s" + cluster_type = "%[2]s" + k8s_version = "%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = ncloud_subnet.subnet_lb.id + kube_network_plugin = "cilium" + subnet_no_list = [ + ncloud_subnet.subnet1.id, + ncloud_subnet.subnet2.id, + ] + vpc_no = ncloud_vpc.vpc.vpc_no + zone = "%[5]s-1" + log { + audit = "%[6]t" + } +} +`, name, clusterType, k8sVersion, loginKeyName, region, auditLog) +} + +func testAccResourceNcloudNKSCluster_AddSubnet(name string, clusterType string, k8sVersion string, loginKeyName string, region string, auditLog bool) string { + return fmt.Sprintf(` +resource "ncloud_vpc" "vpc" { + name = "%[1]s" + ipv4_cidr_block = "10.2.0.0/16" +} + +resource "ncloud_subnet" "subnet1" { + vpc_no = ncloud_vpc.vpc.vpc_no + name = "%[1]s-1" + subnet = "10.2.1.0/24" + zone = "%[5]s-1" + network_acl_no = ncloud_vpc.vpc.default_network_acl_no + subnet_type = "PRIVATE" + usage_type = "GEN" +} + +resource "ncloud_subnet" "subnet2" { + vpc_no = ncloud_vpc.vpc.vpc_no + name = "%[1]s-2" + subnet = "10.2.2.0/24" + zone = "%[5]s-1" + network_acl_no = ncloud_vpc.vpc.default_network_acl_no + subnet_type = "PRIVATE" + usage_type = "GEN" +} + +resource "ncloud_subnet" "subnet3" { + vpc_no = ncloud_vpc.vpc.vpc_no + name = "%[1]s-3" + subnet = "10.2.4.0/24" + zone = "%[5]s-1" + network_acl_no = ncloud_vpc.vpc.default_network_acl_no + subnet_type = "PRIVATE" + usage_type = "GEN" +} + +resource "ncloud_subnet" "subnet_lb" { + vpc_no = ncloud_vpc.vpc.vpc_no + name = "%[1]s-lb" + subnet = "10.2.100.0/24" + zone = "%[5]s-1" + network_acl_no = ncloud_vpc.vpc.default_network_acl_no + subnet_type = "PRIVATE" + usage_type = "LOADB" +} + +resource "ncloud_nks_cluster" "cluster" { + name = "%[1]s" + cluster_type = "%[2]s" + k8s_version = 
"%[3]s" + login_key_name = "%[4]s" + lb_private_subnet_no = ncloud_subnet.subnet_lb.id + kube_network_plugin = "cilium" + subnet_no_list = [ + ncloud_subnet.subnet1.id, + ncloud_subnet.subnet2.id, + ncloud_subnet.subnet3.id, + ] + vpc_no = ncloud_vpc.vpc.vpc_no + zone = "%[5]s-1" + log { + audit = "%[6]t" + } +} +`, name, clusterType, k8sVersion, loginKeyName, region, auditLog) +} + func testAccCheckNKSClusterExists(n string, cluster *vnks.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -326,7 +702,7 @@ func getRegionAndNKSType() (region string, clusterType string, productType strin clusterType = "SVR.VNKS.STAND.C002.M008.NET.SSD.B050.G002" productType = "SVR.VSVR.STAND.C002.M008.NET.SSD.B050.G002" } - k8sVersion = "1.23.9-nks.1" + k8sVersion = "1.24.10-nks.1" return } diff --git a/ncloud/resource_ncloud_nks_node_pool.go b/ncloud/resource_ncloud_nks_node_pool.go index 62d2fe2fd..c1fef6ad6 100644 --- a/ncloud/resource_ncloud_nks_node_pool.go +++ b/ncloud/resource_ncloud_nks_node_pool.go @@ -6,6 +6,7 @@ import ( "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/ncloud" "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -20,10 +21,13 @@ func init() { } const ( - NKSNodePoolStatusRunCode = "RUN" - NKSNodePoolStatusNodeScaleDown = "NODE_SCALE_DOWN" - NKSNodePoolStatusNodeScaleOut = "NODE_SCALE_OUT" - NKSNodePoolIDSeparator = ":" + NKSNodePoolStatusRunCode = "RUN" + NKSNodePoolStatusNodeScaleDown = "NODE_SCALE_DOWN" + NKSNodePoolStatusNodeScaleOut = "NODE_SCALE_OUT" + NKSNodePoolStatusRotateNodeScaleOut = "ROTATE_NODE_SCALE_OUT" + NKSNodePoolStatusRotateNodeScaleDown = "ROTATE_NODE_SCALE_DOWN" + NKSNodePoolStatusUpgrade = "UPGRADE" + NKSNodePoolIDSeparator = ":" ) func resourceNcloudNKSNodePool() *schema.Resource { @@ -40,6 +44,21 @@ func resourceNcloudNKSNodePool() *schema.Resource { Update: schema.DefaultTimeout(DefaultCreateTimeout), Delete: schema.DefaultTimeout(DefaultCreateTimeout), }, + CustomizeDiff: customdiff.Sequence( + // add subnet nubmer to subnet_no_list when using deprecated subnet_no parameter. 
+ customdiff.IfValue( + "subnet_no", + func(ctx context.Context, subnetNo, meta interface{}) bool { + return subnetNo.(string) != "" + }, + func(_ context.Context, d *schema.ResourceDiff, _ interface{}) error { + if _, ok := d.GetOk("subnet_no_list"); !ok { + subnetNo := d.Get("subnet_no").(string) + return d.SetNew("subnet_no_list", []*string{ncloud.String(subnetNo)}) + } + return nil + }), + ), Schema: map[string]*schema.Schema{ "cluster_uuid": { Type: schema.TypeString, @@ -53,6 +72,7 @@ func resourceNcloudNKSNodePool() *schema.Resource { "k8s_version": { Type: schema.TypeString, Computed: true, + Optional: true, }, "node_pool_name": { Type: schema.TypeString, @@ -65,13 +85,32 @@ func resourceNcloudNKSNodePool() *schema.Resource { Required: true, }, "subnet_no": { - Type: schema.TypeString, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Deprecated: "use 'subnet_no_list' instead", + ConflictsWith: []string{"subnet_no_list"}, + }, + "subnet_no_list": { + Type: schema.TypeList, Optional: true, Computed: true, + ForceNew: true, + MaxItems: 5, + MinItems: 0, + Elem: &schema.Schema{Type: schema.TypeString}, }, "product_code": { Type: schema.TypeString, Required: true, + ForceNew: true, + }, + "software_code": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, }, "autoscale": { Type: schema.TypeList, @@ -150,10 +189,14 @@ func resourceNcloudNKSNodePoolCreate(ctx context.Context, d *schema.ResourceData id := NodePoolCreateResourceID(clusterUuid, nodePoolName) reqParams := &vnks.NodePoolCreationBody{ - Name: ncloud.String(nodePoolName), - NodeCount: Int32PtrOrNil(d.GetOk("node_count")), - ProductCode: StringPtrOrNil(d.GetOk("product_code")), - SubnetNo: getInt32FromString(d.GetOk("subnet_no")), + Name: ncloud.String(nodePoolName), + NodeCount: Int32PtrOrNil(d.GetOk("node_count")), + ProductCode: StringPtrOrNil(d.GetOk("product_code")), + SoftwareCode: StringPtrOrNil(d.GetOk("software_code")), + } + + if list, ok := d.GetOk("subnet_no_list"); ok { + reqParams.SubnetNoList = expandStringInterfaceListToInt32List(list.([]interface{})) } if _, ok := d.GetOk("autoscale"); ok { @@ -161,7 +204,7 @@ func resourceNcloudNKSNodePoolCreate(ctx context.Context, d *schema.ResourceData } logCommonRequest("resourceNcloudNKSNodePoolCreate", reqParams) - err := config.Client.vnks.V2Api.ClustersUuidNodePoolPost(ctx, reqParams, ncloud.String(clusterUuid)) + _, err := config.Client.vnks.V2Api.ClustersUuidNodePoolPost(ctx, reqParams, ncloud.String(clusterUuid)) if err != nil { logErrorResponse("resourceNcloudNKSNodePoolCreate", err, reqParams) return diag.FromErr(err) @@ -197,16 +240,20 @@ func resourceNcloudNKSNodePoolRead(ctx context.Context, d *schema.ResourceData, d.Set("instance_no", strconv.Itoa(int(ncloud.Int32Value(nodePool.InstanceNo)))) d.Set("node_pool_name", nodePool.Name) d.Set("product_code", nodePool.ProductCode) + d.Set("software_code", nodePool.SoftwareCode) d.Set("node_count", nodePool.NodeCount) d.Set("k8s_version", nodePool.K8sVersion) - if len(nodePool.SubnetNoList) > 0 { - d.Set("subnet_no", strconv.Itoa(int(ncloud.Int32Value(nodePool.SubnetNoList[0])))) - } if err := d.Set("autoscale", flattenNKSNodePoolAutoScale(nodePool.Autoscale)); err != nil { log.Printf("[WARN] Error setting Autoscale set for (%s): %s", d.Id(), err) } + if len(nodePool.SubnetNoList) > 0 { + if err := d.Set("subnet_no_list", flattenInt32ListToStringList(nodePool.SubnetNoList)); err != nil { + log.Printf("[WARN] Error setting subnet no list set for (%s): %s", 
d.Id(), err) + } + } + nodes, err := getNKSNodePoolWorkerNodes(ctx, config, clusterUuid, nodePoolName) if err != nil { return diag.FromErr(err) @@ -231,6 +278,26 @@ func resourceNcloudNKSNodePoolUpdate(ctx context.Context, d *schema.ResourceData } instanceNo := StringPtrOrNil(d.GetOk("instance_no")) + k8sVersion := StringPtrOrNil(d.GetOk("k8s_version")) + + if d.HasChanges("k8s_version") { + + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { + return diag.FromErr(err) + } + + // Nodepool UPGRADE + _, err = config.Client.vnks.V2Api.ClustersUuidNodePoolInstanceNoUpgradePatch(ctx, ncloud.String(clusterUuid), instanceNo, k8sVersion, map[string]interface{}{}) + if err != nil { + logErrorResponse("resourceNcloudNKSNodepoolUpgrade", err, k8sVersion) + return diag.FromErr(err) + } + + logResponse("resourceNcloudNKSNodepoolUpgrade", k8sVersion) + if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { + return diag.FromErr(err) + } + } if d.HasChanges("node_count", "autoscale") { if err := waitForNKSNodePoolActive(ctx, d, config, clusterUuid, nodePoolName); err != nil { @@ -255,6 +322,7 @@ func resourceNcloudNKSNodePoolUpdate(ctx context.Context, d *schema.ResourceData return diag.FromErr(err) } } + return resourceNcloudNKSNodePoolRead(ctx, d, config) } @@ -312,7 +380,7 @@ func waitForNKSNodePoolDeletion(ctx context.Context, d *schema.ResourceData, con }, Timeout: d.Timeout(schema.TimeoutDelete), MinTimeout: 3 * time.Second, - Delay: 2 * time.Second, + Delay: 5 * time.Second, } if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("Error waiting for NKS NodePool (%s) to become terminating: %s", d.Id(), err) @@ -322,7 +390,7 @@ func waitForNKSNodePoolDeletion(ctx context.Context, d *schema.ResourceData, con func waitForNKSNodePoolActive(ctx context.Context, d *schema.ResourceData, config *ProviderConfig, clusterUuid string, nodePoolName string) error { stateConf := &resource.StateChangeConf{ - Pending: []string{NKSStatusCreatingCode, NKSNodePoolStatusNodeScaleOut, NKSNodePoolStatusNodeScaleDown}, + Pending: []string{NKSStatusCreatingCode, NKSNodePoolStatusNodeScaleOut, NKSNodePoolStatusNodeScaleDown, NKSNodePoolStatusUpgrade, NKSNodePoolStatusRotateNodeScaleOut, NKSNodePoolStatusRotateNodeScaleDown}, Target: []string{NKSNodePoolStatusRunCode}, Refresh: func() (result interface{}, state string, err error) { np, err := getNKSNodePool(ctx, config, clusterUuid, nodePoolName) @@ -337,7 +405,7 @@ func waitForNKSNodePoolActive(ctx context.Context, d *schema.ResourceData, confi }, Timeout: d.Timeout(schema.TimeoutCreate), MinTimeout: 3 * time.Second, - Delay: 2 * time.Second, + Delay: 5 * time.Second, } if _, err := stateConf.WaitForStateContext(ctx); err != nil { return fmt.Errorf("error waiting for NKS NodePool (%s) to become activating: %s", nodePoolName, err) @@ -345,7 +413,7 @@ func waitForNKSNodePoolActive(ctx context.Context, d *schema.ResourceData, confi return nil } -func getNKSNodePool(ctx context.Context, config *ProviderConfig, uuid string, nodePoolName string) (*vnks.NodePoolRes, error) { +func getNKSNodePool(ctx context.Context, config *ProviderConfig, uuid string, nodePoolName string) (*vnks.NodePool, error) { nps, err := getNKSNodePools(ctx, config, uuid) if err != nil { return nil, err @@ -358,7 +426,7 @@ func getNKSNodePool(ctx context.Context, config *ProviderConfig, uuid string, no return nil, nil } -func getNKSNodePools(ctx context.Context, config *ProviderConfig, uuid string) 
([]*vnks.NodePoolRes, error) { +func getNKSNodePools(ctx context.Context, config *ProviderConfig, uuid string) ([]*vnks.NodePool, error) { resp, err := config.Client.vnks.V2Api.ClustersUuidNodePoolGet(ctx, ncloud.String(uuid)) if err != nil { return nil, err diff --git a/ncloud/resource_ncloud_nks_node_pool_test.go b/ncloud/resource_ncloud_nks_node_pool_test.go index 679a33db9..c67c71fec 100644 --- a/ncloud/resource_ncloud_nks_node_pool_test.go +++ b/ncloud/resource_ncloud_nks_node_pool_test.go @@ -14,7 +14,7 @@ import ( ) func TestAccResourceNcloudNKSNodePool_basic(t *testing.T) { - var nodePool vnks.NodePoolRes + var nodePool vnks.NodePool clusterName := getTestClusterName() resourceName := "ncloud_nks_node_pool.node_pool" region, clusterType, productCode, k8sVersion := getRegionAndNKSType() @@ -43,7 +43,7 @@ func TestAccResourceNcloudNKSNodePool_basic(t *testing.T) { } func TestAccResourceNcloudNKSNodePool_publicNetwork(t *testing.T) { - var nodePool vnks.NodePoolRes + var nodePool vnks.NodePool clusterName := getTestClusterName() resourceName := "ncloud_nks_node_pool.node_pool" region, clusterType, productCode, k8sVersion := getRegionAndNKSType() @@ -67,7 +67,7 @@ func TestAccResourceNcloudNKSNodePool_publicNetwork(t *testing.T) { } func TestAccResourceNcloudNKSNodePool_updateNodeCountAndAutoScale(t *testing.T) { - var nodePool vnks.NodePoolRes + var nodePool vnks.NodePool clusterName := getTestClusterName() region, clusterType, productCode, k8sVersion := getRegionAndNKSType() resourceName := "ncloud_nks_node_pool.node_pool" @@ -99,6 +99,36 @@ func TestAccResourceNcloudNKSNodePool_updateNodeCountAndAutoScale(t *testing.T) }) } +func TestAccResourceNcloudNKSNodePool_upgrade(t *testing.T) { + var nodePool vnks.NodePool + clusterName := "m3-" + getTestClusterName() + region, clusterType, productCode, k8sVersion := getRegionAndNKSType() + resourceName := "ncloud_nks_node_pool.node_pool" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNKSNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, clusterType, productCode, 1, TF_TEST_NKS_LOGIN_KEY, k8sVersion, region), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSNodePoolExists(resourceName, &nodePool), + resource.TestCheckResourceAttr(resourceName, "node_count", "1"), + ), + Destroy: false, + }, + { + Config: testAccResourceNcloudNKSNodePoolConfig(clusterName, clusterType, productCode, 1, TF_TEST_NKS_LOGIN_KEY, "1.25.8-nks.1", region), + Check: resource.ComposeTestCheckFunc( + testAccCheckNKSNodePoolExists(resourceName, &nodePool), + resource.TestCheckResourceAttr(resourceName, "k8s_version", "1.25.8-nks.1"), + ), + }, + }, + }) +} + func TestAccResourceNcloudNKSNodePool_invalidNodeCount(t *testing.T) { clusterName := getTestClusterName() region, clusterType, productCode, k8sVersion := getRegionAndNKSType() @@ -161,7 +191,8 @@ resource "ncloud_nks_node_pool" "node_pool" { node_pool_name = "%[1]s" node_count = %[4]d product_code = "%[3]s" - subnet_no = ncloud_subnet.subnet.id + k8s_version = "%[6]s" + subnet_no_list = [ ncloud_subnet.subnet.id] autoscale { enabled = false min = 1 @@ -218,7 +249,7 @@ resource "ncloud_nks_node_pool" "node_pool" { node_pool_name = "%[1]s" node_count = %[4]d product_code = "%[3]s" - subnet_no = ncloud_subnet.subnet.id + subnet_no_list = [ ncloud_subnet.subnet.id] autoscale { enabled = false min = 1 @@ -274,7 +305,7 @@ resource 
"ncloud_nks_node_pool" "node_pool" { node_pool_name = "%[1]s" node_count = %[4]d product_code = "%[3]s" - subnet_no = ncloud_subnet.subnet.id + subnet_no_list = [ ncloud_subnet.subnet.id] autoscale { enabled = true min = 1 @@ -284,7 +315,7 @@ resource "ncloud_nks_node_pool" "node_pool" { `, name, clusterType, productCode, nodeCount, loginKey, version, region) } -func testAccCheckNKSNodePoolExists(n string, nodePool *vnks.NodePoolRes) resource.TestCheckFunc { +func testAccCheckNKSNodePoolExists(n string, nodePool *vnks.NodePool) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/ncloud/structures.go b/ncloud/structures.go index 79de7d77e..ae76ae02b 100644 --- a/ncloud/structures.go +++ b/ncloud/structures.go @@ -2,6 +2,7 @@ package ncloud import ( "github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "reflect" "strconv" @@ -321,7 +322,8 @@ func getInt32FromString(v interface{}, ok bool) *int32 { } } -func expandStringInterfaceListToInt32List(list []interface{}) (res []*int32) { +func expandStringInterfaceListToInt32List(list []interface{}) []*int32 { + res := make([]*int32, 0) for _, v := range list { intV, err := strconv.Atoi(v.(string)) if err == nil { @@ -331,37 +333,158 @@ func expandStringInterfaceListToInt32List(list []interface{}) (res []*int32) { return res } -func flattenInt32ListToStringList(list []*int32) (res []*string) { +func flattenInt32ListToStringList(list []*int32) []*string { + res := make([]*string, 0) for _, v := range list { res = append(res, ncloud.IntString(int(ncloud.Int32Value(v)))) } - return + return res } -func flattenNKSClusterLogInput(logInput *vnks.ClusterLogInput) []map[string]interface{} { +func flattenNKSClusterLogInput[T *vnks.ClusterLogInput | *vnks.AuditLogDto](logInput T) []map[string]interface{} { if logInput == nil { return nil } + var audit bool + switch v := any(logInput).(type) { + case *vnks.ClusterLogInput: + audit = ncloud.BoolValue(v.Audit) + case *vnks.AuditLogDto: + audit = ncloud.BoolValue(v.Audit) + default: + return nil + } + return []map[string]interface{}{ { - "audit": ncloud.BoolValue(logInput.Audit), + "audit": audit, }, } } -func expandNKSClusterLogInput(logList []interface{}) *vnks.ClusterLogInput { +func expandNKSClusterLogInput[T *vnks.ClusterLogInput | *vnks.AuditLogDto](logList []interface{}, returnType T) T { if len(logList) == 0 { return nil } log := logList[0].(map[string]interface{}) - return &vnks.ClusterLogInput{ - Audit: ncloud.Bool(log["audit"].(bool)), + switch any(returnType).(type) { + case *vnks.ClusterLogInput: + return T(&vnks.ClusterLogInput{ + Audit: ncloud.Bool(log["audit"].(bool)), + }) + case *vnks.AuditLogDto: + return T(&vnks.AuditLogDto{ + Audit: ncloud.Bool(log["audit"].(bool)), + }) + default: + return nil + } + +} + +func flattenNKSClusterOIDCSpec(oidcSpec *vnks.OidcRes) []map[string]interface{} { + res := make([]map[string]interface{}, 0) + if oidcSpec == nil || !*oidcSpec.Status { + return res + } + + res = []map[string]interface{}{ + { + "issuer_url": ncloud.StringValue(oidcSpec.IssuerURL), + "client_id": ncloud.StringValue(oidcSpec.ClientId), + "username_claim": ncloud.StringValue(oidcSpec.UsernameClaim), + "username_prefix": ncloud.StringValue(oidcSpec.UsernamePrefix), + "groups_claim": ncloud.StringValue(oidcSpec.GroupsClaim), + "groups_prefix": ncloud.StringValue(oidcSpec.GroupsPrefix), + "required_claim": ncloud.StringValue(oidcSpec.RequiredClaim), + }, 
+ } + return res +} + +func expandNKSClusterOIDCSpec(oidc []interface{}) *vnks.UpdateOidcDto { + res := &vnks.UpdateOidcDto{Status: ncloud.Bool(false)} + if len(oidc) == 0 { + return res + } + + oidcSpec := oidc[0].(map[string]interface{}) + if oidcSpec["issuer_url"].(string) != "" && oidcSpec["client_id"].(string) != "" { + res.Status = ncloud.Bool(true) + res.IssuerURL = ncloud.String(oidcSpec["issuer_url"].(string)) + res.ClientId = ncloud.String(oidcSpec["client_id"].(string)) + + usernameClaim, ok := oidcSpec["username_claim"] + if ok { + res.UsernameClaim = ncloud.String(usernameClaim.(string)) + } + usernamePrefix, ok := oidcSpec["username_prefix"] + if ok { + res.UsernamePrefix = ncloud.String(usernamePrefix.(string)) + } + groupsClaim, ok := oidcSpec["groups_claim"] + if ok { + res.GroupsClaim = ncloud.String(groupsClaim.(string)) + } + groupsPrefix, ok := oidcSpec["groups_prefix"] + if ok { + res.GroupsPrefix = ncloud.String(groupsPrefix.(string)) + } + requiredClaims, ok := oidcSpec["required_claim"] + if ok { + res.RequiredClaim = ncloud.String(requiredClaims.(string)) + } } + + return res } -func flattenNKSNodePoolAutoScale(ao *vnks.AutoscaleOption) (res []map[string]interface{}) { +func flattenNKSClusterIPAclEntries(ipAcl *vnks.IpAclsRes) *schema.Set { + + ipAclList := schema.NewSet(schema.HashResource(resourceNcloudNKSCluster().Schema["ip_acl"].Elem.(*schema.Resource)), []interface{}{}) + + for _, entry := range ipAcl.Entries { + m := map[string]interface{}{ + "action": *entry.Action, + "address": *entry.Address, + } + if entry.Comment != nil { + m["comment"] = *entry.Comment + } + ipAclList.Add(m) + } + + return ipAclList + +} + +func expandNKSClusterIPAcl(acl interface{}) []*vnks.IpAclsEntriesDto { + if acl == nil { + return nil + } + + set := acl.(*schema.Set) + res := make([]*vnks.IpAclsEntriesDto, 0) + for _, raw := range set.List() { + entry := raw.(map[string]interface{}) + + add := &vnks.IpAclsEntriesDto{ + Address: ncloud.String(entry["address"].(string)), + Action: ncloud.String(entry["action"].(string)), + } + if comment, exist := entry["comment"].(string); exist { + add.Comment = ncloud.String(comment) + } + res = append(res, add) + } + + return res +} + +func flattenNKSNodePoolAutoScale(ao *vnks.AutoscaleOption) []map[string]interface{} { + res := make([]map[string]interface{}, 0) if ao == nil { - return + return res } m := map[string]interface{}{ "enabled": ncloud.BoolValue(ao.Enabled), @@ -369,7 +492,7 @@ func flattenNKSNodePoolAutoScale(ao *vnks.AutoscaleOption) (res []map[string]int "max": ncloud.Int32Value(ao.Max), } res = append(res, m) - return + return res } func expandNKSNodePoolAutoScale(as []interface{}) *vnks.AutoscalerUpdate { @@ -384,9 +507,10 @@ func expandNKSNodePoolAutoScale(as []interface{}) *vnks.AutoscalerUpdate { } } -func flattenNKSWorkerNodes(wns []*vnks.WorkerNode) (res []map[string]interface{}) { +func flattenNKSWorkerNodes(wns []*vnks.WorkerNode) []map[string]interface{} { + res := make([]map[string]interface{}, 0) if wns == nil { - return + return res } for _, wn := range wns { m := map[string]interface{}{ @@ -402,7 +526,7 @@ func flattenNKSWorkerNodes(wns []*vnks.WorkerNode) (res []map[string]interface{} res = append(res, m) } - return + return res } func expandSourceBuildEnvVarsParams(eVars []interface{}) ([]*sourcebuild.ProjectEnvEnvVars, error) { diff --git a/ncloud/structures_test.go b/ncloud/structures_test.go index 7a2fb48e4..b7d8188e0 100644 --- a/ncloud/structures_test.go +++ b/ncloud/structures_test.go @@ -2,6 +2,7 @@ package 
ncloud
 
 import (
 	"github.com/NaverCloudPlatform/ncloud-sdk-go-v2/services/vnks"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"reflect"
 	"testing"
@@ -737,7 +738,7 @@ func TestExpandNKSClusterLogInput(t *testing.T) {
 		},
 	}
 
-	result := expandNKSClusterLogInput(log)
+	result := expandNKSClusterLogInput(log, &vnks.AuditLogDto{})
 
 	if result == nil {
 		t.Fatal("result was nil")
@@ -748,6 +749,148 @@
 	}
 }
 
+func TestFlattenNKSClusterOIDCSpec(t *testing.T) {
+	oidcSpec := &vnks.OidcRes{
+		Status:         ncloud.Bool(true),
+		UsernameClaim:  ncloud.String("email"),
+		UsernamePrefix: ncloud.String("username:"),
+		IssuerURL:      ncloud.String("https://sso.ntruss.com/iss"),
+		ClientId:       ncloud.String("testClient"),
+		GroupsPrefix:   ncloud.String("groups:"),
+		GroupsClaim:    ncloud.String("group"),
+		RequiredClaim:  ncloud.String("iss=https://sso.ntruss.com/iss"),
+	}
+
+	result := flattenNKSClusterOIDCSpec(oidcSpec)
+
+	if len(result) == 0 {
+		t.Fatal("empty result")
+	}
+
+	r := result[0]
+
+	if r["username_claim"].(string) != "email" {
+		t.Fatalf("expected result username_claim to be 'email', but was %v", r["username_claim"])
+	}
+
+	if r["username_prefix"].(string) != "username:" {
+		t.Fatalf("expected result username_prefix to be 'username:', but was %v", r["username_prefix"])
+	}
+
+	if r["issuer_url"].(string) != "https://sso.ntruss.com/iss" {
+		t.Fatalf("expected result issuer_url to be 'https://sso.ntruss.com/iss', but was %v", r["issuer_url"])
+	}
+
+	if r["client_id"].(string) != "testClient" {
+		t.Fatalf("expected result client_id to be 'testClient', but was %v", r["client_id"])
+	}
+
+	if r["groups_claim"].(string) != "group" {
+		t.Fatalf("expected result groups_claim to be 'group', but was %v", r["groups_claim"])
+	}
+
+	if r["groups_prefix"].(string) != "groups:" {
+		t.Fatalf("expected result groups_prefix to be 'groups:', but was %v", r["groups_prefix"])
+	}
+
+	if r["required_claim"].(string) != "iss=https://sso.ntruss.com/iss" {
+		t.Fatalf("expected result required_claim to be 'iss=https://sso.ntruss.com/iss', but was %v", r["required_claim"])
+	}
+}
+
+func TestExpandNKSClusterOIDCSpec(t *testing.T) {
+	oidc := []interface{}{
+		map[string]interface{}{
+			"issuer_url":      "https://sso.ntruss.com/iss",
+			"client_id":       "testClient",
+			"username_claim":  "email",
+			"username_prefix": "username:",
+			"groups_claim":    "group",
+			"groups_prefix":   "groups:",
+			"required_claim":  "iss=https://sso.ntruss.com/iss",
+		},
+	}
+
+	result := expandNKSClusterOIDCSpec(oidc)
+
+	if result == nil {
+		t.Fatal("result was nil")
+	}
+
+	expected := &vnks.UpdateOidcDto{
+		Status:         ncloud.Bool(true),
+		IssuerURL:      ncloud.String("https://sso.ntruss.com/iss"),
+		ClientId:       ncloud.String("testClient"),
+		UsernameClaim:  ncloud.String("email"),
+		UsernamePrefix: ncloud.String("username:"),
+		GroupsClaim:    ncloud.String("group"),
+		GroupsPrefix:   ncloud.String("groups:"),
+		RequiredClaim:  ncloud.String("iss=https://sso.ntruss.com/iss"),
+	}
+
+	if !reflect.DeepEqual(result, expected) {
+		t.Fatalf("expected %v, but got %v", expected, result)
+	}
+}
+
+func TestFlattenNKSClusterIPAcl(t *testing.T) {
+	ipAcl := &vnks.IpAclsRes{
+		DefaultAction: ncloud.String("deny"),
+		Entries: []*vnks.IpAclsEntriesRes{
+			{Address: ncloud.String("10.0.1.0/24"),
+				Action:  ncloud.String("allow"),
+				Comment: ncloud.String("master ip"),
+			},
+		},
+	}
+
+	result := flattenNKSClusterIPAclEntries(ipAcl)
+
+	if len(result.List()) == 0 {
+		t.Fatal("empty result")
+	}
+
+	r := result.List()[0]
+	rr := r.(map[string]interface{})
+	if rr["address"].(string) != "10.0.1.0/24" {
+		t.Fatalf("expected result address to be '10.0.1.0/24', but was %v", rr["address"])
+	}
+
+	if rr["action"].(string) != "allow" {
+		t.Fatalf("expected result action to be 'allow', but was %v", rr["action"])
+	}
+
+	if rr["comment"].(string) != "master ip" {
+		t.Fatalf("expected result comment to be 'master ip', but was %v", rr["comment"])
+	}
+}
+
+func TestExpandNKSClusterIPAcl(t *testing.T) {
+	ipAclList := schema.NewSet(schema.HashResource(resourceNcloudNKSCluster().Schema["ip_acl"].Elem.(*schema.Resource)), []interface{}{})
+
+	ipAclList.Add(map[string]interface{}{
+		"action":  "allow",
+		"address": "10.0.1.0/24",
+		"comment": "master ip",
+	})
+
+	result := expandNKSClusterIPAcl(ipAclList)
+
+	if result == nil {
+		t.Fatal("result was nil")
+	}
+
+	expected := []*vnks.IpAclsEntriesDto{{
+		Address: ncloud.String("10.0.1.0/24"),
+		Action:  ncloud.String("allow"),
+		Comment: ncloud.String("master ip"),
+	}}
+
+	if !reflect.DeepEqual(result, expected) {
+		t.Fatalf("expected %v, but got %v", expected, result)
+	}
+}
+
 func TestFlattenNKSNodePoolAutoscale(t *testing.T) {
 	expanded := &vnks.AutoscaleOption{
 		Enabled: ncloud.Bool(true),