diff --git a/oracledatabase_cloud_vmcluster_basic/backing_file.tf b/oracledatabase_cloud_vmcluster_basic/backing_file.tf
new file mode 100644
index 00000000..c60b1199
--- /dev/null
+++ b/oracledatabase_cloud_vmcluster_basic/backing_file.tf
@@ -0,0 +1,15 @@
+# This file has some scaffolding to make sure that names are unique and that
+# a region and zone are selected when you try to create your Terraform resources.
+
+locals {
+ name_suffix = "${random_pet.suffix.id}"
+}
+
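+# Two-word random suffix appended to resource names so repeated runs don't collide.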
+resource "random_pet" "suffix" {
+ length = 2
+}
+
+provider "google" {
+ region = "us-central1"
+ zone = "us-central1-c"
+}
diff --git a/oracledatabase_cloud_vmcluster_basic/main.tf b/oracledatabase_cloud_vmcluster_basic/main.tf
new file mode 100644
index 00000000..789a264b
--- /dev/null
+++ b/oracledatabase_cloud_vmcluster_basic/main.tf
@@ -0,0 +1,34 @@
+resource "google_oracle_database_cloud_vm_cluster" "my_vmcluster"{
+ cloud_vm_cluster_id = "my-instance-${local.name_suffix}"
+ display_name = "my-instance-${local.name_suffix} displayname"
+ location = "us-east4"
+ project = "my-project-${local.name_suffix}"
+ exadata_infrastructure = google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures.id
+ network = data.google_compute_network.default.id
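+ # Client and backup subnet CIDR ranges for the cluster, carved out of the VPC network above.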
+ cidr = "10.5.0.0/24"
+ backup_subnet_cidr = "10.6.0.0/24"
+ properties {
+ license_type = "LICENSE_INCLUDED"
+ ssh_public_keys = ["ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCz1X2744t+6vRLmE5u6nHi6/QWh8bQDgHmd+OIxRQIGA/IWUtCs2FnaCNZcqvZkaeyjk5v0lTA/n+9jvO42Ipib53athrfVG8gRt8fzPL66C6ZqHq+6zZophhrCdfJh/0G4x9xJh5gdMprlaCR1P8yAaVvhBQSKGc4SiIkyMNBcHJ5YTtMQMTfxaB4G1sHZ6SDAY9a6Cq/zNjDwfPapWLsiP4mRhE5SSjJX6l6EYbkm0JeLQg+AbJiNEPvrvDp1wtTxzlPJtIivthmLMThFxK7+DkrYFuLvN5AHUdo9KTDLvHtDCvV70r8v0gafsrKkM/OE9Jtzoo0e1N/5K/ZdyFRbAkFT4QSF3nwpbmBWLf2Evg//YyEuxnz4CwPqFST2mucnrCCGCVWp1vnHZ0y30nM35njLOmWdRDFy5l27pKUTwLp02y3UYiiZyP7d3/u5pKiN4vC27VuvzprSdJxWoAvluOiDeRh+/oeQDowxoT/Oop8DzB9uJmjktXw8jyMW2+Rpg+ENQqeNgF1OGlEzypaWiRskEFlkpLb4v/s3ZDYkL1oW0Nv/J8LTjTOTEaYt2Udjoe9x2xWiGnQixhdChWuG+MaoWffzUgx1tsVj/DBXijR5DjkPkrA1GA98zd3q8GKEaAdcDenJjHhNYSd4+rE9pIsnYn7fo5X/tFfcQH1XQ== nobody@google.com"]
+ cpu_core_count = "4"
+ gi_version = "19.0.0.0"
+ hostname_prefix = "hostname1"
+ }
+}
+
+resource "google_oracle_database_cloud_exadata_infrastructure" "cloudExadataInfrastructures"{
+ cloud_exadata_infrastructure_id = "my-exadata-${local.name_suffix}"
+ display_name = "my-exadata-${local.name_suffix} displayname"
+ location = "us-east4"
+ project = "my-project-${local.name_suffix}"
+ properties {
+ shape = "Exadata.X9M"
+ compute_count= "2"
+ storage_count= "3"
+ }
+}
+
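+# Looks up an existing VPC network named "new"; this example assumes it already exists in the project.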
+data "google_compute_network" "default" {
+ name = "new"
+ project = "my-project-${local.name_suffix}"
+}
diff --git a/oracledatabase_cloud_vmcluster_basic/motd b/oracledatabase_cloud_vmcluster_basic/motd
new file mode 100644
index 00000000..45a906e8
--- /dev/null
+++ b/oracledatabase_cloud_vmcluster_basic/motd
@@ -0,0 +1,7 @@
+===
+
+These examples use real resources that will be billed to the
+Google Cloud Platform project you use, so make sure that you
+run "terraform destroy" before quitting!
+
+===
diff --git a/oracledatabase_cloud_vmcluster_basic/tutorial.md b/oracledatabase_cloud_vmcluster_basic/tutorial.md
new file mode 100644
index 00000000..e48c56c8
--- /dev/null
+++ b/oracledatabase_cloud_vmcluster_basic/tutorial.md
@@ -0,0 +1,79 @@
+# Oracle Database Cloud VM Cluster Basic - Terraform
+
+## Setup
+
+
+
+Welcome to Terraform in Google Cloud Shell! First, let us know which project you'd like to use with Terraform.
+
+
+
+Terraform provisions real GCP resources, so anything you create in this session will be billed against this project.
+
+## Terraforming!
+
+Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command
+to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up
+the project ID from the environment variable.
+
+```bash
+export GOOGLE_CLOUD_PROJECT={{project-id}}
+```
+
+After that, let's get Terraform started. Run the following to pull in the providers.
+
+```bash
+terraform init
+```
+
+With the providers downloaded and a project set, you're ready to use Terraform. Go ahead!
+
+```bash
+terraform apply
+```
+
+Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan.
+
+```bash
+yes
+```
+
+
+## Post-Apply
+
+### Editing your config
+
+Now you've provisioned your resources in GCP! If you run a "plan", you should see that no changes are needed.
+
+```bash
+terraform plan
+```
+
+So let's make a change! Try editing a number, or appending a value to one of the names, in the
+editor. Then run a 'plan' again.
+
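+For instance, one illustrative tweak in `main.tf` (the field and new value here are just an example) is to bump the CPU core count:
+
+```
+  properties {
+    # ... other settings unchanged ...
+    cpu_core_count = "6" # previously "4"
+  }
+```
+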
+```bash
+terraform plan
+```
+
+Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes
+at the 'yes' prompt.
+
+```bash
+terraform apply
+```
+
+```bash
+yes
+```
+
+## Cleanup
+
+Run the following to remove the resources Terraform provisioned:
+
+```bash
+terraform destroy
+```
+```bash
+yes
+```
diff --git a/oracledatabase_cloud_vmcluster_full/backing_file.tf b/oracledatabase_cloud_vmcluster_full/backing_file.tf
new file mode 100644
index 00000000..c60b1199
--- /dev/null
+++ b/oracledatabase_cloud_vmcluster_full/backing_file.tf
@@ -0,0 +1,15 @@
+# This file has some scaffolding to make sure that names are unique and that
+# a region and zone are selected when you try to create your Terraform resources.
+
+locals {
+ name_suffix = "${random_pet.suffix.id}"
+}
+
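+# Two-word random suffix appended to resource names so repeated runs don't collide.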
+resource "random_pet" "suffix" {
+ length = 2
+}
+
+provider "google" {
+ region = "us-central1"
+ zone = "us-central1-c"
+}
diff --git a/oracledatabase_cloud_vmcluster_full/main.tf b/oracledatabase_cloud_vmcluster_full/main.tf
new file mode 100644
index 00000000..c0fbb54e
--- /dev/null
+++ b/oracledatabase_cloud_vmcluster_full/main.tf
@@ -0,0 +1,61 @@
+resource "google_oracle_database_cloud_vm_cluster" "my_vmcluster"{
+ cloud_vm_cluster_id = "my-instance-${local.name_suffix}"
+ display_name = "my-instance-${local.name_suffix} displayname"
+ location = "us-east4"
+ project = "my-project-${local.name_suffix}"
+ exadata_infrastructure = google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures.id
+ network = data.google_compute_network.default.id
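+ # Client and backup subnet CIDR ranges for the cluster, carved out of the VPC network above.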
+ cidr = "10.5.0.0/24"
+ backup_subnet_cidr = "10.6.0.0/24"
+ labels = {
+ label-one = "value-one"
+ }
+ properties {
+ license_type = "LICENSE_INCLUDED"
+ ssh_public_keys = ["ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCz1X2744t+6vRLmE5u6nHi6/QWh8bQDgHmd+OIxRQIGA/IWUtCs2FnaCNZcqvZkaeyjk5v0lTA/n+9jvO42Ipib53athrfVG8gRt8fzPL66C6ZqHq+6zZophhrCdfJh/0G4x9xJh5gdMprlaCR1P8yAaVvhBQSKGc4SiIkyMNBcHJ5YTtMQMTfxaB4G1sHZ6SDAY9a6Cq/zNjDwfPapWLsiP4mRhE5SSjJX6l6EYbkm0JeLQg+AbJiNEPvrvDp1wtTxzlPJtIivthmLMThFxK7+DkrYFuLvN5AHUdo9KTDLvHtDCvV70r8v0gafsrKkM/OE9Jtzoo0e1N/5K/ZdyFRbAkFT4QSF3nwpbmBWLf2Evg//YyEuxnz4CwPqFST2mucnrCCGCVWp1vnHZ0y30nM35njLOmWdRDFy5l27pKUTwLp02y3UYiiZyP7d3/u5pKiN4vC27VuvzprSdJxWoAvluOiDeRh+/oeQDowxoT/Oop8DzB9uJmjktXw8jyMW2+Rpg+ENQqeNgF1OGlEzypaWiRskEFlkpLb4v/s3ZDYkL1oW0Nv/J8LTjTOTEaYt2Udjoe9x2xWiGnQixhdChWuG+MaoWffzUgx1tsVj/DBXijR5DjkPkrA1GA98zd3q8GKEaAdcDenJjHhNYSd4+rE9pIsnYn7fo5X/tFfcQH1XQ== nobody@google.com"]
+ cpu_core_count = "4"
+ gi_version = "19.0.0.0"
+ time_zone {
+ id = "UTC"
+ }
+ node_count = "2"
+ ocpu_count = "4.0"
+ data_storage_size_tb = 2
+ db_node_storage_size_gb = 120
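+ # OCIDs of two DB servers on the Exadata infrastructure, looked up via the db_servers data source below.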
+ db_server_ocids = [data.google_oracle_database_db_servers.mydbserver.db_servers[0].properties[0].ocid, data.google_oracle_database_db_servers.mydbserver.db_servers[1].properties[0].ocid]
+ disk_redundancy = "HIGH"
+ sparse_diskgroup_enabled = false
+ local_backup_enabled = false
+ cluster_name = "pq-ppat4"
+ hostname_prefix = "hostname1"
+ diagnostics_data_collection_options {
+ diagnostics_events_enabled = true
+ health_monitoring_enabled = true
+ incident_logs_enabled = true
+ }
+ memory_size_gb = 60
+ }
+}
+
+resource "google_oracle_database_cloud_exadata_infrastructure" "cloudExadataInfrastructures"{
+ cloud_exadata_infrastructure_id = "my-exadata-${local.name_suffix}"
+ display_name = "my-exadata-${local.name_suffix} displayname"
+ location = "us-east4"
+ project = "my-project-${local.name_suffix}"
+ properties {
+ shape = "Exadata.X9M"
+ compute_count= "2"
+ storage_count= "3"
+ }
+}
+
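+# Looks up an existing VPC network named "new"; this example assumes it already exists in the project.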
+data "google_compute_network" "default" {
+ name = "new"
+ project = "my-project-${local.name_suffix}"
+}
+
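+# Lists the DB servers provisioned on the Exadata infrastructure so their OCIDs can be referenced by the VM cluster above.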
+data "google_oracle_database_db_servers" "mydbserver" {
+ location = "us-east4"
+ project = "my-project-${local.name_suffix}"
+ cloud_exadata_infrastructure = google_oracle_database_cloud_exadata_infrastructure.cloudExadataInfrastructures.cloud_exadata_infrastructure_id
+}
diff --git a/oracledatabase_cloud_vmcluster_full/motd b/oracledatabase_cloud_vmcluster_full/motd
new file mode 100644
index 00000000..45a906e8
--- /dev/null
+++ b/oracledatabase_cloud_vmcluster_full/motd
@@ -0,0 +1,7 @@
+===
+
+These examples use real resources that will be billed to the
+Google Cloud Platform project you use, so make sure that you
+run "terraform destroy" before quitting!
+
+===
diff --git a/oracledatabase_cloud_vmcluster_full/tutorial.md b/oracledatabase_cloud_vmcluster_full/tutorial.md
new file mode 100644
index 00000000..29c57ea5
--- /dev/null
+++ b/oracledatabase_cloud_vmcluster_full/tutorial.md
@@ -0,0 +1,79 @@
+# Oracle Database Cloud VM Cluster Full - Terraform
+
+## Setup
+
+
+
+Welcome to Terraform in Google Cloud Shell! First, let us know which project you'd like to use with Terraform.
+
+
+
+Terraform provisions real GCP resources, so anything you create in this session will be billed against this project.
+
+## Terraforming!
+
+Let's use {{project-id}} with Terraform! Click the Cloud Shell icon below to copy the command
+to your shell, and then run it from the shell by pressing Enter/Return. Terraform will pick up
+the project ID from the environment variable.
+
+```bash
+export GOOGLE_CLOUD_PROJECT={{project-id}}
+```
+
+After that, let's get Terraform started. Run the following to pull in the providers.
+
+```bash
+terraform init
+```
+
+With the providers downloaded and a project set, you're ready to use Terraform. Go ahead!
+
+```bash
+terraform apply
+```
+
+Terraform will show you what it plans to do, and prompt you to accept. Type "yes" to accept the plan.
+
+```bash
+yes
+```
+
+
+## Post-Apply
+
+### Editing your config
+
+Now you've provisioned your resources in GCP! If you run a "plan", you should see that no changes are needed.
+
+```bash
+terraform plan
+```
+
+So let's make a change! Try editing a number, or appending a value to one of the names, in the
+editor. Then run a 'plan' again.
+
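+For instance, one illustrative tweak in `main.tf` (the field and new value here are just an example) is to bump the CPU core count:
+
+```
+  properties {
+    # ... other settings unchanged ...
+    cpu_core_count = "6" # previously "4"
+  }
+```
+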
+```bash
+terraform plan
+```
+
+Afterwards you can run an apply, which implicitly does a plan and shows you the intended changes
+at the 'yes' prompt.
+
+```bash
+terraform apply
+```
+
+```bash
+yes
+```
+
+## Cleanup
+
+Run the following to remove the resources Terraform provisioned:
+
+```bash
+terraform destroy
+```
+```bash
+yes
+```