From bb386e04ca0206b2fa2485397a468340972ac932 Mon Sep 17 00:00:00 2001 From: Tinyblargon <76069640+Tinyblargon@users.noreply.github.com> Date: Fri, 17 May 2024 18:38:03 +0200 Subject: [PATCH 1/6] refactor: remove unused variables --- proxmox/resource_vm_qemu.go | 120 ++++++++++++++++++------------------ 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/proxmox/resource_vm_qemu.go b/proxmox/resource_vm_qemu.go index 7d0cb2b4..aed78f8b 100755 --- a/proxmox/resource_vm_qemu.go +++ b/proxmox/resource_vm_qemu.go @@ -2088,9 +2088,9 @@ func mapFromStruct_QemuIdeDisks(config *pxapi.QemuIdeDisks) []interface{} { if config == nil { return nil } - ide_0 := mapFromStruct_QemuIdeStorage(config.Disk_0, "ide0") - ide_1 := mapFromStruct_QemuIdeStorage(config.Disk_1, "ide1") - ide_2 := mapFromStruct_QemuIdeStorage(config.Disk_2, "ide2") + ide_0 := mapFromStruct_QemuIdeStorage(config.Disk_0) + ide_1 := mapFromStruct_QemuIdeStorage(config.Disk_1) + ide_2 := mapFromStruct_QemuIdeStorage(config.Disk_2) if ide_0 == nil && ide_1 == nil && ide_2 == nil { return nil } @@ -2103,7 +2103,7 @@ func mapFromStruct_QemuIdeDisks(config *pxapi.QemuIdeDisks) []interface{} { } } -func mapFromStruct_QemuIdeStorage(config *pxapi.QemuIdeStorage, setting string) []interface{} { +func mapFromStruct_QemuIdeStorage(config *pxapi.QemuIdeStorage) []interface{} { if config == nil { return nil } @@ -2157,17 +2157,17 @@ func mapFromStruct_QemuSataDisks(config *pxapi.QemuSataDisks) []interface{} { } return []interface{}{ map[string]interface{}{ - "sata0": mapFromStruct_QemuSataStorage(config.Disk_0, "sata0"), - "sata1": mapFromStruct_QemuSataStorage(config.Disk_1, "sata1"), - "sata2": mapFromStruct_QemuSataStorage(config.Disk_2, "sata2"), - "sata3": mapFromStruct_QemuSataStorage(config.Disk_3, "sata3"), - "sata4": mapFromStruct_QemuSataStorage(config.Disk_4, "sata4"), - "sata5": mapFromStruct_QemuSataStorage(config.Disk_5, "sata5"), + "sata0": mapFromStruct_QemuSataStorage(config.Disk_0), + "sata1": mapFromStruct_QemuSataStorage(config.Disk_1), + "sata2": mapFromStruct_QemuSataStorage(config.Disk_2), + "sata3": mapFromStruct_QemuSataStorage(config.Disk_3), + "sata4": mapFromStruct_QemuSataStorage(config.Disk_4), + "sata5": mapFromStruct_QemuSataStorage(config.Disk_5), }, } } -func mapFromStruct_QemuSataStorage(config *pxapi.QemuSataStorage, setting string) []interface{} { +func mapFromStruct_QemuSataStorage(config *pxapi.QemuSataStorage) []interface{} { if config == nil { return nil } @@ -2221,42 +2221,42 @@ func mapFromStruct_QemuScsiDisks(config *pxapi.QemuScsiDisks) []interface{} { } return []interface{}{ map[string]interface{}{ - "scsi0": mapFromStruct_QemuScsiStorage(config.Disk_0, "scsi0"), - "scsi1": mapFromStruct_QemuScsiStorage(config.Disk_1, "scsi1"), - "scsi2": mapFromStruct_QemuScsiStorage(config.Disk_2, "scsi2"), - "scsi3": mapFromStruct_QemuScsiStorage(config.Disk_3, "scsi3"), - "scsi4": mapFromStruct_QemuScsiStorage(config.Disk_4, "scsi4"), - "scsi5": mapFromStruct_QemuScsiStorage(config.Disk_5, "scsi5"), - "scsi6": mapFromStruct_QemuScsiStorage(config.Disk_6, "scsi6"), - "scsi7": mapFromStruct_QemuScsiStorage(config.Disk_7, "scsi7"), - "scsi8": mapFromStruct_QemuScsiStorage(config.Disk_8, "scsi8"), - "scsi9": mapFromStruct_QemuScsiStorage(config.Disk_9, "scsi9"), - "scsi10": mapFromStruct_QemuScsiStorage(config.Disk_10, "scsi10"), - "scsi11": mapFromStruct_QemuScsiStorage(config.Disk_11, "scsi11"), - "scsi12": mapFromStruct_QemuScsiStorage(config.Disk_12, "scsi12"), - "scsi13": 
mapFromStruct_QemuScsiStorage(config.Disk_13, "scsi13"), - "scsi14": mapFromStruct_QemuScsiStorage(config.Disk_14, "scsi14"), - "scsi15": mapFromStruct_QemuScsiStorage(config.Disk_15, "scsi15"), - "scsi16": mapFromStruct_QemuScsiStorage(config.Disk_16, "scsi16"), - "scsi17": mapFromStruct_QemuScsiStorage(config.Disk_17, "scsi17"), - "scsi18": mapFromStruct_QemuScsiStorage(config.Disk_18, "scsi18"), - "scsi19": mapFromStruct_QemuScsiStorage(config.Disk_19, "scsi19"), - "scsi20": mapFromStruct_QemuScsiStorage(config.Disk_20, "scsi20"), - "scsi21": mapFromStruct_QemuScsiStorage(config.Disk_21, "scsi21"), - "scsi22": mapFromStruct_QemuScsiStorage(config.Disk_22, "scsi22"), - "scsi23": mapFromStruct_QemuScsiStorage(config.Disk_23, "scsi23"), - "scsi24": mapFromStruct_QemuScsiStorage(config.Disk_24, "scsi24"), - "scsi25": mapFromStruct_QemuScsiStorage(config.Disk_25, "scsi25"), - "scsi26": mapFromStruct_QemuScsiStorage(config.Disk_26, "scsi26"), - "scsi27": mapFromStruct_QemuScsiStorage(config.Disk_27, "scsi27"), - "scsi28": mapFromStruct_QemuScsiStorage(config.Disk_28, "scsi28"), - "scsi29": mapFromStruct_QemuScsiStorage(config.Disk_29, "scsi29"), - "scsi30": mapFromStruct_QemuScsiStorage(config.Disk_30, "scsi30"), + "scsi0": mapFromStruct_QemuScsiStorage(config.Disk_0), + "scsi1": mapFromStruct_QemuScsiStorage(config.Disk_1), + "scsi2": mapFromStruct_QemuScsiStorage(config.Disk_2), + "scsi3": mapFromStruct_QemuScsiStorage(config.Disk_3), + "scsi4": mapFromStruct_QemuScsiStorage(config.Disk_4), + "scsi5": mapFromStruct_QemuScsiStorage(config.Disk_5), + "scsi6": mapFromStruct_QemuScsiStorage(config.Disk_6), + "scsi7": mapFromStruct_QemuScsiStorage(config.Disk_7), + "scsi8": mapFromStruct_QemuScsiStorage(config.Disk_8), + "scsi9": mapFromStruct_QemuScsiStorage(config.Disk_9), + "scsi10": mapFromStruct_QemuScsiStorage(config.Disk_10), + "scsi11": mapFromStruct_QemuScsiStorage(config.Disk_11), + "scsi12": mapFromStruct_QemuScsiStorage(config.Disk_12), + "scsi13": mapFromStruct_QemuScsiStorage(config.Disk_13), + "scsi14": mapFromStruct_QemuScsiStorage(config.Disk_14), + "scsi15": mapFromStruct_QemuScsiStorage(config.Disk_15), + "scsi16": mapFromStruct_QemuScsiStorage(config.Disk_16), + "scsi17": mapFromStruct_QemuScsiStorage(config.Disk_17), + "scsi18": mapFromStruct_QemuScsiStorage(config.Disk_18), + "scsi19": mapFromStruct_QemuScsiStorage(config.Disk_19), + "scsi20": mapFromStruct_QemuScsiStorage(config.Disk_20), + "scsi21": mapFromStruct_QemuScsiStorage(config.Disk_21), + "scsi22": mapFromStruct_QemuScsiStorage(config.Disk_22), + "scsi23": mapFromStruct_QemuScsiStorage(config.Disk_23), + "scsi24": mapFromStruct_QemuScsiStorage(config.Disk_24), + "scsi25": mapFromStruct_QemuScsiStorage(config.Disk_25), + "scsi26": mapFromStruct_QemuScsiStorage(config.Disk_26), + "scsi27": mapFromStruct_QemuScsiStorage(config.Disk_27), + "scsi28": mapFromStruct_QemuScsiStorage(config.Disk_28), + "scsi29": mapFromStruct_QemuScsiStorage(config.Disk_29), + "scsi30": mapFromStruct_QemuScsiStorage(config.Disk_30), }, } } -func mapFromStruct_QemuScsiStorage(config *pxapi.QemuScsiStorage, setting string) []interface{} { +func mapFromStruct_QemuScsiStorage(config *pxapi.QemuScsiStorage) []interface{} { if config == nil { return nil } @@ -2314,27 +2314,27 @@ func mapFromStruct_QemuVirtIODisks(config *pxapi.QemuVirtIODisks) []interface{} } return []interface{}{ map[string]interface{}{ - "virtio0": mapFromStruct_QemuVirtIOStorage(config.Disk_0, "virtio0"), - "virtio1": mapFromStruct_QemuVirtIOStorage(config.Disk_1, 
"virtio1"), - "virtio2": mapFromStruct_QemuVirtIOStorage(config.Disk_2, "virtio2"), - "virtio3": mapFromStruct_QemuVirtIOStorage(config.Disk_3, "virtio3"), - "virtio4": mapFromStruct_QemuVirtIOStorage(config.Disk_4, "virtio4"), - "virtio5": mapFromStruct_QemuVirtIOStorage(config.Disk_5, "virtio5"), - "virtio6": mapFromStruct_QemuVirtIOStorage(config.Disk_6, "virtio6"), - "virtio7": mapFromStruct_QemuVirtIOStorage(config.Disk_7, "virtio7"), - "virtio8": mapFromStruct_QemuVirtIOStorage(config.Disk_8, "virtio8"), - "virtio9": mapFromStruct_QemuVirtIOStorage(config.Disk_9, "virtio9"), - "virtio10": mapFromStruct_QemuVirtIOStorage(config.Disk_10, "virtio10"), - "virtio11": mapFromStruct_QemuVirtIOStorage(config.Disk_11, "virtio11"), - "virtio12": mapFromStruct_QemuVirtIOStorage(config.Disk_12, "virtio12"), - "virtio13": mapFromStruct_QemuVirtIOStorage(config.Disk_13, "virtio13"), - "virtio14": mapFromStruct_QemuVirtIOStorage(config.Disk_14, "virtio14"), - "virtio15": mapFromStruct_QemuVirtIOStorage(config.Disk_15, "virtio15"), + "virtio0": mapFromStruct_QemuVirtIOStorage(config.Disk_0), + "virtio1": mapFromStruct_QemuVirtIOStorage(config.Disk_1), + "virtio2": mapFromStruct_QemuVirtIOStorage(config.Disk_2), + "virtio3": mapFromStruct_QemuVirtIOStorage(config.Disk_3), + "virtio4": mapFromStruct_QemuVirtIOStorage(config.Disk_4), + "virtio5": mapFromStruct_QemuVirtIOStorage(config.Disk_5), + "virtio6": mapFromStruct_QemuVirtIOStorage(config.Disk_6), + "virtio7": mapFromStruct_QemuVirtIOStorage(config.Disk_7), + "virtio8": mapFromStruct_QemuVirtIOStorage(config.Disk_8), + "virtio9": mapFromStruct_QemuVirtIOStorage(config.Disk_9), + "virtio10": mapFromStruct_QemuVirtIOStorage(config.Disk_10), + "virtio11": mapFromStruct_QemuVirtIOStorage(config.Disk_11), + "virtio12": mapFromStruct_QemuVirtIOStorage(config.Disk_12), + "virtio13": mapFromStruct_QemuVirtIOStorage(config.Disk_13), + "virtio14": mapFromStruct_QemuVirtIOStorage(config.Disk_14), + "virtio15": mapFromStruct_QemuVirtIOStorage(config.Disk_15), }, } } -func mapFromStruct_QemuVirtIOStorage(config *pxapi.QemuVirtIOStorage, setting string) []interface{} { +func mapFromStruct_QemuVirtIOStorage(config *pxapi.QemuVirtIOStorage) []interface{} { if config == nil { return nil } From b0c6135943764645230f7f316c9eea867ba589c9 Mon Sep 17 00:00:00 2001 From: Tinyblargon <76069640+Tinyblargon@users.noreply.github.com> Date: Sat, 18 May 2024 23:23:38 +0200 Subject: [PATCH 2/6] feat: unlock `ide3` --- proxmox/resource_vm_qemu.go | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/proxmox/resource_vm_qemu.go b/proxmox/resource_vm_qemu.go index aed78f8b..e26f73e3 100755 --- a/proxmox/resource_vm_qemu.go +++ b/proxmox/resource_vm_qemu.go @@ -479,7 +479,7 @@ func resourceVmQemu() *schema.Resource { "ide0": schema_Ide("ide0"), "ide1": schema_Ide("ide1"), "ide2": schema_Ide("ide2"), - // ide3 reserved for cloudinit + "ide3": schema_Ide("ide3"), }, }, }, @@ -2088,19 +2088,12 @@ func mapFromStruct_QemuIdeDisks(config *pxapi.QemuIdeDisks) []interface{} { if config == nil { return nil } - ide_0 := mapFromStruct_QemuIdeStorage(config.Disk_0) - ide_1 := mapFromStruct_QemuIdeStorage(config.Disk_1) - ide_2 := mapFromStruct_QemuIdeStorage(config.Disk_2) - if ide_0 == nil && ide_1 == nil && ide_2 == nil { - return nil - } return []interface{}{ map[string]interface{}{ - "ide0": ide_0, - "ide1": ide_1, - "ide2": ide_2, - }, - } + "ide0": mapFromStruct_QemuIdeStorage(config.Disk_0), + "ide1": mapFromStruct_QemuIdeStorage(config.Disk_1), + 
"ide2": mapFromStruct_QemuIdeStorage(config.Disk_2), + "ide3": mapFromStruct_QemuIdeStorage(config.Disk_3)}} } func mapFromStruct_QemuIdeStorage(config *pxapi.QemuIdeStorage) []interface{} { @@ -2462,6 +2455,7 @@ func mapToStruct_QemuIdeDisks(ide *pxapi.QemuIdeDisks, schema map[string]interfa mapToStruct_QemuIdeStorage(ide.Disk_0, "ide0", disks) mapToStruct_QemuIdeStorage(ide.Disk_1, "ide1", disks) mapToStruct_QemuIdeStorage(ide.Disk_2, "ide2", disks) + mapToStruct_QemuIdeStorage(ide.Disk_3, "ide3", disks) } func mapToStruct_QemuIdeStorage(ide *pxapi.QemuIdeStorage, key string, schema map[string]interface{}) { From 50608ca38875c8eb793a3a09e79533c8e785bacd Mon Sep 17 00:00:00 2001 From: Tinyblargon <76069640+Tinyblargon@users.noreply.github.com> Date: Sun, 19 May 2024 01:06:44 +0200 Subject: [PATCH 3/6] feat: move `cloudinit` to `disks` schema --- docs/guides/cloud_init.md | 10 +- docs/resources/vm_qemu.md | 43 +++++- examples/cloudinit_example.tf | 10 +- proxmox/resource_vm_qemu.go | 244 ++++++++++++++++++++++++++++------ 4 files changed, 255 insertions(+), 52 deletions(-) diff --git a/docs/guides/cloud_init.md b/docs/guides/cloud_init.md index 3f6a74a7..0aba1ed1 100644 --- a/docs/guides/cloud_init.md +++ b/docs/guides/cloud_init.md @@ -157,7 +157,15 @@ EOF */ cicustom = "user=local:snippets/user_data_vm-${count.index}.yml" /* Create the Cloud-Init drive on the "local-lvm" storage */ - cloudinit_cdrom_storage = "local-lvm" + disks { + ide { + ide3 { + cloudinit { + storage = "local-lvm" + } + } + } + } provisioner "remote-exec" { inline = [ diff --git a/docs/resources/vm_qemu.md b/docs/resources/vm_qemu.md index e7d79792..d1665772 100644 --- a/docs/resources/vm_qemu.md +++ b/docs/resources/vm_qemu.md @@ -133,7 +133,6 @@ The following arguments are supported in the top level resource block. | `ciuser` | `str` | | Override the default cloud-init user for provisioning. | | `cipassword` | `str` | | Override the default cloud-init user's password. Sensitive. | | `cicustom` | `str` | | Instead specifying ciuser, cipasword, etc... you can specify the path to a custom cloud-init config file here. Grants more flexibility in configuring cloud-init. | -| `cloudinit_cdrom_storage` | `str` | | Set the storage location for the cloud-init drive. Required when using cloud-init. | | `searchdomain` | `str` | | Sets default DNS search domain suffix. | | `nameserver` | `str` | | Sets default DNS server for guest. | | `sshkeys` | `str` | | Newline delimited list of SSH public keys to add to authorized keys file for the cloud-init user. | @@ -206,9 +205,10 @@ resource "proxmox_vm_qemu" "resource-name" { ### Disks.Ide Block -The `disks.ide` block is used to configure disks of type ide. It may only be specified once. It has the options `ide0` through `ide2`. Each disk can have only one of the following mutually exclusive sub types `cdrom`, `disk`, `passthrough`. Configuration for these sub types can be found in their respective chapters: +The `disks.ide` block is used to configure disks of type ide. It may only be specified once. It has the options `ide0` through `ide3`. Each disk can have only one of the following mutually exclusive sub types `cdrom`, `cloudinit`, `disk`, `passthrough`. Configuration for these sub types can be found in their respective chapters: * `cdrom`: [Disks.x.Cdrom Block](#disksxcdrom-block). +* `cloudinit`: [Disks.x.Cloudinit Block](#disksxcloudinit-block). * `disk`: [Disks.x.Disk Block](#disksxdisk-block). * `passthrough`: [Disks.x.Passthrough Block](#disksxpassthrough-block). 
@@ -219,11 +219,21 @@ resource "proxmox_vm_qemu" "resource-name" {
   disks {
     ide {
       ide0 {
-        disk {
+        cdrom {
+          //
+        }
+      }
+      ide1 {
+        cloudinit {
           //
         }
       }
       ide2 {
+        disk {
+          //
+        }
+      }
+      ide3 {
         passthrough {
           //
         }
@@ -236,9 +246,10 @@

### Disks.Sata Block

The `disks.sata` block is used to configure disks of type sata. It may only be specified once. It has the options `sata0` through `sata5`. Each disk can have only one of the following mutually exclusive sub types `cdrom`, `cloudinit`, `disk`, `passthrough`. Configuration for these sub types can be found in their respective chapters:

* `cdrom`: [Disks.x.Cdrom Block](#disksxcdrom-block).
+* `cloudinit`: [Disks.x.Cloudinit Block](#disksxcloudinit-block).
* `disk`: [Disks.x.Disk Block](#disksxdisk-block).
* `passthrough`: [Disks.x.Passthrough Block](#disksxpassthrough-block).

@@ -254,11 +265,16 @@ resource "proxmox_vm_qemu" "resource-name" {
         }
       }
       sata1 {
-        disk {
+        cloudinit {
           //
         }
       }
       sata2 {
+        disk {
+          //
+        }
+      }
+      sata3 {
         passthrough {
           //
         }
@@ -272,9 +288,10 @@

### Disks.Scsi Block

The `disks.scsi` block is used to configure disks of type scsi. It may only be specified once. It has the options `scsi0` through `scsi30`. Each disk can have only one of the following mutually exclusive sub types `cdrom`, `cloudinit`, `disk`, `passthrough`. Configuration for these sub types can be found in their respective chapters:

* `cdrom`: [Disks.x.Cdrom Block](#disksxcdrom-block).
+* `cloudinit`: [Disks.x.Cloudinit Block](#disksxcloudinit-block).
* `disk`: [Disks.x.Disk Block](#disksxdisk-block).
* `passthrough`: [Disks.x.Passthrough Block](#disksxpassthrough-block).

@@ -290,11 +307,16 @@ resource "proxmox_vm_qemu" "resource-name" {
         }
       }
       scsi1 {
-        disk {
+        cloudinit {
           //
         }
       }
       scsi2 {
+        disk {
+          //
+        }
+      }
+      scsi3 {
         passthrough {
           //
         }
@@ -351,6 +373,13 @@

When `iso` and `passthrough` are omitted, an empty cdrom drive will be created.

+### Disks.x.Cloudinit Block
+
+Only **one** `cloudinit` block can be specified globally. This block is used to configure the cloud-init drive.
+
+| Argument  | Type  | Default Value | Description                                                                         |
+| --------- | ----- | ------------- | ----------------------------------------------------------------------------------- |
+| `storage` | `str` |               | Set the storage location for the cloud-init drive. Required when using cloud-init. |
+
### Disks.x.Disk Block

See the [docs about disks](https://pve.proxmox.com/pve-docs/chapter-qm.html#qm_hard_disk) for more details.
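For readers updating existing configurations: the retired top-level `cloudinit_cdrom_storage` attribute maps one-to-one onto the new nested block. A minimal before/after sketch (the `local-lvm` pool and the `ide3` slot are illustrative; any cloudinit-capable slot works):

```hcl
# Before this patch series (attribute now removed):
#   cloudinit_cdrom_storage = "local-lvm"

# After, inside the proxmox_vm_qemu resource block:
disks {
  ide {
    ide3 {
      cloudinit {
        # storage pool that will hold the generated cloud-init drive
        storage = "local-lvm"
      }
    }
  }
}
```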
diff --git a/examples/cloudinit_example.tf b/examples/cloudinit_example.tf index 6fe7e9bd..0a4451b3 100644 --- a/examples/cloudinit_example.tf +++ b/examples/cloudinit_example.tf @@ -33,6 +33,13 @@ resource "proxmox_vm_qemu" "cloudinit-test" { # Setup the disk disks { + ide { + ide3 { + cloudinit { + storage = "local-lvm" + } + } + } virtio { virtio0 { disk { @@ -55,8 +62,7 @@ resource "proxmox_vm_qemu" "cloudinit-test" { } # Setup the ip address using cloud-init. - cloudinit_cdrom_storage = "local-lvm" - boot = "order=virtio0;ide3" + boot = "order=virtio0" # Keep in mind to use the CIDR notation for the ip. ipconfig0 = "ip=192.168.10.20/24,gw=192.168.10.1" diff --git a/proxmox/resource_vm_qemu.go b/proxmox/resource_vm_qemu.go index e26f73e3..1ab431cb 100755 --- a/proxmox/resource_vm_qemu.go +++ b/proxmox/resource_vm_qemu.go @@ -184,10 +184,6 @@ func resourceVmQemu() *schema.Resource { ForceNew: true, ConflictsWith: []string{"pxe"}, }, - "cloudinit_cdrom_storage": { - Type: schema.TypeString, - Optional: true, - }, "full_clone": { Type: schema.TypeBool, Optional: true, @@ -913,7 +909,6 @@ func resourceVmQemuCreate(ctx context.Context, d *schema.ResourceData, meta inte } config.Disks = mapToStruct_QemuStorages(d) - setCloudInitDisk(d, &config) if len(qemuVgaList) > 0 { config.QemuVga = qemuVgaList[0].(map[string]interface{}) @@ -1203,7 +1198,6 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte } config.Disks = mapToStruct_QemuStorages(d) - setCloudInitDisk(d, &config) logger.Debug().Int("vmid", vmID).Msgf("Updating VM with the following configuration: %+v", config) @@ -1509,7 +1503,6 @@ func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interf d.Set("smbios", ReadSmbiosArgs(config.Smbios1)) d.Set("linked_vmid", config.LinkedVmId) d.Set("disks", mapFromStruct_ConfigQemu(config.Disks)) - d.Set("cloudinit_cdrom_storage", getCloudInitDisk(config.Disks)) mapFromStruct_QemuGuestAgent(d, config.Agent) // Some dirty hacks to populate undefined keys with default values. 
@@ -1989,23 +1982,6 @@ func getPrimaryIP(config *pxapi.ConfigQemu, vmr *pxapi.VmRef, client *pxapi.Clie return conn.IPs, diags } -func setCloudInitDisk(d *schema.ResourceData, config *pxapi.ConfigQemu) { - storage := d.Get("cloudinit_cdrom_storage").(string) - if storage != "" { - config.Disks.Ide.Disk_3 = &pxapi.QemuIdeStorage{CloudInit: &pxapi.QemuCloudInitDisk{ - Format: pxapi.QemuDiskFormat_Raw, - Storage: storage, - }} - } -} - -func getCloudInitDisk(config *pxapi.QemuStorages) string { - if config != nil && config.Ide != nil && config.Ide.Disk_3 != nil && config.Ide.Disk_3.CloudInit != nil { - return config.Ide.Disk_3.CloudInit.Storage - } - return "" -} - // Map struct to the terraform schema func mapFromStruct_ConfigQemu(config *pxapi.QemuStorages) []interface{} { if config == nil { @@ -2058,6 +2034,15 @@ func mapFormStruct_QemuCdRom(config *pxapi.QemuCdRom) []interface{} { } } +// nil pointer check is done by the caller +func mapFromStruct_QemuCloudInit_unsafe(config *pxapi.QemuCloudInitDisk) []interface{} { + return []interface{}{ + map[string]interface{}{ + "cloudinit": []interface{}{ + map[string]interface{}{ + "storage": string(config.Storage)}}}} +} + func mapFormStruct_QemuDiskBandwidth(params map[string]interface{}, config pxapi.QemuDiskBandwidth) { params["mbps_r_burst"] = float64(config.MBps.ReadLimit.Burst) params["mbps_r_concurrent"] = float64(config.MBps.ReadLimit.Concurrent) @@ -2141,6 +2126,9 @@ func mapFromStruct_QemuIdeStorage(config *pxapi.QemuIdeStorage) []interface{} { }, } } + if config.CloudInit != nil { + return mapFromStruct_QemuCloudInit_unsafe(config.CloudInit) + } return mapFormStruct_QemuCdRom(config.CdRom) } @@ -2205,6 +2193,9 @@ func mapFromStruct_QemuSataStorage(config *pxapi.QemuSataStorage) []interface{} }, } } + if config.CloudInit != nil { + return mapFromStruct_QemuCloudInit_unsafe(config.CloudInit) + } return mapFormStruct_QemuCdRom(config.CdRom) } @@ -2298,6 +2289,9 @@ func mapFromStruct_QemuScsiStorage(config *pxapi.QemuScsiStorage) []interface{} }, } } + if config.CloudInit != nil { + return mapFromStruct_QemuCloudInit_unsafe(config.CloudInit) + } return mapFormStruct_QemuCdRom(config.CdRom) } @@ -2409,6 +2403,14 @@ func mapToStruct_QemuCdRom(schema map[string]interface{}) (cdRom *pxapi.QemuCdRo } } +func mapToStruct_QemuCloudInit(schemaItem []interface{}) (ci *pxapi.QemuCloudInitDisk) { + ciSchema := schemaItem[0].(map[string]interface{}) + return &pxapi.QemuCloudInitDisk{ + Format: pxapi.QemuDiskFormat_Raw, + Storage: ciSchema["storage"].(string), + } +} + func mapToStruct_QemuDiskBandwidth(schema map[string]interface{}) pxapi.QemuDiskBandwidth { return pxapi.QemuDiskBandwidth{ MBps: pxapi.QemuDiskBandwidthMBps{ @@ -2510,6 +2512,10 @@ func mapToStruct_QemuIdeStorage(ide *pxapi.QemuIdeStorage, key string, schema ma } return } + if v, ok := storageSchema["cloudinit"].([]interface{}); ok && len(v) == 1 && v[0] != nil { + ide.CloudInit = mapToStruct_QemuCloudInit(v) + return + } ide.CdRom = mapToStruct_QemuCdRom(storageSchema) } @@ -2579,6 +2585,10 @@ func mapToStruct_QemuSataStorage(sata *pxapi.QemuSataStorage, key string, schema } return } + if v, ok := storageSchema["cloudinit"].([]interface{}); ok && len(v) == 1 && v[0] != nil { + sata.CloudInit = mapToStruct_QemuCloudInit(v) + return + } sata.CdRom = mapToStruct_QemuCdRom(storageSchema) } @@ -2677,6 +2687,10 @@ func mapToStruct_QemuScsiStorage(scsi *pxapi.QemuScsiStorage, key string, schema } return } + if v, ok := storageSchema["cloudinit"].([]interface{}); ok && len(v) == 1 && v[0] != 
nil { + scsi.CloudInit = mapToStruct_QemuCloudInit(v) + return + } scsi.CdRom = mapToStruct_QemuCdRom(storageSchema) } @@ -2843,12 +2857,18 @@ func mapToStruct_VirtIOStorage(virtio *pxapi.QemuVirtIOStorage, key string, sche } // schema definition -func schema_CdRom(path string) *schema.Schema { +func schema_CdRom(path string, ci bool) *schema.Schema { + var conflicts []string + if ci { + conflicts = []string{path + ".cloudinit", path + ".disk", path + ".passthrough"} + } else { + conflicts = []string{path + ".disk", path + ".passthrough"} + } return &schema.Schema{ Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{path + ".disk", path + ".passthrough"}, + ConflictsWith: conflicts, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "iso": { @@ -2866,20 +2886,158 @@ func schema_CdRom(path string) *schema.Schema { } } -func schema_Ide(setting string) *schema.Schema { - path := "disks.0.ide.0." + setting + ".0" +func schema_CloudInit(path, slot string) *schema.Schema { + // 41 is all the disk slots for cloudinit + // 3 are the conflicts within the same disk slot + c := append(make([]string, 0, 44), path+".cdrom", path+".disk", path+".passthrough") + if slot != "ide0" { + c = append(c, "disks.0.ide.0.ide0.0.cloudinit") + } + if slot != "ide1" { + c = append(c, "disks.0.ide.0.ide1.0.cloudinit") + } + if slot != "ide2" { + c = append(c, "disks.0.ide.0.ide2.0.cloudinit") + } + if slot != "ide3" { + c = append(c, "disks.0.ide.0.ide3.0.cloudinit") + } + if slot != "sata0" { + c = append(c, "disks.0.sata.0.sata0.0.cloudinit") + } + if slot != "sata1" { + c = append(c, "disks.0.sata.0.sata1.0.cloudinit") + } + if slot != "sata2" { + c = append(c, "disks.0.sata.0.sata2.0.cloudinit") + } + if slot != "sata3" { + c = append(c, "disks.0.sata.0.sata3.0.cloudinit") + } + if slot != "sata4" { + c = append(c, "disks.0.sata.0.sata4.0.cloudinit") + } + if slot != "sata5" { + c = append(c, "disks.0.sata.0.sata5.0.cloudinit") + } + if slot != "scsi0" { + c = append(c, "disks.0.scsi.0.scsi0.0.cloudinit") + } + if slot != "scsi1" { + c = append(c, "disks.0.scsi.0.scsi1.0.cloudinit") + } + if slot != "scsi2" { + c = append(c, "disks.0.scsi.0.scsi2.0.cloudinit") + } + if slot != "scsi3" { + c = append(c, "disks.0.scsi.0.scsi3.0.cloudinit") + } + if slot != "scsi4" { + c = append(c, "disks.0.scsi.0.scsi4.0.cloudinit") + } + if slot != "scsi5" { + c = append(c, "disks.0.scsi.0.scsi5.0.cloudinit") + } + if slot != "scsi6" { + c = append(c, "disks.0.scsi.0.scsi6.0.cloudinit") + } + if slot != "scsi7" { + c = append(c, "disks.0.scsi.0.scsi7.0.cloudinit") + } + if slot != "scsi8" { + c = append(c, "disks.0.scsi.0.scsi8.0.cloudinit") + } + if slot != "scsi9" { + c = append(c, "disks.0.scsi.0.scsi9.0.cloudinit") + } + if slot != "scsi10" { + c = append(c, "disks.0.scsi.0.scsi10.0.cloudinit") + } + if slot != "scsi11" { + c = append(c, "disks.0.scsi.0.scsi11.0.cloudinit") + } + if slot != "scsi12" { + c = append(c, "disks.0.scsi.0.scsi12.0.cloudinit") + } + if slot != "scsi13" { + c = append(c, "disks.0.scsi.0.scsi13.0.cloudinit") + } + if slot != "scsi14" { + c = append(c, "disks.0.scsi.0.scsi14.0.cloudinit") + } + if slot != "scsi15" { + c = append(c, "disks.0.scsi.0.scsi15.0.cloudinit") + } + if slot != "scsi16" { + c = append(c, "disks.0.scsi.0.scsi16.0.cloudinit") + } + if slot != "scsi17" { + c = append(c, "disks.0.scsi.0.scsi17.0.cloudinit") + } + if slot != "scsi18" { + c = append(c, "disks.0.scsi.0.scsi18.0.cloudinit") + } + if slot != "scsi19" { + c = append(c, 
"disks.0.scsi.0.scsi19.0.cloudinit") + } + if slot != "scsi20" { + c = append(c, "disks.0.scsi.0.scsi20.0.cloudinit") + } + if slot != "scsi21" { + c = append(c, "disks.0.scsi.0.scsi21.0.cloudinit") + } + if slot != "scsi22" { + c = append(c, "disks.0.scsi.0.scsi22.0.cloudinit") + } + if slot != "scsi23" { + c = append(c, "disks.0.scsi.0.scsi23.0.cloudinit") + } + if slot != "scsi24" { + c = append(c, "disks.0.scsi.0.scsi24.0.cloudinit") + } + if slot != "scsi25" { + c = append(c, "disks.0.scsi.0.scsi25.0.cloudinit") + } + if slot != "scsi26" { + c = append(c, "disks.0.scsi.0.scsi26.0.cloudinit") + } + if slot != "scsi27" { + c = append(c, "disks.0.scsi.0.scsi27.0.cloudinit") + } + if slot != "scsi28" { + c = append(c, "disks.0.scsi.0.scsi28.0.cloudinit") + } + if slot != "scsi29" { + c = append(c, "disks.0.scsi.0.scsi29.0.cloudinit") + } + if slot != "scsi30" { + c = append(c, "disks.0.scsi.0.scsi30.0.cloudinit") + } + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: c, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage": schema_DiskStorage()}}} +} + +func schema_Ide(slot string) *schema.Schema { + path := "disks.0.ide.0." + slot + ".0" return &schema.Schema{ Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "cdrom": schema_CdRom(path), + "cdrom": schema_CdRom(path, true), + "cloudinit": schema_CloudInit(path, slot), "disk": { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{path + ".cdrom", path + ".passthrough"}, + ConflictsWith: []string{path + ".cdrom", path + ".cloudinit", path + ".passthrough"}, Elem: &schema.Resource{ Schema: schema_DiskBandwidth(map[string]*schema.Schema{ "asyncio": schema_DiskAsyncIO(), @@ -2902,7 +3060,7 @@ func schema_Ide(setting string) *schema.Schema { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{path + ".cdrom", path + ".disk"}, + ConflictsWith: []string{path + ".cdrom", path + ".cloudinit", path + ".disk"}, Elem: &schema.Resource{ Schema: schema_DiskBandwidth(map[string]*schema.Schema{ "asyncio": schema_DiskAsyncIO(), @@ -2923,20 +3081,21 @@ func schema_Ide(setting string) *schema.Schema { } } -func schema_Sata(setting string) *schema.Schema { - path := "disks.0.sata.0." + setting + ".0" +func schema_Sata(slot string) *schema.Schema { + path := "disks.0.sata.0." 
+ slot + ".0" return &schema.Schema{ Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "cdrom": schema_CdRom(path), + "cdrom": schema_CdRom(path, true), + "cloudinit": schema_CloudInit(path, slot), "disk": { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{path + ".cdrom", path + ".passthrough"}, + ConflictsWith: []string{path + ".cdrom", path + ".cloudinit", path + ".passthrough"}, Elem: &schema.Resource{ Schema: schema_DiskBandwidth(map[string]*schema.Schema{ "asyncio": schema_DiskAsyncIO(), @@ -2959,7 +3118,7 @@ func schema_Sata(setting string) *schema.Schema { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{path + ".cdrom", path + ".disk"}, + ConflictsWith: []string{path + ".cdrom", path + ".cloudinit", path + ".disk"}, Elem: &schema.Resource{ Schema: schema_DiskBandwidth(map[string]*schema.Schema{ "asyncio": schema_DiskAsyncIO(), @@ -2980,20 +3139,21 @@ func schema_Sata(setting string) *schema.Schema { } } -func schema_Scsi(setting string) *schema.Schema { - path := "disks.0.scsi.0." + setting + ".0" +func schema_Scsi(slot string) *schema.Schema { + path := "disks.0.scsi.0." + slot + ".0" return &schema.Schema{ Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "cdrom": schema_CdRom(path), + "cdrom": schema_CdRom(path, true), + "cloudinit": schema_CloudInit(path, slot), "disk": { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{path + ".cdrom", path + ".passthrough"}, + ConflictsWith: []string{path + ".cdrom", path + ".cloudinit", path + ".passthrough"}, Elem: &schema.Resource{ Schema: schema_DiskBandwidth(map[string]*schema.Schema{ "asyncio": schema_DiskAsyncIO(), @@ -3018,7 +3178,7 @@ func schema_Scsi(setting string) *schema.Schema { Type: schema.TypeList, Optional: true, MaxItems: 1, - ConflictsWith: []string{path + ".cdrom", path + ".disk"}, + ConflictsWith: []string{path + ".cdrom", path + ".cloudinit", path + ".disk"}, Elem: &schema.Resource{ Schema: schema_DiskBandwidth(map[string]*schema.Schema{ "asyncio": schema_DiskAsyncIO(), @@ -3049,7 +3209,7 @@ func schema_Virtio(setting string) *schema.Schema { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "cdrom": schema_CdRom(path), + "cdrom": schema_CdRom(path, false), "disk": { Type: schema.TypeList, Optional: true, From 714d6842a2181cb3212a0122d1b7578ab66a53b4 Mon Sep 17 00:00:00 2001 From: Tinyblargon <76069640+Tinyblargon@users.noreply.github.com> Date: Sun, 19 May 2024 01:32:01 +0200 Subject: [PATCH 4/6] fix: unable to move `cloudinit` disk --- proxmox/resource_vm_qemu.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/proxmox/resource_vm_qemu.go b/proxmox/resource_vm_qemu.go index 1ab431cb..6ae3596b 100755 --- a/proxmox/resource_vm_qemu.go +++ b/proxmox/resource_vm_qemu.go @@ -1202,8 +1202,9 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte logger.Debug().Int("vmid", vmID).Msgf("Updating VM with the following configuration: %+v", config) var rebootRequired bool + automaticReboot := d.Get("automatic_reboot").(bool) // don't let the update function handel the reboot as it can't deal with cloud init changes yet - rebootRequired, err = config.Update(false, vmr, client) + rebootRequired, err = config.Update(automaticReboot, vmr, client) if err != nil { return diag.FromErr(err) } @@ -1319,7 +1320,7 @@ func resourceVmQemuUpdate(ctx 
 		}
 	}
 } else if d.Get("reboot_required").(bool) { // reboot the VM
-	if d.Get("automatic_reboot").(bool) { // automatic reboot is enabled
+	if automaticReboot { // automatic reboot is enabled
 		log.Print("[DEBUG][QemuVmUpdate] rebooting the VM to match the configuration changes")
 		_, err = client.RebootVm(vmr)
 		// note: the default timeout is 3 min, configurable per VM: Options/Start-Shutdown Order/Shutdown timeout

From 548c3ddc75cb471ec1a730a999e4aaa5ad20bb45 Mon Sep 17 00:00:00 2001
From: Tinyblargon <76069640+Tinyblargon@users.noreply.github.com>
Date: Sun, 19 May 2024 01:35:02 +0200
Subject: [PATCH 5/6] refactor: reuse `reboot_required` value

---
 proxmox/resource_vm_qemu.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/proxmox/resource_vm_qemu.go b/proxmox/resource_vm_qemu.go
index 6ae3596b..df240bc3 100755
--- a/proxmox/resource_vm_qemu.go
+++ b/proxmox/resource_vm_qemu.go
@@ -1208,7 +1208,6 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte
 	if err != nil {
 		return diag.FromErr(err)
 	}
-	d.Set("reboot_required", rebootRequired)
 
 	// If any of the "critical" keys are changed then a reboot is required.
 	if d.HasChanges(
@@ -1253,17 +1252,17 @@
 		"hostpci",
 		"smbios",
 	) {
-		d.Set("reboot_required", true)
+		rebootRequired = true
 	}
 
 	// reboot is only required when memory hotplug is disabled
 	if d.HasChange("memory") && !strings.Contains(d.Get("hotplug").(string), "memory") {
-		d.Set("reboot_required", true)
+		rebootRequired = true
 	}
 
 	// reboot is only required when cpu hotplug is disabled
 	if d.HasChanges("sockets", "cores", "vcpus") && !strings.Contains(d.Get("hotplug").(string), "cpu") {
-		d.Set("reboot_required", true)
+		rebootRequired = true
 	}
 
 	// if network hot(un)plug is not enabled, then check if some of the "critical" parameters have changes
@@ -1273,18 +1272,18 @@
 		newValues := newValuesRaw.([]interface{})
 		if len(oldValues) != len(newValues) {
 			// network interface added or removed
-			d.Set("reboot_required", true)
+			rebootRequired = true
 		} else {
 			// some of the existing interface parameters have changed
 			for i := range oldValues { // loop through the interfaces
 				if oldValues[i].(map[string]interface{})["model"] != newValues[i].(map[string]interface{})["model"] {
-					d.Set("reboot_required", true)
+					rebootRequired = true
 				}
 				if oldValues[i].(map[string]interface{})["macaddr"] != newValues[i].(map[string]interface{})["macaddr"] {
-					d.Set("reboot_required", true)
+					rebootRequired = true
 				}
 				if oldValues[i].(map[string]interface{})["queues"] != newValues[i].(map[string]interface{})["queues"] {
-					d.Set("reboot_required", true)
+					rebootRequired = true
 				}
 			}
 		}
@@ -1319,7 +1318,7 @@
 			return diag.FromErr(err)
 		}
 	}
-	} else if d.Get("reboot_required").(bool) { // reboot the VM
+	} else if rebootRequired { // reboot the VM
 		if automaticReboot { // automatic reboot is enabled
 			log.Print("[DEBUG][QemuVmUpdate] rebooting the VM to match the configuration changes")
 			_, err = client.RebootVm(vmr)
@@ -1352,6 +1351,7 @@
 
 	lock.unlock()
 
+	d.Set("reboot_required", rebootRequired)
 	// err = resourceVmQemuRead(ctx, d, meta)
 	// if err != nil {
 	//	diags = append(diags, diag.FromErr(err)...)
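Patches 4 and 5 together change how reboots are coordinated: the user's `automatic_reboot` setting is now handed straight to `config.Update`, and the pending-reboot status is accumulated in a local `rebootRequired` variable that is written to state exactly once, after the update completes. The user-facing knob itself is unchanged; a minimal sketch of it in use (resource body abbreviated, values illustrative):

```hcl
resource "proxmox_vm_qemu" "example" {
  # ... remaining VM configuration ...

  # When a change needs a restart (e.g. a memory resize while memory
  # hotplug is disabled), the provider reboots the VM itself; the
  # reboot_required state attribute records whether a reboot was needed.
  automatic_reboot = true
}
```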
From c585fa22a9ebb018c6e8705f2e7aeaa9498d6e8b Mon Sep 17 00:00:00 2001 From: Tinyblargon <76069640+Tinyblargon@users.noreply.github.com> Date: Sun, 19 May 2024 01:40:07 +0200 Subject: [PATCH 6/6] refactor: remove redundant code --- proxmox/resource_vm_qemu.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/proxmox/resource_vm_qemu.go b/proxmox/resource_vm_qemu.go index df240bc3..5e9bc0f9 100755 --- a/proxmox/resource_vm_qemu.go +++ b/proxmox/resource_vm_qemu.go @@ -1352,14 +1352,7 @@ func resourceVmQemuUpdate(ctx context.Context, d *schema.ResourceData, meta inte lock.unlock() d.Set("reboot_required", rebootRequired) - // err = resourceVmQemuRead(ctx, d, meta) - // if err != nil { - // diags = append(diags, diag.FromErr(err)...) - // return diags - // } - diags = append(diags, resourceVmQemuRead(ctx, d, meta)...) - return diags - // return resourceVmQemuRead(ctx, d, meta) + return append(diags, resourceVmQemuRead(ctx, d, meta)...) } func resourceVmQemuRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
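Taken together, the series unpins the cloud-init drive from its previously reserved `ide3` slot: `schema_CloudInit` registers `ConflictsWith` entries against every other cloudinit-capable slot, so exactly one `cloudinit` block may appear anywhere across `ide0`-`ide3`, `sata0`-`sata5`, or `scsi0`-`scsi30`. A sketch placing it on a SCSI slot instead of IDE (storage name illustrative):

```hcl
resource "proxmox_vm_qemu" "ci-on-scsi" {
  # ... remaining VM configuration ...

  disks {
    scsi {
      scsi1 {
        cloudinit {
          storage = "local-lvm"
        }
      }
    }
  }
}
```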