I have am trying to pass output from a child module to the root module within the following output block which is contained in my main.tf in root module.
output "ansible_inventory" {
value = <<-EOF
${module.vsphere_virtual_machine_s4hana.ip} ansible_host=
[all:vars]
ansible_user=ansadmin
EOF
}
The plan is to execute terraform output ansible_inventory once apply is complete to have a populated inventory for ansible.
I receive the following error
Error: Unsupported attribute
│
│ on main.tf line 158, in output "ansible_inventory":
│ 158: ${module.vsphere_virtual_machine_s4hana.ip} ansible_host=
│ ├────────────────
│ │ module.vsphere_virtual_machine_s4hana is a list of object, known only after apply
│
│ Can't access attributes on a list of objects. Did you mean to access attribute "ip" for a specific element of the list, or across all elements of the list?
Layout of files
$ tree
.
├── main.tf
├── modules
│ └── terraform-vsphere-vm
│ ├── CODE_OF_CONDUCT.md
│ ├── CONTRIBUTING.md
│ ├── LICENSE
│ ├── README.md
│ ├── main.tf
│ ├── output.tf
│ ├── variables.tf
│ └── versions.tf
├── provider.tf
├── terraform.auto.tfvars
├── terraform.tfstate
├── terraform.tfstate.backup
└── variables.tf
The root main.tf is as below
root
|
main.tf
module "vsphere_virtual_machine_s4hana" {
source = "./modules/terraform-vsphere-vm/"
count = var.create_s4hana_vm ? 1 : 0
vmname = "${lower(var.vmnameprefix)}${lower(var.s4hana_sap_sid)}"
instances = var.s4hana_instances
dc = var.dc
vmrp = var.vmrp
ram_size = var.s4hana_ram_size
cpu_number = var.s4hana_cpu_number
datastore = var.datastore
vmtemp = var.vmtemp
domain = var.domain
network = var.s4hana_network
data_disk = {
disk1 = {
size_gb = var.s4hana_disks_usr_sap_storage_size,
thin_provisioned = true,
eagerly_scrub = false,
data_disk_scsi_controller = 1,
},
disk2 = {
size_gb = var.s4hana_disks_sapmnt_storage_size,
thin_provisioned = true,
eagerly_scrub = false,
data_disk_scsi_controller = 1,
},
disk3 = {
size_gb = var.s4hana_disks_trans_storage_size,
thin_provisioned = true,
eagerly_scrub = false,
data_disk_scsi_controller = 1,
},
disk4 = {
size_gb = var.s4hana_disks_shared_storage_size,
thin_provisioned = true,
eagerly_scrub = false,
data_disk_scsi_controller = 1,
},
disk5 = {
size_gb = var.s4hana_disks_data_storage_size,
thin_provisioned = true,
eagerly_scrub = false,
data_disk_scsi_controller = 2,
},
disk6 = {
size_gb = var.s4hana_disks_log_storage_size,
thin_provisioned = true,
eagerly_scrub = false,
data_disk_scsi_controller = 3,
},
}
scsi_controller = 0 // This will assign OS disk to controller 0
dns_server_list = var.dns_server_list
vmgateway = var.vmgateway
is_windows_image = false
enable_disk_uuid = true
}
output "ansible_inventory" {
value = <<-EOF
${module.vsphere_virtual_machine_s4hana.ip} ansible_host=
[all:vars]
ansible_user=ansadmin
EOF
}
child module terraform-vsphere-vm
main.tf
resource "vsphere_virtual_machine" "vm" {
count = var.instances
depends_on = [var.vm_depends_on]
name = var.staticvmname != null ? var.staticvmname : format("${var.vmname}${var.vmnameformat}", count.index + 1)
resource_pool_id = data.vsphere_resource_pool.pool.id
folder = var.vmfolder
tags = var.tag_ids != null ? var.tag_ids : data.vsphere_tag.tag[*].id
custom_attributes = var.custom_attributes
annotation = var.annotation
extra_config = var.extra_config
firmware = var.content_library == null && var.firmware == null ? data.vsphere_virtual_machine.template[0].firmware : var.firmware
efi_secure_boot_enabled = var.content_library == null && var.efi_secure_boot == null ? data.vsphere_virtual_machine.template[0].efi_secure_boot_enabled : var.efi_secure_boot
enable_disk_uuid = var.content_library == null && var.enable_disk_uuid == null ? data.vsphere_virtual_machine.template[0].enable_disk_uuid : var.enable_disk_uuid
storage_policy_id = var.storage_policy_id
datastore_cluster_id = var.datastore_cluster != "" ? data.vsphere_datastore_cluster.datastore_cluster[0].id : null
datastore_id = var.datastore != "" ? data.vsphere_datastore.datastore[0].id : null
num_cpus = var.cpu_number
num_cores_per_socket = var.num_cores_per_socket
cpu_hot_add_enabled = var.cpu_hot_add_enabled
cpu_hot_remove_enabled = var.cpu_hot_remove_enabled
cpu_reservation = var.cpu_reservation
cpu_share_level = var.cpu_share_level
cpu_share_count = var.cpu_share_level == "custom" ? var.cpu_share_count : null
memory_reservation = var.memory_reservation
memory = var.ram_size
memory_hot_add_enabled = var.memory_hot_add_enabled
memory_share_level = var.memory_share_level
memory_share_count = var.memory_share_level == "custom" ? var.memory_share_count : null
guest_id = var.content_library == null ? data.vsphere_virtual_machine.template[0].guest_id : null
scsi_bus_sharing = var.scsi_bus_sharing
scsi_type = var.scsi_type != "" ? var.scsi_type : (var.content_library == null ? data.vsphere_virtual_machine.template[0].scsi_type : null)
scsi_controller_count = max(
max(0, flatten([
for item in values(var.data_disk) : [
for elem, val in item :
elem == "data_disk_scsi_controller" ? val : 0
]])...) + 1,
ceil((max(0, flatten([
for item in values(var.data_disk) : [
for elem, val in item :
elem == "unit_number" ? val : 0
]])...) + 1) / 15),
var.scsi_controller)
wait_for_guest_net_routable = var.wait_for_guest_net_routable
wait_for_guest_ip_timeout = var.wait_for_guest_ip_timeout
wait_for_guest_net_timeout = var.wait_for_guest_net_timeout
child module output
output.tf
output "DC_ID" {
description = "id of vSphere Datacenter"
value = data.vsphere_datacenter.dc.id
}
output "ResPool_ID" {
description = "Resource Pool id"
value = data.vsphere_resource_pool.pool.id
}
output "VM" {
description = "VM Names"
value = vsphere_virtual_machine.vm.*.name
}
output "ip" {
description = "default ip address of the deployed VM"
value = vsphere_virtual_machine.vm.*.default_ip_address
}
output "guest-ip" {
description = "all the registered ip address of the VM"
value = vsphere_virtual_machine.vm.*.guest_ip_addresses
}
output "uuid" {
description = "UUID of the VM in vSphere"
value = vsphere_virtual_machine.vm.*.uuid
}
output "disk" {
description = "Disks of the deployed VM"
value = vsphere_virtual_machine.vm.*.disk
}