Dynamically attach a secondary volume if needed

Hi,

I'd like to create a few instances and optionally add a data disk to each of them, if it's configured in a variable. I ended up with the following code, but it's too complicated and not working: Terraform tries to create 3 data disks per machine because of a bad condition.

variable "datadisks" {
  description = "List of datadisks"
  default = [
  {
    # id = 0
    name = "kafka_node_1"
    size = 10
    az   = "az1.dc0"
  },
  {
    # id = 1
    name = "kafka_node_2"
    size = 10
    az   = "az1.dc0"
  }
  ]
}

variable "servers" {
    description = "List of servers"
    default = [
    {
      name      = "kafka_node_1"
      flavor    = "s3.large.2"
      network   = 0
      key       = "prod"
      ip        = "10.0.0.10"
      disksize  = 10
      create_datadisk = true
      datadisk_id   = 0
      image     = "centos8"
      az        = "az1.dc0"
    },
    {
      name      = "kafka_node_2"
      flavor    = "s3.large.2"
      network   = 0
      key       = "prod"
      ip        = "10.0.0.11"
      disksize  = 10
      create_datadisk = true
      datadisk_id   = 1
      image     = "centos8"
      az        = "az1.dc0"
    }]
}

# System disks
resource "huaweicloudstack_blockstorage_volume_v2" "volume" {
  count       = length(var.servers)
  availability_zone = lookup(var.servers[count.index], "az", null)
  name        = lookup(var.servers[count.index], "name", null)
  size        = lookup(var.servers[count.index], "disksize", null)
  image_id    = lookup(var.images, lookup(var.servers[count.index], "image", null), "")
  volume_type = "SSD01"
}

# Data disks
resource "huaweicloudstack_blockstorage_volume_v2" "datadisk" {
  count             = length(var.datadisks)
  availability_zone = lookup(var.datadisks[count.index], "az", null)
  name              = lookup(var.datadisks[count.index], "name", null)
  size              = lookup(var.datadisks[count.index], "size", null)
  volume_type       = "SSD01"
}

# Instance
resource "huaweicloudstack_compute_instance_v2" "server" {
  count             = length(var.servers)
  name              = lookup(var.servers[count.index], "name", null)
  flavor_name       = lookup(var.servers[count.index], "flavor", null)
  key_pair          = lookup(var.servers[count.index], "key", null)
  security_groups   = ["default", "base"]
  availability_zone = lookup(var.servers[count.index], "az", null)
  config_drive      = true

  network {
    port = huaweicloudstack_networking_port_v2.port[count.index].id
  }

  # system disk
  block_device {
    uuid = try(huaweicloudstack_blockstorage_volume_v2.volume[count.index].id, "")
    source_type           = "volume"
    destination_type      = "volume"
    boot_index            = 0
    delete_on_termination = true
  }

  # data disk
  dynamic "block_device" {
    for_each = var.servers[count.index].create_datadisk == true  ? var.datadisks[var.servers[count.index].datadisk_id] : {}
    content {
      uuid                  = block_device.id
      source_type           = "volume"
      destination_type      = "volume"
      delete_on_termination = false
      boot_index            = -1
    }
  }

}
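
I suspect the problem is that for_each over a single object iterates over its attributes (name, size, az), which is where the 3 data disks per machine come from. Iterating over a list with zero or one elements seems closer, something like this (an untested sketch, reusing the datadisk resource above):

  # data disk (sketch: for_each over a 0- or 1-element list instead of over the object)
  dynamic "block_device" {
    for_each = var.servers[count.index].create_datadisk ? [var.datadisks[var.servers[count.index].datadisk_id]] : []
    content {
      uuid                  = huaweicloudstack_blockstorage_volume_v2.datadisk[var.servers[count.index].datadisk_id].id
      source_type           = "volume"
      destination_type      = "volume"
      delete_on_termination = false
      boot_index            = -1
    }
  }

But I'm not sure this is the idiomatic approach.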

What is the best variable type for this scenario, and how do I mount volumes into instances conditionally?

Thank you for help.

I’ve got it working:

variable "datadisks" {
  description = "List of datadisks"
  default = [
  # name must be unique
  {
    name     = "kafka_node_1_datadisk"
    instance = "kafka_node_1"
    size     = 10
    az       = "az1.dc0"
    type     = "SSD01"
  },
  {
    name     = "kafka_node_2_datadisk"
    instance = "kafka_node_2"
    size     = 10
    az       = "az1.dc0"
    type     = "SSD01"
  }]
}

variable "servers" {
    description = "List of servers"
    default = [
    {
      name      = "kafka_node_1"
      flavor    = "s3.large.2"
      ip        = "10.0.0.10"
      network   = "soc-prod-pipeline"
      disksize  = 10
      image     = "centos8"
      az        = "az1.dc0"
    },
    {
      name      = "kafka_node_2"
      flavor    = "s3.large.2"
      ip        = "10.0.0.11"
      network   = "soc-prod-pipeline"
      disksize  = 10
      image     = "centos8"
      az        = "az1.dc0"
    }]
}

resource "huaweicloudstack_compute_volume_attach_v2" "attach" {
  # for_each expects a map in key/value format: the key is the datadisk name, the value is an object with the datadisk info
  for_each = {
    for disk in var.datadisks: 
    disk.name => disk
  }
  volume_id   = huaweicloudstack_blockstorage_volume_v2.datadisk[each.key].id
  instance_id = huaweicloudstack_compute_instance_v2.server[each.value.instance].id  
}
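
This assumes the datadisk and server resources are also switched from count to for_each keyed by name, so the lookups via each.key and each.value.instance resolve; a sketch for the datadisk resource (the server resource is keyed by server name the same way):

resource "huaweicloudstack_blockstorage_volume_v2" "datadisk" {
  # keyed by disk name, so the attach resource can address it with each.key
  for_each          = { for disk in var.datadisks : disk.name => disk }
  availability_zone = each.value.az
  name              = each.value.name
  size              = each.value.size
  volume_type       = each.value.type
}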

There is still one issue with this: if I change only the image for kafka_node_1, Terraform wants to recreate all the infra, and for now I don't know why.
In the terraform plan, for resource “huaweicloudstack_blockstorage_volume_v2” “volume”:

~ image_id          = "143034d7-dddc-4a3e-bf37-6c9fd96e914d" -> "354c12e3-2c21-4d04-852d-8be0a9153bd9" # forces replacement 

This is OK.

But then, in all of the server resources “huaweicloudstack_compute_instance_v2” “server”:

 ~ block_device {
        boot_index            = 0
        delete_on_termination = true
        destination_type      = "volume"
        source_type           = "volume"
      ~ uuid                  = "f1edf486-ae01-4354-a6f2-10cc587fc183" -> (known after apply) # forces replacement
      - volume_size           = 0 -> null
    }

This doesn't make sense to me. Why does it want to destroy all the system disks, and therefore all the servers, when I changed only the system disk of one server?
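
Maybe a targeted plan can narrow down whether replacing a single volume already triggers the cascade:

terraform plan -target='huaweicloudstack_blockstorage_volume_v2.volume[0]'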

Hi,

the problem with all the volumes being recreated probably resided in the common resource with the same name.

I decided to generate the Terraform config with the help of an Ansible Jinja template and create one unique resource per instance, without loops. Now it's working as expected.
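
The config itself is rendered with an ordinary template task, roughly like this (the file paths are placeholders):

- name: Render Terraform configuration
  ansible.builtin.template:
    src: main.tf.j2
    dest: "{{ terraform_dir }}/main.tf"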

For anyone looking for a similar solution, just adjust this Ansible template to your needs:

{{ ansible_managed }}

provider "huaweicloudstack" {
  #access_key    = data.ansiblevault_path.access_key.value
  #secret_key    = data.ansiblevault_path.secret_key.value
  #user_name     = data.ansiblevault_path.user_name.value
  user_name     = var.provider_data["user_name"]
  #password      = data.ansiblevault_path.password.value
  password      = var.provider_data["password"]
  domain_name   = var.provider_data["domain_name"]
  region        = var.provider_data["region"]
  tenant_id     = var.provider_data["tenant_id"]
  auth_url      = var.provider_data["auth_url"]
  insecure      = true
  endpoints     = local.endpoints
}

# Create VPC
resource "huaweicloudstack_networking_router_v2" "router" {
  name                = "{{ vpc }}"
  admin_state_up      = true
  external_network_id = var.external_network
#  enable_snat         = true
}

# router interface
{% for subnet in subnets %}
resource "huaweicloudstack_networking_router_interface_v2" "router_interface_{{ subnet }}" {
  router_id = huaweicloudstack_networking_router_v2.router.id
  subnet_id = huaweicloudstack_networking_subnet_v2.{{ subnet }}.id
}
{% endfor %}

# Create networks
{% for subnet in subnets %}
resource "huaweicloudstack_networking_network_v2" "{{ subnet }}" {
  name           = "{{ subnet }}"
  admin_state_up = "true"
}
{% endfor %}

# Create subnets
{% for subnet in subnets %}
resource "huaweicloudstack_networking_subnet_v2" "{{ subnet }}" {
  name       = "{{ subnet }}"
  cidr       = "{{ subnets[subnet].cidr }}"
  network_id = huaweicloudstack_networking_network_v2.{{ subnet }}.id
#  enable_dhcp   = lookup(var.subnets[count.index], "dhcp", false) - "enable_dhcp" must be true
}
{% endfor %}

# Ports
{% for server in servers %}
resource "huaweicloudstack_networking_port_v2" "{{ server.name }}" {
  name               = "{{ server.name }}"
  network_id         = huaweicloudstack_networking_network_v2.{{ server.network.keys()|first }}.id
  admin_state_up     = "true"
  #security_group_ids = [huaweicloudstack_networking_secgroup_v2.secgroup_1.id]

  fixed_ip {
    subnet_id  = huaweicloudstack_networking_subnet_v2.{{ server.network.keys()|first }}.id
    ip_address = "{{ server.network[server.network.keys()|first] }}"
  }
}
{% endfor %}

# System disks
{% for server in servers %}
resource "huaweicloudstack_blockstorage_volume_v2" "system_{{ server.name }}" {
#  region           = "sk-ba-1"
  availability_zone = "{{ server.az | default(server_defaults.az) }}"
  name              = "{{ server.name }}"
  size              = "{{ server.disksize | default(server_defaults.disksize) }}"
  {% if server.image is defined -%}
  image_id          = "{{ images[server.image] }}"
  {% else -%}
  image_id          = "{{ images[server_defaults.image] }}"
  {% endif -%}
  volume_type       = "SSD01"
}
{% endfor %}

# Data disks
{% for server in servers %}
{% if server.datadisk is defined %}
resource "huaweicloudstack_blockstorage_volume_v2" "datadisk_{{ server.name }}" {
#  region           = "sk-ba-1"
  availability_zone = "{{ server.az | default(server_defaults.az) }}"
  name              = "{{ server.name }}_datadisk" # volume names must be unique, so keep this distinct from the system disk
  size              = "{{ server.datadisk }}"
  volume_type       = "SSD01"
}
{% endif %}
{% endfor %}


# Create servers
{% for server in servers %}
resource "huaweicloudstack_compute_instance_v2" "{{ server.name }}" {
  name              = "{{ server.name }}"
  flavor_name       = "{{ server.flavor | default(server_defaults.flavor) }}"
  security_groups   = {{ ( server.sg | default(server_defaults.sg) ) | to_json }}
  availability_zone = "{{ server.az | default(server_defaults.az) }}"
  config_drive      = true
  user_data         = <<-EOT
#cloud-config
#package_upgrade: true
hostname: {{ server.name }}
users:
  - name: vagrant
    sudo: ["ALL=(ALL) NOPASSWD:ALL"]
    lock_passwd: true
    gecos: Cloud User
    groups: [wheel, adm, systemd-journal]
    shell: /bin/bash
    ssh_authorized_keys:
      - {{ auth_keys[server.network.keys()|first] }}
resolv_conf:
    nameservers: ['8.8.4.4', '8.8.8.8']
final_message: "Cloud-init done for {{ server.name }}"
network:
  version: 1
  config:
  - id: eth0
    name: eth0
    subnets:
      - address: {{ server.network[server.network.keys()|first] }}/24
        dns_nameservers:
          - 8.8.4.4
          - 8.8.8.8
        type: static
    type: physical
EOT

  # router tag is added automatically to instance
  tags = [huaweicloudstack_networking_router_v2.router.id]

  network {
    port = huaweicloudstack_networking_port_v2.{{ server.name }}.id
  }

# system disk
  block_device {
    # Terraform up to this version (1.12.24) still has a bug: if the volumes are not created yet,
    # it reports that no such index exists among the volumes. The only workaround I found is to use try() until they fix it.
    # https://github.com/terraform-providers/terraform-provider-aws/issues/9733
    uuid = try(huaweicloudstack_blockstorage_volume_v2.system_{{ server.name }}.id, "")
    source_type           = "volume"
    destination_type      = "volume"
    boot_index            = 0
    delete_on_termination = true
  }
}
{% endfor %}

{% for server in servers %}
{% if server.datadisk is defined %}
resource "huaweicloudstack_compute_volume_attach_v2" "datadisk_attach_{{ server.name }}" {
  volume_id   = huaweicloudstack_blockstorage_volume_v2.datadisk_{{ server.name }}.id
  instance_id = huaweicloudstack_compute_instance_v2.{{ server.name }}.id  
}
{% endif %}
{% endfor %}
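
The template expects Ansible variables roughly in this shape (an illustrative sketch based on the examples above; the image UUID and SSH key are placeholders):

vpc: soc-prod
subnets:
  soc-prod-pipeline:
    cidr: 10.0.0.0/24
images:
  centos8: 354c12e3-2c21-4d04-852d-8be0a9153bd9
auth_keys:
  soc-prod-pipeline: ssh-rsa AAAA... vagrant
server_defaults:
  az: az1.dc0
  flavor: s3.large.2
  disksize: 10
  image: centos8
  sg: [default, base]
servers:
  - name: kafka_node_1
    network:
      soc-prod-pipeline: 10.0.0.10
    datadisk: 10
  - name: kafka_node_2
    network:
      soc-prod-pipeline: 10.0.0.11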