Adding more instances is messing up my volume attachments

Hello. This is hard to explain, but we currently have 3 instances of one type and I’ve been tasked with adding 3 more. The problem is that whenever I add instances, the loop changes the order of my volume attachments. Since that node type went from 3 instances to 6, the sequential indexing shuffles the volumes between instances. Any help?

provider "aws" {
  region = "us-west-2"
}

resource "aws_instance" "dnode" {
  count                   = "${var.dnodes}"
  ami                     = "${var.ami}"
  instance_type           = "${var.instance_type_dnode}"
  subnet_id               = "${var.subnet}"
  key_name                = "${var.key}"
  vpc_security_group_ids  = ["${var.security_group}"]
  iam_instance_profile    = "${var.company}-${lower(var.service)}"
  user_data               = "${file("./scripts/dnodes.sh")}"
  disable_api_termination = false
  ebs_optimized           = true
  monitoring              = false

  lifecycle {
    ignore_changes = ["user_data", "ami"]
  }

  tags = {
    Name             = "${var.service}_${var.environment}_Energy_Node_${count.index + 1}"
    Service          = "${var.service}"
    Contact          = "${var.contact}"
    Environment      = "${title(lower(var.environment))}"
    Terraform        = "true"
    "c7n:DoNotPatch" = "True"
    "c7n:OffHour"    = "off=[(M-H,20),(S,8)];tz=mt"
    "c7n:OnHour"     = "on=(M-F,00);tz=mt"
  }

  volume_tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${count.index + 1}_ROOT"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

resource "aws_instance" "enode" {
  count                   = "${var.enodes}"
  ami                     = "${var.ami}"
  instance_type           = "${var.instance_type_enode}"
  subnet_id               = "${var.subnet}"
  key_name                = "${var.key}"
  vpc_security_group_ids  = ["${var.security_group}"]
  iam_instance_profile    = "${var.company}-${lower(var.service)}"
  user_data               = "${file("./scripts/enodes.sh")}"
  disable_api_termination = false

  lifecycle {
    ignore_changes = ["user_data", "ami"]
  }

  tags = {
    Name             = "${var.service}_${var.environment}_Energy_Node_${count.index + var.dnodes + 1}"
    Service          = "${var.service}"
    Contact          = "${var.contact}"
    Environment      = "${title(lower(var.environment))}"
    Terraform        = "true"
    "c7n:DoNotPatch" = "True"
    "c7n:OffHour"    = "off=[(M-H,20),(S,8)];tz=mt"
    "c7n:OnHour"     = "on=(M-F,00);tz=mt"
  }

  volume_tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${count.index + var.dnodes + 1}_ROOT"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

resource "aws_ebs_volume" "varopt-dnode" {
  count             = "${var.dnodes}"
  availability_zone = "${var.availability_zone}"
  size              = 100
  type              = "gp2"

  tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${element(var.dnode_list, count.index)}_VAROPT"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

resource "aws_ebs_volume" "varopt-enode" {
  count             = "${var.enodes}"
  availability_zone = "${var.availability_zone}"
  size              = 100
  type              = "gp2"

  tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${element(var.enode_list, count.index)}_VAROPT"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

resource "aws_ebs_volume" "data-disk" {
  count             = "${var.dnodes * 5}"
  availability_zone = "${var.availability_zone}"
  size              = 600
  type              = "gp2"

  tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${element(var.dnode_list, count.index)}_VOL_${element(var.data_volume_device_list, count.index)}"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

resource "aws_ebs_volume" "opt2-enode" {
  count             = "${var.enodes}"
  availability_zone = "${var.availability_zone}"
  size              = 150
  type              = "gp2"

  tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${element(var.enode_list, count.index)}_OPT2"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

resource "aws_volume_attachment" "varopt-dnode-volume-attachment" {
  count        = "${var.dnodes}"
  device_name  = "/dev/sdf"
  instance_id  = "${element(aws_instance.dnode.*.id, count.index)}"
  volume_id    = "${element(aws_ebs_volume.varopt-dnode.*.id, count.index)}"
  force_detach = true
  skip_destroy = true
}

resource "aws_volume_attachment" "varopt-enode-volume-attachment" {
  count        = "${var.enodes}"
  device_name  = "/dev/sdf"
  instance_id  = "${element(aws_instance.enode.*.id, count.index)}"
  volume_id    = "${element(aws_ebs_volume.varopt-enode.*.id, count.index)}"
  force_detach = true
}

resource "aws_volume_attachment" "opt2-enode-volume-attachment" {
  count        = "${var.enodes}"
  device_name  = "/dev/sdg"
  instance_id  = "${element(aws_instance.enode.*.id, count.index)}"
  volume_id    = "${element(aws_ebs_volume.opt2-enode.*.id, count.index)}"
  force_detach = true
}

resource "aws_volume_attachment" "data-volume-attachment" {
  count        = "${var.dnodes * 5}"
  device_name  = "${element(var.data_volume_device_list, count.index)}"
  instance_id  = "${element(aws_instance.dnode.*.id, count.index)}"
  volume_id    = "${element(aws_ebs_volume.data-disk.*.id, count.index)}"
  force_detach = true
}

I think the problem is the way I’m doing my volume attachment using a list. Here’s that variable

variable "data_volume_device_list" {
  description = "device list for EC2 mapping"
  type        = "list"
  default     = ["/dev/sdg", "/dev/sdh", "/dev/sdi", "/dev/sdj", "/dev/sdk"]
}

Here’s a bit of my plan output:

# aws_volume_attachment.data-volume-attachment[3] must be replaced
-/+ resource "aws_volume_attachment" "data-volume-attachment" {
        device_name  = "/dev/sdj"
        force_detach = true
      ~ id           = "vai-1957865839" -> (known after apply)
      ~ instance_id  = "i-08a576eac17cbc00b" -> (known after apply) # forces replacement
        volume_id    = "vol-0b70a54ea5a81c718"
    }

  # aws_volume_attachment.data-volume-attachment[4] must be replaced
-/+ resource "aws_volume_attachment" "data-volume-attachment" {
        device_name  = "/dev/sdk"
        force_detach = true
      ~ id           = "vai-2929341630" -> (known after apply)
      ~ instance_id  = "i-0b9ba0db464e8f0cb" -> (known after apply) # forces replacement
        volume_id    = "vol-05f4671c1f23853c6"
    }

  # aws_volume_attachment.data-volume-attachment[5] must be replaced
-/+ resource "aws_volume_attachment" "data-volume-attachment" {
        device_name  = "/dev/sdg"
        force_detach = true
      ~ id           = "vai-1849870220" -> (known after apply)
      ~ instance_id  = "i-061c8a0463e9aefe4" -> (known after apply) # forces replacement
        volume_id    = "vol-0c1dc11f9c344990d"
    }

  # aws_volume_attachment.data-volume-attachment[9] must be replaced
-/+ resource "aws_volume_attachment" "data-volume-attachment" {
        device_name  = "/dev/sdk"
        force_detach = true
      ~ id           = "vai-2507744327" -> (known after apply)
      ~ instance_id  = "i-08a576eac17cbc00b" -> (known after apply) # forces replacement
        volume_id    = "vol-0336e19c00bb07b65"
    }

  # aws_volume_attachment.data-volume-attachment[10] must be replaced
-/+ resource "aws_volume_attachment" "data-volume-attachment" {
        device_name  = "/dev/sdg"
        force_detach = true
      ~ id           = "vai-1666224816" -> (known after apply)
      ~ instance_id  = "i-0b9ba0db464e8f0cb" -> (known after apply) # forces replacement
        volume_id    = "vol-0534b6cdd346945a2"
    }

  # aws_volume_attachment.data-volume-attachment[11] must be replaced
-/+ resource "aws_volume_attachment" "data-volume-attachment" {
        device_name  = "/dev/sdh"
        force_detach = true
      ~ id           = "vai-124253228" -> (known after apply)
      ~ instance_id  = "i-061c8a0463e9aefe4" -> (known after apply) # forces replacement
        volume_id    = "vol-097031d6d2ef2cc22"
    }

  # aws_volume_attachment.data-volume-attachment[15] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdg"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[16] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdh"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[17] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdi"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[18] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdj"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = "i-08a576eac17cbc00b"
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[19] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdk"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = "i-0b9ba0db464e8f0cb"
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[20] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdg"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = "i-061c8a0463e9aefe4"
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[21] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdh"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[22] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdi"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[23] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdj"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[24] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdk"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = "i-08a576eac17cbc00b"
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[25] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdg"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = "i-0b9ba0db464e8f0cb"
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[26] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdh"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = "i-061c8a0463e9aefe4"
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[27] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdi"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[28] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdj"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.data-volume-attachment[29] will be created
  + resource "aws_volume_attachment" "data-volume-attachment" {
      + device_name  = "/dev/sdk"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.varopt-dnode-volume-attachment[3] will be created
  + resource "aws_volume_attachment" "varopt-dnode-volume-attachment" {
      + device_name  = "/dev/sdf"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + skip_destroy = true
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.varopt-dnode-volume-attachment[4] will be created
  + resource "aws_volume_attachment" "varopt-dnode-volume-attachment" {
      + device_name  = "/dev/sdf"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + skip_destroy = true
      + volume_id    = (known after apply)
    }

  # aws_volume_attachment.varopt-dnode-volume-attachment[5] will be created
  + resource "aws_volume_attachment" "varopt-dnode-volume-attachment" {
      + device_name  = "/dev/sdf"
      + force_detach = true
      + id           = (known after apply)
      + instance_id  = (known after apply)
      + skip_destroy = true
      + volume_id    = (known after apply)
    }

Plan: 45 to add, 27 to change, 6 to destroy.

The problem is that you’re relying on the “wrap-around” behaviour of the element function for the instance_id. For some values of var.dnodes that might happen to work, but I can imagine all sorts of cases where it gives strange results.

resource "aws_volume_attachment" "data-volume-attachment" {
  count        = "${var.dnodes * 5}"
  device_name  = "${element(var.data_volume_device_list, count.index)}"
  instance_id  = "${element(aws_instance.dnode.*.id, count.index)}"
  volume_id    = "${element(aws_ebs_volume.data-disk.*.id, count.index)}"
  force_detach = true
}

You only need element for the device_name, where its wrap-around behaviour provides exactly the functionality we’re interested in.

resource "aws_volume_attachment" "data-volume-attachment" {
  count        = "${var.dnodes * 5}"
  device_name  = "${element(var.data_volume_device_list, count.index)}"
  instance_id  = aws_instance.dnode[count.index % var.dnodes].id
  volume_id    = aws_ebs_volume.data-disk[count.index].id
  force_detach = true
}

This is sort of an inner-loop-outer-loop construct where the instances are the outer loop and the device_list is the inner loop.
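
If you want the instances to literally be the outer loop, so that each instance gets the whole device list exactly once no matter how the two counts line up, the index arithmetic would look something like this (an untested sketch, using the same resource names as above):

resource "aws_volume_attachment" "data-volume-attachment" {
  count = var.dnodes * length(var.data_volume_device_list)

  # Outer loop: move on to the next instance only after the device list is exhausted.
  instance_id = aws_instance.dnode[floor(count.index / length(var.data_volume_device_list))].id

  # Inner loop: cycle through the device list for each instance.
  device_name = element(var.data_volume_device_list, count.index % length(var.data_volume_device_list))

  volume_id    = aws_ebs_volume.data-disk[count.index].id
  force_detach = true
}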

Note that I don’t have an AWS provider configured right now so I can’t actually verify the syntax, but something like this is how I would go about it - if the dnodes are ephemeral slaves, otherwise I’d have a list with their names and use nested for_each instead.

Thanks for that bit of code! I was still learning Terraform when I built these environments, and what I did through trial and error just happened to work with that particular number of volumes and instances. I’m pushing for new resource creation where I can implement this, as we’re looking at adding more nodes and I can’t do that just by changing the machine-count variable.

Wanted to follow up on this! The documentation here describes the difference in behavior between count and for_each. For the use case documented above, @bentterp’s suggestion to use for_each would help!

Thanks Rosemary. I JUST received permission to rebuild our cluster using new Terraform code and will get to work on this right now as I have a week. I am going to try to use the for_each and see what happens.

Alright, I’m already stuck. I was able to figure out how to use for_each in order to create a single volume for a single instance:

var.tf

variable "data_disks" {
  description = "device list for EC2 mapping"
  default     = ["/dev/sdg", "/dev/sdh", "/dev/sdi", "/dev/sdj", "/dev/sdk"]
}

variable "dnodes" {
  default = ["1", "2", "3", "4", "5"]
}

main.tf

resource "aws_instance" "dnode" {
  for_each = toset(var.dnodes)

  ami                     = var.ami
  instance_type           = var.instance_type_dnode
  subnet_id               = var.subnet
  key_name                = var.key
  vpc_security_group_ids  = ["${var.security_group}"]
  iam_instance_profile    = "${var.company}-${lower(var.service)}"
  user_data               = file("./scripts/dnodes.sh")
  disable_api_termination = false

  tags = {
    Name             = "${var.service}_${var.environment}_Energy_Node_${each.value}"
    Service          = "${var.service}"
    Contact          = "${var.contact}"
    Environment      = "${title(lower(var.environment))}"
    Terraform        = "true"
    "c7n:DoNotPatch" = "True"
  }

  volume_tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${each.value}_ROOT"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

resource "aws_ebs_volume" "varopt-dnode" {
  for_each = toset(var.dnodes)

  availability_zone = var.availability_zone
  size              = 100
  type              = "gp2"

  tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${each.value}_VAROPT"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

resource "aws_volume_attachment" "varopt-dnode" {
  for_each = toset(var.dnodes)

  device_name  = "/dev/sdf"
  instance_id  = aws_instance.dnode[each.key].id
  volume_id    = aws_ebs_volume.varopt-dnode[each.key].id
  force_detach = true
}

However, I am trying to add 5 data volumes per instance and can’t seem to figure it out since I can’t use count anymore.

addition to main.tf:

resource "aws_ebs_volume" "data-disk" {
  for_each = xxxxxxxxxxxx

  availability_zone = var.availability_zone
  size              = 1000
  type              = "gp2"

  tags = {
    Name        = "${var.service}_${var.environment}_Energy_Node_${each.value}_VOL_${each.key}"
    Service     = "${var.service}"
    Contact     = "${var.contact}"
    Environment = "${title(lower(var.environment))}"
    Terraform   = "true"
  }
}

Do I need to construct a loop inside of the for_each?

Just an update. I think my answer can be found here: https://www.terraform.io/docs/configuration/functions/setproduct.html
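
In short, setproduct gives every combination of the two lists as a list of pairs, with the first list varying slowest. For example, with shortened lists:

output "pairs" {
  value = setproduct(["1", "2"], ["/dev/sdg", "/dev/sdh"])
  # => [["1", "/dev/sdg"], ["1", "/dev/sdh"], ["2", "/dev/sdg"], ["2", "/dev/sdh"]]
}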

Okay, I’ve been at it for a couple of hours now and I’ve learned that I need a map (or a set) to use for_each. That’s fine. So I had the idea to create a map covering all my required volumes and all my instances using two variables:

variable "data_disks" {
  description = "device list for EC2 mapping"
  default     = ["/dev/sdg", "/dev/sdh", "/dev/sdi", "/dev/sdj", "/dev/sdk"]
}

variable "dnodes" {
  default = ["1", "2", "3", "4", "5"]
}

So I built a range:

output "test" {
  value = range(length(setproduct(var.dnodes, var.data_disks)))
}

Outputs:

test = [
  0,
  1,
  2,
  3,
  4,
  5,
  6,
  7,
  8,
  9,
  10,
  11,
  12,
  13,
  14,
  15,
  16,
  17,
  18,
  19,
  20,
  21,
  22,
  23,
  24,
]

Then I built a repetitive list of device names

output "test-output" {
  value = [for pair in setproduct(var.dnodes, var.data_disks): pair[1]]
}

Apply complete! Resources: 0 added, 0 changed, 0 destroyed.

Outputs:

test-output = [
  "/dev/sdg",
  "/dev/sdh",
  "/dev/sdi",
  "/dev/sdj",
  "/dev/sdk",
  "/dev/sdg",
  "/dev/sdh",
  "/dev/sdi",
  "/dev/sdj",
  "/dev/sdk",
  "/dev/sdg",
  "/dev/sdh",
  "/dev/sdi",
  "/dev/sdj",
  "/dev/sdk",
  "/dev/sdg",
  "/dev/sdh",
  "/dev/sdi",
  "/dev/sdj",
  "/dev/sdk",
  "/dev/sdg",
  "/dev/sdh",
  "/dev/sdi",
  "/dev/sdj",
  "/dev/sdk",
]

However, when I try to zipmap them, the map gets truncated:

output "test-output" {
  value = zipmap([for pair in setproduct(var.dnodes, var.data_disks): pair[1]], range(length(setproduct(var.dnodes, var.data_disks))))
}

Apply complete! Resources: 0 added, 0 changed, 0 destroyed.

Outputs:

test-output = {
  "/dev/sdg" = 20
  "/dev/sdh" = 21
  "/dev/sdi" = 22
  "/dev/sdj" = 23
  "/dev/sdk" = 24
}

What gives? Am I going about this all wrong?

Hi @wblakecannon,

It looks like you’re on the right track. The problem in your most recent comment is that you’re using setproduct but then discarding the value coming from var.dnodes, and so the resulting list contains duplicates that get lost when you try to use them as map keys. (Each element of a map must have a unique key.)
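
To illustrate with a cut-down version of your lists: the device names repeat, so zipmap keeps only the last value it sees for each key:

output "collapsed" {
  value = zipmap(["/dev/sdg", "/dev/sdh", "/dev/sdg", "/dev/sdh"], [0, 1, 2, 3])
  # => { "/dev/sdg" = 2, "/dev/sdh" = 3 }
}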

For this to work, you need to make sure to include all of the values that make the result unique.

I’ve tried to write a complete-ish example here just to show how all the pieces fit together, but you’ll see that most of this is very similar to what you were already doing and the for_each for the volumes and volume attachments is the main difference:

variable "dnodes" {
  type = set(string)
}

variable "data_disks" {
  type = set(string)
}

locals {
  # This contains one element for each pair of node and disk,
  # with each pair represented as an object. Both of the
  # input values are included in the map keys to make them
  # unique.
  node_disks = { for pair in setproduct(var.dnodes, var.data_disks) : "${pair[0]}:${pair[1]}" => {
    node_name     = pair[0]
    disk_dev_path = pair[1]
  } }
}

resource "aws_instance" "dnode" {
  for_each = var.dnodes

  # (...all of the instance arguments you have in your example...)
}

resource "aws_ebs_volume" "data_disk" {
  for_each = local.node_disks

  availability_zone = var.availability_zone
  size              = 1000
  type              = "gp2"

  tags = {
    Name = "${var.service}_${var.environment}_Energy_Node_${each.value.node_name}_VOL_${each.value.disk_dev_path}"
    # (...and your other tags...)
  }
}

resource "aws_volume_attachment" "dnode_data_disk" {
  for_each = local.node_disks

  device_name  = each.value.disk_dev_path
  instance_id  = aws_instance.dnode[each.value.node_name].id
  volume_id    = aws_ebs_volume.data_disk[each.key].id
  force_detach = true
}

With an example of this size I’m sure I made some small mistakes somewhere, but hopefully that illustrates the general pattern. The composite keys I used for the volumes and their attachments here would lead to instance addresses like these, given the values for var.dnodes and var.data_disks you showed in your examples:

  • aws_ebs_volume.data_disk["1:/dev/sdg"]
  • aws_volume_attachment.dnode_data_disk["1:/dev/sdg"]
  • aws_ebs_volume.data_disk["1:/dev/sdh"]
  • aws_volume_attachment.dnode_data_disk["1:/dev/sdh"]
  • aws_ebs_volume.data_disk["2:/dev/sdg"]
  • aws_volume_attachment.dnode_data_disk["2:/dev/sdg"]
  • etc, for all combinations of dnode name and device path

It looks like these “dnode” instances are fungible cattle rather than pets, and so when you reduce the number of them it shouldn’t matter which ones get destroyed, as with count. If that’s true, you could restore your original module interface of having just an integer count of dnodes with the following modification:

variable "dnodes_count" {
  type = number
}

locals {
  dnodes = toset([for n in range(var.dnodes_count) : tostring(n)])
}

…and then use local.dnodes instead of var.dnodes in the remainder of the module. local.dnodes will be a set of consecutive integers (as strings) starting at zero, with the length chosen by var.dnodes_count.
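
For example, with var.dnodes_count = 3 the expression evaluates like this:

output "dnodes_example" {
  value = toset([for n in range(3) : tostring(n)])
  # => a set containing the strings "0", "1", and "2"
}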

If you were then to decrease var.dnodes_count from 2 to 1, Terraform would plan to destroy all of the following addresses:

  • aws_instance.dnode["1"]
  • aws_ebs_volume.data_disk["1:/dev/sdg"]
  • aws_ebs_volume.data_disk["1:/dev/sdh"]
  • aws_ebs_volume.data_disk["1:/dev/sdi"]
  • aws_ebs_volume.data_disk["1:/dev/sdj"]
  • aws_ebs_volume.data_disk["1:/dev/sdk"]
  • aws_volume_attachment.dnode_data_disk["1:/dev/sdg"]
  • aws_volume_attachment.dnode_data_disk["1:/dev/sdh"]
  • aws_volume_attachment.dnode_data_disk["1:/dev/sdi"]
  • aws_volume_attachment.dnode_data_disk["1:/dev/sdj"]
  • aws_volume_attachment.dnode_data_disk["1:/dev/sdk"]

It should leave aws_instance.dnode["0"] and all of its associated volumes untouched, as long as nothing else in their configurations changed.

Thanks Martin! There’s just one tiny mistake but I was able to figure it out in half a second.

I’ve spent the past few hours updating the rest of my code and this for_each is awesome. I was also able to use a local with the same method and create single resource blocks for target groups, listeners, and target group attachments. I seem to remember not being able to use count for those resources, so I had an individual resource block for every port we need; it was pretty ugly.

I’ve posted to my company MS Teams Terraform group about the new for_each to let everyone know how well it works. There’s still a lot of people using CloudFormation but there’s a bunch of us who use Terraform.

I haven’t used locals before. Is it good practice to keep them in var.tf or should I make a locals.tf?

I usually recommend declaring local values close to where they are used, so that it’s easy for the reader to keep track of what the values are. Often a local value is only used in a few different places in a single file, so putting the corresponding locals block in that same file is helpful.

For local values that are used throughout the module, such as those which are transforming input variables into a different/normalized form that will be used instead of the variable, I usually put those in variables.tf adjacent to the variable they derive from, so that a future maintainer looking at the variable declaration can easily see that they ought to be using the corresponding local value instead.

The general form of that advice is to define the local values in whatever location will make them easiest to find for a future maintainer, but as with many things that depends a lot on the context; I consider the above to be rules of thumb or good defaults, but I’ve encountered less common situations where other approaches felt better.
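
As a hypothetical illustration of that second case, the derived value can sit right next to the variable it normalizes, for example in variables.tf:

variable "environment" {
  type = string
}

locals {
  # Use local.environment throughout the module instead of var.environment,
  # so the normalization happens in exactly one place.
  environment = title(lower(var.environment))
}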

Hi @apparentlymart

I am able to create one or more EC2 instances by providing one subnet, plus EBS volumes in one AZ, and I can attach the volumes to the instances. But how can I create multiple EC2 instances across multiple subnets, along with EBS volumes in multiple AZs, using the approach explained above?

It would be great if you could share the details.

Thanks

This is really super helpful. Thank you for this amazing explanation.

I’m in the following situation, where I want to be able to include the size of the volume in the map.

For example:

"1:/dev/sdg" = {
  "disk_dev_path" = "/dev/sdg"
  "node_name"     = "1"
  "size"          = "10"
}


locals {
  dnodes = toset([for n in range(var.instance_count) : tostring(n)])
}

variable "data_disks" {
  description = "device list for EC2 mapping"
  default     = ["/dev/sdg", "/dev/sdh"]
}

variable "disk_size" {
  description = "device list for EC2 mapping"
  default     = ["10", "30"]
}

locals {
  # This contains one element for each pair of node and disk,
  # with each pair represented as an object. Both of the
  # input values are included in the map keys to make them
  # unique.
  node_disks = { for pair in setproduct(local.dnodes, var.data_disks) : "${pair[0]}:${pair[1]}" => {
    node_name     = pair[0]
    disk_dev_path = pair[1]
  } }
}

Hi @apparentlymart

Could you please help with this? Below I’ve elaborated on what I’m trying to achieve.

How can I have both the aws_ebs_volume size and the aws_volume_attachment device_name in the same node_disks map, so that I can reference them in the resources?

Example:

I could have a list of data_disks as well as a corresponding disk_size list as variable inputs, as shown below.

variable "dnodes" {
  default = ["1", "2", "3"]
}

variable "data_disks" {
  description = "device list for EC2 mapping"
  default     = ["/dev/xvdb", "/dev/xvdc" ]
}

variable "disk_size" {
  description = "device list for EC2 mapping"
  default     = ["10", "30"]
}

I want to be able to have something like this.

node_disks = {
  "1:/dev/xvdb" = {
    "disk_dev_path" = "/dev/xvdb"
    "node_name" = "1"
    "disk_size" = "10"
  }
  "1:/dev/xvdc" = {
    "disk_dev_path" = "/dev/xvdc"
    "node_name" = "1"
    "disk_size" = "30"
  }
  "2:/dev/xvdb" = {
    "disk_dev_path" = "/dev/xvdb"
    "node_name" = "2"
    "disk_size" = "10"
  }
  "2:/dev/xvdc" = {
    "disk_dev_path" = "/dev/xvdc"
    "node_name" = "2"
    "disk_size" = "30"
  }
  "3:/dev/xvdb" = {
    "disk_dev_path" = "/dev/xvdb"
    "node_name" = "3"
    "disk_size" = "10"
  }
  "3:/dev/xvdc" = {
    "disk_dev_path" = "/dev/xvdc"
    "node_name" = "3"
    "disk_size" = "30"
  }
}

I want to be able to use it here as shown below:

resource "aws_ebs_volume" "ebs_volume" {
  for_each = local.node_disks
    availability_zone = "us-west-1a"
    size              =  each.value.disk_size

I tried to build another map for size as shown below,

locals {
  ebs_size = { for pair in setproduct(local.dnodes, var.disk_size) : "${pair[0]}:${pair[1]}" => {
    node_name     = pair[0]
    data_dev_size = pair[1]
  } }
}

However, when I run a plan with the resources below, it throws this error:

resource "aws_ebs_volume" "data_disk" {
  for_each          = local.ebs_size
  availability_zone = "us-west-1a"
  size              = each.value.data_dev_size
  type              = "gp2"
}

resource "aws_volume_attachment" "dnode_data_disk" {
  for_each = local.node_disks

  device_name  = each.value.disk_dev_path
  instance_id  = aws_instance.this[each.value.node_name].id
  volume_id    = aws_ebs_volume.data_disk[each.key].id
  force_detach = true
}
Error: Invalid index

  on ../../main.tf line 87, in resource "aws_volume_attachment" "dnode_data_disk":
  87:   volume_id    = aws_ebs_volume.data_disk[each.key].id
    |----------------
    | aws_ebs_volume.data_disk is object with 4 attributes
    | each.key is "1:/dev/xvdb"

It’s really quite unclear what is actual code that you’re using and what is pseudo-code for how you would like things to be structured. Could you please just include the .tf file?

Hi @nhw76

Thanks for looking into this. Please find the .tf file.


variable "dnodes" {
  default = ["1", "2", "3"]
}
variable "data_disks" {
  description = "device list for EC2 mapping"
  default     = ["/dev/xvdb", "/dev/xvdc"]
}

variable "disk_size" {
  description = "device list for EC2 mapping"
  default     = ["10", "30"]
}


locals {
  # This contains one element for each pair of node and disk,
  # with each pair represented as an object. Both of the
  # input values are included in the map keys to make them
  # unique.
  node_disks = { for pair in setproduct(var.dnodes, var.data_disks) : "${pair[0]}:${pair[1]}" => {
    node_name     = pair[0]
    disk_dev_path = pair[1]
  } }

  ebs_size = { for pair in setproduct(var.dnodes, var.disk_size) : "${pair[0]}:${pair[1]}" => {
    node_name  = pair[0]
    data_dev_size = pair[1]
  } }
}

resource "aws_instance" "this" {
  for_each      = local.dnodes
  ami           = var.ami
  instance_type = var.instance_type
  associate_public_ip_address = var.associate_public_ip_address
  subnet_id                   = var.subnet_id
  vpc_security_group_ids      = var.vpc_security_group_ids
  key_name                    = var.key_name
  iam_instance_profile        = var.iam_instance_profile
  
  volume_tags = {
    Name = "volumetag"
  }
  tags = {"Name" = "ec2tags"}
}

resource "aws_ebs_volume" "data_disk" {
  for_each          = local.ebs_size
  availability_zone = "us-west-1a"
  size              = each.value.data_dev_size
  type              = "gp2"
  tags = {"Name" = "ebsvol"}
}

resource "aws_volume_attachment" "dnode_data_disk" {
  for_each = local.node_disks

  device_name  = each.value.disk_dev_path
  instance_id  = aws_instance.this[each.value.node_name].id
  volume_id    = aws_ebs_volume.data_disk[each.key].id // Here is the problem, because the each.key is different for node_disks & ebs_size.
  force_detach = true
}

Below is the pseudo-code for a solution which I think may work.

What I’m thinking is that, instead of creating two maps (node_disks and ebs_size), which produce two different sets of keys, I want to create a single map that includes the corresponding disk_size for each disk_dev_path. I need your help to create a single map as shown below.

/dev/xvdb should have 10GB
/dev/xvdc should have 30GB

node_disks = {
  "1:/dev/xvdb" = {
    "disk_dev_path" = "/dev/xvdb"
    "node_name" = "1"
    "disk_size" = "10"
  }
  "1:/dev/xvdc" = {
    "disk_dev_path" = "/dev/xvdc"
    "node_name" = "1"
    "disk_size" = "30"
  }
  "2:/dev/xvdb" = {
    "disk_dev_path" = "/dev/xvdb"
    "node_name" = "2"
    "disk_size" = "10"
  }
  "2:/dev/xvdc" = {
    "disk_dev_path" = "/dev/xvdc"
    "node_name" = "2"
    "disk_size" = "30"
  }
  "3:/dev/xvdb" = {
    "disk_dev_path" = "/dev/xvdb"
    "node_name" = "3"
    "disk_size" = "10"
  }
  "3:/dev/xvdc" = {
    "disk_dev_path" = "/dev/xvdc"
    "node_name" = "3"
    "disk_size" = "30"
  }
}

Hope this explains it; please let me know if I need to elaborate more. Thanks in advance for the help.

There are multiple references to local.dnodes which doesn’t appear to be defined here; should we be reading that as var.dnodes or is there another local defined elsewhere?

Hi @nhw76

Yes, my bad, it should be var.dnodes; I have corrected it above as well.

Hi,

Just wanted to check whether you were able to find a solution for this scenario.

I’m wondering if anyone has been able to find a solution here?

It’s a bit strange: @bentterp’s code works only with 2 instances and an odd number of volumes. With 2 instances and an even number of volumes it fails with the error below:

Error attaching volume (vol-xxx) to instance (i-xxx), message: “Invalid value ‘/dev/sdz’ for unixDevice. Attachment point /dev/sdz is already in use”, code: “InvalidParameterValue”