Terraform v0.14.8
I am attempting to create multiple EC2 instances with Terraform, upload specific directories to each instance's /var/tmp folder, and then move those directories into an EFS mount on each instance. I am seeing some strange results when trying this with the code below: all the directories get successfully uploaded to each EC2 instance's /var/tmp folder, but when we get to the remote-exec stage and it attempts to move the files, they seem to be removed from the instance.
Strangely, the first instance's files get moved into the EFS volume correctly, the second instance only copies a few of the files, and the third instance doesn't work at all.
I am wondering if there is something wrong with my setup.
# One ECS container instance per node, each with its own generated SSH key
# pair and its node-specific payload uploaded to /var/tmp.
resource "aws_instance" "huski" {
  count = var.node_count

  ami                         = data.aws_ami.ecs.id
  instance_type               = "t2.micro"
  associate_public_ip_address = true
  iam_instance_profile        = aws_iam_instance_profile.huski_ec2.name
  key_name                    = "huski_${count.index}"
  subnet_id                   = var.vpc_public_subnets[0]
  vpc_security_group_ids      = [aws_security_group.huski.id]

  # Bring the replacement up before tearing the old instance down.
  lifecycle {
    create_before_destroy = true
    prevent_destroy       = false
  }

  # The per-instance key pair must exist before the instance references it.
  depends_on = [
    aws_key_pair.generated_key
  ]

  # Register with the ECS cluster, then run the per-node rendered script.
  user_data = join("\n",
    [
      "#!/bin/bash\necho ECS_CLUSTER=${var.ecs_cluster_name} > /etc/ecs/ecs.config",
      data.template_file.script[count.index].rendered
    ])

  # Upload this node's directory; it lands as /var/tmp/node_<index>.
  provisioner "file" {
    source      = "nodes/node_${count.index}"
    destination = "/var/tmp"

    connection {
      type        = "ssh"
      user        = "ec2-user"
      private_key = tls_private_key.huski[count.index].private_key_pem
      host        = self.public_ip
    }
  }

  tags = {
    Name = "${count.index}-huski-terraform"
  }
}
# Moves each node's uploaded payload from /var/tmp into the shared EFS mount.
resource "null_resource" "move_files" {
  count = var.node_count

  # Without triggers, this null_resource stays satisfied in state and never
  # re-runs when its instance is replaced (likely with create_before_destroy),
  # leaving replacement instances with their files stuck in /var/tmp.
  triggers = {
    instance_id = aws_instance.huski[count.index].id
  }

  provisioner "remote-exec" {
    inline = [
      # The EFS volume is presumably mounted by the user_data/cloud-init
      # script, which runs asynchronously; if mv executes before the mount is
      # live, the files land in the local /mnt/efs_node_data directory and are
      # then hidden when the EFS mount overlays it — matching the observed
      # "files disappear" behavior. Wait (up to 5 minutes) for the real mount.
      "timeout 300 bash -c 'until mountpoint -q /mnt/efs_node_data; do sleep 5; done'",
      "sudo mv -v /var/tmp/node_${count.index}/ /mnt/efs_node_data/",
    ]

    connection {
      type        = "ssh"
      user        = "ec2-user"
      private_key = tls_private_key.huski[count.index].private_key_pem
      host        = aws_instance.huski[count.index].public_ip
    }
  }

  # Instances (and their file provisioners) must finish before we move files.
  depends_on = [
    aws_instance.huski
  ]
}