I am trying to use custom IAM roles and policies while launching my EKS cluster. I get the error below when I run my plan:
Error: Invalid template interpolation value

  on .terraform/modules/eks/aws_auth.tf line 24, in locals:
  24:       worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
  25:         coalescelist(
  26:           aws_iam_instance_profile.workers.*.role,
  27:           data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
  28:           [""]
  29:         ),
  30:         index,
  31:       )}"
    |----------------
    | aws_iam_instance_profile.workers is empty tuple
    | data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile is tuple with 6 elements

The expression result is null. Cannot include a null value in a string template.
Below is the code snippet of my EKS module:
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "14.0.0"
cluster_name = local.cluster_name
cluster_version = var.kubernetes_version
cluster_endpoint_private_access = true
cluster_endpoint_private_access_cidrs = [var.vpc_cidr]
cluster_endpoint_public_access = false
write_kubeconfig = false
subnets = module.vpc.private_subnets
vpc_id = module.vpc.vpc_id
tags = var.tags
manage_cluster_iam_resources = false
manage_worker_iam_resources = false
cluster_iam_role_name = "my-poc-eks"
workers_role_name = "my-poc-eks-ec2"
workers_group_defaults = {
# tags = var.tags
additional_security_group_ids = [local.security_group_id, "sg-989dasuoijasd"]
key_name = "my-key"
ami_id = "ami-xyz"
iam_instance_profile_name = "my-poc-eks-ec2"
}
worker_groups = local.worker_groups
}
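If it helps, my reading of the error is that with manage_worker_iam_resources = false the module builds the worker role ARN from the role name of the instance profile named in iam_instance_profile_name, and one of those lookups seems to come back without a role, though I am not sure why. The role and instance profile that "my-poc-eks-ec2" refers to are created outside the module; a minimal sketch of that setup (resource names and attached policies here are illustrative, not my exact code) looks like this:

# Worker role that both workers_role_name and iam_instance_profile_name refer to
resource "aws_iam_role" "workers" {
  name = "my-poc-eks-ec2"

  # Allow EC2 instances to assume the role
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Action    = "sts:AssumeRole"
      Principal = { Service = "ec2.amazonaws.com" }
    }]
  })
}

# Standard managed policies that EKS worker nodes need
resource "aws_iam_role_policy_attachment" "workers" {
  for_each = toset([
    "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
    "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
    "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
  ])

  role       = aws_iam_role.workers.name
  policy_arn = each.value
}

# Instance profile that iam_instance_profile_name points at; the module's data
# lookup reads its role name, so the role attachment here is what matters
resource "aws_iam_instance_profile" "workers" {
  name = "my-poc-eks-ec2"
  role = aws_iam_role.workers.name
}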
locals {
  default_node_pool = [
    {
      name                                 = "default"
      instance_type                        = var.default_nodepool_vm_type
      root_volume_size                     = var.default_nodepool_os_disk_size
      root_volume_type                     = var.default_nodepool_os_disk_type
      root_iops                            = var.default_nodepool_os_disk_iops
      asg_desired_capacity                 = var.default_nodepool_node_count
      asg_min_size                         = var.default_nodepool_min_nodes
      asg_max_size                         = var.default_nodepool_max_nodes
      kubelet_extra_args                   = "--node-labels=${replace(replace(jsonencode(var.default_nodepool_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", var.default_nodepool_taints)}"
      additional_userdata                  = (var.default_nodepool_custom_data != "" ? file(var.default_nodepool_custom_data) : "")
      metadata_http_endpoint               = var.default_nodepool_metadata_http_endpoint
      metadata_http_tokens                 = var.default_nodepool_metadata_http_tokens
      metadata_http_put_response_hop_limit = var.default_nodepool_metadata_http_put_response_hop_limit
    }
  ]

  user_node_pool = [
    for np_key, np_value in var.node_pools :
    {
      name                                 = np_key
      instance_type                        = np_value.vm_type
      root_volume_size                     = np_value.os_disk_size
      root_volume_type                     = np_value.os_disk_type
      root_iops                            = np_value.os_disk_iops
      asg_desired_capacity                 = np_value.min_nodes
      asg_min_size                         = np_value.min_nodes
      asg_max_size                         = np_value.max_nodes
      kubelet_extra_args                   = "--node-labels=${replace(replace(jsonencode(np_value.node_labels), "/[\"\\{\\}]/", ""), ":", "=")} --register-with-taints=${join(",", np_value.node_taints)}"
      additional_userdata                  = (np_value.custom_data != "" ? file(np_value.custom_data) : "")
      metadata_http_endpoint               = np_value.metadata_http_endpoint
      metadata_http_tokens                 = np_value.metadata_http_tokens
      metadata_http_put_response_hop_limit = np_value.metadata_http_put_response_hop_limit
    }
  ]
  # Merge the default_node_pool and the user-defined node pools into worker_groups
  worker_groups = concat(local.default_node_pool, local.user_node_pool)
}
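For reference, var.node_pools is a map of objects keyed by pool name. A sketch of its type, paraphrased from the fields used above (the exact definition may differ slightly), is:

variable "node_pools" {
  description = "User node pools, keyed by pool name"
  type = map(object({
    vm_type                              = string
    os_disk_size                         = number
    os_disk_type                         = string
    os_disk_iops                         = number
    min_nodes                            = number
    max_nodes                            = number
    node_labels                          = map(string)
    node_taints                          = list(string)
    custom_data                          = string
    metadata_http_endpoint               = string
    metadata_http_tokens                 = string
    metadata_http_put_response_hop_limit = number
  }))
  default = {}
}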
I upgraded to Terraform v1.0 and also upgraded the EKS module to 16.2. Below is my provider list:
- provider Terraform Registry v3.42.0
- provider Terraform Registry v2.2.0
- provider Terraform Registry v2.0.0
- provider Terraform Registry v2.0.2
- provider Terraform Registry v2.0.0
- provider Terraform Registry v3.0.0
- provider Terraform Registry v3.0.1
- provider Terraform Registry v2.2.0
- provider Terraform Registry v2.4.1
Any help would be great.
Thanks