Unable to add a new access entry to an existing EKS cluster

I am trying to add a new user to the access entries of the EKS module. Here is my configuration:

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 4.0"

  name = local.name
  cidr = local.vpc_cidr

  azs             = local.azs
  private_subnets = local.private_subnets_cidrs
  public_subnets  = local.public_subnets_cidrs

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true
  create_igw           = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"             = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"    = 1
  }

  map_public_ip_on_launch = true

  tags = local.tags
}

module "eks" {
  source  = "terraform-aws-modules/eks/aws"

  cluster_name    = local.name
  cluster_version = "1.28"
  cluster_endpoint_public_access  = true
  cluster_endpoint_private_access = false

  vpc_id     = module.vpc.vpc_id
  subnet_ids = concat(module.vpc.private_subnets, module.vpc.public_subnets)
  cluster_security_group_additional_rules = {
    egress_nodes_ephemeral_ports_tcp = {
      description                = "To node 1025-65535"
      protocol                   = "tcp"
      from_port                  = 1025
      to_port                    = 65535
      type                       = "egress"
      source_node_security_group = true
    }
  }
  node_security_group_additional_rules = {
    ingress_self_all = {
      description = "Node to node all ports/protocols"
      protocol    = "-1"
      from_port   = 0
      to_port     = 0
      type        = "ingress"
      self        = true
    }
    egress_all = {
      description      = "Node all egress"
      protocol         = "-1"
      from_port        = 0
      to_port          = 0
      type             = "egress"
      cidr_blocks      = ["0.0.0.0/0"]
    }
  }
  cluster_addons = {
    coredns = {
      most_recent = true
    }
    kube-proxy = {
      most_recent = true
    }
    vpc-cni = {
      most_recent = true
    }
  }

  eks_managed_node_group_defaults = {
    ami_type       = "AL2_x86_64"
    instance_types = ["m5.large"]
    attach_cluster_primary_security_group = false
  }

  eks_managed_node_groups = {
    mongodb = {
      min_size       = 2
      max_size       = 4
      desired_size   = 2
      instance_types = ["r6a.xlarge", "r5a.xlarge"]
      capacity_type  = "SPOT"
      labels         = {
        Environment = "test"
        Deployment  = "mongodb"
        lifecycle   = "Ec2Spot"
        spot        = "true"
        node-class  = "worker-node"
        role        = "managed-nodes"
      }
      #      additional_security_group_ids = [aws_security_group.public.id]
      cluster_autoscaler_settings = {
        scale_down_utilization_threshold = 0.9
        scan_interval                    = "10s"
        policies                         = [
          {
            name  = "custom-policy"
            rules = [
              {
                action = "add"
                type   = "PodsViolatingConstraints"
              },
            ]
          },
        ]
      }
      taints = {
        dedicated = {
          key    = "spotInstance"
          value  = "true"
          effect = "PREFER_NO_SCHEDULE"
        }
      }
      tags = {
        Deployment = "mongodb"
      }
    }
    perconamongodb = {
      min_size       = 1
      max_size       = 4
      desired_size   = 1
      instance_types = ["t3a.2xlarge"]
      capacity_type  = "SPOT"
      labels         = {
        Environment = "test"
        Deployment  = "perconamongodb"
        lifecycle   = "Ec2spot"
        spot        = "true"
        node-class  = "worker-node"
        role        = "managed-nodes"
      }
      #      additional_security_group_ids = [aws_security_group.public.id]
      cluster_autoscaler_settings = {
        scale_down_utilization_threshold = 0.9
        scan_interval                    = "10s"
        policies                         = [
          {
            name  = "custom-policy"
            rules = [
              {
                action = "add"
                type   = "PodsViolatingConstraints"
              },
            ]
          },
        ]
      }
      taints = {
        dedicated = {
          key    = "perconaspotInstance"
          value  = "true"
          effect = "PREFER_NO_SCHEDULE"
        }
      }
      tags = {
        Deployment = "perconamongodb"
      }
    }
    copilot = {
      min_size       = 1
      max_size       = 3
      desired_size   = 1
      instance_types = ["t3a.large", "r5a.xlarge", "r6a.xlarge"]
      capacity_type  = "SPOT"
      labels         = {
        Environment = "test"
        Deployment  = "copilot"
        lifecycle   = "Ec2spot"
        spot        = "true"
        node-class  = "worker-node"
        role        = "managed-nodes"
      }
      #      additional_security_group_ids = [aws_security_group.public.id]
      cluster_autoscaler_settings = {
        scale_down_utilization_threshold = 0.9
        scan_interval                    = "10s"
        policies                         = [
          {
            name  = "custom-policy"
            rules = [
              {
                action = "add"
                type   = "PodsViolatingConstraints"
              },
            ]
          },
        ]
      }
      taints = {
        dedicated = {
          key    = "copilotspotInstance"
          value  = "true"
          effect = "PREFER_NO_SCHEDULE"
        }
      }
      tags = {
        Deployment = "copilot"
      }
    }
  }

  authentication_mode = "API_AND_CONFIG_MAP"
  enable_cluster_creator_admin_permissions = true
  access_entries = {
    devansh = {
      kubernetes_groups = []
      principal_arn     = "arn:aws:iam::XXXXXXXXXX:user/abc"

      policy_associations = {
        admin = {
          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
          access_scope = {
            namespaces = []
            type       = "cluster"
          }
        }
      }
    }
    prathmesh = {
      kubernetes_groups = []
      principal_arn     = "arn:aws:iam::XXXXXXXXX:user/xyz"

      policy_associations = {
        admin = {
          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
          access_scope = {
            namespaces = []
            type       = "cluster"
          }
        }
      }
    }
  }
  enable_irsa = true

  tags = local.tags
}

When I try to add the new user with this config, I get this error:

module.eks.aws_eks_access_entry.this["cluster_creator"]: Creating...
╷
│ Error: creating EKS Access Entry (mongodb-cluster:arn:aws:iam::645193536862:user/vatsal.sharma): operation error EKS: CreateAccessEntry, https response error StatusCode: 409, RequestID: 7c376cd9-1baa-48f2-9748-09f03c8244bb, ResourceInUseException: The specified access entry resource is already in use on this cluster.
│ 
│   with module.eks.aws_eks_access_entry.this["cluster_creator"],
│   on .terraform/modules/eks/main.tf line 185, in resource "aws_eks_access_entry" "this":
│  185: resource "aws_eks_access_entry" "this" {
│ 
╵

I already have this user added to my cluster and want to add user xyz, but I get this error every time I try.

Hey, it's happening because you have enable_cluster_creator_admin_permissions = true. Notice that the failing resource is module.eks.aws_eks_access_entry.this["cluster_creator"], not one of your new users: the module is trying to create an access entry for the IAM identity that created the cluster, but that entry already exists on the cluster, so EKS rejects the CreateAccessEntry call with a 409 ResourceInUseException.

From the docs:

When enabling authentication_mode = "API_AND_CONFIG_MAP" , EKS will automatically create an access entry for the IAM role(s) used by managed nodegroup(s) and Fargate profile(s). There are no additional actions required by users. For self-managed nodegroups and the Karpenter sub-module, this project automatically adds the access entry on behalf of users so there are no additional actions required by users.

https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest#cluster-access-entry
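
One way past the 409, sketched against your existing inputs (assuming you are fine leaving the cluster creator's entry as it already exists on the cluster, outside of Terraform), is to stop the module from trying to recreate it and keep managing only your explicit access_entries:

module "eks" {
  # ...all other arguments stay as they are...

  authentication_mode = "API_AND_CONFIG_MAP"

  # The access entry for the IAM identity that created the cluster already
  # exists on the cluster, so don't ask the module to create it again.
  enable_cluster_creator_admin_permissions = false

  access_entries = {
    # devansh and prathmesh entries unchanged
  }
}

Alternatively, if you want Terraform to own that existing cluster_creator entry instead of skipping it, you can adopt it into state with an import block (Terraform 1.5+). As far as I know the import ID for aws_eks_access_entry is cluster_name:principal_arn, so it would look roughly like this (check the IDs against your own cluster before applying):

import {
  to = module.eks.aws_eks_access_entry.this["cluster_creator"]
  id = "mongodb-cluster:arn:aws:iam::645193536862:user/vatsal.sharma"
}

After either change, the cluster_creator entry stops conflicting with what is already on the cluster, and the devansh and prathmesh entries can be created normally.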