Terraform hangs when creating aws_eks_node_group for EKS cluster

I’m creating a simple 2 node EKS cluster using Terraform. It hangs in a loop for 20 minutes while trying to create the aws_eks_node_group. It finally fails with the error below.

My .tf file is below the error message. I cobbled this together from multiple sources, but I must have missed something. Can anyone spot the problem with my .tf file?

aws_eks_node_group.cluster_eks_node_group: Still creating... [23m30s elapsed]
aws_eks_node_group.cluster_eks_node_group: Still creating... [23m40s elapsed]
╷
╷
│ Error: waiting for EKS Node Group (dean-eks-test:workers) to create: unexpected state 'CREATE_FAILED', wanted target 'ACTIVE'. last error: 1 error occurred:
│       * i-05c6b71e578f30a7b, i-06fe96881274c1a8e, i-08089f6a26ba4af49, i-0e02d8a45086f02e7: NodeCreationFailure: Instances failed to join the kubernetes cluster
│ 
│ 
│ 
│   with aws_eks_node_group.cluster_eks_node_group,
│   on eks.tf line 108, in resource "aws_eks_node_group" "cluster_eks_node_group":
│  108: resource "aws_eks_node_group" "cluster_eks_node_group" {
│ 
╵
╷
│ Error: Failed to create deployment: Post "http://localhost/apis/apps/v1/namespaces/default/deployments": dial tcp 127.0.0.1:80: connect: connection refused
│ 
│   with kubernetes_deployment.nginx_deployment,
│   on eks.tf line 122, in resource "kubernetes_deployment" "nginx_deployment":
│  122: resource "kubernetes_deployment" "nginx_deployment" {
│

Here’s my .tf file:

terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
    }
  }
}


resource "aws_iam_role" "cluster_iam_role" {
  name = "eks-cluster-role"
  path = "/"

  assume_role_policy = <<EOF
{
 "Version": "2012-10-17",
 "Statement": [
  {
   "Effect": "Allow",
   "Principal": {
    "Service": "eks.amazonaws.com"
   },
   "Action": "sts:AssumeRole"
  }
 ]
}
EOF

}

resource "aws_iam_role_policy_attachment" "AmazonEKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role    = aws_iam_role.cluster_iam_role.name
}
resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly-EKS" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role    = aws_iam_role.cluster_iam_role.name
}

resource "aws_iam_role" "workernodes_iam_role" {
  name = "eks-node-role"

  assume_role_policy = jsonencode({
    Statement = [{
      Action = "sts:AssumeRole"
      Effect = "Allow"
      Principal = {
        Service = "ec2.amazonaws.com"
      }
    }]
    Version = "2012-10-17"
  })
}

resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role    = aws_iam_role.workernodes_iam_role.name
}

resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role    = aws_iam_role.workernodes_iam_role.name
}

resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilderECRContainerBuilds" {
  policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds"
  role    = aws_iam_role.workernodes_iam_role.name
}

resource "aws_iam_role_policy_attachment" "AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role    = aws_iam_role.workernodes_iam_role.name
}

# VPC
resource "aws_vpc" "cluster_vpc" {
  cidr_block = "10.0.0.0/16"
}

# Subnets
resource "aws_subnet" "cluster_subnets" {
  count = 2

  vpc_id     = aws_vpc.cluster_vpc.id
  cidr_block = "10.0.${count.index}.0/24"

  tags = {
    "Name" = "Private ${count.index}"
  }
}

# Create EKS cluster
resource "aws_eks_cluster" "eks_cluster" {
  name = "dean-eks-test"

  role_arn = aws_iam_role.cluster_iam_role.arn

  vpc_config {
    subnet_ids = aws_subnet.cluster_subnets[*].id
  }
}

# Worker Nodes
resource "aws_eks_node_group" "cluster_eks_node_group" {
  cluster_name    = aws_eks_cluster.eks_cluster.name
  node_group_name = "workers"
  node_role_arn   = aws_iam_role.workernodes_iam_role.arn
  subnet_ids      = aws_subnet.cluster_subnets[*].id

  scaling_config {
    desired_size = 4
    max_size     = 4
    min_size     = 4
  }
}

# Kubernetes Deployment
resource "kubernetes_deployment" "nginx_deployment" {
  metadata {
    name = "nginx"
    labels = {
      app = "nginx"
    }
  }

  spec {
    replicas = 2

    selector {
      match_labels = {
        app = "nginx"
      }
    }

    template {
      metadata {
        labels = {
          app = "nginx"
        }
      }

      spec {
        container {
          image = "nginx:latest"
          name  = "nginx"

          port {
            container_port = 80
          }
        }
      }
    }
  }
}

# Kubernetes Load Balancer Service
resource "kubernetes_service" "nginx_service" {
  metadata {
    name = "nginx-lb"
  }
  spec {
    selector = {
      app = kubernetes_deployment.nginx_deployment.metadata[0].labels.app
    }
    port {
      port        = 80
      target_port = 80
    }

    type = "LoadBalancer"
  }
}