I have a Terraform module that creates an AWS EKS cluster, and I am trying to upgrade the EKS version from 1.25 to 1.26. When I ran terraform plan it kept running for over an hour, so I stopped it forcibly. I have run it many times and the result is the same. During terraform plan it shows:
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
# module.eks_imported.rancher2_app_v2.cluster-autoscaler_v2[0] will be created
+ resource "rancher2_app_v2" "cluster-autoscaler_v2" {
+ annotations = (known after apply)
+ chart_name = "cluster-autoscaler"
+ chart_version = "9.29.0"
+ cleanup_on_fail = false
+ cluster_id = "c-cnxb9"
+ cluster_name = (known after apply)
+ disable_hooks = false
+ disable_open_api_validation = false
+ force_upgrade = false
+ id = (known after apply)
+ labels = (known after apply)
+ name = "cluster-autoscaler"
+ namespace = "kube-system"
+ project_id = "c-cnxb9:p-78px5"
+ repo_name = "autoscaler"
+ system_default_registry = (known after apply)
+ values = <<-EOT
    ---
    awsRegion: us-east-1
    fullnameOverride: "cluster-autoscaler"
    rbac:
      create: true
      serviceAccount:
        annotations:
          eks.amazonaws.com/role-arn: "arn:aws:iam::00000000000:role/k8s-XXXXXXXXXXXX-cluster1-cluster-autoscaler"
        name: "cluster-autoscaler"
    priorityClassName: system-cluster-critical
    replicaCount: 2
    image:
      tag: v1.26.6
    autoDiscovery:
      clusterName: "xxxxxxxxxxxxx-useast1-cluster1"
      enabled: true
    resources:
      limits:
        cpu: 400m
        memory: 1000Mi
      requests:
        cpu: 200m
        memory: 600Mi
    extraArgs:
      v: 4
      stderrthreshold: info
      logtostderr: true
      write-status-configmap: true
      leader-elect: true
      skip-nodes-with-local-storage: false
      expander: priority
      scale-down-enabled: true
      balance-similar-node-groups: true
      min-replica-count: 1
      scale-down-utilization-threshold: 0.65
      max-node-provision-time: 5m0s
      skip-nodes-with-system-pods: false
    serviceMonitor:
      enabled: true
      namespace: kube-system
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
                - key: app.kubernetes.io/instance
                  operator: In
                  values:
                    - cluster-autoscaler
            topologyKey: "kubernetes.io/hostname"
    expanderPriorities: |-
      20:
        - xxxxxxxxxxxx-useast1-cluster1-spot-.*
      10:
        - .*
EOT
+ wait = true
}
# module.eks_imported.rancher2_app_v2.metrics-server_bitnami[0] will be created
+ resource "rancher2_app_v2" "metrics-server_bitnami" {
+ annotations = (known after apply)
+ chart_name = "metrics-server"
+ chart_version = "5.11.9"
+ cleanup_on_fail = false
+ cluster_id = "c-cnxb9"
+ cluster_name = (known after apply)
+ disable_hooks = false
+ disable_open_api_validation = false
+ force_upgrade = false
+ id = (known after apply)
+ labels = (known after apply)
+ name = "metrics-server"
+ namespace = "kube-system"
+ project_id = "c-cnxb9:p-78px5"
+ repo_name = "bitnamipre2022"
+ system_default_registry = (known after apply)
+ values = <<-EOT
    extraArgs:
      kubelet-insecure-tls: true
      kubelet-preferred-address-types: InternalIP
    replicas: 2
    image:
      registry: docker.innowatts.net/cache
    apiService:
      create: true
    resources:
      limits:
        cpu: 50m
        memory: 128Mi
      requests:
        cpu: 10m
        memory: 64Mi
EOT
+ wait = true
}
Plan: 2 to add, 0 to change, 0 to destroy.
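For reference, the only change driving this plan is the EKS version bump in my module call; a simplified sketch is below (the module source, path, and variable names are illustrative, not my exact code):

module "eks_imported" {
  source = "./modules/eks"                             # placeholder path

  cluster_name    = "xxxxxxxxxxxxx-useast1-cluster1"
  cluster_version = "1.26"                             # bumped from "1.25"
  region          = "us-east-1"
}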
But when I try to apply the changes, it never completes and keeps running. When I checked after a previous run, both resources, "cluster-autoscaler" and "metrics-server", appear to have been deployed successfully in the EKS cluster, but this never shows up in the command-line logs.
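This is roughly how I confirmed after an earlier run that both apps did land in the cluster (kubectl is pointed at the EKS cluster; deployment names are assumed from fullnameOverride and the release name):

# the two workloads the plan says it will create
kubectl -n kube-system get deployment cluster-autoscaler
kubectl -n kube-system get deployment metrics-server
# pods, using the instance label that also appears in the anti-affinity rule above
kubectl -n kube-system get pods -l app.kubernetes.io/instance=cluster-autoscaler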