Hi,
I am puzzled by the following behaviour in Terraform. I keep getting this error when running plan:
Terraform v1.3.5
on darwin_amd64
module.gardener-shoot-aws-cilium-kubectl.kubectl_manifest.gardener_shoot: Refreshing state... [id=/apis/core.gardener.cloud/v1beta1/namespaces/garden-dmi-sbox/shoots/register]
module.gardener-shoot-aws-cilium-kubectl.data.kubernetes_secret.shootk8sconfig: Reading...
module.gardener-shoot-aws-cilium-kubectl.data.kubernetes_secret.shootk8sconfig: Read complete after 0s [id=garden-dmi-sbox/register.kubeconfig]
module.gardener-shoot-aws-cilium-kubectl.local_sensitive_file.shootk8sconfig: Refreshing state... [id=413fe591d2f3f1d28d8441c0e447a0441bf59958]
module.argo-vault-shoot-setup.vault_policy.vault_secrets_operator: Refreshing state... [id=secrets-operator-register]
module.argo-vault-shoot-setup.vault_mount.gardener_shoot_mount: Refreshing state... [id=shoots/aws/production/eu-central-1/register]
module.argo-vault-shoot-setup.vault_auth_backend.kubernetes: Refreshing state... [id=shoots/aws/production/eu-central-1/register]
module.argo-vault-shoot-setup.vault_generic_secret.gardener_shoot_secret: Refreshing state... [id=shoots/aws/production/eu-central-1/register/k8sconfig]
module.argo-vault-shoot-setup.kubernetes_namespace.ns_secrets_operator: Refreshing state... [id=secrets-operator]
╷
│ Error: Get "http://localhost/api/v1/namespaces/secrets-operator": dial tcp [::1]:80: connect: connection refused
│
│ with module.argo-vault-shoot-setup.kubernetes_namespace.ns_secrets_operator,
│ on ../modules/argo-vault-shoot-setup/main.tf line 79, in resource "kubernetes_namespace" "ns_secrets_operator":
│ 79: resource "kubernetes_namespace" "ns_secrets_operator" {
│
Removing the parts below, which are no longer needed, from the module
…/modules/argo-vault-shoot-setup/main.tf
## Creation of operators namespace and Service account for Vault secrets operator
# Reads the service-account token secret created for the Vault secrets
# operator, so its token/CA can be fed into Vault's Kubernetes auth backend.
data "kubernetes_secret" "sa_secret" {
  provider = kubernetes.shoot

  metadata {
    # Modern index syntax ([0]) instead of the legacy ".0." attribute form.
    namespace = kubernetes_namespace.ns_secrets_operator.metadata[0].name
    name      = kubernetes_secret.secret_secrets_operator.metadata[0].name
  }
}
# Namespace on the shoot cluster that hosts the Vault secrets operator.
resource "kubernetes_namespace" "ns_secrets_operator" {
  provider = kubernetes.shoot

  metadata {
    name = "secrets-operator"

    annotations = {
      managedby = "terraform"
      name      = "secrets-operator"
    }
  }
}
# Service account the Vault secrets operator runs as; referenced by the
# token secret and the TokenReview cluster role binding below.
resource "kubernetes_service_account" "sa_secrets_operator" {
  provider = kubernetes.shoot

  metadata {
    name      = "secrets-operator-sa"
    # Modern index syntax ([0]) instead of the legacy ".0." attribute form.
    namespace = kubernetes_namespace.ns_secrets_operator.metadata[0].name

    annotations = {
      name      = "secrets-operator-sa"
      managedby = "terraform"
    }
  }
  # Intentionally left at the provider default; kept for reference.
  #automount_service_account_token = true
}
# Long-lived service-account token for the operator SA. The
# "kubernetes.io/service-account.name" annotation plus the
# "kubernetes.io/service-account-token" type makes the control plane
# populate this secret with a token and the cluster CA.
resource "kubernetes_secret" "secret_secrets_operator" {
  provider = kubernetes.shoot

  metadata {
    name      = "secrets-operator-secret"
    # Modern index syntax ([0]) instead of the legacy ".0." attribute form.
    namespace = kubernetes_namespace.ns_secrets_operator.metadata[0].name

    annotations = {
      name                                 = "secrets-operator-secret"
      managedby                            = "terraform"
      "kubernetes.io/service-account.name" = kubernetes_service_account.sa_secrets_operator.metadata[0].name
    }
  }
  type = "kubernetes.io/service-account-token"
}
# Grants the operator SA the system:auth-delegator cluster role so Vault's
# Kubernetes auth backend can perform TokenReview calls with its token.
resource "kubernetes_cluster_role_binding" "secrets_operator_auth_role_binding" {
  provider = kubernetes.shoot

  metadata {
    name = "secrets-operator-role-tokenreview-binding"
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "system:auth-delegator"
  }

  subject {
    kind = "ServiceAccount"
    # Modern index syntax ([0]) instead of the legacy ".0." attribute form.
    name      = kubernetes_service_account.sa_secrets_operator.metadata[0].name
    namespace = kubernetes_namespace.ns_secrets_operator.metadata[0].name
  }
}
Root main.tf calling the module
# Creates the Gardener shoot cluster (AWS, Cilium) via kubectl manifests
# applied with the "robot" credentials.
module "gardener-shoot-aws-cilium-kubectl" {
  source = "./../modules/gardener-shoot-aws-cilium-kubectl"

  # Cluster identity and placement.
  cluster_name       = var.cluster_name
  gardener_project   = var.gardener_project
  project_name       = var.project_name
  cloud_provider     = var.cloud_provider
  environment        = var.environment
  location           = var.location
  kubernetes_version = var.kubernetes_version

  providers = {
    kubectl.robot    = kubectl.robot
    kubernetes.robot = kubernetes.robot
  }
}
# Wires the new shoot cluster into Vault (mount, policy, k8s auth backend)
# once the shoot exists.
module "argo-vault-shoot-setup" {
  source = "./../modules/argo-vault-shoot-setup"

  # Cluster identity and placement.
  cluster_name      = var.cluster_name
  gardener_project  = var.gardener_project
  cloud_provider    = var.cloud_provider
  environment       = var.environment
  location          = var.location
  vault_policy_name = var.vault_policy_name

  providers = {
    kubernetes.shoot = kubernetes.shoot
    kubernetes.robot = kubernetes.robot
    vault.bootstrap  = vault.bootstrap
  }

  # Must run after the shoot cluster has been created.
  depends_on = [module.gardener-shoot-aws-cilium-kubectl]
}
# Registers the shoot cluster with Argo CD after the Vault setup completes.
module "register-cluster-argocd" {
  source = "./../modules/register-cluster-argocd"

  # Cluster identity and placement.
  cluster_name     = var.cluster_name
  gardener_project = var.gardener_project
  project_name     = var.project_name
  cloud_provider   = var.cloud_provider
  environment      = var.environment
  location         = var.location

  # Argo CD API endpoint and credentials.
  argocd_server   = var.argocd_server
  argocd_username = var.argocd_username
  argocd_password = var.argocd_password

  providers = {
    argocd.cluster   = argocd.cluster
    kubernetes.robot = kubernetes.robot
  }

  depends_on = [module.argo-vault-shoot-setup]
}
providers.tf
# "robot" kubernetes provider: uses a pre-existing local kubeconfig file,
# so its configuration is fully known at plan time.
provider "kubernetes" {
alias = "robot"
config_path = var.robot_config_local_path
}
# "shoot" kubernetes provider: credentials are derived from a data source
# (data.kubernetes_secret.shootk8sconfig) rather than from static config.
# NOTE(review): if that data source's read is deferred to apply time (which
# happens when it carries a depends_on whose target has pending changes),
# host/token/CA are unknown during plan, and the provider silently falls
# back to its defaults — producing "Get http://localhost/..." connection-
# refused errors exactly like the one above. Terraform recommends not
# deriving provider configuration from resources managed in the same
# configuration; splitting cluster creation and cluster population into
# separate applies avoids this.
provider "kubernetes" {
alias = "shoot"
# Endpoint and token parsed out of the kubeconfig stored in the secret;
# .data values are already base64-decoded by the kubernetes provider.
host = yamldecode(data.kubernetes_secret.shootk8sconfig.data.kubeconfig).clusters[0].cluster.server
# "ca.crt" is read via lookup() because the key contains a dot.
cluster_ca_certificate = lookup(data.kubernetes_secret.shootk8sconfig.data, "ca.crt")
token = yamldecode(data.kubernetes_secret.shootk8sconfig.data.kubeconfig).users[0].user.token
}
# "robot" kubectl provider: same local kubeconfig as the robot kubernetes
# provider; used for raw-manifest application by the shoot-creation module.
provider "kubectl" {
alias = "robot"
config_path = var.robot_config_local_path
}
data-sources.tf
# Fetches the shoot cluster's kubeconfig secret from the garden cluster
# (via the "robot" credentials); the "shoot" provider is built from it.
data "kubernetes_secret" "shootk8sconfig" {
  provider = kubernetes.robot

  # NOTE(review): depends_on on a data source defers its read until apply
  # whenever the referenced module has pending changes. While deferred, all
  # of this data source's attributes are unknown, so the "shoot" provider
  # (configured from them) falls back to localhost — the likely cause of
  # the "dial tcp [::1]:80: connection refused" error above. Consider
  # moving everything that needs the shoot provider into a separate root
  # configuration applied after the cluster exists.
  depends_on = [module.gardener-shoot-aws-cilium-kubectl]

  metadata {
    # Interpolation instead of join() for a simple two-part name.
    name      = "${var.cluster_name}.kubeconfig"
    namespace = "garden-${var.gardener_project}"
  }
}
To summarize: I am using a robot kubeconfig to fetch the real kubeconfig of the cluster (which is stored as a K8s secret) and thereafter connect to the cluster with the alias=shoot provider, but it keeps giving this error.
What am I doing incorrectly with the provider config?
Kevin