Hi,
I deployed an Azure AKS cluster into an existing VNet using the Terraform configuration below (the resource group and VNet subnet it references are sketched after the cluster resource for context). It worked: the AKS cluster is created with an Azure load balancer and a public IP address assigned to it. However, I need a setup with an internal Azure load balancer only. How do I have to change the Terraform code so that only an internal Azure load balancer is created? Thanks
resource "azurerm_kubernetes_cluster" "aks" {
name = "${var.tags.department}-${var.tags.stage}-${var.tags.environment}_aks"
location = var.location
resource_group_name = azurerm_resource_group.aksrg.name
dns_prefix = lower("${var.tags.department}-${var.tags.stage}-${var.tags.environment}-aks")
private_link_enabled = true
node_resource_group = "${var.tags.department}-${var.tags.stage}-${var.tags.environment}_aks_nodes_rg"
linux_profile {
admin_username = "testadmin"
ssh_key {
key_data = file("/ssh/id_rsa.pub") #ssh-keygen
}
}
default_node_pool {
name = "default"
vm_size = "Standard_DS1_v2"
enable_auto_scaling = false
enable_node_public_ip = false
node_count = 1
vnet_subnet_id = azurerm_subnet.akssubnet.id
}
network_profile {
network_plugin = "azure"
service_cidr = "172.100.0.0/24"
dns_service_ip = "172.100.0.10"
docker_bridge_cidr = "172.101.0.1/16"
load_balancer_sku = "standard"
}
service_principal {
client_id = azurerm_azuread_service_principal.aks_sp.application_id
client_secret = azurerm_azuread_service_principal_password.aks_sp_pwd.value
}
addon_profile {
kube_dashboard {
enabled = true
}
}
role_based_access_control {
enabled = false
}
}
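For context, the cluster resource above references a resource group and a subnet in the existing VNet. Roughly, those look like the sketch below; the names, variables, and address prefix are placeholders rather than my exact values:

# Sketch only: resource group referenced by the cluster; the name is a placeholder.
resource "azurerm_resource_group" "aksrg" {
  name     = "${var.tags.department}-${var.tags.stage}-${var.tags.environment}_aks_rg"
  location = var.location
  tags     = var.tags
}

# Node subnet inside the pre-existing VNet; the VNet name, its resource group,
# and the address prefix are assumptions for this sketch.
resource "azurerm_subnet" "akssubnet" {
  name                 = "aks-nodes"
  resource_group_name  = var.vnet_resource_group
  virtual_network_name = var.vnet_name
  address_prefix       = "10.1.0.0/22"
}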