Hi @apparentlymart,
Thanks for the reply. I am have the following template defintion
${yamlencode(merge(
"apiVersion": "core.gardener.cloud/v1beta1",
"kind": "Shoot",
"metadata": {
"name": shoot_cluster_name,
"namespace": project_name,
},
"timeouts": {
"create": create_timeout,
"update": update_timeout,
"delete": delete_timeout,
}
"spec": {
"addons": {
"kubernetesDashboard": {
"authenticationMode": "token",
"enabled": dashboard_enabled,
},
"nginxIngress": {
"enabled": nginx_enabled,
"externalTrafficPolicy": "Cluster",
}
},
"cloudProfileName": target_profile,
"hibernation": {
"enabled": false,
},
"kubernetes": {
"allowPrivilegedContainers": true,
"version": kubernetes_version,
},
"maintenance": {
"autoUpdate": {
"kubernetesVersion": maintenance_k8s_version_enabled,
"machineImageVersion": maintenance_machine_image_version_enabled,
},
"timeWindow": {
"begin": "210000+0000",
"end": "220000+0000",
}
},
"networking": {
"nodes": networking_nodes,
"pods": networking_pods,
"services": networking_services,
"type": networking_type,
},
"provider": {
"infrastructureConfig": {
"apiVersion": "aws.provider.extensions.gardener.cloud/v1alpha1",
"kind": "InfrastructureConfig",
"networks": {
"vpc": {
"cidr": vnetcidr,
},
"zones": [
for subnet in subnets : {
internal = subnet.internal
name = subnet.name
public = subnet.public
workers = subnet.workers
}
]
}
},
"type": cloud_provider,
"workers": [
{
"machine": {
"image":{
"name": machine_image_name,
"version": machine_image_version,
},
"type": machine_type,
},
"maxSurge": worker_max_surge,
"maxUnavailable": worker_max_unavailable,
"maximum": worker_maximum,
"minimum": worker_minimum,
"name": worker_name,
"volume": {
"size": disk_size,
"type": disk_type,
},
"zones": zones,
}
]
},
"purpose": "evaluation",
"region": location,
"secretBindingName": target_secret,
},
gardener_dns_management? {
"spec": {
"dns": {
"domain": "testcluster.ondemand.com",
"providers": [
{
"domains": {
"include": [
"domain1.ondemand.com",
"domain12.ondemand.com",
]
},
"secretName": "mysecret",
"type": "aws-route53",
}
]
},
"extensions": [
{
"type": "shoot-dns-service",
}
]
},
} : {},
gardener_dns_ssl_management ? {
"spec": {
"dns": {
"domain": "testcluster.ondemand.com",
"providers": [
{
"domains": {
"include": [
"domain1.ondemand.com",
"domain12.ondemand.com",
],
},
"secretName": "my-secret",
"type": "aws-route53",
}
]
},
"extensions": [
{
"type": "shoot-dns-service",
},
{
"providerConfig": {
"apiVersion": "service.cert.extensions.gardener.cloud/v1alpha1",
"issuers": [
{
"email": "test@example.com",
"name": "ondemand.com",
"server": "https://acme-v02.api.letsencrypt.org/directory",
}
],
},
"type": "shoot-cert-service",
}
]
},
} : {},
))}
I added the following in the variables
variables.tf
variable "gardener_dns_management" {
type = bool
default = true
}
variable "gardener_dns_ssl_management" {
type = bool
default = false
}
In main.tf
resource "kubectl_manifest" "gardener_shoot" {
yaml_body = templatefile("${path.module}/templates/gardener-shoot.yaml_wip.tmpl", {
shoot_cluster_name = var.shoot_cluster_name,
project_name = var.project_name,
create_timeout = var.create_timeout,
update_timeout = var.update_timeout,
delete_timeout = var.delete_timeout,
dashboard_enabled = var.dashboard_enabled,
nginx_enabled = var.nginx_enabled,
target_profile = var.target_profile,
kubernetes_version = var.kubernetes_version,
maintenance_k8s_version_enabled = var.maintenance_k8s_version_enabled,
maintenance_machine_image_version_enabled = var.maintenance_machine_image_version_enabled,
networking_nodes = var.networking_nodes,
networking_pods = var.networking_pods,
networking_services = var.networking_services,
networking_type = var.networking_type,
vnetcidr = var.vnetcidr,
cloud_provider = var.cloud_provider,
machine_image_name = var.machine_image_name,
machine_image_version = var.machine_image_version,
machine_type = var.machine_type,
worker_max_surge = var.worker_max_surge,
worker_max_unavailable = var.worker_max_unavailable,
worker_maximum = var.worker_maximum,
worker_minimum = var.worker_minimum,
worker_name = var.worker_name,
disk_size = var.disk_size,
disk_type = var.disk_type,
location = var.location,
target_secret = var.target_secret,
zones = var.zones,
subnets = var.subnets,
gardener_dns_management = var.gardener_dns_management,
gardener_dns_ssl_management = var.gardener_dns_ssl_management,
})
}
I get an error with merge
on main.tf line 18, in resource "kubectl_manifest" "gardener_shoot":
18: yaml_body = templatefile("${path.module}/templates/gardener-shoot.yaml_wip.tmpl", {
19: shoot_cluster_name = var.shoot_cluster_name,
20: project_name = var.project_name,
21: create_timeout = var.create_timeout,
22: update_timeout = var.update_timeout,
23: delete_timeout = var.delete_timeout,
24: dashboard_enabled = var.dashboard_enabled,
25: nginx_enabled = var.nginx_enabled,
26: target_profile = var.target_profile,
27: kubernetes_version = var.kubernetes_version,
28: maintenance_k8s_version_enabled = var.maintenance_k8s_version_enabled,
29: maintenance_machine_image_version_enabled = var.maintenance_machine_image_version_enabled,
30: networking_nodes = var.networking_nodes,
31: networking_pods = var.networking_pods,
32: networking_services = var.networking_services,
33: networking_type = var.networking_type,
34: vnetcidr = var.vnetcidr,
35: cloud_provider = var.cloud_provider,
36: machine_image_name = var.machine_image_name,
37: machine_image_version = var.machine_image_version,
38: machine_type = var.machine_type,
39: worker_max_surge = var.worker_max_surge,
40: worker_max_unavailable = var.worker_max_unavailable,
41: worker_maximum = var.worker_maximum,
42: worker_minimum = var.worker_minimum,
43: worker_name = var.worker_name,
44: disk_size = var.disk_size,
45: disk_type = var.disk_type,
46: location = var.location,
47: target_secret = var.target_secret,
48: zones = var.zones,
49: subnets = var.subnets,
50: gardener_dns_management = var.gardener_dns_management,
51: gardener_dns_ssl_management = var.gardener_dns_ssl_management,
52: })
|----------------
| path.module is "."
Call to function "templatefile" failed:
./templates/gardener-shoot.yaml_wip.tmpl:2,17-18: Missing argument separator;
A comma is required to separate each function argument from the next..
As you see i would like to add the relevant snippet under the spec
section of the yaml if the boolean value is set.
What am i doing incorrect ?
Kevin