Here is the case:
After deploying Vault via the Helm chart on the EKS cluster with the appropriate configuration, an EBS volume is provisioned automatically, and that works fine. We take a daily backup of that EBS volume and are able to restore the volume from it. But how do we configure the Vault Helm chart to use the restored EBS volume in case the original one is lost?
1 Like
Hey,
Any updates on this?
Stuck in the exact same situation, would love a way out.
Yes — resolved by using a custom PersistentVolume in which we specify the EBS volume ID; note the
# Excerpt (not complete HCL): the key line of the custom PersistentVolume
# is the CSI volumeHandle, which points at the (possibly restored) EBS
# volume by its ID. Full resource definitions follow below.
resource "kubernetes_manifest" "vault-pv" {
...
volumeHandle = aws_ebs_volume.vault_ebs.id
...
}
in the Terraform configuration:
# Vault server installed from the official HashiCorp Helm repository.
# The chart values are rendered from the template_file data source so the
# server mounts the pre-created PVC (and therefore the restored EBS volume).
resource "helm_release" "vault" {
  name       = "vault"
  repository = "https://helm.releases.hashicorp.com"
  chart      = "vault"
  version    = "0.24.1"

  # Rendered Helm values; see data.template_file.vault-config.
  values = [data.template_file.vault-config.rendered]

  # Do not install the chart before the CNI, the AWS load-balancer
  # controller, and the PVC it mounts all exist.
  depends_on = [
    helm_release.lb_controller,
    helm_release.calico,
    kubernetes_manifest.vault-pvc,
  ]
}
# Claim that binds the Vault server pod to the pre-created PV — and thus
# to the specific (possibly snapshot-restored) EBS volume.
resource "kubernetes_manifest" "vault-pvc" {
  # count kept so the resource address stays kubernetes_manifest.vault-pvc[0]
  # (referenced via depends_on elsewhere; removing count would force a
  # state move).
  count = 1

  manifest = {
    apiVersion = "v1"
    kind       = "PersistentVolumeClaim"

    metadata = {
      name      = local.vault_pvc_name
      namespace = "default"
    }

    spec = {
      storageClassName = "gp3"

      # Pin the claim to the statically created PV. Without volumeName the
      # gp3 dynamic provisioner could satisfy this claim with a brand-new
      # volume instead of binding to the pre-created/restored one.
      volumeName = local.vault_pv_name

      accessModes = [
        "ReadWriteOnce"
      ]
      volumeMode = "Filesystem"

      resources = {
        requests = {
          storage = "${var.vault_storage_capacity_gb}Gi"
        }
      }
    }
  }

  depends_on = [helm_release.calico, kubernetes_manifest.vault-pv]
}
# Static PersistentVolume wrapping an existing EBS volume. To re-attach
# Vault to a snapshot-restored volume, point aws_ebs_volume.vault_ebs (or
# this volumeHandle) at the restored volume's ID.
resource "kubernetes_manifest" "vault-pv" {
  count = 1

  manifest = {
    apiVersion = "v1"
    kind       = "PersistentVolume"

    metadata = {
      name = local.vault_pv_name
    }

    spec = {
      storageClassName = "gp3"

      # Reserve this PV for the Vault claim so no other PVC can bind it.
      claimRef = {
        namespace = "default"
        name      = local.vault_pvc_name
      }

      capacity = {
        storage = "${var.vault_storage_capacity_gb}Gi"
      }

      volumeMode = "Filesystem"
      accessModes = [
        "ReadWriteOnce"
      ]

      # Be explicit: never delete the underlying EBS volume when the claim
      # goes away. (Retain is already the default for statically created
      # PVs, but stating it documents intent and guards against surprises.)
      persistentVolumeReclaimPolicy = "Retain"

      csi = {
        driver       = "ebs.csi.aws.com"
        volumeHandle = aws_ebs_volume.vault_ebs.id
        fsType       = "ext4"
      }

      # EBS volumes are zonal: only allow scheduling onto nodes in the
      # volume's availability zone.
      nodeAffinity = {
        required = {
          nodeSelectorTerms = [
            {
              matchExpressions = [
                {
                  key      = "topology.ebs.csi.aws.com/zone"
                  operator = "In"
                  values   = [var.vault_ebs_az]
                }
              ]
            }
          ]
        }
      }
    }
  }

  depends_on = [helm_release.calico, aws_ebs_volume.vault_ebs]
}
# Helm values for the Vault chart. The rendered YAML disables the chart's
# own dataStorage and instead mounts the pre-created PVC
# (local.vault_pvc_name) — this is what lets a restored EBS volume be
# re-attached to Vault.
# NOTE(review): the hashicorp/template provider backing template_file is
# deprecated/archived; consider migrating to the built-in templatefile()
# function or a local value — TODO confirm provider constraints first.
data "template_file" "vault-config" {
# Heredoc content is the literal values document passed to Helm; do not
# add comments inside it that you don't want rendered.
template = <<EOF
injector:
enabled: true
hostNetwork: true
port: ${local.vault_injector_port}
logLevel: "${var.vault_injector_log_level}"
failurePolicy: Fail
server:
hostNetwork: true
standalone:
enabled: true
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:${local.vault_server_port}"
cluster_address = "[::]:8201"
}
seal "awskms" {
region = "${var.region}"
kms_key_id = "${aws_kms_key.vault-kms.id}"
}
storage "file" {
path = "${local.vault_pod_mount_path}"
}
service:
enabled: true
type: NodePort
volumes:
- name: data
persistentVolumeClaim:
claimName: ${local.vault_pvc_name}
volumeMounts:
- name: data
mountPath: "${local.vault_pod_mount_path}"
dataStorage:
enabled: false
ingress:
enabled: true
annotations:
helm.sh/resource-policy: keep
kubernetes.io/ingress.class: alb
alb.ingress.kubernetes.io/scheme: internet-facing
alb.ingress.kubernetes.io/target-type: instance
alb.ingress.kubernetes.io/healthcheck-path: /v1/sys/health
alb.ingress.kubernetes.io/group.name: group-for-alb-0
alb.ingress.kubernetes.io/load-balancer-name: ${aws_lb.k8s-alb[0].name}
alb.ingress.kubernetes.io/certificate-arn: ${aws_acm_certificate.domain-certificate.arn}
hosts:
- host: ${local.route53_vault_domain}
paths:
- /
EOF
}