I know I am missing something very simple, but I can't get the Raft peers to join, even with --tls-skip-verify. I have a cert from Let's Encrypt for vault.pgvirtuoso.net. Here is my Helm manifest (computed values); the relevant raft listener and storage stanza is under server.ha.raft, and there is a retry_join note right after the manifest:
COMPUTED VALUES:
csi:
  daemonSet:
    annotations: {}
    updateStrategy:
      maxUnavailable: ""
      type: RollingUpdate
  debug: false
  enabled: false
  extraArgs: []
  image:
    pullPolicy: IfNotPresent
    repository: hashicorp/vault-csi-provider
    tag: 0.3.0
  livenessProbe:
    failureThreshold: 2
    initialDelaySeconds: 5
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 3
  pod:
    annotations: {}
    tolerations: []
  readinessProbe:
    failureThreshold: 2
    initialDelaySeconds: 5
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 3
  resources: {}
  serviceAccount:
    annotations:
      eks.amazonaws.com/role-arn: arn:aws:iam::334719107214:role/kms_poweruser
  volumeMounts: null
  volumes: null
global:
  enabled: true
  imagePullSecrets: []
  openshift: false
  psp:
    annotations: |
      seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
      apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
      seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
      apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    enable: false
  tlsDisable: false
injector:
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector
              app.kubernetes.io/instance: "{{ .Release.Name }}"
              component: webhook
          topologyKey: kubernetes.io/hostname
  agentDefaults:
    cpuLimit: 500m
    cpuRequest: 250m
    memLimit: 128Mi
    memRequest: 64Mi
    template: map
    templateConfig:
      exitOnRetryFailure: true
  agentImage:
    repository: hashicorp/vault
    tag: 1.8.1
  annotations: {}
  authPath: auth/kubernetes
  certs:
    caBundle: ""
    certName: tls.crt
    keyName: tls.key
    secretName: vault-pgvirtuoso-net-tls
  enabled: true
  externalVaultAddr: ""
  # extraEnvironmentVars:
  #   VAULT_ADDR: 'http://127.0.0.1:8200'
  #   VAULT_API_ADDR: 'https://vault.pgvirtuoso.net:8200'
  #   VAULT_CLUSTER_ADDR: 'https://vault.pgvirtuoso.net:8200'
  extraLabels: {}
  failurePolicy: Ignore
  hostNetwork: false
  image:
    pullPolicy: IfNotPresent
    repository: hashicorp/vault-k8s
    tag: 0.12.0
  leaderElector:
    enabled: true
    image:
      repository: gcr.io/google_containers/leader-elector
      tag: "0.4"
    ttl: 60s
  logFormat: standard
  logLevel: debug
  metrics:
    enabled: false
  namespaceSelector: {}
  nodeSelector: {}
  objectSelector: {}
  port: 8080
  priorityClassName: ""
  replicas: 1
  resources: {}
  revokeOnShutdown: false
  service:
    annotations: {}
  tolerations: []
  webhookAnnotations: {}
server:
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app.kubernetes.io/name: {{ template "vault.name" . }}
              app.kubernetes.io/instance: "{{ .Release.Name }}"
              component: server
          topologyKey: kubernetes.io/hostname
  annotations: {}
  auditStorage:
    accessMode: ReadWriteOnce
    annotations: {}
    enabled: true
    mountPath: /vault/audit
    size: 10Gi
    storageClass: null
  authDelegator:
    enabled: true
  dataStorage:
    accessMode: ReadWriteOnce
    annotations: {}
    enabled: true
    mountPath: /vault/data
    size: 10Gi
    storageClass: null
  dev:
    devRootToken: root
    enabled: false
  enabled: true
  enterpriseLicense:
    secretKey: license
    secretName: ""
  extraArgs: ""
  extraContainers: null
  # extraEnvironmentVars:
  #   VAULT_ADDR: 'http://127.0.0.1:8200'
  extraInitContainers: null
  extraLabels: {}
  extraSecretEnvironmentVars: []
  extraVolumes:
    - type: secret
      name: vault-pgvirtuoso-net-tls
      path: '/vault/userconfig'
  ha:
    apiAddr: null
    config: |
      ui = true
      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }
      storage "consul" {
        path = "vault"
        address = "HOST_IP:8500"
      }
      service_registration "kubernetes" {}
      seal "awskms" {
        region = "us-east-1"
        kms_key_id = "xxxxx"
        endpoint = "https://xxxxx.vpce.amazonaws.com"
      }
      # Example configuration for using auto-unseal, using Google Cloud KMS. The
      # GKMS keys must already exist, and the cluster must have a service account
      # that is authorized to access GCP KMS.
      #seal "gcpckms" {
      #  project = "vault-helm-dev-246514"
      #  region = "global"
      #  key_ring = "vault-helm-unseal-kr"
      #  crypto_key = "vault-helm-unseal-key"
      #}
    disruptionBudget:
      enabled: true
      maxUnavailable: null
    enabled: true
    raft:
      config: |
        ui = true
        listener "tcp" {
          tls_disable = 0
          address = "[::]:8200"
          cluster_address = "[::]:8201"
          tls_cert_file = "/vault/userconfig/vault-pgvirtuoso-net-tls/tls.crt"
          tls_key_file = "/vault/userconfig/vault-pgvirtuoso-net-tls/tls.key"
          tls_disable_client_certs = "true"
        }
        storage "raft" {
          path = "/vault/data"
        }
        service_registration "kubernetes" {}
        seal "awskms" {
          region = "us-east-1"
          kms_key_id = "xxxxx"
          endpoint = "https://xxxxx.vpce.amazonaws.com"
        }
      enabled: true
      setNodeId: false
    replicas: 3
  image:
    pullPolicy: IfNotPresent
    repository: hashicorp/vault
    tag: 1.8.1
  ingress:
    activeService: true
    annotations:
      external-dns.alpha.kubernetes.io/hostname: vault.pgvirtuoso.net
      kubernetes.io/ingress.class: nginx
      kubernetes.io/tls-acme: "true"
      cert-manager.io/cluster-issuer: letsencrypt-prod
      nginx.ingress.kubernetes.io/ssl-passthrough: "true"
      nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
      nginx.ingress.kubernetes.io/ssl-redirect: "true"
    enabled: false
    extraPaths:
      - path: /*
        backend:
          serviceName: ssl-redirect
          servicePort: use-annotation
    hosts:
      - host: vault.pgvirtuoso.net
        # paths: []
    # labels: {}
    tls:
      - secretName: vault-pgvirtuoso-net-tls
        hosts:
          - vault.pgvirtuoso.net
  livenessProbe:
    enabled: false
    failureThreshold: 2
    initialDelaySeconds: 60
    path: /v1/sys/health?standbyok=true
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 3
  logFormat: ""
  logLevel: ""
  networkPolicy:
    egress: []
    enabled: false
  nodeSelector: {}
  postStart: []
  preStopSleepSeconds: 5
  priorityClassName: ""
  readinessProbe:
    enabled: true
    failureThreshold: 2
    initialDelaySeconds: 5
    periodSeconds: 5
    successThreshold: 1
    timeoutSeconds: 3
  resources: {}
  route:
    activeService: true
    annotations: {}
    enabled: false
    host: vault.pgvirtuoso.net
    labels: {}
    tls:
      hosts:
        - vault.pgvirtuoso.net
      secretName: vault-pgvirtuoso-net-tls
  service:
    annotations: {}
    enabled: true
    port: 8200
    targetPort: 8200
  serviceAccount:
    annotations:
      eks.amazonaws.com/role-arn: arn:aws:iam::1111111:role/kms_poweruser
    create: true
    name: ""
  shareProcessNamespace: false
  standalone:
    config: |
      ui = true
      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
        tls_cert_file = "/vault/userconfig/vault-pgvirtuoso-net-tls/tls.crt"
        tls_key_file = "/vault/userconfig/vault-pgvirtuoso-net-tls/tls.key"
      }
      storage "file" {
        path = "/vault/data"
      }
      seal "awskms" {
        region = "us-east-1"
        kms_key_id = "xxxxx"
        endpoint = "https://xxxxx.vpce.amazonaws.com"
      }
      # Example configuration for using auto-unseal, using Google Cloud KMS. The
      # GKMS keys must already exist, and the cluster must have a service account
      # that is authorized to access GCP KMS.
      #seal "gcpckms" {
      #  project = "vault-helm-dev"
      #  region = "global"
      #  key_ring = "vault-helm-unseal-kr"
      #  crypto_key = "vault-helm-unseal-key"
      #}
    enabled: '-'
  statefulSet:
    annotations: {}
  tolerations: []
  updateStrategyType: OnDelete
  volumeMounts: null
  volumes: null
ui:
  activeVaultPodOnly: false
  enabled: true
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
    service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true'
    service.beta.kubernetes.io/aws-load-balancer-type: nlb
    service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
    external-dns.alpha.kubernetes.io/hostname: vault.pgvirtuoso.net
    kubernetes.io/tls-acme: "true"
    cert-manager.io/cluster-issuer: letsencrypt-prod
  externalPort: 8200
  publishNotReadyAddresses: true
  serviceNodePort: null
  serviceType: LoadBalancer
  targetPort: 8200
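Note that the raft stanza above has no retry_join blocks; I am joining the standby nodes manually. For reference, my understanding from the docs is that an automatic join against the TLS listener would look roughly like this (a sketch only; the leader address and CA path are assumptions based on the pod/service names and the extraVolumes mount above, and in practice there would be one retry_join block per pod):

storage "raft" {
  path = "/vault/data"
  # Hypothetical retry_join block, not in my current values.
  retry_join {
    leader_api_addr       = "https://vault-0.vault-internal:8200"
    leader_ca_cert_file   = "/vault/userconfig/vault-pgvirtuoso-net-tls/tls.crt"
    # Must match a SAN on the Let's Encrypt cert.
    leader_tls_servername = "vault.pgvirtuoso.net"
  }
}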
Vault auto-unseals fine. I can initialize it with

kubectl exec vault-0 -- vault operator init -key-shares=1 -key-threshold=1 -format=json --tls-skip-verify > cluster-keys.json

and log in with

kubectl exec vault-0 -- vault login $CLUSTER_ROOT_TOKEN --tls-skip-verify

But when I try to join the second node with

kubectl exec vault-1 -- vault operator raft join --tls-skip-verify http://vault-0.vault-internal:8200

it fails:
Error joining the node to the Raft cluster: Error making API request.
URL: POST https://127.0.0.1:8200/v1/sys/storage/raft/join
Code: 500. Errors:
* failed to join raft cluster: failed to join any raft leader node
command terminated with exit code 2
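For completeness, this is the shape of the join command I understand is needed when the leader's listener has TLS enabled (a sketch only; the cert path comes from the extraVolumes mount above, and --tls-skip-verify is still there for the local call to 127.0.0.1):

# Hypothetical: join over https and pass the mounted cert as the leader CA.
kubectl exec vault-1 -- sh -c \
  'vault operator raft join --tls-skip-verify \
     -leader-ca-cert="$(cat /vault/userconfig/vault-pgvirtuoso-net-tls/tls.crt)" \
     https://vault-0.vault-internal:8200'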
What did I miss? Thank you!