I deployed HashiCorp Boundary on an AWS EKS cluster using the official Helm chart.
I am trying to connect to a PostgreSQL database using the following command:
boundary connect postgres \
  -target-id ttcp_xxxxxxx \
  -addr="https://boundary-controller.mycompany.com"
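For context, authenticating against the controller beforehand works (the session does get created), using the standard password auth flow; the auth method ID and login name below are placeholders:

# Placeholder auth method ID and login name; this step completes without errors.
boundary authenticate password \
  -addr="https://boundary-controller.mycompany.com" \
  -auth-method-id ampw_xxxxxxx \
  -login-name my-user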
The connect command then fails with the following error:
psql: Error closing session after command end:
error sending session teardown request to worker:
error fetching connection to send session teardown request to worker:
error dialing the worker:
failed to WebSocket dial:
failed to send handshake request:
Get "http://boundary-worker.mycompany.com:9202/v1/proxy":
read tcp 100.64.0.1:63868->100.64.1.4:9202:
read: connection reset by peer
It appears that the Boundary client cannot establish the WebSocket connection to the worker's proxy listener on port 9202.
Additional observations:
- The session is created successfully and appears in the Web UI, but it remains in the Pending state.
- The same issue occurs when using Boundary Desktop.
- The controller is exposed via an ALB.
- The worker is exposed via an NLB on port 9202.
- The AWS health checks on both load balancers are passing.
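To narrow this down, these are the client-side checks I plan to run next (assuming nc and openssl are available on the client). Since the handshake request in the trace above goes out as plain http:// and is reset, I suspect the NLB might be terminating TLS on 9202 in front of the worker, but I have not confirmed it:

# Basic TCP reachability of the worker NLB on the proxy port
nc -vz boundary-worker.mycompany.com 9202

# See whether a TLS handshake is answered on 9202; if the ACM certificate
# shows up here, the NLB is terminating TLS in front of the worker listener
openssl s_client -connect boundary-worker.mycompany.com:9202 \
  -servername boundary-worker.mycompany.com </dev/null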
Here is a simplified snippet of my Helm values:
global:
  namePrefix: "boundary"
  namespace: "boundary"
  database:
    databaseName: "boundary"
    secretName: "boundary-database-secret"
    initialize:
      enabled: false
  serviceAccount:
    annotations:
      eks.amazonaws.com/role-arn: '${hashicorp_boundary_irsa_role_arn}'
  extraLabels:
    sg: "hashicorp-boundary"

controller:
  replicas: 3
  extraLabels:
    sg: "hashicorp-boundary"
  serviceAccount:
    annotations:
      eks.amazonaws.com/role-arn: '${hashicorp_boundary_irsa_role_arn}'
  ingress:
    enabled: true
    annotations:
      alb.ingress.kubernetes.io/target-type: ip
      alb.ingress.kubernetes.io/backend-protocol: HTTP
      alb.ingress.kubernetes.io/scheme: internal
      alb.ingress.kubernetes.io/tags: '${alb_ingress_tags}'
      alb.ingress.kubernetes.io/group.name: "dcp-hashicorp-boundary-alb"
      alb.ingress.kubernetes.io/load-balancer-name: dcp-hashicorp-boundary-alb
      alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
      alb.ingress.kubernetes.io/ssl-redirect: '443'
      alb.ingress.kubernetes.io/subnets: '${private_subnets}'
      alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=350
      alb.ingress.kubernetes.io/healthcheck-protocol: HTTP
      alb.ingress.kubernetes.io/healthcheck-path: /health
      alb.ingress.kubernetes.io/healthcheck-port: '9203'
      alb.ingress.kubernetes.io/success-codes: '200'
      alb.ingress.kubernetes.io/security-groups: ${alb_security_groups}
      alb.ingress.kubernetes.io/manage-backend-security-group-rules: "true"
      alb.ingress.kubernetes.io/certificate-arn: '${acm_certificate}'
    ingressClassName: "alb"
    pathType: Prefix
    hosts:
      - host: 'boundary-controller.mycompany.com'
  config: |
    disable_mlock = true
    log_format = "standard"
    log_level = "debug"

    controller {
      name = "env://POD_NAME"
      description = "Boundary Controller"
      database {
        url = "env://BOUNDARY_DATABASE_URL"
      }
      public_cluster_addr = "boundary-controller.boundary.svc.cluster.local:9201"
    }

    listener "tcp" {
      address = "0.0.0.0"
      purpose = "api"
      tls_disable = true
    }

    listener "tcp" {
      address = "0.0.0.0"
      purpose = "cluster"
      tls_disable = true
    }

    listener "tcp" {
      address = "0.0.0.0"
      purpose = "ops"
      tls_disable = true
    }

    # --- AWS KMS configuration ---
    kms "awskms" {
      purpose = "root"
      region = "${region}"
      kms_key_id = "${root_key_id}"
    }

    kms "awskms" {
      purpose = "worker-auth"
      region = "${region}"
      kms_key_id = "${worker_auth_key_id}"
    }

    kms "awskms" {
      purpose = "recovery"
      region = "${region}"
      kms_key_id = "${recovery_key_id}"
    }

worker:
  replicas: 3
  extraLabels:
    sg: "hashicorp-boundary-worker"
  serviceAccount:
    annotations:
      eks.amazonaws.com/role-arn: '${hashicorp_boundary_irsa_role_arn}'
  loadbalancer:
    enabled: true
    publicAddr: 'boundary-worker.mycompany.com'
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-type: "external"
      external-dns.alpha.kubernetes.io/hostname: "boundary-worker.mycompany.com"
      service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing"
      service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
      service.beta.kubernetes.io/aws-load-balancer-subnets: '${public_subnets}'
      service.beta.kubernetes.io/aws-load-balancer-protocol: "TCP"
      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "9202"
      service.beta.kubernetes.io/aws-load-balancer-healthcheck-protocol: TCP
      service.beta.kubernetes.io/aws-load-balancer-healthcheck-port: "9203"
      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
      service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "${acm_certificate}"
  config: |
    disable_mlock = true
    log_format = "standard"

    worker {
      name = "env://POD_NAME"
      description = "Boundary Worker"
      controllers = ["boundary-controller.boundary.svc.cluster.local:9201"]
      public_addr = "env://BOUNDARY_WORKER_LOAD_BALANCER"
      initial_upstreams = [
        "boundary-controller.boundary.svc.cluster.local:9201"
      ]
    }

    listener "tcp" {
      address = "0.0.0.0"
      purpose = "proxy"
      tls_disable = true
    }

    listener "tcp" {
      address = "0.0.0.0"
      purpose = "ops"
      tls_disable = true
    }

    # Worker authorization KMS
    kms "awskms" {
      purpose = "worker-auth"
      region = "${region}"
      kms_key_id = "${worker_auth_key_id}"
    }
Any suggestions on what to check next would be greatly appreciated.
Thank you in advance.