Hello,
I have deployed Boundary in production mode. When I try to connect to the target via the Boundary Desktop app, I get the following error:

server closed the connection unexpectedly
This probably means the server terminated abnormally before or while processing the request.

However, I am able to connect to the target if I retry a few times. What do you think would cause such an error?
boundary-controller config:
disable_mlock = true
log_format = "standard"
controller {
  name = "kubernetes-controller"
  description = "Boundary kubernetes-controller"
  database {
    url = "file:///vault/secrets/boundary-database-creds"
  }
  public_cluster_addr = "boundary.hashicorp:9201"
}

listener "tcp" {
  address = "0.0.0.0"
  purpose = "api"
  tls_disable = true
}

listener "tcp" {
  address = "0.0.0.0"
  purpose = "cluster"
}

listener "tcp" {
  address = "0.0.0.0"
  purpose = "ops"
  tls_disable = true
}

kms "awskms" {
  purpose = "worker-auth"
  region = "{AWS_REGION}"
}

kms "awskms" {
  purpose = "root"
  region = "{AWS_REGION}"
}

kms "awskms" {
  purpose = "recovery"
  region = "{AWS_REGION}"
}

kms "awskms" {
  purpose = "config"
  region = "{AWS_REGION}"
}
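
In case it helps with debugging, the ops listener above is what I would query for controller health. This is just a sketch; it assumes the default ops port 9203 (the listener block sets no explicit port), and the hostname is a placeholder for the real controller address:

# Check the controller health endpoint served by the ops listener
# (default port 9203; hostname below is a placeholder).
curl -i http://controller.example.com:9203/health
# A 200 response means the controller reports itself healthy.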
boundary-worker config:
disable_mlock = true
log_format = "standard"
worker {
  name = "kubernetes-worker"
  description = "Boundary kubernetes-worker"
  controllers = ["boundary.hashicorp:9201"]
  public_addr = "env://BOUNDARY_WORKER_LOAD_BALANCER"
}

listener "tcp" {
  address = "0.0.0.0"
  purpose = "proxy"
  tls_disable = true
}

kms "awskms" {
  purpose = "worker-auth"
  region = "{AWS_REGION}"
}

listener "tcp" {
  address = "0.0.0.0"
  purpose = "api"
  tls_disable = true
}

listener "tcp" {
  address = "0.0.0.0"
  purpose = "cluster"
  tls_disable = true
}
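
For completeness, the worker's public_addr is resolved from an environment variable via the env:// reference above. In the Kubernetes deployment that variable is set on the worker container; in shell terms it amounts to roughly the following (the hostname and config path are placeholders):

# The worker resolves public_addr from this variable at startup
# because of the env:// reference in the config.
export BOUNDARY_WORKER_LOAD_BALANCER="worker-lb.example.com:9202"
boundary server -config=/etc/boundary/worker.hcl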
By the way, I have no issues connecting with the Boundary CLI at all. Any help on this issue would be highly appreciated. Thanks!
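
For reference, the CLI flow that works for me is roughly the following (the auth-method and target IDs are placeholders):

# Authenticate, then open a proxied session to the same target.
boundary authenticate password -auth-method-id <auth-method-id>
boundary connect -target-id <target-id>
# `boundary connect` prints a local address/port to point the client at,
# and the session comes up without the error the Desktop app shows.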