---
# Source: consul/templates/server-podsecuritypolicy.yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: consul-server
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  # This is redundant with non-root + disallow privilege escalation,
  # but we can provide it for defense in depth.
  requiredDropCapabilities:
    - ALL
  # Allow core volume types.
  volumes:
    - 'configMap'
    - 'emptyDir'
    - 'projected'
    - 'secret'
    - 'downwardAPI'
    - 'persistentVolumeClaim'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    # Not restricted here; the Consul image's entrypoint drops to the
    # non-root consul user on its own.
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
---
# Source: consul/templates/server-disruptionbudget.yaml
# PodDisruptionBudget to prevent degrading the server cluster through
# voluntary cluster changes.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: consul-server
  namespace: consul
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app: consul
      release: "consul"
      component: server
---
# Source: consul/templates/server-config-configmap.yaml
# ConfigMap with extra configuration specified directly to the chart
# for the server agents.
apiVersion: v1
kind: ConfigMap
metadata:
  name: consul-server-config
  namespace: consul
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
data:
  extra-from-values.json: |-
    {}
---
# Source: consul/templates/server-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul-server
  namespace: consul
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
---
# Source: consul/templates/server-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: consul-server
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
rules:
  - apiGroups: ["policy"]
    resources: ["podsecuritypolicies"]
    resourceNames:
      - consul-server
    verbs:
      - use
---
# Source: consul/templates/server-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: consul-server
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: consul-server
subjects:
  - kind: ServiceAccount
    name: consul-server
    namespace: consul
---
# Source: consul/templates/server-service.yaml
# Headless service for Consul server DNS entries. This service should only
# point to Consul servers. For access to an agent, one should assume that
# the agent is installed locally on the node and the NODE_IP should be used.
# If the node can't run a Consul agent, then this service can be used to
# communicate directly to a server agent.
apiVersion: v1
kind: Service
metadata:
  name: consul-server
  namespace: consul
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
  annotations:
    # This must be set in addition to publishNotReadyAddresses due
    # to an open issue where it may not work:
    # https://github.com/kubernetes/kubernetes/issues/58662
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  clusterIP: None
  # We want the servers to become available even if they're not ready
  # since this DNS is also used for join operations.
  publishNotReadyAddresses: true
  ports:
    - name: http
      port: 8500
      targetPort: 8500
    - name: serflan-tcp
      protocol: "TCP"
      port: 8301
      targetPort: 8301
    - name: serflan-udp
      protocol: "UDP"
      port: 8301
      targetPort: 8301
    - name: serfwan-tcp
      protocol: "TCP"
      port: 8302
      targetPort: 8302
    - name: serfwan-udp
      protocol: "UDP"
      port: 8302
      targetPort: 8302
    - name: server
      port: 8300
      targetPort: 8300
    - name: dns-tcp
      protocol: "TCP"
      port: 8600
      targetPort: dns-tcp
    - name: dns-udp
      protocol: "UDP"
      port: 8600
      targetPort: dns-udp
  selector:
    app: consul
    release: "consul"
    component: server
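# Because this is a headless service (clusterIP: None) with
# publishNotReadyAddresses, each server pod is resolvable under a stable
# per-pod DNS name as soon as it starts. A sketch of what the StatefulSet's
# -retry-join flags below resolve to, assuming the default cluster domain
# cluster.local:
#
#   consul-server-0.consul-server.consul.svc.cluster.local
#   consul-server-1.consul-server.consul.svc.cluster.local
#   consul-server-2.consul-server.consul.svc.cluster.local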
---
# Source: consul/templates/ui-service.yaml
# UI Service for Consul Server
apiVersion: v1
kind: Service
metadata:
  name: consul-ui
  namespace: consul
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
spec:
  selector:
    app: consul
    release: "consul"
    component: server
  ports:
    - name: http
      port: 80
      targetPort: 8500
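# The StatefulSet below reads its gossip encryption key from a pre-existing
# Secret named consul-gossip-encryption-key (see the GOSSIP_KEY env var).
# That Secret is not part of this chart output and must exist before the
# servers start. A minimal sketch of creating it, assuming the consul CLI is
# available locally to generate the key:
#
#   kubectl create secret generic consul-gossip-encryption-key \
#     --namespace consul \
#     --from-literal=key="$(consul keygen)"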
---
# Source: consul/templates/server-statefulset.yaml
# StatefulSet to run the actual Consul server cluster.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul-server
  namespace: consul
  labels:
    app: consul
    chart: consul-helm
    heritage: Helm
    release: consul
    component: server
spec:
  serviceName: consul-server
  podManagementPolicy: Parallel
  replicas: 3
  selector:
    matchLabels:
      app: consul
      chart: consul-helm
      release: consul
      component: server
      hasDNS: "true"
  template:
    metadata:
      labels:
        app: consul
        chart: consul-helm
        release: consul
        component: server
        hasDNS: "true"
      annotations:
        "consul.hashicorp.com/connect-inject": "false"
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app: consul
                  release: "consul"
                  component: server
              topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      serviceAccountName: consul-server
      securityContext:
        fsGroup: 1000
      volumes:
        - name: config
          configMap:
            name: consul-server-config
      containers:
        - name: consul
          image: "consul:1.10.3"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: GOSSIP_KEY
              valueFrom:
                secretKeyRef:
                  name: consul-gossip-encryption-key
                  key: key
          command:
            - "/bin/sh"
            - "-ec"
            - |
              CONSUL_FULLNAME="consul"

              exec /bin/consul agent \
                -advertise="${POD_IP}" \
                -bind=0.0.0.0 \
                -bootstrap-expect=3 \
                -client=0.0.0.0 \
                -config-dir=/consul/config \
                -datacenter=xxxxx \
                -data-dir=/consul/data \
                -domain=consul \
                -encrypt="${GOSSIP_KEY}" \
                -hcl="connect { enabled = true }" \
                -ui \
                -retry-join=${CONSUL_FULLNAME}-server-0.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc \
                -retry-join=${CONSUL_FULLNAME}-server-1.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc \
                -retry-join=${CONSUL_FULLNAME}-server-2.${CONSUL_FULLNAME}-server.${NAMESPACE}.svc \
                -server
          volumeMounts:
            - name: data-consul
              mountPath: /consul/data
            - name: config
              mountPath: /consul/config
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/sh
                  - -c
                  - consul leave
          ports:
            - containerPort: 8500
              name: http
            - containerPort: 8301
              name: serflan
            - containerPort: 8302
              name: serfwan
            - containerPort: 8300
              name: server
            - containerPort: 8600
              name: dns-tcp
              protocol: "TCP"
            - containerPort: 8600
              name: dns-udp
              protocol: "UDP"
          readinessProbe:
            # NOTE(mitchellh): when our HTTP status endpoints support the
            # proper status codes, we should switch to that. This is
            # temporary.
            # The leader endpoint returns a quoted "host:port" string once a
            # leader is elected (and an empty string otherwise), so the grep
            # for a non-empty quoted value is what gates readiness.
            exec:
              command:
                - "/bin/sh"
                - "-ec"
                - |
                  curl http://127.0.0.1:8500/v1/status/leader 2>/dev/null | \
                  grep -E '".+"'
            failureThreshold: 2
            initialDelaySeconds: 5
            periodSeconds: 3
            successThreshold: 1
            timeoutSeconds: 5
  volumeClaimTemplates:
    - metadata:
        name: data-consul
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
---
# Source: consul/templates/client-config-configmap.yaml
# ConfigMap with extra configuration specified directly to the chart
# for client agents only.
---
# Source: consul/templates/client-daemonset.yaml
# DaemonSet to run the Consul clients on every node.
---
# Source: consul/templates/connect-inject-clusterrole.yaml
# The ClusterRole to enable the Connect injector to get, list, watch and
# patch MutatingWebhookConfiguration.
---
# Source: consul/templates/connect-inject-deployment.yaml
# The deployment for running the Connect sidecar injector.
---
# Source: consul/templates/connect-inject-mutatingwebhook.yaml
# The MutatingWebhookConfiguration to enable the Connect injector.
---
# Source: consul/templates/connect-inject-service.yaml
# The service for the Connect sidecar injector.
---
# Source: consul/templates/dns-service.yaml
# Service for Consul DNS.
---
# Source: consul/templates/sync-catalog-deployment.yaml
# The deployment for running the sync-catalog pod.
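# The consul-ui Service above is ClusterIP-only, so the UI is not reachable
# from outside the cluster by default. One quick way to reach it, assuming
# kubectl access to the cluster:
#
#   kubectl port-forward service/consul-ui --namespace consul 8500:80
#
# then browse to http://localhost:8500.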