Consul templating

Hi!
I’m creating a small Prometheus demo with Nomad. Prometheus is configured through YAML.
Is it possible to store the whole configuration in Consul, similar to how the artifact stanza fetches external files?

How have you done it? What is considered best practice?
I’d like to keep my Nomad job file as clean as possible.

regards

1 Like

I’m storing my Prometheus config inside the Git repository alongside the Nomad job file and loading it as an artifact.

E.g.:

# prometheus.yml.tpl
---
global:
  scrape_interval: 15s

alerting:
  alertmanagers:
    - consul_sd_configs:
        - server: "consul.service.consul:8500"
          services: ["alertmanager"]

scrape_configs:
  # Scrape every Consul-registered service tagged "prometheus".
  - job_name: "consul"
    consul_sd_configs:
      - server: "consul.service.consul:8500"
        tags: ["prometheus"]
    relabel_configs:
      # Turn Consul tags of the form "label:key=value" into Prometheus labels.
      # Quoted: the values contain ':' / '$' and are fragile as plain scalars.
      - source_labels: [__meta_consul_tags]
        separator: ","
        regex: "label:([^=]+)=([^,]+)"
        target_label: "${1}"
        replacement: "${2}"
      - source_labels: [__meta_consul_node]
        target_label: instance
      - source_labels: [__meta_consul_service]
        target_label: service

  # Scrape Nomad servers and clients via their Consul registrations.
  - job_name: "nomad"
    consul_sd_configs:
      - server: "consul.service.consul:8500"
        services: ["nomad-client", "nomad"]
    metrics_path: /v1/metrics
    params:
      format: ["prometheus"]
    relabel_configs:
      # Keep only the HTTP API port; quoted so the regex stays a string,
      # not an integer.
      - source_labels: [__meta_consul_service_port]
        regex: "4646"
        action: keep
      - source_labels: [__meta_consul_node]
        target_label: instance
      - source_labels: [__meta_consul_service]
        target_label: service
# prometheus-job.hcl
# Input variables for the Prometheus stack deployment.
variable "datacenters" {
  type        = list(string)
  description = "List of datacenters to deploy to."
  default     = ["dc1"]
}

# NOTE(review): "domain" has no default (must be supplied at run time) and is
# not referenced in this job file — confirm it is used elsewhere or remove it.
variable "domain" {
  type        = string
  description = "Base domain name."
}

variable "prometheus_image_tag" {
  type        = string
  description = "Prometheus Docker image tag to deploy."
  default     = "latest"
}

variable "alertmanager_image_tag" {
  type        = string
  description = "Alertmanager Docker image tag to deploy."
  default     = "latest"
}

variable "consul_exporter_image_tag" {
  type        = string
  description = "consul_exporter Docker image tag to deploy."
  default     = "latest"
}

job "prometheus" {
  datacenters = var.datacenters

  update {
    stagger      = "30s"
    max_parallel = 1
  }

  group "prometheus" {
    count = 1

    # Keep TSDB data across in-place updates; migrate it if the alloc moves.
    ephemeral_disk {
      size    = 600
      migrate = true
    }

    network {
      port "prometheus_ui" { to = 9090 }
    }

    task "prometheus" {
      driver = "docker"

      artifact {
        # Double slash required to download just the specified subdirectory, see:
        # https://github.com/hashicorp/go-getter#subdirectories
        source = "git::https://github.com/myorg/myrepo.git//nomad_jobs/artifacts/prometheus"
      }

      config {
        image = "prom/prometheus:${var.prometheus_image_tag}"

        cap_drop = [
          "ALL",
        ]

        volumes = [
          "local/prometheus.yml:/etc/prometheus/prometheus.yml:ro",
        ]

        ports = ["prometheus_ui"]
      }

      resources {
        cpu    = 100
        memory = 100
      }

      service {
        name = "prometheus"

        tags = [
          "prometheus",
        ]

        port = "prometheus_ui"

        check {
          type     = "http"
          path     = "/-/healthy"
          interval = "10s"
          timeout  = "2s"
        }
      }

      # Render the downloaded template and SIGHUP Prometheus on change so it
      # reloads its config without a task restart.
      template {
        source        = "local/prometheus.yml.tpl"
        destination   = "local/prometheus.yml"
        change_mode   = "signal"
        change_signal = "SIGHUP"
      }
    }
  }

  group "alertmanager" {
    count = 1

    spread {
      attribute = "${node.unique.name}"
      weight    = 100
    }

    network {
      port "alertmanager_ui" { to = 9093 }
    }

    task "alertmanager" {
      driver = "docker"

      artifact {
        # Same repository/subdirectory as the prometheus task above
        # (fixed typo: "myorgy" -> "myorg").
        source = "git::https://github.com/myorg/myrepo.git//nomad_jobs/artifacts/prometheus"
      }

      config {
        image = "prom/alertmanager:${var.alertmanager_image_tag}"

        cap_drop = [
          "ALL",
        ]

        volumes = [
          "secret/alertmanager.yml:/etc/alertmanager/config.yml",
        ]

        ports = ["alertmanager_ui"]
      }

      resources {
        cpu    = 100
        memory = 50
      }

      service {
        name = "alertmanager"

        tags = [
          "prometheus",
        ]

        port = "alertmanager_ui"

        check {
          type     = "http"
          path     = "/-/healthy"
          interval = "10s"
          timeout  = "2s"
        }
      }

      # Rendered into secret/ (not local/) because the template pulls
      # credentials from Vault.
      template {
        source        = "local/alertmanager.yml.tpl"
        destination   = "secret/alertmanager.yml"
        change_mode   = "signal"
        change_signal = "SIGHUP"
      }

      # alertmanager.yml includes secrets from Vault
      vault {
        policies      = ["monitoring-alertmanager"]
        change_mode   = "signal"
        change_signal = "SIGHUP"
      }
    }
  }

  group "exporters" {
    count = 1

    network {
      port "consul_exporter" { to = 9107 }
    }

    task "consul-exporter" {
      driver = "docker"

      config {
        image = "prom/consul-exporter:${var.consul_exporter_image_tag}"

        cap_drop = [
          "ALL",
        ]

        args = [
          "--consul.server",
          "consul.service.consul:8500",
        ]

        ports = ["consul_exporter"]
      }

      resources {
        cpu    = 100
        memory = 50
      }

      service {
        # NOTE(review): "${TASK}" is legacy interpolation; the documented form
        # is "${NOMAD_TASK_NAME}" — confirm against your Nomad version.
        name = "${TASK}"
        tags = ["prometheus"]
        port = "consul_exporter"

        check {
          type     = "http"
          # NOTE(review): confirm consul_exporter actually serves /-/healthy;
          # some exporter releases only expose "/" and "/metrics".
          path     = "/-/healthy"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }
}
2 Likes

This is great, thank you! This gives me some ideas how to proceed.

When you make changes to the configuration, will you have to redeploy Prometheus?

Yes, this needs a redeployment, but the config itself usually doesn’t change too often, as services/tags are pulled from Consul directly.

You can also put the Prometheus config templates inline, like this:

  template {
    destination = "local/prometheus.yml"

    # "<<-" (HCL2 indented heredoc) strips the common leading whitespace, so
    # the rendered prometheus.yml is flush-left instead of carrying the HCL
    # file's indentation. Sequence indentation normalized to one style.
    data = <<-EOF
      global:
        scrape_interval: 15s
        evaluation_interval: 15s

      scrape_configs:
        - job_name: prometheus
          metrics_path: "/metrics"
          static_configs:
            - targets: ['prometheus.service.consul:{{ env "NOMAD_PORT_metrics" }}']

        - job_name: nomad
          metrics_path: "/v1/metrics"
          params:
            format: ['prometheus']
          consul_sd_configs:
            - server: 'localhost:8500'
              services:
                - nomad
                - nomad-client
          relabel_configs:
            - source_labels: [__meta_consul_service]
              target_label: job

            # Keep only services whose Consul tags include "http".
            - source_labels: [__meta_consul_tags]
              regex: '.*,http,.*'
              action: keep
    EOF
  }