Allocations are being scaled up to the max limit (3 here) defined in the scaling block, but the group never scales back down, even though average CPU and memory utilization is well below the target value when using the nomad-apm plugin. I'm attaching my job template below; can anyone point out where I've gone wrong?
job "scaling-example" {
datacenters = ["aws-us-east-1"]
region = "aws-us-east-1"
namespace = "devops"
type = "service"
priority = 50
constraint {
attribute = "${node.class}"
value = "general"
}
group "nomad-scaling" {
count = 1
scaling {
min = 1
max = 3
enabled = true
policy {
evaluation_interval = "5s"
cooldown = "1m"
check "cpu_allocated_percentage" {
source = "nomad-apm"
query = "avg_cpu"
query_window = "1m"
strategy "target-value" {
target = 50
}
}
check "memory_allocated_percentage" {
source = "nomad-apm"
query = "avg_memory"
query_window = "1m"
strategy "target-value" {
target = 50
}
}
}
}
restart {
attempts = 3
interval = "5m"
delay = "30s"
mode = "fail"
}
volume "ca-certificates" {
type = "host"
read_only = true
source = "ca-certificates"
}
network {
mode = "bridge"
port "http" { to = 80 }
}
task "scaling-test" {
driver = "docker"
user = "root"
volume_mount {
volume = "ca-certificates"
destination = "/etc/ssl/certs"
}
config {
image = "nginx:latest"
}
service {
name = "nomad-dynamic-scaling"
port = "http"
check {
name = "alive"
type = "http"
path = "/"
interval = "60s"
timeout = "5s"
check_restart {
limit = 5
grace = "90s"
ignore_warnings = false
}
}
}
resources {
cpu = 100
memory = 100
}
}
}
}
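
For context, the autoscaler agent is pointed at Nomad with the built-in nomad-apm plugin enabled, roughly like the sketch below (the address is illustrative, not my real setup), and the Nomad agents publish allocation and node metrics, which the Nomad APM plugin needs to have anything to query:

# Nomad Autoscaler agent config (sketch; address is illustrative)
nomad {
  address = "http://127.0.0.1:4646"
}

apm "nomad-apm" {
  driver = "nomad-apm"
}

# And in the Nomad agent config, telemetry is enabled so the
# nomad-apm plugin has allocation/node metrics available:
telemetry {
  publish_allocation_metrics = true
  publish_node_metrics       = true
}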