Hello!
I have a problem when running a job that requires using a constraint block for a GPU device.
Nomad server version : 1.5.6
Nomad client version: 1.4.3
This is the job that I am running:
# Nomad job requesting 2 NVIDIA GPUs, pinned by model and by specific device UUIDs.
job "test-2070-2" {
datacenters = ["dc1"]
group "test-2070-2" {
restart {
# Do not retry the task on failure.
attempts=0
}
count=1
task "test-2070-2" {
driver = "podman"
config {
image = "image_with_gpu"
}
resources {
cpu = 2650
memory = 8192
# Request two devices from the nvidia/gpu device plugin.
device "nvidia/gpu" {
count = 2
# Restrict scheduling to this GPU model.
constraint {
attribute = "${device.model}"
value = "NVIDIA GeForce RTX 2070 SUPER"
}
# NOTE(review): "${device.ids}" does not appear among Nomad's documented
# device constraint attributes (model, vendor, type, attr.<name>) — verify
# against the Nomad device docs for your version. If unsupported, this
# constraint is not enforced, which would explain why the container is
# allocated GPUs with UUIDs other than the ones listed here.
constraint {
attribute = "${device.ids}"
operator = "set_contains"
value = "GPU-9b5df054-6f08-f35c-9c4c-5709b19efea5,GPU-1846fc5f-8c71-bfab-00e1-9c190dd88ed7"
}
}
}
}
}
}
When I run `nvidia-smi -L` inside the container, I get different UUIDs:
[root@481a2da8e0a9 /]# nvidia-smi -L
GPU 0: NVIDIA GeForce RTX 2070 SUPER (UUID: GPU-d7574813-0b3f-ee8f-39fc-2b48f9dff169)
GPU 1: NVIDIA GeForce RTX 2070 SUPER (UUID: GPU-9b5df054-6f08-f35c-9c4c-5709b19efea5)
Do I need to upgrade the client to make this job work?