Hello,
I was following your demo to mount a CephFS filesystem using the CSI plugin.
I am able to run the node and controller ceph-csi plugins and register the volume. However, when I try to run a task with the volume mounted, I get:
E1023 17:40:33.414525 1 utils.go:163] ID: 101 Req-ID: 53245bc5-e638-4b24-b692-505feaef8f2f GRPC error: rpc error: code = Internal desc = missing required field monitors
E1023 17:40:33.564481 1 utils.go:163] ID: 102 Req-ID: 53245bc5-e638-4b24-b692-505feaef8f2f GRPC error: rpc error: code = Internal desc = an error occurred while running (20) umount [/csi/per-alloc/99a505df-7c92-4119-b2ec-23c3659ff1bb/cephfs/rw-file-system-single-node-writer]: exit status 32: umount: /csi/per-alloc/99a505df-7c92-4119-b2ec-23c3659ff1bb/cephfs/rw-file-system-single-node-writer: mountpoint not found
I have checked that the plugin tasks properly mount /etc/ceph-csi-config/config.json with the monitors field defined, so I am stuck trying to figure out what is wrong with my config. Below I post my plugin tasks, my volume definition, and an example job I use for testing. I am running Nomad 0.12.4 with Docker 19.03.13-ce.
In ceph-volume.hcl, userID and userKey are empty because I have not yet set up a keyring on my Ceph cluster, and the secrets cannot be empty. (I would expect an authorization error from that, but it should not be related to the error above.)
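For reference, this is roughly how I verified the rendered config inside the node plugin task (the alloc ID is a placeholder and the monitor addresses are replaced with example values, not my real ones):

$ nomad alloc exec -task ceph-node <alloc-id> cat /etc/ceph-csi-config/config.json
[{
  "clusterID": "53245bc5-e638-4b24-b692-505feaef8f2f",
  "monitors": [
    "10.0.0.1", "10.0.0.2", "10.0.0.3"
  ]
}]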
plugin-ceph-nodes.nomad:
job "plugin-ceph-nodes" {
datacenters = ["dc1"]
type = "system"
group "nodes" {
task "ceph-node" {
driver = "docker"
template {
data = <<EOF
[{
"clusterID": "53245bc5-e638-4b24-b692-505feaef8f2f",
"monitors": [
{{range $index, $service := service "ceph-mon"}}{{if gt $index 0}}, {{end}}"{{.Address}}"{{end}}
]
}]
EOF
destination = "local/config.json"
change_mode = "restart"
}
config {
image = "https://quay.io/cephcsi/cephcsi:v2.1.2"
args = [
"--endpoint=unix://csi/csi.sock",
"--type=cephfs",
"--drivername=cephfs.csi.ceph.com",
"--nodeid=${node.unique.id}",
"--logtostderr",
"--nodeserver=true"
]
privileged = true
volumes = [
"./local/config.json:/etc/ceph-csi-config/config.json"
]
}
resources {
cpu = 500
memory = 256
network {
mbits = 1
}
}
csi_plugin {
id = "ceph-csi"
type = "node"
mount_dir = "/csi"
}
}
task "ceph-controller" {
driver = "docker"
template {
data = <<EOF
[{
"clusterID": "53245bc5-e638-4b24-b692-505feaef8f2f",
"monitors": [
{{range $index, $service := service "ceph-mon"}}{{if gt $index 0}}, {{end}}"{{.Address}}"{{end}}
]
}]
EOF
destination = "local/config.json"
change_mode = "restart"
}
config {
image = "quay.io/cephcsi/cephcsi:v2.1.2"
args = [
"--type=cephfs",
"--controllerserver=true",
"--drivername=cephfs.csi.ceph.com",
"--logtostderr",
"--endpoint=unix://csi/csi.sock",
"--nodeid=${node.unique.id}"
]
volumes = [
"./local/config.json:/etc/ceph-csi-config/config.json"
]
}
resources {
cpu = 200
memory = 500
network {
mbits = 1
}
}
// service {
// name = "prometheus"
// port = "prometheus"
// tags = ["ceph-csi"]
// }
csi_plugin {
id = "ceph-csi"
type = "controller"
mount_dir = "/csi"
}
}
}
}
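After running this job, both plugins report as running. This is the sequence I use (output omitted):

$ nomad job run plugin-ceph-nodes.nomad
$ nomad plugin status ceph-csi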
ceph-volume.hcl:
id = "cephfs"
name = "CephFS"
external_id = "53245bc5-e638-4b24-b692-505feaef8f2f"
access_mode = "single-node-writer"
attachment_mode = "file-system"
plugin_id = "ceph-csi"
// mount_options {
// fs_type = "ext4"
// mount_flags = ["ro"]
// }
parameters {
}
secrets {
userID = ""
userKey = ""
}
context {
pool = "cephfs_data"
clusterID = "53245bc5-e638-4b24-b692-505feaef8f2f",
}
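I register the volume with:

$ nomad volume register ceph-volume.hcl
$ nomad volume status cephfs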
test.nomad:
job "mysql-server" {
datacenters = ["dc1"]
type = "service"
group "mysql-server" {
count = 1
volume "mysql" {
type = "csi"
read_only = false
source = "cephfs"
}
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
task "mysql-server" {
env {
CHANGE = 2
}
driver = "docker"
volume_mount {
volume = "mysql"
destination = "/srv"
read_only = false
}
env = {
"MYSQL_ROOT_PASSWORD" = "password"
CHANGE = 2
}
config {
image = "hashicorp/mysql-portworx-demo:latest"
args = ["--datadir", "/srv/mysql"]
port_map {
db = 3306
}
}
resources {
cpu = 500
memory = 1024
network {
port "db" {
static = 3306
}
}
}
service {
name = "mysql-server"
port = "db"
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
}
}
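Running this test job is what produces the errors quoted at the top:

$ nomad job run test.nomad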