I’m using the upstream NFS CSI driver (nfs.csi.k8s.io) and it works like a charm.
Just for context, my NAS exporting NFS has the DNS name ‘storage.home’ … pretty boring, I know.
In the NFS settings, make sure that either security is disabled (not recommended) or at least the IPs of your Nomad nodes are white-listed.
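My NAS is configured through its web UI, but just to make the allow-listing concrete: on a plain Linux NFS server the equivalent /etc/exports entry for the share used below would look roughly like this (the subnet is a placeholder for your own Nomad node range):

# allow only the Nomad nodes to mount the share
/volume2/homelab  10.0.0.0/24(rw,sync,no_subtree_check)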
Controller:
job "csi-nfs-controller" {
  datacenters = ["home"]
  type        = "system"

  constraint {
    attribute = "${node.class}"
    value     = "compute"
  }

  group "nfs" {
    task "controller" {
      driver = "docker"

      config {
        image = "registry.k8s.io/sig-storage/nfsplugin:v4.3.0"

        args = [
          "--v=5",
          "--nodeid=${attr.unique.hostname}",
          "--endpoint=unix:///csi/csi.sock",
          "--drivername=nfs.csi.k8s.io"
        ]
      }

      csi_plugin {
        id        = "nfs"
        type      = "controller"
        mount_dir = "/csi"
      }

      resources {
        memory = 64
        cpu    = 100
      }
    }
  }
}
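Nothing special about deploying it; I keep the controller in its own file (the name csi-nfs-controller.nomad.hcl is just my own convention):

nomad job run csi-nfs-controller.nomad.hcl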
Node plugin:
job "csi-nfs-plugin" {
  datacenters = ["home"]
  type        = "system" # ensures that all nodes in the DC have a copy.

  group "nfs" {
    restart {
      interval = "30m"
      attempts = 10
      delay    = "15s"
      mode     = "fail"
    }

    task "plugin" {
      driver = "docker"

      config {
        image = "registry.k8s.io/sig-storage/nfsplugin:v4.3.0"

        args = [
          "--v=5",
          "--nodeid=${attr.unique.hostname}",
          "--endpoint=unix:///csi/csi.sock",
          "--drivername=nfs.csi.k8s.io"
        ]

        # node plugins must run as privileged jobs because they
        # mount disks to the host
        privileged = true
      }

      csi_plugin {
        id        = "nfs"
        type      = "node"
        mount_dir = "/csi"
      }

      resources {
        memory = 100
        cpu    = 200
      }
    }
  }
}
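Same procedure for the node plugin. Once both jobs are up, nomad plugin status should show non-zero Controllers Healthy and Nodes Healthy counts before you try to register any volumes (again, the file name is just my convention):

nomad job run csi-nfs-plugin.nomad.hcl
nomad plugin status nfs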
Example volume:
plugin_id = "nfs"
type      = "csi"
id        = "nginx"
name      = "NGINX"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server           = "storage.home"
  share            = "/volume2/homelab"
  subDir           = "nginx/content"
  mountPermissions = "0"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = ["timeo=30", "vers=4.1", "nolock", "sync"]
}
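In my setup the nginx/content subdirectory already exists on the share (nomad volume register will not create it for you), so the volume only needs to be registered. Assuming the spec above is saved as volume-nginx.hcl (my naming):

nomad volume register volume-nginx.hcl
nomad volume status nginx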
Example job using the volume:
job "nginx" {
  datacenters = ["home"]
  type        = "service"

  group "nginx" {
    constraint {
      attribute = "${node.class}"
      value     = "compute"
    }

    restart {
      attempts = 3
      delay    = "1m"
      mode     = "fail"
    }

    network {
      mode = "bridge"
      port "envoy_metrics" { to = 9102 }
    }

    service {
      name = "nginx"
      port = 80

      check {
        type     = "http"
        path     = "/alive"
        interval = "10s"
        timeout  = "2s"
        expose   = true # required for Connect
      }

      tags = [
        "traefik.enable=true",
        "traefik.consulcatalog.connect=true",
        "traefik.http.routers.nginx.rule=Host(`www.example.domain`)",
        "traefik.http.routers.nginx.entrypoints=inet-websecure"
      ]

      meta {
        envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}" # make envoy metrics port available in Consul
      }

      connect {
        sidecar_service {
          proxy {
            config {
              protocol                   = "http"
              envoy_prometheus_bind_addr = "0.0.0.0:9102"
            }
          }
        }

        sidecar_task {
          resources {
            cpu    = 50
            memory = 48
          }
        }
      }
    }

    task "server" {
      driver = "docker"

      config {
        image   = "nginx:latest"
        volumes = ["local:/etc/nginx/conf.d"]
      }

      template {
        data          = file("default.conf")
        destination   = "local/default.conf"
        change_mode   = "signal"
        change_signal = "SIGHUP"
      }

      resources {
        memory = 50
        cpu    = 50
      }

      volume_mount {
        volume      = "nginx"
        destination = "/usr/share/nginx/content"
      }
    }

    volume "nginx" {
      type            = "csi"
      source          = "nginx"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"
    }
  }
}
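The default.conf that gets templated into the task is not shown above; a minimal sketch that matches the job (health check on /alive, content served from the CSI volume) could look like this, adjust to whatever you actually serve:

# minimal default.conf sketch -- answers the /alive check and serves the volume
server {
    listen 80;

    location /alive {
        return 200;
    }

    location / {
        root /usr/share/nginx/content;
    }
}

After a nomad job run nginx.nomad.hcl you can double-check the mount from inside the allocation, e.g. nomad alloc exec -task server <alloc-id> df -h /usr/share/nginx/content (substitute your own allocation ID).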