This repository was archived on 2026-04-18. You can view files and clone it, but you cannot open issues or pull requests or push a commit.
Files
server-config/services/grafana/prometheus.nix
Simon Gardling a3a6700106 grafana: replace disk-usage-collector with prometheus-zfs-exporter
The custom disk-usage-collector shell script + minutely timer is replaced
by prometheus-zfs-exporter (pdf/zfs_exporter, packaged in nixpkgs as
services.prometheus.exporters.zfs). The exporter provides pool capacity
metrics (allocated/free/size) natively.

Partition metrics (/boot, /persistent, /nix) now use node_exporter's
built-in filesystem collector (node_filesystem_*_bytes) which already
runs and collects these metrics.

Also fixes a latent race condition in serviceMountWithZpool: the -mounts
service now orders after zfs-mount.service (which runs 'zfs mount -a'),
not just after pool import. Without this, the mount check could run
before datasets are actually mounted.
2026-04-09 19:54:57 -04:00

111 lines
2.8 KiB
Nix

{
  service_configs,
  lib,
  ...
}:
let
  # Directory the node_exporter textfile collector reads *.prom files from.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";

  # Shorthand for the private port registry used throughout this module.
  ports = service_configs.ports.private;

  # Every exporter scraped here listens on loopback. Build one static
  # scrape job per (job name, port entry) pair instead of repeating the
  # same static_configs boilerplate eight times.
  localhostJob = name: portCfg: {
    job_name = name;
    static_configs = [
      { targets = [ "127.0.0.1:${toString portCfg.port}" ]; }
    ];
  };
in
{
  imports = [
    # Keep Prometheus state on the SSD zpool and only start the service
    # once the datasets backing /var/lib/prometheus are mounted.
    (lib.serviceMountWithZpool "prometheus" service_configs.zpool_ssds [
      "/var/lib/prometheus"
    ])
    # 'Z' recursively enforces ownership/permissions on the state dir.
    (lib.serviceFilePerms "prometheus" [
      "Z /var/lib/prometheus 0700 prometheus prometheus"
    ])
  ];

  services.prometheus = {
    enable = true;
    port = ports.prometheus.port;
    # Loopback only: Prometheus is scraped/proxied locally (e.g. by
    # Grafana), not exposed on the network directly.
    listenAddress = "127.0.0.1";
    stateDir = "prometheus";
    retentionTime = "0d"; # 0 disables time-based retention (keep forever)

    exporters = {
      node = {
        enable = true;
        port = ports.prometheus_node.port;
        listenAddress = "127.0.0.1";
        enabledCollectors = [
          "hwmon"
          "systemd"
          "textfile"
        ];
        extraFlags = [
          "--collector.textfile.directory=${textfileDir}"
        ];
      };
      apcupsd = {
        enable = true;
        port = ports.prometheus_apcupsd.port;
        listenAddress = "127.0.0.1";
        # Default apcupsd NIS address; the UPS daemon runs on this host.
        apcupsdAddress = "127.0.0.1:3551";
      };
      # ZFS pool capacity metrics (allocated/free/size); replaces the old
      # disk-usage-collector textfile script.
      zfs = {
        enable = true;
        port = ports.prometheus_zfs.port;
        listenAddress = "127.0.0.1";
      };
    };

    scrapeConfigs = [
      (localhostJob "prometheus" ports.prometheus)
      (localhostJob "node" ports.prometheus_node)
      (localhostJob "apcupsd" ports.prometheus_apcupsd)
      (localhostJob "llama-cpp" ports.llama_cpp)
      (localhostJob "jellyfin" ports.jellyfin_exporter)
      (localhostJob "qbittorrent" ports.qbittorrent_exporter)
      (localhostJob "igpu" ports.igpu_exporter)
      (localhostJob "zfs" ports.prometheus_zfs)
    ];
  };

  # Make sure the textfile directory exists for any remaining writers of
  # *.prom files picked up by the node_exporter textfile collector.
  systemd.tmpfiles.rules = [
    "d ${textfileDir} 0755 root root -"
  ];
}