grafana: re-organize
This commit is contained in:
696
services/grafana/dashboard.nix
Normal file
696
services/grafana/dashboard.nix
Normal file
@@ -0,0 +1,696 @@
|
||||
{
  ...
}:
let
  # Shared datasource references.
  promDs = {
    type = "prometheus";
    uid = "prometheus";
  };
  grafanaDs = {
    type = "grafana";
    uid = "-- Grafana --";
  };

  # Tag-driven annotation query (events are pushed by the collector services).
  mkAnnotation = name: iconColor: tag: {
    inherit name iconColor;
    datasource = grafanaDs;
    enable = true;
    showIn = 0;
    type = "tags";
    tags = [ tag ];
  };

  # Prometheus query target; `extra` carries refId/expr/legendFormat so the
  # exact key set of each target is preserved.
  target = extra: { datasource = promDs; } // extra;

  # Absolute threshold block from an ordered list of { color, value } steps.
  absSteps = steps: {
    mode = "absolute";
    inherit steps;
  };

  # Per-refId field override.
  refOverride = refId: properties: {
    matcher = {
      id = "byFrameRefID";
      options = refId;
    };
    inherit properties;
  };
  prop = id: value: { inherit id value; };

  # Common time-series line styling.
  tsCustom = lineWidth: fillOpacity: {
    inherit lineWidth fillOpacity;
    spanNulls = true;
  };

  # Stat/gauge reduction: display the most recent non-null sample.
  reduceLast = {
    calcs = [ "lastNotNull" ];
    fields = "";
    values = false;
  };

  statOptions = graphMode: {
    reduceOptions = reduceLast;
    colorMode = "value";
    inherit graphMode;
  };

  dashboard = {
    editable = true;
    graphTooltip = 1;
    schemaVersion = 39;
    tags = [
      "system"
      "monitoring"
    ];
    time = {
      from = "now-6h";
      to = "now";
    };
    timezone = "browser";
    title = "System Overview";
    uid = "system-overview";

    annotations.list = [
      (mkAnnotation "Jellyfin Streams" "green" "jellyfin")
      (mkAnnotation "ZFS Scrubs" "orange" "zfs-scrub")
      (mkAnnotation "LLM Requests" "purple" "llama-cpp")
    ];

    panels = [
      # -- Row 1: UPS --
      {
        id = 1;
        type = "timeseries";
        title = "UPS Power Draw";
        gridPos = {
          h = 8;
          w = 8;
          x = 0;
          y = 0;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = "apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts";
            legendFormat = "Power (W)";
            refId = "A";
          })
          (target {
            # +4.5 presumably compensates for load the UPS does not meter — TODO confirm.
            expr = "avg_over_time((apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts + 4.5)[5m:])";
            legendFormat = "5m average (W)";
            refId = "B";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "watt";
            color.mode = "palette-classic";
            custom = tsCustom 2 20;
          };
          overrides = [
            # Raw reading: faint dotted line under the bold 5m average.
            (refOverride "A" [
              (prop "custom.lineStyle" { fill = "dot"; })
              (prop "custom.fillOpacity" 10)
              (prop "custom.lineWidth" 1)
              (prop "custom.pointSize" 1)
            ])
            (refOverride "B" [
              (prop "custom.lineWidth" 4)
              (prop "custom.fillOpacity" 0)
            ])
          ];
        };
      }
      {
        id = 7;
        type = "stat";
        title = "Energy Usage (24h)";
        gridPos = {
          h = 8;
          w = 4;
          x = 8;
          y = 0;
        };
        datasource = promDs;
        targets = [
          (target {
            # Mean watts over 24h -> kWh for the day.
            expr = "avg_over_time((apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts + 4.5)[24h:]) * 24 / 1000";
            legendFormat = "";
            refId = "A";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "kwatth";
            decimals = 2;
            thresholds = absSteps [
              {
                color = "green";
                value = null;
              }
              {
                color = "yellow";
                value = 5;
              }
              {
                color = "red";
                value = 10;
              }
            ];
          };
          overrides = [ ];
        };
        options = statOptions "none";
      }
      {
        id = 2;
        type = "gauge";
        title = "UPS Load";
        gridPos = {
          h = 8;
          w = 6;
          x = 12;
          y = 0;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = "apcupsd_ups_load_percent";
            refId = "A";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            thresholds = absSteps [
              {
                color = "green";
                value = null;
              }
              {
                color = "yellow";
                value = 70;
              }
              {
                color = "red";
                value = 90;
              }
            ];
          };
          overrides = [ ];
        };
        options.reduceOptions = reduceLast;
      }
      {
        id = 3;
        type = "gauge";
        title = "UPS Battery";
        gridPos = {
          h = 8;
          w = 6;
          x = 18;
          y = 0;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = "apcupsd_battery_charge_percent";
            refId = "A";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            # Inverted palette: low charge is the bad state.
            thresholds = absSteps [
              {
                color = "red";
                value = null;
              }
              {
                color = "yellow";
                value = 20;
              }
              {
                color = "green";
                value = 50;
              }
            ];
          };
          overrides = [ ];
        };
        options.reduceOptions = reduceLast;
      }

      # -- Row 2: System --
      {
        id = 4;
        type = "timeseries";
        title = "CPU Temperature";
        gridPos = {
          h = 8;
          w = 12;
          x = 0;
          y = 8;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = ''node_hwmon_temp_celsius{chip=~"pci.*"}'';
            legendFormat = "CPU {{sensor}}";
            refId = "A";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "celsius";
            color.mode = "palette-classic";
            custom = tsCustom 2 10;
          };
          overrides = [ ];
        };
      }
      {
        id = 5;
        type = "stat";
        title = "System Uptime";
        gridPos = {
          h = 8;
          w = 6;
          x = 12;
          y = 8;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = "time() - node_boot_time_seconds";
            refId = "A";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "s";
            thresholds = absSteps [
              {
                color = "green";
                value = null;
              }
            ];
          };
          overrides = [ ];
        };
        options = statOptions "none";
      }
      {
        id = 6;
        type = "stat";
        title = "Jellyfin Active Streams";
        gridPos = {
          h = 8;
          w = 6;
          x = 18;
          y = 8;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = "jellyfin_active_streams";
            refId = "A";
          })
        ];
        fieldConfig = {
          defaults = {
            thresholds = absSteps [
              {
                color = "green";
                value = null;
              }
              {
                color = "yellow";
                value = 3;
              }
              {
                color = "red";
                value = 6;
              }
            ];
          };
          overrides = [ ];
        };
        options = statOptions "area";
      }

      # -- Row 3: qBittorrent --
      {
        id = 11;
        type = "timeseries";
        title = "qBittorrent Speed";
        gridPos = {
          h = 8;
          w = 24;
          x = 0;
          y = 16;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = "qbittorrent_download_bytes_per_second";
            legendFormat = "Download";
            refId = "A";
          })
          (target {
            expr = "qbittorrent_upload_bytes_per_second";
            legendFormat = "Upload";
            refId = "B";
          })
          (target {
            expr = "avg_over_time(qbittorrent_download_bytes_per_second[10m:])";
            legendFormat = "Download (10m avg)";
            refId = "C";
          })
          (target {
            expr = "avg_over_time(qbittorrent_upload_bytes_per_second[10m:])";
            legendFormat = "Upload (10m avg)";
            refId = "D";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "binBps";
            min = 0;
            color.mode = "palette-classic";
            custom = tsCustom 1 10;
          };
          # Raw series (A/B) are faint; 10m averages (C/D) are bold lines.
          overrides = [
            (refOverride "A" [
              (prop "color" {
                fixedColor = "green";
                mode = "fixed";
              })
              (prop "custom.fillOpacity" 5)
            ])
            (refOverride "B" [
              (prop "color" {
                fixedColor = "blue";
                mode = "fixed";
              })
              (prop "custom.fillOpacity" 5)
            ])
            (refOverride "C" [
              (prop "color" {
                fixedColor = "green";
                mode = "fixed";
              })
              (prop "custom.lineWidth" 3)
              (prop "custom.fillOpacity" 0)
            ])
            (refOverride "D" [
              (prop "color" {
                fixedColor = "blue";
                mode = "fixed";
              })
              (prop "custom.lineWidth" 3)
              (prop "custom.fillOpacity" 0)
            ])
          ];
        };
      }

      # -- Row 4: Intel GPU --
      {
        id = 8;
        type = "timeseries";
        title = "Intel GPU Utilization";
        gridPos = {
          h = 8;
          w = 24;
          x = 0;
          y = 24;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = "intel_gpu_engine_busy_percent";
            legendFormat = "{{engine}}";
            refId = "A";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = tsCustom 2 10;
          };
          overrides = [ ];
        };
      }

      # -- Row 5: Storage --
      {
        id = 12;
        type = "timeseries";
        title = "ZFS Pool Utilization";
        gridPos = {
          h = 8;
          w = 12;
          x = 0;
          y = 32;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = ''zpool_used_bytes{pool="tank"} / zpool_size_bytes{pool="tank"} * 100'';
            legendFormat = "tank";
            refId = "A";
          })
          (target {
            expr = ''zpool_used_bytes{pool="hdds"} / zpool_size_bytes{pool="hdds"} * 100'';
            legendFormat = "hdds";
            refId = "B";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = tsCustom 2 20;
          };
          overrides = [ ];
        };
      }
      {
        id = 13;
        type = "timeseries";
        title = "Boot Drive Partitions";
        gridPos = {
          h = 8;
          w = 12;
          x = 12;
          y = 32;
        };
        datasource = promDs;
        targets = [
          (target {
            expr = ''partition_used_bytes{mount="/boot"} / partition_size_bytes{mount="/boot"} * 100'';
            legendFormat = "/boot";
            refId = "A";
          })
          (target {
            expr = ''partition_used_bytes{mount="/persistent"} / partition_size_bytes{mount="/persistent"} * 100'';
            legendFormat = "/persistent";
            refId = "B";
          })
          (target {
            expr = ''partition_used_bytes{mount="/nix"} / partition_size_bytes{mount="/nix"} * 100'';
            legendFormat = "/nix";
            refId = "C";
          })
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = tsCustom 2 20;
          };
          overrides = [ ];
        };
      }
    ];
  };
in
{
  # Serialized dashboard, picked up by Grafana's file-based dashboard provider.
  environment.etc."grafana-dashboards/system-overview.json" = {
    text = builtins.toJSON dashboard;
    mode = "0444";
  };
}
|
||||
14
services/grafana/default.nix
Normal file
14
services/grafana/default.nix
Normal file
@@ -0,0 +1,14 @@
|
||||
# Aggregates every Grafana-related module: the server, Prometheus, the
# provisioned dashboard, and the metric/annotation collector services.
{
  imports = [
    ./grafana.nix
    ./prometheus.nix
    ./dashboard.nix

    # Textfile metric collectors
    ./disk-usage-collector.nix
    ./intel-gpu-collector.nix
    ./jellyfin-collector.nix
    ./qbittorrent-collector.nix

    # Grafana annotation feeders
    ./jellyfin-annotations.nix
    ./llama-cpp-annotations.nix
    ./zfs-scrub-annotations.nix
  ];
}
|
||||
38
services/grafana/disk-usage-collector.nix
Normal file
38
services/grafana/disk-usage-collector.nix
Normal file
@@ -0,0 +1,38 @@
|
||||
{
  config,
  pkgs,
  lib,
  ...
}:
let
  # Directory scraped by prometheus-node-exporter's textfile collector.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";

  diskUsageCollector = pkgs.writeShellApplication {
    name = "disk-usage-collector";
    runtimeInputs = with pkgs; [
      coreutils
      gawk
      config.boot.zfs.package
      util-linux # for mountpoint
    ];
    text = builtins.readFile ./disk-usage-collector.sh;
  };
in
lib.mkIf config.services.grafana.enable {
  systemd.services.disk-usage-collector = {
    description = "Collect ZFS pool and partition usage metrics for Prometheus";
    # Output file consumed by node-exporter.
    environment.TEXTFILE = "${textfileDir}/disk-usage.prom";
    serviceConfig = {
      Type = "oneshot";
      ExecStart = lib.getExe diskUsageCollector;
    };
  };

  systemd.timers.disk-usage-collector = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "minutely";
      RandomizedDelaySec = "10s";
    };
  };
}
|
||||
44
services/grafana/disk-usage-collector.sh
Normal file
44
services/grafana/disk-usage-collector.sh
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env bash
# Emit ZFS pool and boot-drive partition usage metrics in the Prometheus
# textfile-collector format. Output is written atomically via a temp file.
set -euo pipefail

TEXTFILE="${TEXTFILE:?TEXTFILE env required}"
TMP="${TEXTFILE}.$$"

# Print the HELP/TYPE preamble for a gauge metric: $1 name, $2 help text.
emit_header() {
  echo "# HELP $1 $2"
  echo "# TYPE $1 gauge"
}

{
  emit_header zpool_size_bytes 'Total size of ZFS pool in bytes'
  emit_header zpool_used_bytes 'Used space in ZFS pool in bytes'
  emit_header zpool_free_bytes 'Free space in ZFS pool in bytes'

  # -Hp: scripting mode, parseable, bytes
  zpool list -Hp -o name,size,alloc,free | while IFS=$'\t' read -r name size alloc free; do
    echo "zpool_size_bytes{pool=\"${name}\"} ${size}"
    echo "zpool_used_bytes{pool=\"${name}\"} ${alloc}"
    echo "zpool_free_bytes{pool=\"${name}\"} ${free}"
  done

  emit_header partition_size_bytes 'Total size of partition in bytes'
  emit_header partition_used_bytes 'Used space on partition in bytes'
  emit_header partition_free_bytes 'Free space on partition in bytes'

  # Boot drive partitions: /boot (ESP), /persistent, /nix.
  # df reports 1K blocks; convert to bytes.
  for mount in /boot /persistent /nix; do
    if mountpoint -q "$mount" 2>/dev/null; then
      read -r size used avail _ <<< "$(df -k --output=size,used,avail "$mount" | tail -1)"
      echo "partition_size_bytes{mount=\"${mount}\"} $((size * 1024))"
      echo "partition_used_bytes{mount=\"${mount}\"} $((used * 1024))"
      echo "partition_free_bytes{mount=\"${mount}\"} $((avail * 1024))"
    fi
  done
} > "$TMP"
mv "$TMP" "$TEXTFILE"
|
||||
86
services/grafana/grafana.nix
Normal file
86
services/grafana/grafana.nix
Normal file
@@ -0,0 +1,86 @@
|
||||
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "grafana" service_configs.zpool_ssds [
      service_configs.grafana.dir
    ])
    (lib.serviceFilePerms "grafana" [
      "Z ${service_configs.grafana.dir} 0700 grafana grafana"
    ])
  ];

  services.grafana = {
    enable = true;
    dataDir = service_configs.grafana.dir;

    settings = {
      server = {
        http_addr = "127.0.0.1";
        http_port = service_configs.ports.private.grafana.port;
        domain = service_configs.grafana.domain;
        root_url = "https://${service_configs.grafana.domain}";
      };

      # Single-user box behind Caddy auth: anonymous admin, no login form.
      "auth.anonymous" = {
        enabled = true;
        org_role = "Admin";
      };
      "auth.basic".enabled = false;
      "auth".disable_login_form = true;

      analytics.reporting_enabled = false;

      # FIX: `[feature_toggles] enable` is a comma-separated list of toggle
      # *names* to turn ON, so `enable = "dataConnectionsConsole=false"` would
      # be parsed as a (bogus) toggle name rather than disabling anything.
      # Grafana supports disabling a toggle by setting it directly to false.
      feature_toggles.dataConnectionsConsole = false;

      users.default_theme = "dark";

      # Disable unused built-in integrations
      alerting.enabled = false;
      "unified_alerting".enabled = false;
      explore.enabled = false;
      news.news_feed_enabled = false;

      plugins = {
        enable_alpha = false;
        plugin_admin_enabled = false;
      };
    };

    provision = {
      # The one datasource every dashboard panel references by uid.
      datasources.settings = {
        apiVersion = 1;
        datasources = [
          {
            name = "Prometheus";
            type = "prometheus";
            url = "http://127.0.0.1:${toString service_configs.ports.private.prometheus.port}";
            access = "proxy";
            isDefault = true;
            editable = false;
            uid = "prometheus";
          }
        ];
      };

      # File-based dashboards rendered into /etc by dashboard.nix.
      dashboards.settings.providers = [
        {
          name = "system";
          type = "file";
          options.path = "/etc/grafana-dashboards";
          disableDeletion = true;
          updateIntervalSeconds = 60;
        }
      ];
    };
  };

  services.caddy.virtualHosts."${service_configs.grafana.domain}".extraConfig = ''
    import ${config.age.secrets.caddy_auth.path}
    reverse_proxy :${toString service_configs.ports.private.grafana.port}
  '';
}
|
||||
38
services/grafana/intel-gpu-collector.nix
Normal file
38
services/grafana/intel-gpu-collector.nix
Normal file
@@ -0,0 +1,38 @@
|
||||
{
  config,
  pkgs,
  lib,
  ...
}:
let
  # Directory scraped by prometheus-node-exporter's textfile collector.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";

  intelGpuCollector = pkgs.writeShellApplication {
    name = "intel-gpu-collector";
    runtimeInputs = with pkgs; [
      python3
      intel-gpu-tools # provides intel_gpu_top
    ];
    text = ''
      exec python3 ${./intel-gpu-collector.py}
    '';
  };
in
lib.mkIf config.services.grafana.enable {
  systemd.services.intel-gpu-collector = {
    description = "Collect Intel GPU metrics for Prometheus";
    # Output file consumed by node-exporter.
    environment.TEXTFILE = "${textfileDir}/intel-gpu.prom";
    serviceConfig = {
      Type = "oneshot";
      ExecStart = lib.getExe intelGpuCollector;
    };
  };

  systemd.timers.intel-gpu-collector = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      # Every 30 seconds, jittered so timers don't all fire together.
      OnCalendar = "*:*:0/30";
      RandomizedDelaySec = "10s";
    };
  };
}
|
||||
107
services/grafana/intel-gpu-collector.py
Normal file
107
services/grafana/intel-gpu-collector.py
Normal file
@@ -0,0 +1,107 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
TEXTFILE = os.environ.get(
|
||||
"TEXTFILE",
|
||||
"/var/lib/prometheus-node-exporter-textfiles/intel-gpu.prom",
|
||||
)
|
||||
|
||||
|
||||
def read_one_sample():
    """Capture the first complete top-level JSON object from `intel_gpu_top -J`.

    The stream is scanned byte-by-byte, tracking brace depth to find the end
    of the object. Returns the parsed dict, or None when the tool is
    unavailable, times out, or emits unparseable output.

    NOTE(review): depth counting ignores braces inside JSON strings; assumed
    safe for intel_gpu_top's engine names — confirm if output format changes.
    """
    try:
        proc = subprocess.Popen(
            ["intel_gpu_top", "-J", "-s", "1000"],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )
        chunks = []
        depth = 0
        started = False
        # NOTE(review): deadline is only checked between reads; a blocked
        # read(1) can overrun it.
        deadline = time.monotonic() + 8.0
        try:
            while time.monotonic() < deadline:
                ch = proc.stdout.read(1)
                if not ch:
                    break
                if ch == b"{":
                    started = True
                    depth += 1
                if started:
                    chunks.append(ch)
                if started and ch == b"}":
                    depth -= 1
                    if depth == 0:
                        break
        finally:
            proc.terminate()
            proc.wait()
        raw = b"".join(chunks)
        if not raw:
            return None
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            print("Malformed JSON from intel_gpu_top", file=sys.stderr)
            return None
    except Exception as e:
        print(f"intel_gpu_top unavailable: {e}", file=sys.stderr)
        return None
|
||||
|
||||
|
||||
def write_empty_metrics():
    """Write zero-valued metrics so Prometheus doesn't see stale data.

    Per-engine busy gauges get only HELP/TYPE stanzas (no series); frequency
    and RC6 are pinned to 0. The file is swapped in atomically.
    """
    lines = [
        "# HELP intel_gpu_engine_busy_percent Intel GPU engine busy percentage",
        "# TYPE intel_gpu_engine_busy_percent gauge",
        "# HELP intel_gpu_frequency_mhz Intel GPU actual frequency in MHz",
        "# TYPE intel_gpu_frequency_mhz gauge",
        "intel_gpu_frequency_mhz 0",
        "# HELP intel_gpu_rc6_percent Intel GPU RC6 power-saving state percentage",
        "# TYPE intel_gpu_rc6_percent gauge",
        "intel_gpu_rc6_percent 0",
    ]
    body = "\n".join(lines) + "\n"
    tmp = f"{TEXTFILE}.tmp"
    with open(tmp, "w") as handle:
        handle.write(body)
    # Atomic within the same filesystem.
    os.replace(tmp, TEXTFILE)
|
||||
|
||||
|
||||
def write_metrics(sample):
    """Render one intel_gpu_top JSON sample as Prometheus gauges.

    Emits one intel_gpu_engine_busy_percent series per engine, plus the
    actual frequency and RC6 residency. The file is swapped in atomically.
    """
    lines = [
        "# HELP intel_gpu_engine_busy_percent Intel GPU engine busy percentage",
        "# TYPE intel_gpu_engine_busy_percent gauge",
    ]
    for engine, stats in sample.get("engines", {}).items():
        lines.append(
            f'intel_gpu_engine_busy_percent{{engine="{engine}"}} {stats.get("busy", 0)}'
        )

    freq = sample.get("frequency", {})
    lines.extend(
        [
            "# HELP intel_gpu_frequency_mhz Intel GPU actual frequency in MHz",
            "# TYPE intel_gpu_frequency_mhz gauge",
            f'intel_gpu_frequency_mhz {freq.get("actual", 0)}',
            "# HELP intel_gpu_rc6_percent Intel GPU RC6 power-saving state percentage",
            "# TYPE intel_gpu_rc6_percent gauge",
            f'intel_gpu_rc6_percent {sample.get("rc6", {}).get("value", 0)}',
        ]
    )

    body = "\n".join(lines) + "\n"
    tmp = f"{TEXTFILE}.tmp"
    with open(tmp, "w") as handle:
        handle.write(body)
    # Atomic within the same filesystem.
    os.replace(tmp, TEXTFILE)
|
||||
|
||||
|
||||
def main():
    """Collect one GPU sample and publish it; fall back to zeroed metrics."""
    sample = read_one_sample()
    if sample is not None:
        write_metrics(sample)
        return
    print("Failed to read intel_gpu_top sample", file=sys.stderr)
    # Still exit 0: an unavailable GPU is not a service failure.
    write_empty_metrics()
    sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
40
services/grafana/jellyfin-annotations.nix
Normal file
40
services/grafana/jellyfin-annotations.nix
Normal file
@@ -0,0 +1,40 @@
|
||||
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
lib.mkIf (config.services.grafana.enable && config.services.jellyfin.enable) {
  systemd.services.jellyfin-annotations = {
    description = "Jellyfin stream annotation service for Grafana";
    after = [
      "network.target"
      "grafana.service"
    ];
    wantedBy = [ "multi-user.target" ];

    environment = {
      JELLYFIN_URL = "http://127.0.0.1:${toString service_configs.ports.private.jellyfin.port}";
      GRAFANA_URL = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
      STATE_FILE = "/var/lib/jellyfin-annotations/state.json";
      # Seconds between Jellyfin session polls.
      POLL_INTERVAL = "30";
    };

    serviceConfig = {
      ExecStart = "${pkgs.python3}/bin/python3 ${./jellyfin-annotations.py}";
      Restart = "always";
      RestartSec = "10s";
      # Exposed to the script as $CREDENTIALS_DIRECTORY/jellyfin-api-key.
      LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";

      # Hardening
      DynamicUser = true;
      StateDirectory = "jellyfin-annotations";
      NoNewPrivileges = true;
      ProtectSystem = "strict";
      ProtectHome = true;
      PrivateTmp = true;
      RestrictAddressFamilies = [
        "AF_INET"
        "AF_INET6"
      ];
      MemoryDenyWriteExecute = true;
    };
  };
}
|
||||
233
services/grafana/jellyfin-annotations.py
Normal file
233
services/grafana/jellyfin-annotations.py
Normal file
@@ -0,0 +1,233 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
|
||||
JELLYFIN_URL = os.environ.get("JELLYFIN_URL", "http://127.0.0.1:8096")
|
||||
GRAFANA_URL = os.environ.get("GRAFANA_URL", "http://127.0.0.1:3000")
|
||||
STATE_FILE = os.environ.get("STATE_FILE", "/var/lib/jellyfin-annotations/state.json")
|
||||
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "30"))
|
||||
|
||||
|
||||
def get_api_key():
    """Return the Jellyfin API key.

    Preference order: systemd credentials ($CREDENTIALS_DIRECTORY), then the
    agenix secret path. Exits the process if neither source exists.
    """
    cred_dir = os.environ.get("CREDENTIALS_DIRECTORY")
    if cred_dir:
        return Path(cred_dir, "jellyfin-api-key").read_text().strip()
    fallback = Path("/run/agenix/jellyfin-api-key")
    if fallback.exists():
        return fallback.read_text().strip()
    sys.exit("ERROR: Cannot find jellyfin-api-key")
|
||||
|
||||
|
||||
def http_json(method, url, body=None):
    """Issue an HTTP request with an optional JSON body and return parsed JSON.

    Raises on network errors or non-2xx responses (urllib behavior); the
    callers decide how to handle failures. 5-second timeout.
    """
    payload = None if body is None else json.dumps(body).encode()
    req = urllib.request.Request(
        url,
        data=payload,
        headers={"Content-Type": "application/json", "Accept": "application/json"},
        method=method,
    )
    with urllib.request.urlopen(req, timeout=5) as resp:
        return json.loads(resp.read())
|
||||
|
||||
|
||||
def get_active_sessions(api_key):
    """Return Jellyfin sessions with something playing, or None on error.

    NOTE(review): the api_key travels in the query string and may end up in
    access logs; Jellyfin also accepts it via header — consider switching.
    """
    try:
        req = urllib.request.Request(
            f"{JELLYFIN_URL}/Sessions?api_key={api_key}",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(req, timeout=5) as resp:
            sessions = json.loads(resp.read())
        # Only sessions actively playing media are of interest.
        return [s for s in sessions if s.get("NowPlayingItem")]
    except Exception as e:
        print(f"Error fetching sessions: {e}", file=sys.stderr)
        return None
|
||||
|
||||
|
||||
def _codec(name):
|
||||
if not name:
|
||||
return ""
|
||||
aliases = {"h264": "H.264", "h265": "H.265", "hevc": "H.265", "av1": "AV1",
|
||||
"vp9": "VP9", "vp8": "VP8", "mpeg4": "MPEG-4", "mpeg2video": "MPEG-2",
|
||||
"aac": "AAC", "ac3": "AC3", "eac3": "EAC3", "dts": "DTS",
|
||||
"truehd": "TrueHD", "mp3": "MP3", "opus": "Opus", "flac": "FLAC",
|
||||
"vorbis": "Vorbis"}
|
||||
return aliases.get(name.lower(), name.upper())
|
||||
|
||||
|
||||
def _res(width, height):
|
||||
if not height:
|
||||
return ""
|
||||
common = {2160: "4K", 1440: "1440p", 1080: "1080p", 720: "720p",
|
||||
480: "480p", 360: "360p"}
|
||||
return common.get(height, f"{height}p")
|
||||
|
||||
|
||||
def _channels(n):
|
||||
labels = {1: "Mono", 2: "Stereo", 6: "5.1", 7: "6.1", 8: "7.1"}
|
||||
return labels.get(n, f"{n}ch") if n else ""
|
||||
|
||||
|
||||
def format_label(session):
    """Build a multi-line Grafana annotation label for one playback session.

    Line 1: "user: title"; line 2: stream details (play method, video,
    audio, bitrate, transcode reasons); line 3 (optional): client/device.
    """
    user = session.get("UserName", "Unknown")
    item = session.get("NowPlayingItem", {}) or {}
    transcode = session.get("TranscodingInfo") or {}
    play_state = session.get("PlayState") or {}
    client = session.get("Client", "")
    device = session.get("DeviceName", "")

    name = item.get("Name", "Unknown")
    series = item.get("SeriesName", "")
    season = item.get("ParentIndexNumber")
    episode = item.get("IndexNumber")
    media_type = item.get("Type", "")

    # Title: "Series S01E02 – Episode", "Series – Name", "Name (movie)", or bare name.
    if series and season and episode:
        title = f"{series} S{season:02d}E{episode:02d} \u2013 {name}"
    elif series:
        title = f"{series} \u2013 {name}"
    elif media_type == "Movie":
        title = f"{name} (movie)"
    else:
        title = name

    # Derive a human-readable play method; presence of TranscodingInfo
    # implies transcoding even when PlayMethod is missing.
    play_method = play_state.get("PlayMethod", "")
    if play_method == "DirectPlay":
        method = "Direct Play"
    elif play_method == "DirectStream":
        method = "Direct Stream"
    elif play_method == "Transcode" or transcode:
        method = "Transcode"
    else:
        method = "Direct Play"

    # Pick the source video stream and the default (or first) audio stream.
    media_streams = item.get("MediaStreams") or []
    video_streams = [s for s in media_streams if s.get("Type") == "Video"]
    audio_streams = [s for s in media_streams if s.get("Type") == "Audio"]
    default_audio = next((s for s in audio_streams if s.get("IsDefault")), None)
    audio_stream = default_audio or (audio_streams[0] if audio_streams else {})
    video_stream = video_streams[0] if video_streams else {}

    src_vcodec = _codec(video_stream.get("Codec", ""))
    src_res = _res(video_stream.get("Width") or item.get("Width"),
                   video_stream.get("Height") or item.get("Height"))
    src_acodec = _codec(audio_stream.get("Codec", ""))
    src_channels = _channels(audio_stream.get("Channels"))

    is_video_direct = transcode.get("IsVideoDirect", True)
    is_audio_direct = transcode.get("IsAudioDirect", True)

    # Video: "src→dst res" when the codec changes, else the effective codec.
    if transcode and not is_video_direct:
        dst_vcodec = _codec(transcode.get("VideoCodec", ""))
        dst_res = _res(transcode.get("Width"), transcode.get("Height")) or src_res
        if src_vcodec and dst_vcodec and src_vcodec != dst_vcodec:
            video_part = f"{src_vcodec}\u2192{dst_vcodec} {dst_res}".strip()
        else:
            video_part = f"{dst_vcodec or src_vcodec} {dst_res}".strip()
    else:
        video_part = f"{src_vcodec} {src_res}".strip()

    # Audio: same pattern as video.
    if transcode and not is_audio_direct:
        dst_acodec = _codec(transcode.get("AudioCodec", ""))
        dst_channels = _channels(transcode.get("AudioChannels")) or src_channels
        if src_acodec and dst_acodec and src_acodec != dst_acodec:
            audio_part = f"{src_acodec}\u2192{dst_acodec} {dst_channels}".strip()
        else:
            audio_part = f"{dst_acodec or src_acodec} {dst_channels}".strip()
    else:
        audio_part = f"{src_acodec} {src_channels}".strip()

    bitrate = transcode.get("Bitrate") or item.get("Bitrate")
    bitrate_part = f"{bitrate / 1_000_000:.1f} Mbps" if bitrate else ""

    reasons = transcode.get("TranscodeReasons") or []
    reason_part = f"[{', '.join(reasons)}]" if reasons else ""

    stream_parts = [p for p in [method, video_part, audio_part, bitrate_part, reason_part] if p]
    client_str = " \u00b7 ".join(filter(None, [client, device]))

    lines = [f"{user}: {title}", " | ".join(stream_parts)]
    if client_str:
        lines.append(client_str)

    return "\n".join(lines)
|
||||
|
||||
|
||||
def load_state():
    """Return the persisted session state, or an empty dict on any read/parse failure."""
    try:
        handle = open(STATE_FILE)
    except FileNotFoundError:
        return {}
    with handle:
        try:
            return json.load(handle)
        except json.JSONDecodeError:
            return {}
|
||||
|
||||
|
||||
def save_state(state):
    """Atomically persist `state` as JSON: write a temp file, then rename over STATE_FILE."""
    state_dir = os.path.dirname(STATE_FILE)
    os.makedirs(state_dir, exist_ok=True)
    tmp_path = STATE_FILE + ".tmp"
    with open(tmp_path, "w") as handle:
        json.dump(state, handle)
    os.replace(tmp_path, STATE_FILE)
|
||||
|
||||
|
||||
def grafana_post(label, start_ms):
    """Create a Grafana annotation tagged 'jellyfin'.

    Returns the new annotation id, or None if the request failed (errors are
    logged to stderr, never raised to the caller).
    """
    payload = {"time": start_ms, "text": label, "tags": ["jellyfin"]}
    try:
        response = http_json("POST", f"{GRAFANA_URL}/api/annotations", payload)
        return response.get("id")
    except Exception as exc:
        print(f"Error posting annotation: {exc}", file=sys.stderr)
        return None
|
||||
|
||||
|
||||
def grafana_close(grafana_id, end_ms):
    """Mark annotation `grafana_id` as ended at `end_ms`; log and continue on failure."""
    try:
        http_json(
            "PATCH",
            f"{GRAFANA_URL}/api/annotations/{grafana_id}",
            {"timeEnd": end_ms},
        )
    except Exception as exc:
        print(f"Error closing annotation {grafana_id}: {exc}", file=sys.stderr)
|
||||
|
||||
|
||||
def main():
    """Poll Jellyfin and mirror active playback sessions as Grafana annotations.

    Each new session opens an open-ended annotation; when the session
    disappears its annotation is closed at the current time.  State is
    persisted after every change so annotations survive restarts.
    """
    api_key = get_api_key()
    state = load_state()

    while True:
        now_ms = int(time.time() * 1000)
        sessions = get_active_sessions(api_key)

        # A fetch failure (None) leaves existing annotations untouched rather
        # than closing them spuriously.
        if sessions is not None:
            current_ids = {session["Id"] for session in sessions}

            # Open an annotation for every session not seen before.
            for session in sessions:
                sid = session["Id"]
                if sid in state:
                    continue
                label = format_label(session)
                grafana_id = grafana_post(label, now_ms)
                if grafana_id is None:
                    # Post failed; retry this session on the next poll.
                    continue
                state[sid] = {
                    "grafana_id": grafana_id,
                    "label": label,
                    "start_ms": now_ms,
                }
                save_state(state)

            # Close annotations whose sessions have ended.
            ended = [sid for sid in state if sid not in current_ids]
            for sid in ended:
                info = state.pop(sid)
                grafana_close(info["grafana_id"], now_ms)
                save_state(state)

        time.sleep(POLL_INTERVAL)
|
||||
|
||||
|
||||
# Run the polling loop when executed as a script (systemd ExecStart).
if __name__ == "__main__":
    main()
|
||||
54
services/grafana/jellyfin-collector.nix
Normal file
54
services/grafana/jellyfin-collector.nix
Normal file
@@ -0,0 +1,54 @@
|
||||
# Periodically export the number of active Jellyfin streams as a Prometheus
# textfile metric, picked up by node_exporter's textfile collector.
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
let
  # Directory scanned by node_exporter's textfile collector.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";

  jellyfinCollector = pkgs.writeShellApplication {
    name = "jellyfin-metrics-collector";
    runtimeInputs = with pkgs; [
      curl
      jq
    ];
    text = ''
      API_KEY=$(cat "$CREDENTIALS_DIRECTORY/jellyfin-api-key")
      JELLYFIN="http://127.0.0.1:${toString service_configs.ports.private.jellyfin.port}"

      # Sessions with a NowPlayingItem are actively streaming; report 0 when
      # Jellyfin is unreachable so the metric never goes stale.
      if response=$(curl -sf --max-time 5 "''${JELLYFIN}/Sessions?api_key=''${API_KEY}"); then
        active_streams=$(echo "$response" | jq '[.[] | select(.NowPlayingItem != null)] | length')
      else
        active_streams=0
      fi

      # Write atomically: build a temp file, then rename over the target.
      {
        echo '# HELP jellyfin_active_streams Number of currently active Jellyfin streams'
        echo '# TYPE jellyfin_active_streams gauge'
        echo "jellyfin_active_streams $active_streams"
      } > "${textfileDir}/jellyfin.prom.$$.tmp"
      mv "${textfileDir}/jellyfin.prom.$$.tmp" "${textfileDir}/jellyfin.prom"
    '';
  };
in
lib.mkIf (config.services.grafana.enable && config.services.jellyfin.enable) {
  systemd.services.jellyfin-metrics-collector = {
    description = "Collect Jellyfin metrics for Prometheus";
    after = [ "network.target" ];
    serviceConfig = {
      Type = "oneshot";
      ExecStart = lib.getExe jellyfinCollector;
      # The API key is exposed via systemd credentials, not the environment.
      LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
    };
  };

  # Run the collector every 30 seconds with a small jitter.
  systemd.timers.jellyfin-metrics-collector = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "*:*:0/30";
      RandomizedDelaySec = "5s";
    };
  };
}
|
||||
39
services/grafana/llama-cpp-annotations.nix
Normal file
39
services/grafana/llama-cpp-annotations.nix
Normal file
@@ -0,0 +1,39 @@
|
||||
# Runs the llama-cpp-annotations.py daemon, which marks LLM inference
# periods as Grafana annotations by watching llama-server CPU usage.
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
lib.mkIf (config.services.grafana.enable && config.services.llama-cpp.enable) {
  systemd.services.llama-cpp-annotations = {
    description = "LLM request annotation service for Grafana";
    after = [
      "grafana.service"
      "llama-cpp.service"
    ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.python3}/bin/python3 ${./llama-cpp-annotations.py}";
      Restart = "always";
      RestartSec = "10s";
      # Sandboxing: daemon only needs network access to Grafana, read access
      # to /proc, and its own state directory.
      DynamicUser = true;
      StateDirectory = "llama-cpp-annotations";
      NoNewPrivileges = true;
      ProtectSystem = "strict";
      ProtectHome = true;
      PrivateTmp = true;
      RestrictAddressFamilies = [
        "AF_INET"
        "AF_INET6"
      ];
      MemoryDenyWriteExecute = true;
    };
    # Mirrors the env vars the Python script reads (see llama-cpp-annotations.py).
    environment = {
      GRAFANA_URL = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
      STATE_FILE = "/var/lib/llama-cpp-annotations/state.json";
      POLL_INTERVAL = "5";
      CPU_THRESHOLD = "50";
    };
  };
}
|
||||
155
services/grafana/llama-cpp-annotations.py
Normal file
155
services/grafana/llama-cpp-annotations.py
Normal file
@@ -0,0 +1,155 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Grafana annotation service for llama-cpp inference requests.
|
||||
|
||||
Monitors llama-server CPU usage via /proc. Creates a Grafana annotation
|
||||
when inference starts (CPU spikes), closes it when inference ends.
|
||||
"""
|
||||
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import urllib.request
|
||||
|
||||
# Base URL of the local Grafana instance that receives annotations.
GRAFANA_URL = os.environ.get("GRAFANA_URL", "http://127.0.0.1:3000")
# JSON file used to persist the currently-open annotation across restarts.
STATE_FILE = os.environ.get("STATE_FILE", "/var/lib/llama-cpp-annotations/state.json")
# Seconds between CPU-usage samples.
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "5"))
# CPU percentage (of one core) above which llama-server counts as "serving a request".
CPU_THRESHOLD = float(os.environ.get("CPU_THRESHOLD", "50"))
|
||||
|
||||
|
||||
def find_llama_pid():
    """Scan /proc for a process whose comm is 'llama-server'; return its pid or None."""
    for comm_path in glob.glob("/proc/[0-9]*/comm"):
        try:
            with open(comm_path) as handle:
                name = handle.read().strip()
        except OSError:
            # Process vanished between glob and open; keep scanning.
            continue
        if name != "llama-server":
            continue
        try:
            return int(comm_path.split("/")[2])
        except ValueError:
            continue
    return None
|
||||
|
||||
|
||||
def get_cpu_times(pid):
    """Return utime + stime (in clock ticks) for `pid`, or None on any failure.

    The stat line is split after the last ')' so a process name containing
    spaces or parentheses cannot shift the field offsets; fields 11 and 12 of
    the remainder are utime and stime.
    """
    try:
        with open(f"/proc/{pid}/stat") as handle:
            stat_line = handle.read()
    except OSError:
        return None
    fields = stat_line.split(")")[-1].split()
    try:
        return int(fields[11]) + int(fields[12])
    except (IndexError, ValueError):
        return None
|
||||
|
||||
|
||||
def http_json(method, url, body=None):
    """Issue an HTTP request with an optional JSON body; return the decoded JSON response.

    Raises on network errors or non-2xx responses (urllib's default behavior);
    callers are expected to handle failures.
    """
    payload = None if body is None else json.dumps(body).encode()
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    request = urllib.request.Request(url, data=payload, headers=headers, method=method)
    with urllib.request.urlopen(request, timeout=5) as response:
        return json.loads(response.read())
|
||||
|
||||
|
||||
def load_state():
    """Load persisted annotation state from STATE_FILE; empty dict if absent or corrupt."""
    try:
        with open(STATE_FILE) as handle:
            return json.load(handle)
    except FileNotFoundError:
        return {}
    except json.JSONDecodeError:
        return {}
|
||||
|
||||
|
||||
def save_state(state):
    """Write `state` to STATE_FILE atomically (temp file + rename)."""
    os.makedirs(os.path.dirname(STATE_FILE), exist_ok=True)
    tmp_path = f"{STATE_FILE}.tmp"
    with open(tmp_path, "w") as handle:
        json.dump(state, handle)
    os.replace(tmp_path, STATE_FILE)
|
||||
|
||||
|
||||
def grafana_post(text, start_ms):
    """Open a Grafana annotation tagged 'llama-cpp' at `start_ms`.

    Returns the annotation id, or None on failure (logged to stderr).
    """
    body = {"time": start_ms, "text": text, "tags": ["llama-cpp"]}
    try:
        return http_json("POST", f"{GRAFANA_URL}/api/annotations", body).get("id")
    except Exception as exc:
        print(f"Error posting annotation: {exc}", file=sys.stderr)
        return None
|
||||
|
||||
|
||||
def grafana_close(grafana_id, end_ms, text=None):
    """Close annotation `grafana_id` at `end_ms`, optionally replacing its text.

    Failures are logged to stderr and swallowed so the polling loop keeps running.
    """
    payload = {"timeEnd": end_ms}
    if text is not None:
        payload["text"] = text
    try:
        http_json("PATCH", f"{GRAFANA_URL}/api/annotations/{grafana_id}", payload)
    except Exception as exc:
        print(f"Error closing annotation {grafana_id}: {exc}", file=sys.stderr)
|
||||
|
||||
|
||||
def main():
    """Sample llama-server CPU usage and mirror busy periods as Grafana annotations.

    An annotation is opened when CPU usage rises above CPU_THRESHOLD and closed
    (annotated with the measured duration) once it drops back below.  State is
    persisted so an open annotation survives restarts of this service.

    Fix vs. original: if llama-server exits while an annotation is open, the
    pid-None branch now closes that annotation instead of leaving it
    open-ended forever.
    """
    state = load_state()
    prev_ticks = None
    prev_time = None
    # Clock ticks per second, needed to convert /proc stat ticks to seconds.
    hz = os.sysconf("SC_CLK_TCK")

    while True:
        now_ms = int(time.time() * 1000)
        pid = find_llama_pid()

        if pid is None:
            # llama-server is not running: close any annotation left open so
            # it does not stretch on indefinitely, then restart sampling.
            if "active" in state:
                info = state.pop("active")
                duration_s = (now_ms - info["start_ms"]) / 1000
                grafana_close(info["grafana_id"], now_ms, f"LLM request ({duration_s:.1f}s)")
                save_state(state)
            prev_ticks = None
            prev_time = None
            time.sleep(POLL_INTERVAL)
            continue

        ticks = get_cpu_times(pid)
        now = time.monotonic()

        # Two valid samples are needed before a rate can be computed.
        if ticks is None or prev_ticks is None or prev_time is None:
            prev_ticks = ticks
            prev_time = now
            time.sleep(POLL_INTERVAL)
            continue

        dt = now - prev_time
        if dt <= 0:
            prev_ticks = ticks
            prev_time = now
            time.sleep(POLL_INTERVAL)
            continue

        # Percent of one core used by the process over the last interval.
        # NOTE(review): if llama-server restarts between polls without the pid
        # ever reading as None, this briefly compares ticks across pids; the
        # next iteration self-corrects.
        cpu_pct = ((ticks - prev_ticks) / hz) / dt * 100
        prev_ticks = ticks
        prev_time = now

        busy = cpu_pct > CPU_THRESHOLD

        if busy and "active" not in state:
            grafana_id = grafana_post("LLM request", now_ms)
            if grafana_id is not None:
                state["active"] = {
                    "grafana_id": grafana_id,
                    "start_ms": now_ms,
                }
                save_state(state)

        elif not busy and "active" in state:
            info = state.pop("active")
            duration_s = (now_ms - info["start_ms"]) / 1000
            grafana_close(info["grafana_id"], now_ms, f"LLM request ({duration_s:.1f}s)")
            save_state(state)

        time.sleep(POLL_INTERVAL)
|
||||
|
||||
|
||||
# Run the sampling loop when executed as a script (systemd ExecStart).
if __name__ == "__main__":
    main()
|
||||
74
services/grafana/prometheus.nix
Normal file
74
services/grafana/prometheus.nix
Normal file
@@ -0,0 +1,74 @@
|
||||
# Prometheus server plus node and apcupsd exporters, all bound to loopback.
{
  service_configs,
  lib,
  ...
}:
let
  # Directory scanned by node_exporter's textfile collector; the various
  # *-collector services in this directory write their metrics here.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";
in
{
  imports = [
    # Keep the TSDB on the SSD pool and lock down its permissions.
    (lib.serviceMountWithZpool "prometheus" service_configs.zpool_ssds [
      "/var/lib/prometheus"
    ])
    (lib.serviceFilePerms "prometheus" [
      "Z /var/lib/prometheus 0700 prometheus prometheus"
    ])
  ];

  services.prometheus = {
    enable = true;
    port = service_configs.ports.private.prometheus.port;
    listenAddress = "127.0.0.1";
    stateDir = "prometheus";
    # Keep 90 days of metrics history.
    retentionTime = "90d";

    exporters = {
      node = {
        enable = true;
        port = service_configs.ports.private.prometheus_node.port;
        listenAddress = "127.0.0.1";
        enabledCollectors = [
          "hwmon"
          "systemd"
          "textfile"
        ];
        extraFlags = [
          "--collector.textfile.directory=${textfileDir}"
        ];
      };

      # UPS metrics via the local apcupsd daemon.
      apcupsd = {
        enable = true;
        port = service_configs.ports.private.prometheus_apcupsd.port;
        listenAddress = "127.0.0.1";
        apcupsdAddress = "127.0.0.1:3551";
      };
    };

    # Scrape Prometheus itself plus both exporters.
    scrapeConfigs = [
      {
        job_name = "prometheus";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus.port}" ]; }
        ];
      }
      {
        job_name = "node";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus_node.port}" ]; }
        ];
      }
      {
        job_name = "apcupsd";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus_apcupsd.port}" ]; }
        ];
      }
    ];
  };

  # Ensure the textfile drop directory exists for the collector services.
  systemd.tmpfiles.rules = [
    "d ${textfileDir} 0755 root root -"
  ];
}
|
||||
60
services/grafana/qbittorrent-collector.nix
Normal file
60
services/grafana/qbittorrent-collector.nix
Normal file
@@ -0,0 +1,60 @@
|
||||
# Export qBittorrent transfer speeds as Prometheus textfile metrics.
{
  config,
  pkgs,
  lib,
  ...
}:
let
  # Directory scanned by node_exporter's textfile collector.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";

  qbittorrentCollector = pkgs.writeShellApplication {
    name = "qbittorrent-collector";
    runtimeInputs = with pkgs; [
      curl
      jq
    ];
    text = ''
      # qBittorrent's WebUI is reached through the VPN namespace address.
      QBIT="http://${config.vpnNamespaces.wg.namespaceAddress}:${toString config.services.qbittorrent.webuiPort}"
      OUT="${textfileDir}/qbittorrent.prom"

      # Report zero speeds when the WebUI is unreachable so the metric never
      # goes stale.
      if info=$(curl -sf --max-time 5 "''${QBIT}/api/v2/transfer/info"); then
        dl=$(echo "$info" | jq '.dl_info_speed')
        ul=$(echo "$info" | jq '.up_info_speed')
      else
        dl=0
        ul=0
      fi

      # Write atomically: temp file, then rename over the target.
      {
        echo '# HELP qbittorrent_download_bytes_per_second Current download speed in bytes/s'
        echo '# TYPE qbittorrent_download_bytes_per_second gauge'
        echo "qbittorrent_download_bytes_per_second $dl"
        echo '# HELP qbittorrent_upload_bytes_per_second Current upload speed in bytes/s'
        echo '# TYPE qbittorrent_upload_bytes_per_second gauge'
        echo "qbittorrent_upload_bytes_per_second $ul"
      } > "''${OUT}.tmp"
      mv "''${OUT}.tmp" "$OUT"
    '';
  };
in
lib.mkIf (config.services.grafana.enable && config.services.qbittorrent.enable) {
  systemd.services.qbittorrent-collector = {
    description = "Collect qBittorrent transfer metrics for Prometheus";
    after = [
      "network.target"
      "qbittorrent.service"
    ];
    serviceConfig = {
      Type = "oneshot";
      ExecStart = lib.getExe qbittorrentCollector;
    };
  };

  # Poll every 15 seconds with a small jitter.
  systemd.timers.qbittorrent-collector = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "*:*:0/15";
      RandomizedDelaySec = "3s";
    };
  };
}
|
||||
36
services/grafana/zfs-scrub-annotations.nix
Normal file
36
services/grafana/zfs-scrub-annotations.nix
Normal file
@@ -0,0 +1,36 @@
|
||||
# Hook Grafana annotations onto the zfs-scrub service: open an annotation
# when a scrub starts, close it with the scan results when it finishes.
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
let
  grafanaUrl = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";

  script = pkgs.writeShellApplication {
    name = "zfs-scrub-annotations";
    runtimeInputs = with pkgs; [
      curl
      jq
      coreutils
      gnugrep
      gnused
      # Provides the `zpool` command matching the running ZFS module.
      config.boot.zfs.package
    ];
    text = builtins.readFile ./zfs-scrub-annotations.sh;
  };
in
lib.mkIf (config.services.grafana.enable && config.services.zfs.autoScrub.enable) {
  systemd.services.zfs-scrub = {
    environment = {
      GRAFANA_URL = grafanaUrl;
      STATE_DIR = "/run/zfs-scrub-annotations";
    };
    serviceConfig = {
      RuntimeDirectory = "zfs-scrub-annotations";
      # Leading "-" makes a failed annotation post non-fatal to the scrub.
      ExecStartPre = [ "-${lib.getExe script} start" ];
      ExecStopPost = [ "${lib.getExe script} stop" ];
    };
  };
}
|
||||
55
services/grafana/zfs-scrub-annotations.sh
Normal file
55
services/grafana/zfs-scrub-annotations.sh
Normal file
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env bash
# ZFS scrub annotation script for Grafana
# Usage: zfs-scrub-annotations.sh {start|stop}
# Required env: GRAFANA_URL, STATE_DIR
# Required on PATH: zpool, curl, jq, paste, date, grep, sed
#
# Fixes vs. original:
#  - With `pipefail`, `grep "scan:"` failing on a pool that has never been
#    scrubbed used to abort the stop handler mid-loop; grep failure is now
#    tolerated.
#  - A stored annotation id of literal "null" (jq's output when the POST
#    response carried no .id) used to produce a PATCH to /api/annotations/null;
#    it is now treated the same as a missing id.

set -euo pipefail

ACTION="${1:-}"
GRAFANA_URL="${GRAFANA_URL:?GRAFANA_URL required}"
STATE_DIR="${STATE_DIR:?STATE_DIR required}"

case "$ACTION" in
  start)
    # Open an annotation listing every pool.  Annotation failures must never
    # block the scrub itself, hence the permissive `|| exit 0`.
    POOLS=$(zpool list -H -o name | paste -sd ', ')
    NOW_MS=$(date +%s%3N)

    RESPONSE=$(curl -sf --max-time 5 \
      -X POST "$GRAFANA_URL/api/annotations" \
      -H "Content-Type: application/json" \
      -d "$(jq -n --arg text "ZFS scrub: $POOLS" --argjson time "$NOW_MS" \
        '{time: $time, text: $text, tags: ["zfs-scrub"]}')" \
      ) || exit 0

    # Persist the id so the stop handler can close the same annotation.
    echo "$RESPONSE" | jq -r '.id' > "$STATE_DIR/annotation-id"
    ;;

  stop)
    ANN_ID=$(cat "$STATE_DIR/annotation-id" 2>/dev/null) || exit 0
    # Nothing to close if the file was empty or the POST response had no id
    # (jq prints the string "null" in that case).
    if [ -z "$ANN_ID" ] || [ "$ANN_ID" = "null" ]; then
      rm -f "$STATE_DIR/annotation-id"
      exit 0
    fi

    NOW_MS=$(date +%s%3N)

    # Collect each pool's final scan status for the annotation text.  grep may
    # find no "scan:" line (pool never scrubbed); tolerate that under pipefail.
    RESULTS=""
    while IFS= read -r pool; do
      scan_line=$(zpool status "$pool" | { grep "scan:" || true; } | sed 's/^[[:space:]]*//')
      RESULTS="${RESULTS}${pool}: ${scan_line}"$'\n'
    done < <(zpool list -H -o name)

    TEXT=$(printf "ZFS scrub completed\n%s" "$RESULTS")

    # Close the annotation; failures are non-fatal.
    curl -sf --max-time 5 \
      -X PATCH "$GRAFANA_URL/api/annotations/$ANN_ID" \
      -H "Content-Type: application/json" \
      -d "$(jq -n --arg text "$TEXT" --argjson timeEnd "$NOW_MS" \
        '{timeEnd: $timeEnd, text: $text}')" || true

    rm -f "$STATE_DIR/annotation-id"
    ;;

  *)
    echo "Usage: $0 {start|stop}" >&2
    exit 1
    ;;
esac
|
||||
Reference in New Issue
Block a user