Compare commits

6 Commits

7ff404b032 llama-cpp: add API key auth via --api-key-file
Generate and encrypt a Bearer token for llama-cpp's built-in auth.
Remove caddy_auth from the vhost since basic auth blocks Bearer-only
clients. Internal sidecars (xmrig-pause, annotations) connect
directly to localhost and are unaffected (/slots is public).
2026-04-02 17:54:51 -04:00
b8dd129bea monitoring: add zpool and boot partition usage metrics
Add textfile collector for ZFS pool utilization (tank, hdds) and
boot drive partitions (/boot, /persistent, /nix). Runs every 60s.
Add two Grafana dashboard panels in a new Row 5: ZFS Pool
Utilization and Boot Drive Partitions.
2026-04-02 17:44:11 -04:00
e41f869843 trilium: add self-hosted note-taking service
Add trilium-server on port 8787 behind Caddy reverse proxy at
notes.sigkill.computer. Data stored on ZFS tank pool with
serviceMountWithZpool for mount ordering.
2026-04-02 17:44:04 -04:00
9baeaa5c23 llama-cpp: add grafana annotations for inference requests
Poll /slots endpoint, create annotations when slots start processing,
close with token count when complete. Includes NixOS VM test with
mock llama-cpp and grafana servers. Dashboard annotation entry added.
2026-04-02 17:43:49 -04:00
0235617627 monitoring: fix intel-gpu-collector crash resilience
Wrap entire read_one_sample() in try/except to handle all failures
(missing binary, permission errors, malformed JSON, timeouts).
Write zero-valued metrics on failure instead of exiting non-zero.
Increase timeout from 5s to 8s for slower GPU initialization.
2026-04-02 17:43:13 -04:00
df15be01ea llama-cpp: pause xmrig during active inference requests
Add sidecar service that polls llama-cpp /slots endpoint every 3s.
When any slot is processing, stops xmrig. Restarts xmrig after 10s
grace period when all slots are idle. Handles unreachable llama-cpp
gracefully (leaves xmrig untouched).
2026-04-02 17:43:07 -04:00
15 changed files with 754 additions and 27 deletions

View File

@@ -48,6 +48,8 @@
    ./services/soulseek.nix
    ./services/llama-cpp.nix
    ./services/llama-cpp-annotations.nix
    ./services/trilium.nix
    ./services/ups.nix
    ./services/monitoring.nix
@@ -65,6 +67,8 @@
    ./services/p2pool.nix
    ./services/xmrig.nix
    ./services/llama-cpp-xmrig-pause.nix
    # KEEP UNTIL 2028
    ./services/caddy_senior_project.nix

View File

@@ -159,5 +159,13 @@
owner = "gitea-runner";
group = "gitea-runner";
};
# llama-cpp API key for bearer token auth
llama-cpp-api-key = {
file = ../secrets/llama-cpp-api-key.age;
mode = "0400";
owner = "root";
group = "root";
};
};
}
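The root-owned 0400 mode works because llama-cpp has DynamicUser forced off (see the llama-cpp.nix hunk below, needed for Vulkan), so the daemon presumably starts as root and can read the key file directly.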

secrets/llama-cpp-api-key.age Binary file not shown.

View File

@@ -173,6 +173,10 @@ rec {
        port = 6688;
        proto = "tcp";
      };
      trilium = {
        port = 8787;
        proto = "tcp";
      };
    };
  };
@@ -302,6 +306,10 @@ rec {
    domain = "grafana.${https.domain}";
  };
  trilium = {
    dataDir = services_dir + "/trilium";
  };
  media = {
    moviesDir = torrents_path + "/media/movies";
    tvDir = torrents_path + "/media/tv";

44
services/disk-usage-collector.sh Normal file
View File

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Collects ZFS pool utilization and boot partition usage for Prometheus textfile collector
set -euo pipefail

TEXTFILE="${TEXTFILE:?TEXTFILE env required}"
TMP="${TEXTFILE}.$$"

{
  echo '# HELP zpool_size_bytes Total size of ZFS pool in bytes'
  echo '# TYPE zpool_size_bytes gauge'
  echo '# HELP zpool_used_bytes Used space in ZFS pool in bytes'
  echo '# TYPE zpool_used_bytes gauge'
  echo '# HELP zpool_free_bytes Free space in ZFS pool in bytes'
  echo '# TYPE zpool_free_bytes gauge'
  # -Hp: scripting mode, parseable, bytes
  zpool list -Hp -o name,size,alloc,free | while IFS=$'\t' read -r name size alloc free; do
    echo "zpool_size_bytes{pool=\"${name}\"} ${size}"
    echo "zpool_used_bytes{pool=\"${name}\"} ${alloc}"
    echo "zpool_free_bytes{pool=\"${name}\"} ${free}"
  done

  echo '# HELP partition_size_bytes Total size of partition in bytes'
  echo '# TYPE partition_size_bytes gauge'
  echo '# HELP partition_used_bytes Used space on partition in bytes'
  echo '# TYPE partition_used_bytes gauge'
  echo '# HELP partition_free_bytes Free space on partition in bytes'
  echo '# TYPE partition_free_bytes gauge'
  # Boot drive partitions: /boot (ESP), /persistent, /nix
  # Use df with 1K blocks and convert to bytes
  for mount in /boot /persistent /nix; do
    if mountpoint -q "$mount" 2>/dev/null; then
      read -r size used avail _ <<< "$(df -k --output=size,used,avail "$mount" | tail -1)"
      size_b=$((size * 1024))
      used_b=$((used * 1024))
      avail_b=$((avail * 1024))
      echo "partition_size_bytes{mount=\"${mount}\"} ${size_b}"
      echo "partition_used_bytes{mount=\"${mount}\"} ${used_b}"
      echo "partition_free_bytes{mount=\"${mount}\"} ${avail_b}"
    fi
  done
} > "$TMP"
mv "$TMP" "$TEXTFILE"

View File

@@ -12,6 +12,7 @@ TEXTFILE = os.environ.get(
 def read_one_sample():
+    try:
         proc = subprocess.Popen(
             ["intel_gpu_top", "-J", "-s", "1000"],
             stdout=subprocess.PIPE,
@@ -20,7 +21,7 @@ def read_one_sample():
         buf = b""
         depth = 0
         in_obj = False
-        deadline = time.monotonic() + 5.0
+        deadline = time.monotonic() + 8.0
         try:
             while time.monotonic() < deadline:
                 byte = proc.stdout.read(1)
@@ -38,7 +39,34 @@ def read_one_sample():
         finally:
             proc.terminate()
             proc.wait()
-        return json.loads(buf) if buf else None
+        if not buf:
+            return None
+        try:
+            return json.loads(buf)
+        except json.JSONDecodeError:
+            print("Malformed JSON from intel_gpu_top", file=sys.stderr)
+            return None
+    except Exception as e:
+        print(f"intel_gpu_top unavailable: {e}", file=sys.stderr)
+        return None
+
+
+def write_empty_metrics():
+    """Write zero-valued metrics so Prometheus doesn't see stale data."""
+    lines = [
+        "# HELP intel_gpu_engine_busy_percent Intel GPU engine busy percentage",
+        "# TYPE intel_gpu_engine_busy_percent gauge",
+        "# HELP intel_gpu_frequency_mhz Intel GPU actual frequency in MHz",
+        "# TYPE intel_gpu_frequency_mhz gauge",
+        "intel_gpu_frequency_mhz 0",
+        "# HELP intel_gpu_rc6_percent Intel GPU RC6 power-saving state percentage",
+        "# TYPE intel_gpu_rc6_percent gauge",
+        "intel_gpu_rc6_percent 0",
+    ]
+    tmp = TEXTFILE + ".tmp"
+    with open(tmp, "w") as f:
+        f.write("\n".join(lines) + "\n")
+    os.replace(tmp, TEXTFILE)
+
+
 def write_metrics(sample):
@@ -70,7 +98,8 @@ def main():
     sample = read_one_sample()
     if sample is None:
         print("Failed to read intel_gpu_top sample", file=sys.stderr)
-        sys.exit(1)
+        write_empty_metrics()
+        sys.exit(0)
     write_metrics(sample)
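The exit-code change matters for textfile collectors: when a oneshot collector dies non-zero, the previous .prom file stays in place and node_exporter keeps serving the last good sample indefinitely, hiding the outage. Overwriting with zeros and exiting 0 makes the failure visible on the dashboard instead.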

40
services/llama-cpp-annotations.nix Normal file
View File

@@ -0,0 +1,40 @@
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
{
  systemd.services.llama-cpp-annotations = {
    description = "LLM request annotation service for Grafana";
    after = [
      "network.target"
      "grafana.service"
      "llama-cpp.service"
    ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.python3}/bin/python3 ${./llama-cpp-annotations.py}";
      Restart = "always";
      RestartSec = "10s";
      DynamicUser = true;
      StateDirectory = "llama-cpp-annotations";
      NoNewPrivileges = true;
      ProtectSystem = "strict";
      ProtectHome = true;
      PrivateTmp = true;
      RestrictAddressFamilies = [
        "AF_INET"
        "AF_INET6"
      ];
      MemoryDenyWriteExecute = true;
    };
    environment = {
      LLAMA_CPP_URL = "http://127.0.0.1:${toString service_configs.ports.private.llama_cpp.port}";
      GRAFANA_URL = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
      STATE_FILE = "/var/lib/llama-cpp-annotations/state.json";
      POLL_INTERVAL = "5";
    };
  };
}
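Two of the hardening options above interact deliberately: ProtectSystem = "strict" mounts the whole filesystem read-only for the service, while StateDirectory = "llama-cpp-annotations" creates a writable /var/lib/llama-cpp-annotations owned by the DynamicUser, which is exactly where the STATE_FILE environment variable points.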

127
services/llama-cpp-annotations.py Normal file
View File

@@ -0,0 +1,127 @@
#!/usr/bin/env python3
import json
import os
import sys
import time
import urllib.request

LLAMA_CPP_URL = os.environ.get("LLAMA_CPP_URL", "http://127.0.0.1:6688")
GRAFANA_URL = os.environ.get("GRAFANA_URL", "http://127.0.0.1:3000")
STATE_FILE = os.environ.get("STATE_FILE", "/var/lib/llama-cpp-annotations/state.json")
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "5"))


def http_json(method, url, body=None):
    data = json.dumps(body).encode() if body is not None else None
    req = urllib.request.Request(
        url,
        data=data,
        headers={"Content-Type": "application/json", "Accept": "application/json"},
        method=method,
    )
    with urllib.request.urlopen(req, timeout=5) as resp:
        return json.loads(resp.read())


def get_slots():
    try:
        req = urllib.request.Request(
            f"{LLAMA_CPP_URL}/slots",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(req, timeout=5) as resp:
            return json.loads(resp.read())
    except Exception as e:
        print(f"Error fetching slots: {e}", file=sys.stderr)
        return None


def load_state():
    try:
        with open(STATE_FILE) as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return {}


def save_state(state):
    os.makedirs(os.path.dirname(STATE_FILE), exist_ok=True)
    tmp = STATE_FILE + ".tmp"
    with open(tmp, "w") as f:
        json.dump(state, f)
    os.replace(tmp, STATE_FILE)


def grafana_post(text, start_ms):
    try:
        result = http_json(
            "POST",
            f"{GRAFANA_URL}/api/annotations",
            {"time": start_ms, "text": text, "tags": ["llama-cpp"]},
        )
        return result.get("id")
    except Exception as e:
        print(f"Error posting annotation: {e}", file=sys.stderr)
        return None


def grafana_close(grafana_id, end_ms, text=None):
    try:
        body = {"timeEnd": end_ms}
        if text is not None:
            body["text"] = text
        http_json(
            "PATCH",
            f"{GRAFANA_URL}/api/annotations/{grafana_id}",
            body,
        )
    except Exception as e:
        print(f"Error closing annotation {grafana_id}: {e}", file=sys.stderr)


def main():
    state = load_state()
    while True:
        now_ms = int(time.time() * 1000)
        slots = get_slots()
        if slots is not None:
            # Track which slots are currently processing
            processing_ids = set()
            for slot in slots:
                slot_id = str(slot["id"])
                is_processing = slot.get("is_processing", False)
                if is_processing:
                    processing_ids.add(slot_id)
                    if slot_id not in state:
                        text = f"LLM request (slot {slot['id']})"
                        grafana_id = grafana_post(text, now_ms)
                        if grafana_id is not None:
                            state[slot_id] = {
                                "grafana_id": grafana_id,
                                "start_ms": now_ms,
                            }
                            save_state(state)
            # Close annotations for slots that stopped processing
            for slot_id in [k for k in state if k not in processing_ids]:
                info = state.pop(slot_id)
                # Try to get token count from the slot data
                n_decoded = None
                for slot in slots:
                    if str(slot["id"]) == slot_id:
                        n_decoded = slot.get("next_token", {}).get("n_decoded")
                        break
                text = f"LLM request (slot {slot_id})"
                if n_decoded is not None and n_decoded > 0:
                    text += f", {n_decoded} tokens"
                grafana_close(info["grafana_id"], now_ms, text)
                save_state(state)
        time.sleep(POLL_INTERVAL)


if __name__ == "__main__":
    main()
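For orientation, this is the minimal /slots payload shape the poller relies on, matching the mock server in the VM test below; a real llama-cpp response carries many more fields per slot, but only id, is_processing, and next_token.n_decoded are read here:

[
  {"id": 0, "is_processing": true, "next_token": {"n_decoded": 17}}
]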

35
services/llama-cpp-xmrig-pause.nix Normal file
View File

@@ -0,0 +1,35 @@
{
  pkgs,
  service_configs,
  ...
}:
{
  systemd.services.llama-cpp-xmrig-pause = {
    description = "Pause xmrig while llama-cpp is processing requests";
    after = [
      "network.target"
      "llama-cpp.service"
      "xmrig.service"
    ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.python3}/bin/python3 ${./llama-cpp-xmrig-pause.py}";
      Restart = "always";
      RestartSec = "10s";
      NoNewPrivileges = true;
      ProtectHome = true;
      ProtectSystem = "strict";
      PrivateTmp = true;
      RestrictAddressFamilies = [
        "AF_INET"
        "AF_INET6"
      ];
      MemoryDenyWriteExecute = true;
    };
    environment = {
      LLAMA_CPP_URL = "http://127.0.0.1:${toString service_configs.ports.private.llama_cpp.port}";
      POLL_INTERVAL = "3";
      GRACE_PERIOD = "10";
    };
  };
}

91
services/llama-cpp-xmrig-pause.py Normal file
View File

@@ -0,0 +1,91 @@
#!/usr/bin/env python3
"""
Pause xmrig while llama-cpp is processing inference requests.

Polls llama-cpp /slots endpoint. When any slot is busy, stops xmrig.
When all slots are idle for GRACE_PERIOD seconds, restarts xmrig.
If llama-cpp is unreachable, does nothing (leaves xmrig in its current state).
"""
import json
import os
import subprocess
import sys
import time
import urllib.error
import urllib.request

LLAMA_CPP_URL = os.environ["LLAMA_CPP_URL"].rstrip("/")
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "3"))
GRACE_PERIOD = float(os.environ.get("GRACE_PERIOD", "10"))


def log(msg):
    print(f"[llama-cpp-xmrig-pause] {msg}", file=sys.stderr, flush=True)


def get_slots():
    """Fetch /slots from llama-cpp. Returns list of slot dicts, or None on error."""
    req = urllib.request.Request(f"{LLAMA_CPP_URL}/slots")
    try:
        with urllib.request.urlopen(req, timeout=5) as resp:
            return json.loads(resp.read())
    except (urllib.error.URLError, OSError, json.JSONDecodeError, ValueError) as exc:
        log(f"Cannot reach llama-cpp: {exc}")
        return None


def any_slot_busy(slots):
    return any(s.get("is_processing", False) for s in slots)


def systemctl(action, unit):
    result = subprocess.run(
        ["systemctl", action, unit],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        log(f"systemctl {action} {unit} failed (rc={result.returncode}): {result.stderr.strip()}")
    return result.returncode == 0


def main():
    xmrig_paused = False
    idle_since = None  # monotonic timestamp when slots first went idle
    log(f"Starting: url={LLAMA_CPP_URL} poll={POLL_INTERVAL}s grace={GRACE_PERIOD}s")
    while True:
        slots = get_slots()
        if slots is None:
            # llama-cpp unreachable — leave xmrig alone, reset idle timer
            idle_since = None
            time.sleep(POLL_INTERVAL)
            continue
        busy = any_slot_busy(slots)
        if busy:
            idle_since = None
            if not xmrig_paused:
                log("Slot busy — stopping xmrig")
                if systemctl("stop", "xmrig"):
                    xmrig_paused = True
        else:
            # All slots idle
            if xmrig_paused:
                now = time.monotonic()
                if idle_since is None:
                    idle_since = now
                elif now - idle_since >= GRACE_PERIOD:
                    log("Slots idle past grace period — starting xmrig")
                    if systemctl("start", "xmrig"):
                        xmrig_paused = False
                        idle_since = None
        time.sleep(POLL_INTERVAL)


if __name__ == "__main__":
    main()
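Concretely, with the defaults from the module (POLL_INTERVAL=3, GRACE_PERIOD=10): a busy slot stops xmrig within one poll, about 3s. After the slots go idle, the first idle poll records idle_since, and xmrig restarts on the first poll at least 10s later, so roughly 12s after the last busy observation.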

services/llama-cpp.nix
View File

@@ -29,14 +29,18 @@
"turbo4"
"-fa"
"on"
"--api-key-file"
config.age.secrets.llama-cpp-api-key.path
];
};
# have to do this in order to get vulkan to work
systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;
# Auth handled by llama-cpp --api-key-file (Bearer token).
# No caddy_auth — the API key is the auth layer, and caddy_auth's basic
# auth would block Bearer-only clients like oh-my-pi.
services.caddy.virtualHosts."llm.${service_configs.https.domain}".extraConfig = ''
import ${config.age.secrets.caddy_auth.path}
reverse_proxy :${toString config.services.llama-cpp.port}
'';
}
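With the vhost reduced to a plain reverse_proxy, authentication rides on the Authorization header end to end. A minimal client sketch, assuming llama-cpp's OpenAI-compatible chat endpoint and a hypothetical domain and key (the real key lives in llama-cpp-api-key.age):

import json
import urllib.request

API_KEY = "sk-example-not-real"  # hypothetical; the real key comes from the age secret
URL = "https://llm.example.com/v1/chat/completions"  # llm.<domain> per the vhost above

req = urllib.request.Request(
    URL,
    data=json.dumps({"messages": [{"role": "user", "content": "hello"}]}).encode(),
    headers={
        "Content-Type": "application/json",
        # llama-cpp compares this Bearer token against the keys in --api-key-file
        "Authorization": f"Bearer {API_KEY}",
    },
    method="POST",
)
with urllib.request.urlopen(req, timeout=60) as resp:
    print(json.loads(resp.read())["choices"][0]["message"]["content"])

A missing or wrong key gets a 401 straight from llama-cpp, which is also why basic auth in Caddy had to go: a client can send only one Authorization header, so Bearer-only clients cannot satisfy both layers.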

services/monitoring.nix
View File

@@ -79,6 +79,17 @@ let
    '';
  };

  diskUsageCollector = pkgs.writeShellApplication {
    name = "disk-usage-collector";
    runtimeInputs = with pkgs; [
      coreutils
      gawk
      config.boot.zfs.package
      util-linux # for mountpoint
    ];
    text = builtins.readFile ./disk-usage-collector.sh;
  };

  dashboard = {
    editable = true;
    graphTooltip = 1;
@@ -120,6 +131,18 @@
        type = "tags";
        tags = [ "zfs-scrub" ];
      }
      {
        name = "LLM Requests";
        datasource = {
          type = "grafana";
          uid = "-- Grafana --";
        };
        enable = true;
        iconColor = "purple";
        showIn = 0;
        type = "tags";
        tags = [ "llama-cpp" ];
      }
    ];
    panels = [
@@ -657,6 +680,94 @@ let
          overrides = [ ];
        };
      }
      # -- Row 5: Storage --
      {
        id = 12;
        type = "timeseries";
        title = "ZFS Pool Utilization";
        gridPos = {
          h = 8;
          w = 12;
          x = 0;
          y = 32;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "zpool_used_bytes{pool=\"tank\"} / zpool_size_bytes{pool=\"tank\"} * 100";
            legendFormat = "tank";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "zpool_used_bytes{pool=\"hdds\"} / zpool_size_bytes{pool=\"hdds\"} * 100";
            legendFormat = "hdds";
            refId = "B";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 20;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
      {
        id = 13;
        type = "timeseries";
        title = "Boot Drive Partitions";
        gridPos = {
          h = 8;
          w = 12;
          x = 12;
          y = 32;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "partition_used_bytes{mount=\"/boot\"} / partition_size_bytes{mount=\"/boot\"} * 100";
            legendFormat = "/boot";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "partition_used_bytes{mount=\"/persistent\"} / partition_size_bytes{mount=\"/persistent\"} * 100";
            legendFormat = "/persistent";
            refId = "B";
          }
          {
            datasource = promDs;
            expr = "partition_used_bytes{mount=\"/nix\"} / partition_size_bytes{mount=\"/nix\"} * 100";
            legendFormat = "/nix";
            refId = "C";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 20;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
    ];
  };
in
@@ -863,6 +974,24 @@ in
    };
  };

  # -- Disk/pool usage textfile collector --
  systemd.services.disk-usage-collector = {
    description = "Collect ZFS pool and partition usage metrics for Prometheus";
    serviceConfig = {
      Type = "oneshot";
      ExecStart = lib.getExe diskUsageCollector;
    };
    environment.TEXTFILE = "${textfileDir}/disk-usage.prom";
  };
  systemd.timers.disk-usage-collector = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "*:*:0/60"; # second 0 of every minute, i.e. every 60 seconds
      RandomizedDelaySec = "10s";
    };
  };
  systemd.tmpfiles.rules = [
    "d ${textfileDir} 0755 root root -"
  ];

26
services/trilium.nix Normal file
View File

@@ -0,0 +1,26 @@
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "trilium-server" service_configs.zpool_ssds [
      (service_configs.services_dir + "/trilium")
    ])
  ];

  services.trilium-server = {
    enable = true;
    port = service_configs.ports.private.trilium.port;
    host = "127.0.0.1";
    dataDir = service_configs.trilium.dataDir;
  };

  services.caddy.virtualHosts."notes.${service_configs.https.domain}".extraConfig = ''
    import ${config.age.secrets.caddy_auth.path}
    reverse_proxy :${toString service_configs.ports.private.trilium.port}
  '';
}

179
tests/llama-cpp-annotations.nix Normal file
View File

@@ -0,0 +1,179 @@
{
  lib,
  pkgs,
  ...
}:
let
  mockGrafana = ./mock-grafana-server.py;
  script = ../services/llama-cpp-annotations.py;
  python = pkgs.python3;

  mockLlamaCpp = pkgs.writeText "mock-llama-cpp-server.py" ''
    import http.server, json, sys, os

    PORT = int(sys.argv[1])
    STATE_FILE = sys.argv[2]

    if not os.path.exists(STATE_FILE):
        with open(STATE_FILE, "w") as f:
            json.dump([{"id": 0, "is_processing": False, "next_token": {"n_decoded": 0}}], f)

    class Handler(http.server.BaseHTTPRequestHandler):
        def log_message(self, fmt, *args):
            pass

        def _json(self, code, body):
            data = json.dumps(body).encode()
            self.send_response(code)
            self.send_header("Content-Type", "application/json")
            self.end_headers()
            self.wfile.write(data)

        def do_GET(self):
            if self.path == "/slots":
                with open(STATE_FILE) as f:
                    slots = json.load(f)
                self._json(200, slots)
            else:
                self.send_response(404)
                self.end_headers()

        def do_POST(self):
            if self.path == "/test/set-slots":
                length = int(self.headers.get("Content-Length", 0))
                body = json.loads(self.rfile.read(length)) if length else []
                with open(STATE_FILE, "w") as f:
                    json.dump(body, f)
                self._json(200, {"ok": True})
            else:
                self.send_response(404)
                self.end_headers()

    http.server.HTTPServer(("127.0.0.1", PORT), Handler).serve_forever()
  '';
in
pkgs.testers.runNixOSTest {
  name = "llama-cpp-annotations";

  nodes.machine =
    { pkgs, ... }:
    {
      environment.systemPackages = [
        pkgs.python3
        pkgs.curl
      ];
    };

  testScript = ''
    import json
    import time

    GRAFANA_PORT = 13000
    LLAMA_PORT = 16688
    ANNOTS_FILE = "/tmp/annotations.json"
    SLOTS_FILE = "/tmp/llama-slots.json"
    STATE_FILE = "/tmp/llama-annot-state.json"
    PYTHON = "${python}/bin/python3"
    MOCK_GRAFANA = "${mockGrafana}"
    MOCK_LLAMA = "${mockLlamaCpp}"
    SCRIPT = "${script}"

    def read_annotations():
        out = machine.succeed(f"cat {ANNOTS_FILE} 2>/dev/null || echo '[]'")
        return json.loads(out.strip())

    def set_slots(slots):
        machine.succeed(
            f"curl -sf -X POST http://127.0.0.1:{LLAMA_PORT}/test/set-slots "
            f"-H 'Content-Type: application/json' "
            f"-d '{json.dumps(slots)}'"
        )

    start_all()
    machine.wait_for_unit("multi-user.target")

    with subtest("Start mock services"):
        machine.succeed(f"echo '[]' > {ANNOTS_FILE}")
        machine.succeed(
            f"systemd-run --unit=mock-grafana {PYTHON} {MOCK_GRAFANA} {GRAFANA_PORT} {ANNOTS_FILE}"
        )
        machine.succeed(
            f"echo '[{{\"id\": 0, \"is_processing\": false, \"next_token\": {{\"n_decoded\": 0}}}}]' > {SLOTS_FILE}"
        )
        machine.succeed(
            f"systemd-run --unit=mock-llama {PYTHON} {MOCK_LLAMA} {LLAMA_PORT} {SLOTS_FILE}"
        )
        machine.wait_until_succeeds(
            f"curl -sf http://127.0.0.1:{GRAFANA_PORT}/api/annotations -X POST "
            f"-H 'Content-Type: application/json' -d '{{\"text\":\"ping\",\"tags\":[]}}' | grep -q id",
            timeout=10,
        )
        machine.wait_until_succeeds(
            f"curl -sf http://127.0.0.1:{LLAMA_PORT}/slots | grep -q is_processing",
            timeout=10,
        )
        machine.succeed(f"echo '[]' > {ANNOTS_FILE}")

    with subtest("Start annotation service"):
        machine.succeed(
            f"systemd-run --unit=llama-annot "
            f"--setenv=LLAMA_CPP_URL=http://127.0.0.1:{LLAMA_PORT} "
            f"--setenv=GRAFANA_URL=http://127.0.0.1:{GRAFANA_PORT} "
            f"--setenv=STATE_FILE={STATE_FILE} "
            f"--setenv=POLL_INTERVAL=2 "
            f"{PYTHON} {SCRIPT}"
        )
        time.sleep(3)

    with subtest("No annotations when slots are idle"):
        annots = read_annotations()
        assert annots == [], f"Expected no annotations, got: {annots}"

    with subtest("Annotation created when slot starts processing"):
        set_slots([{"id": 0, "is_processing": True, "next_token": {"n_decoded": 0}}])
        machine.wait_until_succeeds(
            f"cat {ANNOTS_FILE} | {PYTHON} -c "
            f"\"import sys,json; a=json.load(sys.stdin); exit(0 if a else 1)\"",
            timeout=15,
        )
        annots = read_annotations()
        assert len(annots) == 1, f"Expected 1 annotation, got: {annots}"
        assert "llama-cpp" in annots[0].get("tags", []), f"Missing tag: {annots[0]}"
        assert "slot 0" in annots[0]["text"], f"Missing slot info: {annots[0]['text']}"
        assert "timeEnd" not in annots[0], f"timeEnd should not be set: {annots[0]}"

    with subtest("Annotation closed when slot stops processing"):
        set_slots([{"id": 0, "is_processing": False, "next_token": {"n_decoded": 42}}])
        machine.wait_until_succeeds(
            f"cat {ANNOTS_FILE} | {PYTHON} -c "
            f"\"import sys,json; a=json.load(sys.stdin); exit(0 if a and 'timeEnd' in a[0] else 1)\"",
            timeout=15,
        )
        annots = read_annotations()
        assert len(annots) == 1, f"Expected 1, got: {annots}"
        assert "timeEnd" in annots[0], f"timeEnd missing: {annots[0]}"
        assert annots[0]["timeEnd"] > annots[0]["time"], "timeEnd should be after time"
        assert "42 tokens" in annots[0].get("text", ""), f"Token count missing: {annots[0]}"

    with subtest("State survives restart"):
        set_slots([{"id": 0, "is_processing": True, "next_token": {"n_decoded": 0}}])
        machine.wait_until_succeeds(
            f"cat {ANNOTS_FILE} | {PYTHON} -c "
            f"\"import sys,json; a=json.load(sys.stdin); exit(0 if len(a)==2 else 1)\"",
            timeout=15,
        )
        machine.succeed("systemctl stop llama-annot || true")
        time.sleep(1)
        machine.succeed(
            f"systemd-run --unit=llama-annot-2 "
            f"--setenv=LLAMA_CPP_URL=http://127.0.0.1:{LLAMA_PORT} "
            f"--setenv=GRAFANA_URL=http://127.0.0.1:{GRAFANA_PORT} "
            f"--setenv=STATE_FILE={STATE_FILE} "
            f"--setenv=POLL_INTERVAL=2 "
            f"{PYTHON} {SCRIPT}"
        )
        time.sleep(4)
        annots = read_annotations()
        assert len(annots) == 2, f"Restart should not duplicate, got: {annots}"
  '';
}

View File

@@ -28,6 +28,9 @@ in
  # zfs scrub annotations test
  zfsScrubAnnotationsTest = handleTest ./zfs-scrub-annotations.nix;
  # llama-cpp annotation service test
  llamaCppAnnotationsTest = handleTest ./llama-cpp-annotations.nix;
  # ntfy alerts test
  ntfyAlertsTest = handleTest ./ntfy-alerts.nix;