llama-cpp: xmrig + grafana hooks
@@ -1,5 +1,4 @@
{
-  lib,
  pkgs,
  ...
}:
@@ -8,49 +7,7 @@ let
  script = ../services/llama-cpp-annotations.py;
  python = pkgs.python3;

-  mockLlamaCpp = pkgs.writeText "mock-llama-cpp-server.py" ''
-    import http.server, json, sys, os
-
-    PORT = int(sys.argv[1])
-    STATE_FILE = sys.argv[2]
-
-    if not os.path.exists(STATE_FILE):
-        with open(STATE_FILE, "w") as f:
-            json.dump([{"id": 0, "is_processing": False, "next_token": {"n_decoded": 0}}], f)
-
-    class Handler(http.server.BaseHTTPRequestHandler):
-        def log_message(self, fmt, *args):
-            pass
-
-        def _json(self, code, body):
-            data = json.dumps(body).encode()
-            self.send_response(code)
-            self.send_header("Content-Type", "application/json")
-            self.end_headers()
-            self.wfile.write(data)
-
-        def do_GET(self):
-            if self.path == "/slots":
-                with open(STATE_FILE) as f:
-                    slots = json.load(f)
-                self._json(200, slots)
-            else:
-                self.send_response(404)
-                self.end_headers()
-
-        def do_POST(self):
-            if self.path == "/test/set-slots":
-                length = int(self.headers.get("Content-Length", 0))
-                body = json.loads(self.rfile.read(length)) if length else []
-                with open(STATE_FILE, "w") as f:
-                    json.dump(body, f)
-                self._json(200, {"ok": True})
-            else:
-                self.send_response(404)
-                self.end_headers()
-
-    http.server.HTTPServer(("127.0.0.1", PORT), Handler).serve_forever()
-  '';
+  mockLlamaProcess = ./mock-llama-server-proc.py;
in
pkgs.testers.runNixOSTest {
  name = "llama-cpp-annotations";
@@ -61,6 +18,7 @@ pkgs.testers.runNixOSTest {
      environment.systemPackages = [
        pkgs.python3
        pkgs.curl
+        pkgs.procps
      ];
    };

@@ -69,25 +27,23 @@ pkgs.testers.runNixOSTest {
    import time

    GRAFANA_PORT = 13000
    LLAMA_PORT = 16688
    ANNOTS_FILE = "/tmp/annotations.json"
-    SLOTS_FILE = "/tmp/llama-slots.json"
+    LLAMA_STATE = "/tmp/llama-state.txt"
    STATE_FILE = "/tmp/llama-annot-state.json"
    PYTHON = "${python}/bin/python3"
    MOCK_GRAFANA = "${mockGrafana}"
-    MOCK_LLAMA = "${mockLlamaCpp}"
+    MOCK_LLAMA = "${mockLlamaProcess}"
    SCRIPT = "${script}"

    def read_annotations():
        out = machine.succeed(f"cat {ANNOTS_FILE} 2>/dev/null || echo '[]'")
        return json.loads(out.strip())

-    def set_slots(slots):
-        machine.succeed(
-            f"curl -sf -X POST http://127.0.0.1:{LLAMA_PORT}/test/set-slots "
-            f"-H 'Content-Type: application/json' "
-            f"-d '{json.dumps(slots)}'"
-        )
+    def set_busy():
+        machine.succeed(f"echo busy > {LLAMA_STATE}")
+
+    def set_idle():
+        machine.succeed(f"echo idle > {LLAMA_STATE}")

    start_all()
    machine.wait_for_unit("multi-user.target")
@@ -98,10 +54,7 @@ pkgs.testers.runNixOSTest {
        f"systemd-run --unit=mock-grafana {PYTHON} {MOCK_GRAFANA} {GRAFANA_PORT} {ANNOTS_FILE}"
    )
-    machine.succeed(
-        f"echo '[{{\"id\": 0, \"is_processing\": false, \"next_token\": {{\"n_decoded\": 0}}}}]' > {SLOTS_FILE}"
-    )
    machine.succeed(
-        f"systemd-run --unit=mock-llama {PYTHON} {MOCK_LLAMA} {LLAMA_PORT} {SLOTS_FILE}"
+        f"systemd-run --unit=mock-llama {PYTHON} {MOCK_LLAMA} {LLAMA_STATE}"
    )
    machine.wait_until_succeeds(
        f"curl -sf http://127.0.0.1:{GRAFANA_PORT}/api/annotations -X POST "
@@ -109,7 +62,7 @@ pkgs.testers.runNixOSTest {
        timeout=10,
    )
    machine.wait_until_succeeds(
-        f"curl -sf http://127.0.0.1:{LLAMA_PORT}/slots | grep -q is_processing",
+        "pgrep -x llama-server",
        timeout=10,
    )
    machine.succeed(f"echo '[]' > {ANNOTS_FILE}")
@@ -117,62 +70,62 @@ pkgs.testers.runNixOSTest {
    with subtest("Start annotation service"):
        machine.succeed(
            f"systemd-run --unit=llama-annot "
            f"--setenv=LLAMA_CPP_URL=http://127.0.0.1:{LLAMA_PORT} "
            f"--setenv=GRAFANA_URL=http://127.0.0.1:{GRAFANA_PORT} "
            f"--setenv=STATE_FILE={STATE_FILE} "
            f"--setenv=POLL_INTERVAL=2 "
            f"--setenv=CPU_THRESHOLD=10 "
            f"{PYTHON} {SCRIPT}"
        )
-        time.sleep(3)
+        time.sleep(5)

-    with subtest("No annotations when slots are idle"):
+    with subtest("No annotations when idle"):
        annots = read_annotations()
        assert annots == [], f"Expected no annotations, got: {annots}"

-    with subtest("Annotation created when slot starts processing"):
-        set_slots([{"id": 0, "is_processing": True, "next_token": {"n_decoded": 0}}])
+    with subtest("Annotation created when llama-server becomes busy"):
+        set_busy()
        machine.wait_until_succeeds(
            f"cat {ANNOTS_FILE} | {PYTHON} -c "
            f"\"import sys,json; a=json.load(sys.stdin); exit(0 if a else 1)\"",
-            timeout=15,
+            timeout=20,
        )
        annots = read_annotations()
        assert len(annots) == 1, f"Expected 1 annotation, got: {annots}"
        assert "llama-cpp" in annots[0].get("tags", []), f"Missing tag: {annots[0]}"
        assert "slot 0" in annots[0]["text"], f"Missing slot info: {annots[0]['text']}"
        assert "LLM request" in annots[0]["text"], f"Missing text: {annots[0]['text']}"
        assert "timeEnd" not in annots[0], f"timeEnd should not be set: {annots[0]}"

-    with subtest("Annotation closed when slot stops processing"):
-        set_slots([{"id": 0, "is_processing": False, "next_token": {"n_decoded": 42}}])
+    with subtest("Annotation closed when llama-server becomes idle"):
+        set_idle()
        machine.wait_until_succeeds(
            f"cat {ANNOTS_FILE} | {PYTHON} -c "
            f"\"import sys,json; a=json.load(sys.stdin); exit(0 if a and 'timeEnd' in a[0] else 1)\"",
-            timeout=15,
+            timeout=20,
        )
        annots = read_annotations()
        assert len(annots) == 1, f"Expected 1, got: {annots}"
        assert "timeEnd" in annots[0], f"timeEnd missing: {annots[0]}"
        assert annots[0]["timeEnd"] > annots[0]["time"], "timeEnd should be after time"
        assert "42 tokens" in annots[0].get("text", ""), f"Token count missing: {annots[0]}"
        assert "s)" in annots[0].get("text", ""), f"Duration missing: {annots[0]}"

    with subtest("State survives restart"):
-        set_slots([{"id": 0, "is_processing": True, "next_token": {"n_decoded": 0}}])
+        set_busy()
        machine.wait_until_succeeds(
            f"cat {ANNOTS_FILE} | {PYTHON} -c "
            f"\"import sys,json; a=json.load(sys.stdin); exit(0 if len(a)==2 else 1)\"",
-            timeout=15,
+            timeout=20,
        )
        machine.succeed("systemctl stop llama-annot || true")
        time.sleep(1)
        machine.succeed(
            f"systemd-run --unit=llama-annot-2 "
            f"--setenv=LLAMA_CPP_URL=http://127.0.0.1:{LLAMA_PORT} "
            f"--setenv=GRAFANA_URL=http://127.0.0.1:{GRAFANA_PORT} "
            f"--setenv=STATE_FILE={STATE_FILE} "
            f"--setenv=POLL_INTERVAL=2 "
            f"--setenv=CPU_THRESHOLD=10 "
            f"{PYTHON} {SCRIPT}"
        )
-        time.sleep(4)
+        time.sleep(6)
        annots = read_annotations()
        assert len(annots) == 2, f"Restart should not duplicate, got: {annots}"
  '';
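Note: `services/llama-cpp-annotations.py` itself is not part of this diff. As context for the assertions above, here is a minimal sketch of the open/close flow they imply, using Grafana's annotations HTTP API (`POST /api/annotations` to open, `PATCH /api/annotations/:id` to add `timeEnd`). The helper names, payload text, and the no-auth localhost URL are illustrative assumptions, not the service's actual code.

```python
# Sketch only: open an annotation when llama-server goes busy, close it when idle.
import json
import time
import urllib.request

GRAFANA_URL = "http://127.0.0.1:3000"  # assumption: local Grafana, no auth


def _call(method, path, payload):
    req = urllib.request.Request(
        f"{GRAFANA_URL}{path}",
        data=json.dumps(payload).encode(),
        headers={"Content-Type": "application/json"},
        method=method,
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())


def open_annotation(slot_id=0):
    # POST creates the annotation; the returned id is what would be persisted
    # in STATE_FILE so a restarted service can still close it later.
    started_ms = int(time.time() * 1000)
    created = _call("POST", "/api/annotations", {
        "time": started_ms,
        "tags": ["llama-cpp"],
        "text": f"LLM request (slot {slot_id})",
    })
    return created["id"], started_ms


def close_annotation(annot_id, started_ms, slot_id=0, n_decoded=0):
    # PATCH adds timeEnd, turning the point into a region, and rewrites the
    # text with the token count and duration the test asserts on.
    ended_ms = int(time.time() * 1000)
    _call("PATCH", f"/api/annotations/{annot_id}", {
        "timeEnd": ended_ms,
        "text": f"LLM request (slot {slot_id}): {n_decoded} tokens "
                f"({(ended_ms - started_ms) / 1000:.1f}s)",
    })
```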
tests/llama-cpp-xmrig-pause.nix (new file, 162 lines)
@@ -0,0 +1,162 @@
{
  pkgs,
  ...
}:
let
  script = ../services/llama-cpp-xmrig-pause.py;
  python = pkgs.python3;

  # SmolLM-135M Q2_K: 85MB, modern GGUFv3, generates ~30 tok/s on one CPU
  # thread — slow enough that a 200-token request keeps the process busy for
  # several seconds, fast enough that tests don't crawl.
  tinyModel = pkgs.fetchurl {
    url = "https://huggingface.co/QuantFactory/SmolLM-135M-GGUF/resolve/main/SmolLM-135M.Q2_K.gguf";
    hash = "sha256-DX46drPNJILNba21xfY2tyE0/yPWgOhz43gJdeSYKh4=";
  };
in
pkgs.testers.runNixOSTest {
  name = "llama-cpp-xmrig-pause";

  nodes.machine =
    { pkgs, ... }:
    {
      environment.systemPackages = [
        pkgs.python3
        pkgs.procps
        pkgs.curl
        pkgs.llama-cpp
      ];

      # Mock xmrig as a simple sleep process that can be stopped/started.
      systemd.services.xmrig = {
        description = "Mock xmrig miner";
        serviceConfig = {
          ExecStart = "${pkgs.coreutils}/bin/sleep infinity";
          Type = "simple";
        };
        wantedBy = [ "multi-user.target" ];
      };
    };

  testScript = ''
    import time

    PORT = 18088
    MODEL = "${tinyModel}"
    PYTHON = "${python}/bin/python3"
    SCRIPT = "${script}"

    # Tuned for test speed while remaining realistic.
    # POLL_INTERVAL=1 keeps detection latency low.
    # GRACE_PERIOD=5 is long enough to verify "stays stopped" but short enough
    # that the full test completes in ~2 minutes.
    # CPU_THRESHOLD=10 is low because the VM has limited cores and the model
    # is small — but any active inference still saturates a core.
    POLL_INTERVAL = "1"
    GRACE_PERIOD = "5"
    CPU_THRESHOLD = "10"

    infer_counter = 0

    def send_completion(n_predict=200):
        """Fire a completion request in the background via a transient systemd unit."""
        global infer_counter
        infer_counter += 1
        name = f"infer-{infer_counter}"
        machine.succeed(
            f"systemd-run --unit={name} --property=Type=exec "
            f"curl -sf -X POST http://127.0.0.1:{PORT}/completion "
            f"-H 'Content-Type: application/json' "
            f"-d '{{\"prompt\": \"Once upon a time in a land far away there lived\", \"n_predict\": {n_predict}}}'"
        )
        return name

    def wait_inference_done(unit_name, timeout=60):
        """Wait for a background inference request to finish."""
        machine.wait_until_fails(
            f"systemctl is-active {unit_name}",
            timeout=timeout,
        )

    start_all()
    machine.wait_for_unit("multi-user.target")
    machine.wait_for_unit("xmrig.service")

    with subtest("Start llama-server"):
        machine.succeed(
            f"systemd-run --unit=llama-server "
            # Single inference thread to maximise per-core CPU%, which is
            # what the monitor measures. Keeps token generation slow enough
            # (~30 tok/s) that a 200-token request sustains load for seconds.
            f"llama-server --model {MODEL} --port {PORT} --ctx-size 512 -t 1 -np 1"
        )
        machine.wait_until_succeeds(
            f"curl -sf http://127.0.0.1:{PORT}/health",
            timeout=30,
        )
        machine.succeed("pgrep -x llama-server")

    with subtest("Start pause monitor"):
        machine.succeed(
            f"systemd-run --unit=llama-xmrig-pause "
            f"--setenv=POLL_INTERVAL={POLL_INTERVAL} "
            f"--setenv=GRACE_PERIOD={GRACE_PERIOD} "
            f"--setenv=CPU_THRESHOLD={CPU_THRESHOLD} "
            f"{PYTHON} {SCRIPT}"
        )
        # The monitor needs two consecutive polls to compute a CPU delta.
        # Wait for baseline to stabilise.
        time.sleep(3)

    with subtest("xmrig stays running while llama-server is idle"):
        machine.succeed("systemctl is-active xmrig")

    with subtest("xmrig stopped during prompt processing"):
        unit = send_completion(n_predict=200)
        machine.wait_until_fails("systemctl is-active xmrig", timeout=20)

    with subtest("xmrig remains stopped during grace period after inference ends"):
        wait_inference_done(unit)
        # Inference just finished. The monitor will need 1-2 polls to detect
        # idle, then the grace period starts. Checking 2s after completion
        # is well within the 5s grace window.
        time.sleep(2)
        machine.fail("systemctl is-active xmrig")

    with subtest("xmrig resumes after grace period expires"):
        # Already idle since previous subtest. Grace period (5s) plus
        # detection delay (~2 polls) means xmrig should restart within ~8s.
        machine.wait_until_succeeds("systemctl is-active xmrig", timeout=15)

    with subtest("Sequential prompts do not cause xmrig flapping"):
        # First prompt — stop xmrig
        unit1 = send_completion(n_predict=200)
        machine.wait_until_fails("systemctl is-active xmrig", timeout=20)
        wait_inference_done(unit1)

        # Brief idle gap — shorter than grace period
        time.sleep(2)

        # Second prompt arrives before grace period expires, resetting it
        unit2 = send_completion(n_predict=200)
        time.sleep(3)

        # xmrig must still be stopped
        machine.fail("systemctl is-active xmrig")

        wait_inference_done(unit2)
        machine.wait_until_succeeds("systemctl is-active xmrig", timeout=15)

    with subtest("xmrig stays stopped during sustained inference"):
        unit = send_completion(n_predict=500)
        machine.wait_until_fails("systemctl is-active xmrig", timeout=20)

        # Stay busy longer than the grace period to prove continuous
        # activity keeps xmrig stopped indefinitely.
        time.sleep(8)
        machine.fail("systemctl is-active xmrig")

        wait_inference_done(unit)
        machine.wait_until_succeeds("systemctl is-active xmrig", timeout=15)
  '';
}
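`services/llama-cpp-xmrig-pause.py` is likewise outside this diff. For context, a minimal sketch of the loop this test drives, under the assumptions the test encodes: locate `llama-server` by its comm name, estimate its CPU% from `/proc/<pid>/stat` deltas between polls, stop `xmrig.service` while usage exceeds `CPU_THRESHOLD`, and start it again only after `GRACE_PERIOD` seconds of continuous idleness. Function names and the exact accounting are illustrative, not the real monitor.

```python
# Sketch only: poll llama-server CPU usage and gate xmrig on it.
import os
import subprocess
import time

POLL_INTERVAL = float(os.environ.get("POLL_INTERVAL", "5"))
GRACE_PERIOD = float(os.environ.get("GRACE_PERIOD", "60"))
CPU_THRESHOLD = float(os.environ.get("CPU_THRESHOLD", "10"))
CLK_TCK = os.sysconf("SC_CLK_TCK")


def find_pid(comm="llama-server"):
    # pgrep -x matches /proc/<pid>/comm, the name the mock sets via prctl.
    out = subprocess.run(["pgrep", "-x", comm], capture_output=True, text=True)
    return int(out.stdout.split()[0]) if out.returncode == 0 and out.stdout else None


def cpu_ticks(pid):
    # Fields 14 and 15 of /proc/<pid>/stat are utime and stime, in clock ticks.
    with open(f"/proc/{pid}/stat") as f:
        fields = f.read().rsplit(")", 1)[1].split()
    return int(fields[11]) + int(fields[12])


def main():
    prev = None
    last_busy = 0.0
    while True:
        pid = find_pid()
        busy = False
        if pid is not None:
            try:
                now = cpu_ticks(pid)
                if prev is not None:
                    pct = 100.0 * (now - prev) / CLK_TCK / POLL_INTERVAL
                    busy = pct > CPU_THRESHOLD
                prev = now
            except FileNotFoundError:
                prev = None  # process exited between pgrep and the read
        if busy:
            last_busy = time.monotonic()
            subprocess.run(["systemctl", "stop", "xmrig.service"], check=False)
        elif time.monotonic() - last_busy > GRACE_PERIOD:
            subprocess.run(["systemctl", "start", "xmrig.service"], check=False)
        time.sleep(POLL_INTERVAL)


if __name__ == "__main__":
    main()
```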
tests/mock-llama-server-proc.py (new file, 42 lines)
@@ -0,0 +1,42 @@
#!/usr/bin/env python3
"""
Mock llama-server process for NixOS VM tests.

Sets /proc/self/comm to "llama-server" via prctl so that monitoring scripts
(llama-cpp-annotations, llama-cpp-xmrig-pause) can discover this process
the same way they discover the real one.

Usage: python3 mock-llama-server-proc.py <state-file>

The state file controls behavior:
"busy" -> burn CPU in a tight loop (simulates prompt processing / inference)
"idle" -> sleep (simulates waiting for requests)
"""

import ctypes
import ctypes.util
import sys
import time

STATE_FILE = sys.argv[1]

# PR_SET_NAME = 15, sets /proc/self/comm
libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
libc.prctl(15, b"llama-server", 0, 0, 0)

with open(STATE_FILE, "w") as f:
    f.write("idle")

while True:
    try:
        with open(STATE_FILE) as f:
            state = f.read().strip()
    except Exception:
        state = "idle"

    if state == "busy":
        end = time.monotonic() + 0.1
        while time.monotonic() < end:
            _ = sum(range(10000))
    else:
        time.sleep(0.5)
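A quick way to exercise the mock by hand, outside the VM test; the state-file path below is an arbitrary example. The point is that `pgrep -x llama-server` finds the process thanks to the prctl call, and its CPU usage tracks whatever the state file says (the mock re-reads it every 0.5s when idle, every 0.1s when busy).

```python
# Sketch: run the mock manually and flip it between idle and busy.
import subprocess
import time

state = "/tmp/llama-state.txt"  # arbitrary example path
mock = subprocess.Popen(["python3", "mock-llama-server-proc.py", state])
time.sleep(1)

# prctl set the comm name, so process-name matching works as for the real binary.
subprocess.run(["pgrep", "-x", "llama-server"], check=True)

# Flip to busy: the mock starts burning CPU within ~0.5s, which is what the
# pause monitor and the annotation service react to.
with open(state, "w") as f:
    f.write("busy")
time.sleep(5)  # inspect with e.g. `top -p <pid>` while this runs

with open(state, "w") as f:
    f.write("idle")

mock.terminate()
```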
@@ -28,9 +28,9 @@ in
  # zfs scrub annotations test
  zfsScrubAnnotationsTest = handleTest ./zfs-scrub-annotations.nix;

-  # llama-cpp annotation service test
+  # llama-cpp tests
  llamaCppAnnotationsTest = handleTest ./llama-cpp-annotations.nix;

+  llamaCppXmrigPauseTest = handleTest ./llama-cpp-xmrig-pause.nix;
  # ntfy alerts test
  ntfyAlertsTest = handleTest ./ntfy-alerts.nix;