# nixos/services/llama-cpp.nix
{
  pkgs,
  service_configs,
  config,
  inputs,
  lib,
  utils,
  ...
}:
let
  cfg = config.services.llama-cpp;
  modelUrl = "https://huggingface.co/bartowski/google_gemma-4-E2B-it-GGUF/resolve/main/google_gemma-4-E2B-it-IQ2_M.gguf";
  modelAlias = lib.removeSuffix ".gguf" (baseNameOf modelUrl);
in
{
  imports = [
    (lib.mkCaddyReverseProxy {
      subdomain = "llm";
      port = service_configs.ports.private.llama_cpp.port;
    })
  ];
  # Per-vhost Caddy access log for fail2ban to tail. llama.cpp's own
  # "Invalid API Key" warning has no client IP, and behind Caddy the
  # llama-server access log only sees 127.0.0.1. Caddy's JSON log has
  # the real client IP via request.remote_ip.
  services.caddy.virtualHosts."llm.${service_configs.https.domain}".extraConfig = ''
    log {
      output file /var/log/caddy/access-llama-cpp.log
      format json
    }
  '';
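  # For reference, a Caddy JSON access-log entry looks roughly like this
  # (truncated, illustrative values only):
  #   {"ts":1700000000.123,"request":{"remote_ip":"203.0.113.7",...},"status":401,...}
  # The fail2ban failregex below keys on the remote_ip and status fields.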
  # Ensure the log file exists on boot so fail2ban can start before Caddy
  # has received its first request.
  systemd.tmpfiles.rules = [
    "d /var/log/caddy 755 caddy caddy"
    "f /var/log/caddy/access-llama-cpp.log 644 caddy caddy"
  ];
  # Ban IPs that repeatedly fail API key validation. llama.cpp's public
  # endpoints (/, /index.html, /bundle.{js,css}, /health, /v1/models,
  # /v1/health, /models, /api/tags, /props) bypass auth, so any 401 on
  # this vhost is an authenticated-endpoint failure -- no need to filter
  # on the Authorization header the way caddy-auth does.
  services.fail2ban.jails.llama-cpp = {
    enabled = true;
    settings = {
      backend = "auto";
      port = "http,https";
      logpath = "/var/log/caddy/access-llama-cpp.log";
      # defaults: maxretry=5, findtime=10m, bantime=10m
      # NAT hairpinning sends LAN traffic via the router IP. Don't ban
      # 192.168.1.0/24 or we lock ourselves out.
      ignoreip = "127.0.0.1/8 ::1 192.168.1.0/24";
    };
    filter.Definition = {
      failregex = ''^.*"remote_ip":"<HOST>".*"status":401.*$'';
      ignoreregex = "";
      datepattern = ''"ts":{Epoch}\.'';
    };
  };
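  # The filter can be sanity-checked against real traffic with fail2ban-regex
  # (the filter path below is an assumption about where the NixOS module writes
  # the generated definition; adjust if it lands elsewhere):
  #   fail2ban-regex /var/log/caddy/access-llama-cpp.log /etc/fail2ban/filter.d/llama-cpp.conf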
  services.llama-cpp = {
    enable = true;
    model = toString (
      pkgs.fetchurl {
        url = modelUrl;
        sha256 = "17e869ac54d0e59faa884d5319fc55ad84cd866f50f0b3073fbb25accc875a23";
      }
    );
    port = service_configs.ports.private.llama_cpp.port;
    host = "0.0.0.0";
    package = lib.optimizePackage (
      inputs.llamacpp.packages.${pkgs.system}.vulkan.overrideAttrs (old: {
        patches = (old.patches or [ ]) ++ [
        ];
      })
    );
    extraFlags = [
      # offload all layers to the GPU
      "-ngl"
      "999"
      # total context size
      "-c"
      "65536"
      # KV cache key/value quantization type
      "-ctk"
      "turbo3"
      "-ctv"
      "turbo3"
      # flash attention
      "-fa"
      "on"
      "--api-key-file"
      config.age.secrets.llama-cpp-api-key.path
      # expose Prometheus metrics at /metrics
      "--metrics"
      # model name reported by the API
      "--alias"
      modelAlias
      # logical and physical batch sizes
      "-b"
      "4096"
      "-ub"
      "4096"
      # number of server slots processed concurrently
      "--parallel"
      "2"
    ];
  };
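  # Quick smoke test of an authenticated endpoint once the service is up, via
  # llama-server's OpenAI-compatible chat API (domain, secret path, and alias
  # are placeholders for this config's actual values):
  #   curl https://llm.<domain>/v1/chat/completions \
  #     -H "Authorization: Bearer $(cat /path/to/llama-cpp-api-key)" \
  #     -H "Content-Type: application/json" \
  #     -d '{"model":"<alias>","messages":[{"role":"user","content":"hi"}]}'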
  # DynamicUser (set by the upstream module) breaks Vulkan GPU access; force it
  # off so the service can use the GPU.
  systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;
  # ANV driver's turbo3 shader compilation exceeds the default 8 MB thread stack.
  systemd.services.llama-cpp.serviceConfig.LimitSTACK = lib.mkForce "67108864"; # 64 MB soft+hard
  # llama-server tries to create ~/.cache; ProtectSystem=strict + impermanent
  # root make /root read-only. Give it a writable cache dir and point HOME there.
  systemd.services.llama-cpp.serviceConfig.CacheDirectory = "llama-cpp";
  systemd.services.llama-cpp.environment.HOME = "/var/cache/llama-cpp";
  # turbo3 KV cache quantization runs a 14-barrier WHT butterfly per 128-element
  # workgroup in SET_ROWS. With multiple slots decoding concurrently and
  # batch=4096, the combined GPU dispatch can exceed the default i915 CCS engine
  # preempt timeout (7.5s), causing GPU HANG -> ErrorDeviceLost. Increase the
  # compute engine timeouts.
  # Note: batch<4096 is not viable -- GDN chunked mode needs a larger compute
  # buffer at smaller batch sizes, exceeding the A380's 6 GB VRAM.
  # '+' prefix runs as root regardless of service User=.
  systemd.services.llama-cpp.serviceConfig.ExecStartPre = [
    "+${pkgs.writeShellScript "set-gpu-compute-timeout" ''
      for f in /sys/class/drm/card*/engine/ccs*/preempt_timeout_ms; do
        [ -w "$f" ] && echo 30000 > "$f"
      done
      for f in /sys/class/drm/card*/engine/ccs*/heartbeat_interval_ms; do
        [ -w "$f" ] && echo 10000 > "$f"
      done
    ''}"
  ];
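  # After a (re)start the applied values can be checked with, e.g.:
  #   cat /sys/class/drm/card*/engine/ccs*/preempt_timeout_ms
  # (same sysfs paths as above; present only when the driver exposes CCS engines).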
  # Upstream module hardcodes --log-disable; override ExecStart to keep logs
  # so we can see prompt processing progress via journalctl.
  systemd.services.llama-cpp.serviceConfig.ExecStart = lib.mkForce (
    "${cfg.package}/bin/llama-server"
    + " --host ${cfg.host}"
    + " --port ${toString cfg.port}"
    + " -m ${cfg.model}"
    + " ${utils.escapeSystemdExecArgs cfg.extraFlags}"
  );
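  # e.g. `journalctl -fu llama-cpp.service` then shows llama-server's own output,
  # including prompt-processing progress.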
}