{
  pkgs,
  service_configs,
  config,
  inputs,
  lib,
  utils,
  ...
}:
let
  cfg = config.services.llama-cpp;
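
  # GGUF weights come straight from Hugging Face, pinned by the fetchurl
  # hash below so redeploys keep serving the exact same file.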
  modelUrl = "https://huggingface.co/unsloth/gemma-4-E4B-it-GGUF/resolve/main/gemma-4-E4B-it-Q4_K_M.gguf";
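  # Served model name, derived from the filename: "gemma-4-E4B-it-Q4_K_M".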
  modelAlias = lib.removeSuffix ".gguf" (builtins.baseNameOf modelUrl);
in
{
  services.llama-cpp = {
    enable = true;
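
    # fetchurl puts the GGUF in the Nix store; toString yields the store
    # path that ends up as llama-server's -m argument.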
    model = toString (
      pkgs.fetchurl {
        url = modelUrl;
        sha256 = "sha256-4bxEJwn+eAqksuybIsFqf83/VC8X8B7Q4yAxFNKPnzQ=";
      }
    );
    port = service_configs.ports.private.llama_cpp.port;
    host = "0.0.0.0";
    package = lib.optimizePackage inputs.llamacpp.packages.${pkgs.system}.default;
    extraFlags = [
      # "-ngl"
      # "12"
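      # 32768-token context window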
"-c"
|
|
"32768"
|
|
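      # quantize the K and V caches to 8-bit (default is f16), roughly
      # halving KV-cache memory use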
"-ctk"
|
|
"q8_0"
|
|
"-ctv"
|
|
"turbo4"
|
|
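      # enable flash attention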
"-fa"
|
|
"on"
|
|
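      # key is read from the age-encrypted secret at startup; clients must
      # send it as a Bearer token (see the auth note below)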
"--api-key-file"
|
|
config.age.secrets.llama-cpp-api-key.path
|
|
"--metrics"
|
|
"--alias"
|
|
modelAlias
|
|
];
|
|
};
|
|
|
|

  # Have to disable DynamicUser for Vulkan to work; the transient user
  # otherwise can't open the GPU device nodes.
  systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;

  # The upstream module hardcodes --log-disable; override ExecStart to keep
  # logs so we can see prompt-processing progress via journalctl:
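  #   journalctl -u llama-cpp -f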
  systemd.services.llama-cpp.serviceConfig.ExecStart = lib.mkForce (
    "${cfg.package}/bin/llama-server"
    + " --host ${cfg.host}"
    + " --port ${toString cfg.port}"
    + " -m ${cfg.model}"
    + " ${utils.escapeSystemdExecArgs cfg.extraFlags}"
  );

  # Auth handled by llama-cpp --api-key-file (Bearer token).
  # No caddy_auth: the API key is the auth layer, and caddy_auth's basic
  # auth would block Bearer-only clients like oh-my-pi.
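  # Example call through the proxy (hypothetical domain and key):
  #   curl -H "Authorization: Bearer $LLAMA_KEY" https://llm.example.com/v1/models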
  services.caddy.virtualHosts."llm.${service_configs.https.domain}".extraConfig = ''
    reverse_proxy :${toString config.services.llama-cpp.port}
  '';
}