llama-cpp: do logging
All checks were successful: Build and Deploy / deploy (push) succeeded in 2m27s.
@@ -4,8 +4,12 @@
   config,
   inputs,
   lib,
+  utils,
   ...
 }:
+let
+  cfg = config.services.llama-cpp;
+in
 {
   services.llama-cpp = {
     enable = true;
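The utils argument threaded in above is the NixOS module system's special argument; its escapeSystemdExecArgs joins a list of flags into a single string escaped for systemd's exec-line syntax (quoting and systemd-special characters such as % and $). A rough sketch of what it evaluates to, using a hypothetical flag list not taken from this config:

    utils.escapeSystemdExecArgs [ "-c" "4096" "--flash-attn" ]
    # => roughly the string "-c 4096 --flash-attn",
    #    with each argument escaped for use in an ExecStart= line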
@@ -37,6 +41,16 @@
   # have to do this in order to get vulkan to work
   systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;
+
+  # upstream module hardcodes --log-disable; override ExecStart to keep logs
+  # so we can see prompt processing progress via journalctl
+  systemd.services.llama-cpp.serviceConfig.ExecStart = lib.mkForce (
+    "${cfg.package}/bin/llama-server"
+    + " --host ${cfg.host}"
+    + " --port ${toString cfg.port}"
+    + " -m ${cfg.model}"
+    + " ${utils.escapeSystemdExecArgs cfg.extraFlags}"
+  );
 
   # Auth handled by llama-cpp --api-key-file (Bearer token).
   # No caddy_auth — the API key is the auth layer, and caddy_auth's basic
   # auth would block Bearer-only clients like oh-my-pi.
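With --log-disable gone from ExecStart, llama-server's output (including prompt processing progress) should land in the journal. A quick way to confirm, using standard systemd tooling and the unit name implied by the module above:

    journalctl -u llama-cpp.service -f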