phase 2: promote services/, tests/, patches/, lib/, scripts/
This commit is contained in:
@@ -1,287 +0,0 @@
|
||||
{
|
||||
inputs,
|
||||
pkgs,
|
||||
service_configs,
|
||||
...
|
||||
}:
|
||||
inputs.nixpkgs.lib.extend (
|
||||
final: prev:
|
||||
let
|
||||
lib = prev;
|
||||
in
|
||||
{
|
||||
optimizeWithFlags =
|
||||
pkg: flags:
|
||||
pkg.overrideAttrs (old: {
|
||||
env = (old.env or { }) // {
|
||||
NIX_CFLAGS_COMPILE =
|
||||
(old.env.NIX_CFLAGS_COMPILE or old.NIX_CFLAGS_COMPILE or "")
|
||||
+ " "
|
||||
+ (lib.concatStringsSep " " flags);
|
||||
};
|
||||
});
|
||||
|
||||
optimizePackage =
|
||||
pkg:
|
||||
final.optimizeWithFlags pkg [
|
||||
"-O3"
|
||||
"-march=${service_configs.cpu_arch}"
|
||||
"-mtune=${service_configs.cpu_arch}"
|
||||
];
|
||||
|
||||
vpnNamespaceOpenPort =
|
||||
port: service:
|
||||
{ ... }:
|
||||
{
|
||||
vpnNamespaces.wg = {
|
||||
portMappings = [
|
||||
{
|
||||
from = port;
|
||||
to = port;
|
||||
}
|
||||
];
|
||||
|
||||
openVPNPorts = [
|
||||
{
|
||||
port = port;
|
||||
protocol = "both";
|
||||
}
|
||||
];
|
||||
};
|
||||
systemd.services.${service}.vpnConfinement = {
|
||||
enable = true;
|
||||
vpnNamespace = "wg";
|
||||
};
|
||||
};
|
||||
|
||||
serviceMountWithZpool =
|
||||
serviceName: zpool: dirs:
|
||||
{ pkgs, config, ... }:
|
||||
{
|
||||
systemd.services."${serviceName}-mounts" = {
|
||||
wants = [
|
||||
"zfs.target"
|
||||
"zfs-mount.service"
|
||||
]
|
||||
++ lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
|
||||
after = [ "zfs-mount.service" ] ++ lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
|
||||
before = [ "${serviceName}.service" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStart = [
|
||||
(lib.getExe (
|
||||
pkgs.writeShellApplication {
|
||||
name = "ensure-zfs-mounts-with-pool-${serviceName}-${zpool}";
|
||||
runtimeInputs = with pkgs; [
|
||||
gawk
|
||||
coreutils
|
||||
config.boot.zfs.package
|
||||
];
|
||||
|
||||
text = ''
|
||||
set -euo pipefail
|
||||
|
||||
echo "Ensuring ZFS mounts for service: ${serviceName} (pool: ${zpool})"
|
||||
echo "Directories: ${lib.strings.concatStringsSep ", " dirs}"
|
||||
|
||||
# Validate mounts exist (ensureZfsMounts already has proper PATH)
|
||||
${lib.getExe pkgs.ensureZfsMounts} ${lib.strings.concatStringsSep " " dirs}
|
||||
|
||||
# Additional runtime check: verify paths are on correct zpool
|
||||
${lib.optionalString (zpool != "") ''
|
||||
echo "Verifying ZFS mountpoints are on pool '${zpool}'..."
|
||||
|
||||
if ! zfs_list_output=$(zfs list -H -o name,mountpoint 2>&1); then
|
||||
echo "ERROR: Failed to query ZFS datasets: $zfs_list_output" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2043
|
||||
for target in ${lib.strings.concatStringsSep " " dirs}; do
|
||||
echo "Checking: $target"
|
||||
|
||||
# Find dataset that has this mountpoint
|
||||
dataset=$(echo "$zfs_list_output" | awk -v target="$target" '$2 == target {print $1; exit}')
|
||||
|
||||
if [ -z "$dataset" ]; then
|
||||
echo "ERROR: No ZFS dataset found for mountpoint: $target" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract pool name from dataset (first part before /)
|
||||
actual_pool=$(echo "$dataset" | cut -d'/' -f1)
|
||||
|
||||
if [ "$actual_pool" != "${zpool}" ]; then
|
||||
echo "ERROR: ZFS pool mismatch for $target" >&2
|
||||
echo " Expected pool: ${zpool}" >&2
|
||||
echo " Actual pool: $actual_pool" >&2
|
||||
echo " Dataset: $dataset" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "$target is on $dataset (pool: $actual_pool)"
|
||||
done
|
||||
|
||||
echo "All paths verified successfully on pool '${zpool}'"
|
||||
''}
|
||||
|
||||
echo "Mount validation completed for ${serviceName} (pool: ${zpool})"
|
||||
'';
|
||||
}
|
||||
))
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.${serviceName} = {
|
||||
wants = [
|
||||
"${serviceName}-mounts.service"
|
||||
];
|
||||
after = [
|
||||
"${serviceName}-mounts.service"
|
||||
];
|
||||
requires = [
|
||||
"${serviceName}-mounts.service"
|
||||
];
|
||||
};
|
||||
|
||||
# assert that the pool is even enabled
|
||||
#assertions = lib.optionals (zpool != "") [
|
||||
# {
|
||||
# assertion = builtins.elem zpool config.boot.zfs.extraPools;
|
||||
# message = "${zpool} is not enabled in `boot.zfs.extraPools`";
|
||||
# }
|
||||
#];
|
||||
};
|
||||
|
||||
serviceFilePerms =
|
||||
serviceName: tmpfilesRules:
|
||||
{ pkgs, ... }:
|
||||
let
|
||||
confFile = pkgs.writeText "${serviceName}-file-perms.conf" (
|
||||
lib.concatStringsSep "\n" tmpfilesRules
|
||||
);
|
||||
in
|
||||
{
|
||||
systemd.services."${serviceName}-file-perms" = {
|
||||
after = [ "${serviceName}-mounts.service" ];
|
||||
before = [ "${serviceName}.service" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStart = "${pkgs.systemd}/bin/systemd-tmpfiles --create ${confFile}";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.${serviceName} = {
|
||||
wants = [ "${serviceName}-file-perms.service" ];
|
||||
after = [ "${serviceName}-file-perms.service" ];
|
||||
};
|
||||
};
|
||||
# Creates a Caddy virtualHost with reverse_proxy to a local or VPN-namespaced port.
|
||||
# Use `subdomain` for "<name>.${domain}" or `domain` for a full custom domain.
|
||||
# Exactly one of `subdomain` or `domain` must be provided.
|
||||
mkCaddyReverseProxy =
|
||||
{
|
||||
subdomain ? null,
|
||||
domain ? null,
|
||||
port,
|
||||
auth ? false,
|
||||
vpn ? false,
|
||||
}:
|
||||
assert (subdomain != null) != (domain != null);
|
||||
{ config, ... }:
|
||||
let
|
||||
vhostDomain = if domain != null then domain else "${subdomain}.${service_configs.https.domain}";
|
||||
upstream =
|
||||
if vpn then
|
||||
"${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString port}"
|
||||
else
|
||||
":${builtins.toString port}";
|
||||
in
|
||||
{
|
||||
services.caddy.virtualHosts."${vhostDomain}".extraConfig = lib.concatStringsSep "\n" (
|
||||
lib.optional auth "import ${config.age.secrets.caddy_auth.path}" ++ [ "reverse_proxy ${upstream}" ]
|
||||
);
|
||||
};
|
||||
|
||||
# Creates a fail2ban jail with systemd journal backend.
|
||||
# Covers the common pattern: journal-based detection, http/https ports, default thresholds.
|
||||
mkFail2banJail =
|
||||
{
|
||||
name,
|
||||
unitName ? "${name}.service",
|
||||
failregex,
|
||||
}:
|
||||
{ ... }:
|
||||
{
|
||||
services.fail2ban.jails.${name} = {
|
||||
enabled = true;
|
||||
settings = {
|
||||
backend = "systemd";
|
||||
port = "http,https";
|
||||
# defaults: maxretry=5, findtime=10m, bantime=10m
|
||||
};
|
||||
filter.Definition = {
|
||||
inherit failregex;
|
||||
ignoreregex = "";
|
||||
journalmatch = "_SYSTEMD_UNIT=${unitName}";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Creates a hardened Grafana annotation daemon service.
|
||||
# Provides DynamicUser, sandboxing, state directory, and GRAFANA_URL/STATE_FILE automatically.
|
||||
mkGrafanaAnnotationService =
|
||||
{
|
||||
name,
|
||||
description,
|
||||
script,
|
||||
after ? [ ],
|
||||
environment ? { },
|
||||
loadCredential ? null,
|
||||
}:
|
||||
{
|
||||
systemd.services."${name}-annotations" = {
|
||||
inherit description;
|
||||
after = [
|
||||
"network.target"
|
||||
"grafana.service"
|
||||
]
|
||||
++ after;
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.python3}/bin/python3 ${script}";
|
||||
Restart = "always";
|
||||
RestartSec = "10s";
|
||||
DynamicUser = true;
|
||||
StateDirectory = "${name}-annotations";
|
||||
NoNewPrivileges = true;
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = true;
|
||||
PrivateTmp = true;
|
||||
RestrictAddressFamilies = [
|
||||
"AF_INET"
|
||||
"AF_INET6"
|
||||
];
|
||||
MemoryDenyWriteExecute = true;
|
||||
}
|
||||
// lib.optionalAttrs (loadCredential != null) {
|
||||
LoadCredential = loadCredential;
|
||||
};
|
||||
environment = {
|
||||
GRAFANA_URL = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
|
||||
STATE_FILE = "/var/lib/${name}-annotations/state.json";
|
||||
}
|
||||
// environment;
|
||||
};
|
||||
};
|
||||
|
||||
# Shell command to extract an API key from an *arr config.xml file.
|
||||
# Returns a string suitable for $() command substitution in shell scripts.
|
||||
extractArrApiKey =
|
||||
configXmlPath: "${lib.getExe pkgs.gnugrep} -oP '(?<=<ApiKey>)[^<]+' ${configXmlPath}";
|
||||
}
|
||||
)
|
||||
@@ -1,78 +0,0 @@
|
||||
final: prev: {
|
||||
ensureZfsMounts = prev.writeShellApplication {
|
||||
name = "zfsEnsureMounted";
|
||||
runtimeInputs = with prev; [
|
||||
zfs
|
||||
gawk
|
||||
coreutils
|
||||
];
|
||||
|
||||
text = ''
|
||||
#!/bin/sh
|
||||
|
||||
if [[ "$#" -eq "0" ]]; then
|
||||
echo "no arguments passed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
MOUNTED=$(zfs list -o mountpoint,mounted -H | awk '$NF == "yes" {NF--; print}')
|
||||
|
||||
MISSING=""
|
||||
for target in "$@"; do
|
||||
if ! grep -Fxq "$target" <<< "$MOUNTED"; then
|
||||
MISSING="$MISSING $target"
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ -n "$MISSING" ]]; then
|
||||
echo "FAILURE, missing:$MISSING" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
reflac = prev.writeShellApplication {
|
||||
name = "reflac";
|
||||
runtimeInputs = with prev; [ flac ];
|
||||
excludeShellChecks = [ "2086" ];
|
||||
|
||||
text = builtins.readFile (
|
||||
prev.fetchurl {
|
||||
url = "https://raw.githubusercontent.com/chungy/reflac/refs/heads/master/reflac";
|
||||
sha256 = "61c6cc8be3d276c6714e68b55e5de0e6491f50bbf195233073dbce14a1e278a7";
|
||||
}
|
||||
);
|
||||
};
|
||||
|
||||
jellyfin-exporter = prev.buildGoModule rec {
|
||||
pname = "jellyfin-exporter";
|
||||
version = "unstable-2025-03-27";
|
||||
src = prev.fetchFromGitHub {
|
||||
owner = "rebelcore";
|
||||
repo = "jellyfin_exporter";
|
||||
rev = "8e3970cb1bdf3cb21fac099c13072bb7c1b20cf9";
|
||||
hash = "sha256-wDnhepYj1MyLRZlwKfmwf4xiEEL3mgQY6V+7TnBd0MY=";
|
||||
};
|
||||
vendorHash = "sha256-e08u10e/wNapNZSsD/fGVN9ybMHe3sW0yDIOqI8ZcYs=";
|
||||
# upstream tests require a running Jellyfin instance
|
||||
doCheck = false;
|
||||
meta.mainProgram = "jellyfin_exporter";
|
||||
};
|
||||
|
||||
igpu-exporter = prev.buildGoModule rec {
|
||||
pname = "igpu-exporter";
|
||||
version = "unstable-2025-03-27";
|
||||
src = prev.fetchFromGitHub {
|
||||
owner = "mike1808";
|
||||
repo = "igpu-exporter";
|
||||
rev = "db2dace1a895c2b950f6d3ba1a2e46729251d124";
|
||||
hash = "sha256-xWTiu26UzTZIK/6jeda+x6VePUgoWTS0AekejFdgFWs=";
|
||||
};
|
||||
vendorHash = "sha256-oeCSKwDKVwvYQ1fjXXTwQSXNl/upDE3WAAk680vqh3U=";
|
||||
subPackages = [ "cmd" ];
|
||||
postInstall = ''
|
||||
mv $out/bin/cmd $out/bin/igpu-exporter
|
||||
'';
|
||||
meta.mainProgram = "igpu-exporter";
|
||||
};
|
||||
}
|
||||
@@ -1,379 +0,0 @@
|
||||
From ab57092a60123e361cf0de1c1a314a9888c45219 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Gardling <titaniumtown@proton.me>
|
||||
Date: Sat, 21 Mar 2026 09:24:39 -0400
|
||||
Subject: [PATCH] temp
|
||||
|
||||
---
|
||||
.../services/networking/firefox-syncserver.md | 23 +++
|
||||
.../networking/firefox-syncserver.nix | 140 ++++++++++++++----
|
||||
pkgs/by-name/sy/syncstorage-rs/package.nix | 49 ++++--
|
||||
3 files changed, 174 insertions(+), 38 deletions(-)
|
||||
|
||||
diff --git a/nixos/modules/services/networking/firefox-syncserver.md b/nixos/modules/services/networking/firefox-syncserver.md
|
||||
index 991e97f799d6..3bc45cfa5640 100644
|
||||
--- a/nixos/modules/services/networking/firefox-syncserver.md
|
||||
+++ b/nixos/modules/services/networking/firefox-syncserver.md
|
||||
@@ -32,6 +32,29 @@ This configuration should never be used in production. It is not encrypted and
|
||||
stores its secrets in a world-readable location.
|
||||
:::
|
||||
|
||||
+## Database backends {#module-services-firefox-syncserver-database}
|
||||
+
|
||||
+The sync server supports MySQL/MariaDB (the default) and PostgreSQL as database
|
||||
+backends. Set `database.type` to choose the backend:
|
||||
+
|
||||
+```nix
|
||||
+{
|
||||
+ services.firefox-syncserver = {
|
||||
+ enable = true;
|
||||
+ database.type = "postgresql";
|
||||
+ secrets = "/run/secrets/firefox-syncserver";
|
||||
+ singleNode = {
|
||||
+ enable = true;
|
||||
+ hostname = "localhost";
|
||||
+ url = "http://localhost:5000";
|
||||
+ };
|
||||
+ };
|
||||
+}
|
||||
+```
|
||||
+
|
||||
+When `database.createLocally` is `true` (the default), the module will
|
||||
+automatically enable and configure the corresponding database service.
|
||||
+
|
||||
## More detailed setup {#module-services-firefox-syncserver-configuration}
|
||||
|
||||
The `firefox-syncserver` service provides a number of options to make setting up
|
||||
diff --git a/nixos/modules/services/networking/firefox-syncserver.nix b/nixos/modules/services/networking/firefox-syncserver.nix
|
||||
index 6a50e49fc096..70a56314e323 100644
|
||||
--- a/nixos/modules/services/networking/firefox-syncserver.nix
|
||||
+++ b/nixos/modules/services/networking/firefox-syncserver.nix
|
||||
@@ -13,7 +13,21 @@ let
|
||||
defaultUser = "firefox-syncserver";
|
||||
|
||||
dbIsLocal = cfg.database.host == "localhost";
|
||||
- dbURL = "mysql://${cfg.database.user}@${cfg.database.host}/${cfg.database.name}${lib.optionalString dbIsLocal "?socket=/run/mysqld/mysqld.sock"}";
|
||||
+ dbIsMySQL = cfg.database.type == "mysql";
|
||||
+ dbIsPostgreSQL = cfg.database.type == "postgresql";
|
||||
+
|
||||
+ dbURL =
|
||||
+ if dbIsMySQL then
|
||||
+ "mysql://${cfg.database.user}@${cfg.database.host}/${cfg.database.name}${lib.optionalString dbIsLocal "?socket=/run/mysqld/mysqld.sock"}"
|
||||
+ else
|
||||
+ "postgres://${cfg.database.user}@${cfg.database.host}/${cfg.database.name}${lib.optionalString dbIsLocal "?host=/run/postgresql"}";
|
||||
+
|
||||
+ # postgresql.target waits for postgresql-setup.service (which runs
|
||||
+ # ensureDatabases / ensureUsers) to complete, avoiding race conditions
|
||||
+ # where the syncserver starts before its database and role exist.
|
||||
+ dbService = if dbIsMySQL then "mysql.service" else "postgresql.target";
|
||||
+
|
||||
+ syncserver = cfg.package.override { dbBackend = cfg.database.type; };
|
||||
|
||||
format = pkgs.formats.toml { };
|
||||
settings = {
|
||||
@@ -22,7 +36,7 @@ let
|
||||
database_url = dbURL;
|
||||
};
|
||||
tokenserver = {
|
||||
- node_type = "mysql";
|
||||
+ node_type = if dbIsMySQL then "mysql" else "postgres";
|
||||
database_url = dbURL;
|
||||
fxa_email_domain = "api.accounts.firefox.com";
|
||||
fxa_oauth_server_url = "https://oauth.accounts.firefox.com/v1";
|
||||
@@ -41,7 +55,8 @@ let
|
||||
};
|
||||
};
|
||||
configFile = format.generate "syncstorage.toml" (lib.recursiveUpdate settings cfg.settings);
|
||||
- setupScript = pkgs.writeShellScript "firefox-syncserver-setup" ''
|
||||
+
|
||||
+ mysqlSetupScript = pkgs.writeShellScript "firefox-syncserver-setup" ''
|
||||
set -euo pipefail
|
||||
shopt -s inherit_errexit
|
||||
|
||||
@@ -79,6 +94,47 @@ let
|
||||
echo "Single-node setup failed"
|
||||
exit 1
|
||||
'';
|
||||
+
|
||||
+ postgresqlSetupScript = pkgs.writeShellScript "firefox-syncserver-setup" ''
|
||||
+ set -euo pipefail
|
||||
+ shopt -s inherit_errexit
|
||||
+
|
||||
+ schema_configured() {
|
||||
+ psql -d ${cfg.database.name} -tAc "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'services')" | grep -q t
|
||||
+ }
|
||||
+
|
||||
+ update_config() {
|
||||
+ psql -d ${cfg.database.name} <<'EOF'
|
||||
+ BEGIN;
|
||||
+
|
||||
+ INSERT INTO services (id, service, pattern)
|
||||
+ VALUES (1, 'sync-1.5', '{node}/1.5/{uid}')
|
||||
+ ON CONFLICT (id) DO UPDATE SET service = 'sync-1.5', pattern = '{node}/1.5/{uid}';
|
||||
+ INSERT INTO nodes (id, service, node, available, current_load,
|
||||
+ capacity, downed, backoff)
|
||||
+ VALUES (1, 1, '${cfg.singleNode.url}', ${toString cfg.singleNode.capacity},
|
||||
+ 0, ${toString cfg.singleNode.capacity}, 0, 0)
|
||||
+ ON CONFLICT (id) DO UPDATE SET node = '${cfg.singleNode.url}', capacity = ${toString cfg.singleNode.capacity};
|
||||
+
|
||||
+ COMMIT;
|
||||
+ EOF
|
||||
+ }
|
||||
+
|
||||
+
|
||||
+ for (( try = 0; try < 60; try++ )); do
|
||||
+ if ! schema_configured; then
|
||||
+ sleep 2
|
||||
+ else
|
||||
+ update_config
|
||||
+ exit 0
|
||||
+ fi
|
||||
+ done
|
||||
+
|
||||
+ echo "Single-node setup failed"
|
||||
+ exit 1
|
||||
+ '';
|
||||
+
|
||||
+ setupScript = if dbIsMySQL then mysqlSetupScript else postgresqlSetupScript;
|
||||
in
|
||||
|
||||
{
|
||||
@@ -88,25 +144,26 @@ in
|
||||
the Firefox Sync storage service.
|
||||
|
||||
Out of the box this will not be very useful unless you also configure at least
|
||||
- one service and one nodes by inserting them into the mysql database manually, e.g.
|
||||
- by running
|
||||
-
|
||||
- ```
|
||||
- INSERT INTO `services` (`id`, `service`, `pattern`) VALUES ('1', 'sync-1.5', '{node}/1.5/{uid}');
|
||||
- INSERT INTO `nodes` (`id`, `service`, `node`, `available`, `current_load`,
|
||||
- `capacity`, `downed`, `backoff`)
|
||||
- VALUES ('1', '1', 'https://mydomain.tld', '1', '0', '10', '0', '0');
|
||||
- ```
|
||||
+ one service and one nodes by inserting them into the database manually, e.g.
|
||||
+ by running the equivalent SQL for your database backend.
|
||||
|
||||
{option}`${opt.singleNode.enable}` does this automatically when enabled
|
||||
'';
|
||||
|
||||
package = lib.mkPackageOption pkgs "syncstorage-rs" { };
|
||||
|
||||
+ database.type = lib.mkOption {
|
||||
+ type = lib.types.enum [
|
||||
+ "mysql"
|
||||
+ "postgresql"
|
||||
+ ];
|
||||
+ default = "mysql";
|
||||
+ description = ''
|
||||
+ Which database backend to use for storage.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
database.name = lib.mkOption {
|
||||
- # the mysql module does not allow `-quoting without resorting to shell
|
||||
- # escaping, so we restrict db names for forward compaitiblity should this
|
||||
- # behavior ever change.
|
||||
type = lib.types.strMatching "[a-z_][a-z0-9_]*";
|
||||
default = defaultDatabase;
|
||||
description = ''
|
||||
@@ -117,9 +174,15 @@ in
|
||||
|
||||
database.user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
- default = defaultUser;
|
||||
+ default = if dbIsPostgreSQL then defaultDatabase else defaultUser;
|
||||
+ defaultText = lib.literalExpression ''
|
||||
+ if database.type == "postgresql" then "${defaultDatabase}" else "${defaultUser}"
|
||||
+ '';
|
||||
description = ''
|
||||
- Username for database connections.
|
||||
+ Username for database connections. When using PostgreSQL with
|
||||
+ `createLocally`, this defaults to the database name so that
|
||||
+ `ensureDBOwnership` works (it requires user and database names
|
||||
+ to match).
|
||||
'';
|
||||
};
|
||||
|
||||
@@ -137,7 +200,8 @@ in
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to create database and user on the local machine if they do not exist.
|
||||
- This includes enabling unix domain socket authentication for the configured user.
|
||||
+ This includes enabling the configured database service and setting up
|
||||
+ authentication for the configured user.
|
||||
'';
|
||||
};
|
||||
|
||||
@@ -237,7 +301,7 @@ in
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
- services.mysql = lib.mkIf cfg.database.createLocally {
|
||||
+ services.mysql = lib.mkIf (cfg.database.createLocally && dbIsMySQL) {
|
||||
enable = true;
|
||||
ensureDatabases = [ cfg.database.name ];
|
||||
ensureUsers = [
|
||||
@@ -250,16 +314,27 @@ in
|
||||
];
|
||||
};
|
||||
|
||||
+ services.postgresql = lib.mkIf (cfg.database.createLocally && dbIsPostgreSQL) {
|
||||
+ enable = true;
|
||||
+ ensureDatabases = [ cfg.database.name ];
|
||||
+ ensureUsers = [
|
||||
+ {
|
||||
+ name = cfg.database.user;
|
||||
+ ensureDBOwnership = true;
|
||||
+ }
|
||||
+ ];
|
||||
+ };
|
||||
+
|
||||
systemd.services.firefox-syncserver = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
- requires = lib.mkIf dbIsLocal [ "mysql.service" ];
|
||||
- after = lib.mkIf dbIsLocal [ "mysql.service" ];
|
||||
+ requires = lib.mkIf dbIsLocal [ dbService ];
|
||||
+ after = lib.mkIf dbIsLocal [ dbService ];
|
||||
restartTriggers = lib.optional cfg.singleNode.enable setupScript;
|
||||
environment.RUST_LOG = cfg.logLevel;
|
||||
serviceConfig = {
|
||||
- User = defaultUser;
|
||||
- Group = defaultUser;
|
||||
- ExecStart = "${cfg.package}/bin/syncserver --config ${configFile}";
|
||||
+ User = cfg.database.user;
|
||||
+ Group = cfg.database.user;
|
||||
+ ExecStart = "${syncserver}/bin/syncserver --config ${configFile}";
|
||||
EnvironmentFile = lib.mkIf (cfg.secrets != null) "${cfg.secrets}";
|
||||
|
||||
# hardening
|
||||
@@ -303,10 +378,19 @@ in
|
||||
|
||||
systemd.services.firefox-syncserver-setup = lib.mkIf cfg.singleNode.enable {
|
||||
wantedBy = [ "firefox-syncserver.service" ];
|
||||
- requires = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal "mysql.service";
|
||||
- after = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal "mysql.service";
|
||||
- path = [ config.services.mysql.package ];
|
||||
- serviceConfig.ExecStart = [ "${setupScript}" ];
|
||||
+ requires = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal dbService;
|
||||
+ after = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal dbService;
|
||||
+ path =
|
||||
+ if dbIsMySQL then [ config.services.mysql.package ] else [ config.services.postgresql.package ];
|
||||
+ serviceConfig = {
|
||||
+ ExecStart = [ "${setupScript}" ];
|
||||
+ }
|
||||
+ // lib.optionalAttrs dbIsPostgreSQL {
|
||||
+ # PostgreSQL peer authentication requires the system user to match the
|
||||
+ # database user. Run as the superuser so we can access all databases.
|
||||
+ User = "postgres";
|
||||
+ Group = "postgres";
|
||||
+ };
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts = lib.mkIf cfg.singleNode.enableNginx {
|
||||
diff --git a/pkgs/by-name/sy/syncstorage-rs/package.nix b/pkgs/by-name/sy/syncstorage-rs/package.nix
|
||||
index 39b2b53ab03c..944ed72525af 100644
|
||||
--- a/pkgs/by-name/sy/syncstorage-rs/package.nix
|
||||
+++ b/pkgs/by-name/sy/syncstorage-rs/package.nix
|
||||
@@ -1,14 +1,18 @@
|
||||
{
|
||||
fetchFromGitHub,
|
||||
+ fetchurl,
|
||||
rustPlatform,
|
||||
pkg-config,
|
||||
python3,
|
||||
cmake,
|
||||
libmysqlclient,
|
||||
+ libpq,
|
||||
+ openssl,
|
||||
makeBinaryWrapper,
|
||||
lib,
|
||||
nix-update-script,
|
||||
nixosTests,
|
||||
+ dbBackend ? "mysql",
|
||||
}:
|
||||
|
||||
let
|
||||
@@ -19,17 +23,23 @@ let
|
||||
p.tokenlib
|
||||
p.cryptography
|
||||
]);
|
||||
+ # utoipa-swagger-ui downloads Swagger UI assets at build time.
|
||||
+ # Prefetch the archive for sandboxed builds.
|
||||
+ swaggerUi = fetchurl {
|
||||
+ url = "https://github.com/swagger-api/swagger-ui/archive/refs/tags/v5.17.14.zip";
|
||||
+ hash = "sha256-SBJE0IEgl7Efuu73n3HZQrFxYX+cn5UU5jrL4T5xzNw=";
|
||||
+ };
|
||||
in
|
||||
|
||||
-rustPlatform.buildRustPackage rec {
|
||||
+rustPlatform.buildRustPackage (finalAttrs: {
|
||||
pname = "syncstorage-rs";
|
||||
- version = "0.21.1-unstable-2026-01-26";
|
||||
+ version = "0.21.1-unstable-2026-02-24";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "mozilla-services";
|
||||
repo = "syncstorage-rs";
|
||||
- rev = "11659d98f9c69948a0aab353437ce2036c388711";
|
||||
- hash = "sha256-G37QvxTNh/C3gmKG0UYHI6QBr0F+KLGRNI/Sx33uOsc=";
|
||||
+ rev = "50a739b58dc9ec81995f86e71d992aa14ccc450e";
|
||||
+ hash = "sha256-idq0RGdwoV6GVuq36IVVVCFbyMTe8i/EpVWE59D/dhM=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
@@ -39,16 +49,35 @@ rustPlatform.buildRustPackage rec {
|
||||
python3
|
||||
];
|
||||
|
||||
- buildInputs = [
|
||||
- libmysqlclient
|
||||
- ];
|
||||
+ buildInputs =
|
||||
+ lib.optional (dbBackend == "mysql") libmysqlclient
|
||||
+ ++ lib.optionals (dbBackend == "postgresql") [
|
||||
+ libpq
|
||||
+ openssl
|
||||
+ ];
|
||||
+
|
||||
+ buildNoDefaultFeatures = true;
|
||||
+ # The syncserver "postgres" feature only enables syncstorage-db/postgres.
|
||||
+ # tokenserver-db/postgres must be enabled separately so the tokenserver
|
||||
+ # can also connect to PostgreSQL (it dispatches on the URL scheme at runtime).
|
||||
+ buildFeatures =
|
||||
+ let
|
||||
+ cargoFeature = if dbBackend == "postgresql" then "postgres" else dbBackend;
|
||||
+ in
|
||||
+ [
|
||||
+ cargoFeature
|
||||
+ "tokenserver-db/${cargoFeature}"
|
||||
+ "py_verifier"
|
||||
+ ];
|
||||
+
|
||||
+ SWAGGER_UI_DOWNLOAD_URL = "file://${swaggerUi}";
|
||||
|
||||
preFixup = ''
|
||||
wrapProgram $out/bin/syncserver \
|
||||
--prefix PATH : ${lib.makeBinPath [ pyFxADeps ]}
|
||||
'';
|
||||
|
||||
- cargoHash = "sha256-9Dcf5mDyK/XjsKTlCPXTHoBkIq+FFPDg1zfK24Y9nHQ=";
|
||||
+ cargoHash = "sha256-80EztkSX+SnmqsRWIXbChUB8AeV1Tp9WXoWNbDY8rUE=";
|
||||
|
||||
# almost all tests need a DB to test against
|
||||
doCheck = false;
|
||||
@@ -60,10 +89,10 @@ rustPlatform.buildRustPackage rec {
|
||||
meta = {
|
||||
description = "Mozilla Sync Storage built with Rust";
|
||||
homepage = "https://github.com/mozilla-services/syncstorage-rs";
|
||||
- changelog = "https://github.com/mozilla-services/syncstorage-rs/releases/tag/${version}";
|
||||
+ changelog = "https://github.com/mozilla-services/syncstorage-rs/releases/tag/${finalAttrs.version}";
|
||||
license = lib.licenses.mpl20;
|
||||
maintainers = [ ];
|
||||
platforms = lib.platforms.linux;
|
||||
mainProgram = "syncserver";
|
||||
};
|
||||
-}
|
||||
+})
|
||||
--
|
||||
2.53.0
|
||||
|
||||
@@ -1,443 +0,0 @@
|
||||
From f0582558f0a8b0ef543b3251c4a07afab89fde63 Mon Sep 17 00:00:00 2001
|
||||
From: Simon Gardling <titaniumtown@proton.me>
|
||||
Date: Fri, 17 Apr 2026 19:37:11 -0400
|
||||
Subject: [PATCH] nixos/jellyfin: add declarative network.xml options
|
||||
|
||||
Adds services.jellyfin.network.* (baseUrl, ports, IPv4/6, LAN subnets,
|
||||
known proxies, remote IP filter, etc.) and services.jellyfin.forceNetworkConfig,
|
||||
mirroring the existing hardwareAcceleration / forceEncodingConfig pattern.
|
||||
|
||||
Motivation: running Jellyfin behind a reverse proxy requires configuring
|
||||
KnownProxies (so the real client IP is extracted from X-Forwarded-For)
|
||||
and LocalNetworkSubnets (so LAN clients are correctly classified and not
|
||||
subject to RemoteClientBitrateLimit). These settings previously had no
|
||||
declarative option -- they could only be set via the web dashboard or
|
||||
by hand-editing network.xml, with no guarantee they would survive a
|
||||
reinstall or be consistent across deployments.
|
||||
|
||||
Implementation:
|
||||
- Adds a networkXmlText template alongside the existing encodingXmlText.
|
||||
- Factors the force-vs-soft install logic out of preStart into a
|
||||
small 'manage_config_xml' shell helper; encoding.xml and network.xml
|
||||
now share the same install/backup semantics.
|
||||
- Extends the VM test with a machineWithNetworkConfig node and a
|
||||
subtest that verifies the declared values land in network.xml,
|
||||
Jellyfin parses them at startup, and the backup-on-overwrite path
|
||||
works (same shape as the existing 'Force encoding config' subtest).
|
||||
---
|
||||
nixos/modules/services/misc/jellyfin.nix | 303 ++++++++++++++++++++---
|
||||
nixos/tests/jellyfin.nix | 50 ++++
|
||||
2 files changed, 317 insertions(+), 36 deletions(-)
|
||||
|
||||
diff --git a/nixos/modules/services/misc/jellyfin.nix b/nixos/modules/services/misc/jellyfin.nix
|
||||
index 5c08fc478e45..387da907c652 100644
|
||||
--- a/nixos/modules/services/misc/jellyfin.nix
|
||||
+++ b/nixos/modules/services/misc/jellyfin.nix
|
||||
@@ -26,8 +26,10 @@ let
|
||||
bool
|
||||
enum
|
||||
ints
|
||||
+ listOf
|
||||
nullOr
|
||||
path
|
||||
+ port
|
||||
str
|
||||
submodule
|
||||
;
|
||||
@@ -68,6 +70,41 @@ let
|
||||
</EncodingOptions>
|
||||
'';
|
||||
encodingXmlFile = pkgs.writeText "encoding.xml" encodingXmlText;
|
||||
+ stringListToXml =
|
||||
+ tag: items:
|
||||
+ if items == [ ] then
|
||||
+ "<${tag} />"
|
||||
+ else
|
||||
+ "<${tag}>\n ${
|
||||
+ concatMapStringsSep "\n " (item: "<string>${escapeXML item}</string>") items
|
||||
+ }\n </${tag}>";
|
||||
+ networkXmlText = ''
|
||||
+ <?xml version="1.0" encoding="utf-8"?>
|
||||
+ <NetworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
|
||||
+ <BaseUrl>${escapeXML cfg.network.baseUrl}</BaseUrl>
|
||||
+ <EnableHttps>${boolToString cfg.network.enableHttps}</EnableHttps>
|
||||
+ <RequireHttps>${boolToString cfg.network.requireHttps}</RequireHttps>
|
||||
+ <InternalHttpPort>${toString cfg.network.internalHttpPort}</InternalHttpPort>
|
||||
+ <InternalHttpsPort>${toString cfg.network.internalHttpsPort}</InternalHttpsPort>
|
||||
+ <PublicHttpPort>${toString cfg.network.publicHttpPort}</PublicHttpPort>
|
||||
+ <PublicHttpsPort>${toString cfg.network.publicHttpsPort}</PublicHttpsPort>
|
||||
+ <AutoDiscovery>${boolToString cfg.network.autoDiscovery}</AutoDiscovery>
|
||||
+ <EnableUPnP>${boolToString cfg.network.enableUPnP}</EnableUPnP>
|
||||
+ <EnableIPv4>${boolToString cfg.network.enableIPv4}</EnableIPv4>
|
||||
+ <EnableIPv6>${boolToString cfg.network.enableIPv6}</EnableIPv6>
|
||||
+ <EnableRemoteAccess>${boolToString cfg.network.enableRemoteAccess}</EnableRemoteAccess>
|
||||
+ ${stringListToXml "LocalNetworkSubnets" cfg.network.localNetworkSubnets}
|
||||
+ ${stringListToXml "LocalNetworkAddresses" cfg.network.localNetworkAddresses}
|
||||
+ ${stringListToXml "KnownProxies" cfg.network.knownProxies}
|
||||
+ <IgnoreVirtualInterfaces>${boolToString cfg.network.ignoreVirtualInterfaces}</IgnoreVirtualInterfaces>
|
||||
+ ${stringListToXml "VirtualInterfaceNames" cfg.network.virtualInterfaceNames}
|
||||
+ <EnablePublishedServerUriByRequest>${boolToString cfg.network.enablePublishedServerUriByRequest}</EnablePublishedServerUriByRequest>
|
||||
+ ${stringListToXml "PublishedServerUriBySubnet" cfg.network.publishedServerUriBySubnet}
|
||||
+ ${stringListToXml "RemoteIPFilter" cfg.network.remoteIPFilter}
|
||||
+ <IsRemoteIPFilterBlacklist>${boolToString cfg.network.isRemoteIPFilterBlacklist}</IsRemoteIPFilterBlacklist>
|
||||
+ </NetworkConfiguration>
|
||||
+ '';
|
||||
+ networkXmlFile = pkgs.writeText "network.xml" networkXmlText;
|
||||
codecListToType =
|
||||
desc: list:
|
||||
submodule {
|
||||
@@ -205,6 +242,196 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
+ network = {
|
||||
+ baseUrl = mkOption {
|
||||
+ type = str;
|
||||
+ default = "";
|
||||
+ example = "/jellyfin";
|
||||
+ description = ''
|
||||
+ Prefix added to Jellyfin's internal URLs when it sits behind a reverse proxy at a sub-path.
|
||||
+ Leave empty when Jellyfin is served at the root of its host.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ enableHttps = mkOption {
|
||||
+ type = bool;
|
||||
+ default = false;
|
||||
+ description = ''
|
||||
+ Serve HTTPS directly from Jellyfin. Usually unnecessary when terminating TLS in a reverse proxy.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ requireHttps = mkOption {
|
||||
+ type = bool;
|
||||
+ default = false;
|
||||
+ description = ''
|
||||
+ Redirect plaintext HTTP requests to HTTPS. Only meaningful when {option}`enableHttps` is true.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ internalHttpPort = mkOption {
|
||||
+ type = port;
|
||||
+ default = 8096;
|
||||
+ description = "TCP port Jellyfin binds for HTTP.";
|
||||
+ };
|
||||
+
|
||||
+ internalHttpsPort = mkOption {
|
||||
+ type = port;
|
||||
+ default = 8920;
|
||||
+ description = "TCP port Jellyfin binds for HTTPS. Only used when {option}`enableHttps` is true.";
|
||||
+ };
|
||||
+
|
||||
+ publicHttpPort = mkOption {
|
||||
+ type = port;
|
||||
+ default = 8096;
|
||||
+ description = "HTTP port Jellyfin advertises in server discovery responses and published URIs.";
|
||||
+ };
|
||||
+
|
||||
+ publicHttpsPort = mkOption {
|
||||
+ type = port;
|
||||
+ default = 8920;
|
||||
+ description = "HTTPS port Jellyfin advertises in server discovery responses and published URIs.";
|
||||
+ };
|
||||
+
|
||||
+ autoDiscovery = mkOption {
|
||||
+ type = bool;
|
||||
+ default = true;
|
||||
+ description = "Respond to LAN client auto-discovery broadcasts (UDP 7359).";
|
||||
+ };
|
||||
+
|
||||
+ enableUPnP = mkOption {
|
||||
+ type = bool;
|
||||
+ default = false;
|
||||
+ description = "Attempt to open the public ports on the router via UPnP.";
|
||||
+ };
|
||||
+
|
||||
+ enableIPv4 = mkOption {
|
||||
+ type = bool;
|
||||
+ default = true;
|
||||
+ description = "Listen on IPv4.";
|
||||
+ };
|
||||
+
|
||||
+ enableIPv6 = mkOption {
|
||||
+ type = bool;
|
||||
+ default = true;
|
||||
+ description = "Listen on IPv6.";
|
||||
+ };
|
||||
+
|
||||
+ enableRemoteAccess = mkOption {
|
||||
+ type = bool;
|
||||
+ default = true;
|
||||
+ description = ''
|
||||
+ Allow connections from clients outside the subnets listed in {option}`localNetworkSubnets`.
|
||||
+ When false, Jellyfin rejects non-local requests regardless of reverse proxy configuration.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ localNetworkSubnets = mkOption {
|
||||
+ type = listOf str;
|
||||
+ default = [ ];
|
||||
+ example = [
|
||||
+ "192.168.1.0/24"
|
||||
+ "10.0.0.0/8"
|
||||
+ ];
|
||||
+ description = ''
|
||||
+ CIDR ranges (or bare IPs) that Jellyfin classifies as the local network.
|
||||
+ Clients originating from these ranges -- as seen after {option}`knownProxies` X-Forwarded-For
|
||||
+ unwrapping -- are not subject to {option}`services.jellyfin` remote-client bitrate limits.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ localNetworkAddresses = mkOption {
|
||||
+ type = listOf str;
|
||||
+ default = [ ];
|
||||
+ example = [ "192.168.1.50" ];
|
||||
+ description = ''
|
||||
+ Specific interface addresses Jellyfin binds to. Leave empty to bind all interfaces.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ knownProxies = mkOption {
|
||||
+ type = listOf str;
|
||||
+ default = [ ];
|
||||
+ example = [ "127.0.0.1" ];
|
||||
+ description = ''
|
||||
+ Addresses of reverse proxies trusted to forward the real client IP via `X-Forwarded-For`.
|
||||
+ Without this, Jellyfin sees the proxy's address for every request and cannot apply
|
||||
+ {option}`localNetworkSubnets` classification to the true client.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ ignoreVirtualInterfaces = mkOption {
|
||||
+ type = bool;
|
||||
+ default = true;
|
||||
+ description = "Skip virtual network interfaces (matching {option}`virtualInterfaceNames`) during auto-bind.";
|
||||
+ };
|
||||
+
|
||||
+ virtualInterfaceNames = mkOption {
|
||||
+ type = listOf str;
|
||||
+ default = [ "veth" ];
|
||||
+ description = "Interface name prefixes treated as virtual when {option}`ignoreVirtualInterfaces` is true.";
|
||||
+ };
|
||||
+
|
||||
+ enablePublishedServerUriByRequest = mkOption {
|
||||
+ type = bool;
|
||||
+ default = false;
|
||||
+ description = ''
|
||||
+ Derive the server's public URI from the incoming request's Host header instead of any
|
||||
+ configured {option}`publishedServerUriBySubnet` entry.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ publishedServerUriBySubnet = mkOption {
|
||||
+ type = listOf str;
|
||||
+ default = [ ];
|
||||
+ example = [ "192.168.1.0/24=http://jellyfin.lan:8096" ];
|
||||
+ description = ''
|
||||
+ Per-subnet overrides for the URI Jellyfin advertises to clients, in `subnet=uri` form.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ remoteIPFilter = mkOption {
|
||||
+ type = listOf str;
|
||||
+ default = [ ];
|
||||
+ example = [ "203.0.113.0/24" ];
|
||||
+ description = ''
|
||||
+ IPs or CIDRs used as the allow- or denylist for remote access.
|
||||
+ Behaviour is controlled by {option}`isRemoteIPFilterBlacklist`.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
+ isRemoteIPFilterBlacklist = mkOption {
|
||||
+ type = bool;
|
||||
+ default = false;
|
||||
+ description = ''
|
||||
+ When true, {option}`remoteIPFilter` is a denylist; when false, it is an allowlist
|
||||
+ (and an empty list allows all remote addresses).
|
||||
+ '';
|
||||
+ };
|
||||
+ };
|
||||
+
|
||||
+ forceNetworkConfig = mkOption {
|
||||
+ type = bool;
|
||||
+ default = false;
|
||||
+ description = ''
|
||||
+ Whether to overwrite Jellyfin's `network.xml` configuration file on each service start.
|
||||
+
|
||||
+ When enabled, the network configuration specified in {option}`services.jellyfin.network`
|
||||
+ is applied on every service restart. A backup of the existing `network.xml` will be
|
||||
+ created at `network.xml.backup-$timestamp`.
|
||||
+
|
||||
+ ::: {.warning}
|
||||
+ Enabling this option means that any changes made to networking settings through
|
||||
+ Jellyfin's web dashboard will be lost on the next service restart. The NixOS configuration
|
||||
+ becomes the single source of truth for network settings.
|
||||
+ :::
|
||||
+
|
||||
+ When disabled (the default), the network configuration is only written if no `network.xml`
|
||||
+ exists yet. This allows settings to be changed through Jellyfin's web dashboard and persist
|
||||
+ across restarts, but means the NixOS configuration options will be ignored after the initial setup.
|
||||
+ '';
|
||||
+ };
|
||||
+
|
||||
transcoding = {
|
||||
maxConcurrentStreams = mkOption {
|
||||
type = nullOr ints.positive;
|
||||
@@ -384,46 +611,50 @@ in
|
||||
wants = [ "network-online.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
- preStart = mkIf cfg.hardwareAcceleration.enable (
|
||||
- ''
|
||||
- configDir=${escapeShellArg cfg.configDir}
|
||||
- encodingXml="$configDir/encoding.xml"
|
||||
- ''
|
||||
- + (
|
||||
- if cfg.forceEncodingConfig then
|
||||
- ''
|
||||
- if [[ -e $encodingXml ]]; then
|
||||
+ preStart =
|
||||
+ let
|
||||
+ # manage_config_xml <source> <destination> <force> <description>
|
||||
+ #
|
||||
+ # Installs a NixOS-declared XML config at <destination>, preserving
|
||||
+ # any existing file as a timestamped backup when <force> is true.
|
||||
+ # With <force>=false, leaves existing files untouched and warns if
|
||||
+ # the on-disk content differs from the declared content.
|
||||
+ helper = ''
|
||||
+ manage_config_xml() {
|
||||
+ local src="$1" dest="$2" force="$3" desc="$4"
|
||||
+ if [[ -e "$dest" ]]; then
|
||||
# this intentionally removes trailing newlines
|
||||
- currentText="$(<"$encodingXml")"
|
||||
- configuredText="$(<${encodingXmlFile})"
|
||||
- if [[ $currentText == "$configuredText" ]]; then
|
||||
- # don't need to do anything
|
||||
- exit 0
|
||||
- else
|
||||
- encodingXmlBackup="$configDir/encoding.xml.backup-$(date -u +"%FT%H_%M_%SZ")"
|
||||
- mv --update=none-fail -T "$encodingXml" "$encodingXmlBackup"
|
||||
+ local currentText configuredText
|
||||
+ currentText="$(<"$dest")"
|
||||
+ configuredText="$(<"$src")"
|
||||
+ if [[ "$currentText" == "$configuredText" ]]; then
|
||||
+ return 0
|
||||
fi
|
||||
- fi
|
||||
- cp --update=none-fail -T ${encodingXmlFile} "$encodingXml"
|
||||
- chmod u+w "$encodingXml"
|
||||
- ''
|
||||
- else
|
||||
- ''
|
||||
- if [[ -e $encodingXml ]]; then
|
||||
- # this intentionally removes trailing newlines
|
||||
- currentText="$(<"$encodingXml")"
|
||||
- configuredText="$(<${encodingXmlFile})"
|
||||
- if [[ $currentText != "$configuredText" ]]; then
|
||||
- echo "WARN: $encodingXml already exists and is different from the configured settings. transcoding options NOT applied." >&2
|
||||
- echo "WARN: Set config.services.jellyfin.forceEncodingConfig = true to override." >&2
|
||||
+ if [[ "$force" == true ]]; then
|
||||
+ local backup
|
||||
+ backup="$dest.backup-$(date -u +"%FT%H_%M_%SZ")"
|
||||
+ mv --update=none-fail -T "$dest" "$backup"
|
||||
+ else
|
||||
+ echo "WARN: $dest already exists and is different from the configured settings. $desc options NOT applied." >&2
|
||||
+ echo "WARN: Set the corresponding force*Config option to override." >&2
|
||||
+ return 0
|
||||
fi
|
||||
- else
|
||||
- cp --update=none-fail -T ${encodingXmlFile} "$encodingXml"
|
||||
- chmod u+w "$encodingXml"
|
||||
fi
|
||||
- ''
|
||||
- )
|
||||
- );
|
||||
+ cp --update=none-fail -T "$src" "$dest"
|
||||
+ chmod u+w "$dest"
|
||||
+ }
|
||||
+ configDir=${escapeShellArg cfg.configDir}
|
||||
+ '';
|
||||
+ in
|
||||
+ (
|
||||
+ helper
|
||||
+ + optionalString cfg.hardwareAcceleration.enable ''
|
||||
+ manage_config_xml ${encodingXmlFile} "$configDir/encoding.xml" ${boolToString cfg.forceEncodingConfig} transcoding
|
||||
+ ''
|
||||
+ + ''
|
||||
+ manage_config_xml ${networkXmlFile} "$configDir/network.xml" ${boolToString cfg.forceNetworkConfig} network
|
||||
+ ''
|
||||
+ );
|
||||
|
||||
# This is mostly follows: https://github.com/jellyfin/jellyfin/blob/master/fedora/jellyfin.service
|
||||
# Upstream also disable some hardenings when running in LXC, we do the same with the isContainer option
|
||||
diff --git a/nixos/tests/jellyfin.nix b/nixos/tests/jellyfin.nix
|
||||
index 4896c13d4eca..0c9191960f78 100644
|
||||
--- a/nixos/tests/jellyfin.nix
|
||||
+++ b/nixos/tests/jellyfin.nix
|
||||
@@ -63,6 +63,26 @@
|
||||
environment.systemPackages = with pkgs; [ ffmpeg ];
|
||||
virtualisation.diskSize = 3 * 1024;
|
||||
};
|
||||
+
|
||||
+ machineWithNetworkConfig = {
|
||||
+ services.jellyfin = {
|
||||
+ enable = true;
|
||||
+ forceNetworkConfig = true;
|
||||
+ network = {
|
||||
+ localNetworkSubnets = [
|
||||
+ "192.168.1.0/24"
|
||||
+ "10.0.0.0/8"
|
||||
+ ];
|
||||
+ knownProxies = [ "127.0.0.1" ];
|
||||
+ enableUPnP = false;
|
||||
+ enableIPv6 = false;
|
||||
+ remoteIPFilter = [ "203.0.113.5" ];
|
||||
+ isRemoteIPFilterBlacklist = true;
|
||||
+ };
|
||||
+ };
|
||||
+ environment.systemPackages = with pkgs; [ ffmpeg ];
|
||||
+ virtualisation.diskSize = 3 * 1024;
|
||||
+ };
|
||||
};
|
||||
|
||||
# Documentation of the Jellyfin API: https://api.jellyfin.org/
|
||||
@@ -122,6 +142,36 @@
|
||||
# Verify the new encoding.xml does not have the marker (was overwritten)
|
||||
machineWithForceConfig.fail("grep -q 'MARKER' /var/lib/jellyfin/config/encoding.xml")
|
||||
|
||||
+ # Test forceNetworkConfig and network.xml generation
|
||||
+ with subtest("Force network config writes declared values and backs up on overwrite"):
|
||||
+ wait_for_jellyfin(machineWithNetworkConfig)
|
||||
+
|
||||
+ # Verify network.xml exists and contains the declared values
|
||||
+ machineWithNetworkConfig.succeed("test -f /var/lib/jellyfin/config/network.xml")
|
||||
+ machineWithNetworkConfig.succeed("grep -F '<string>192.168.1.0/24</string>' /var/lib/jellyfin/config/network.xml")
|
||||
+ machineWithNetworkConfig.succeed("grep -F '<string>10.0.0.0/8</string>' /var/lib/jellyfin/config/network.xml")
|
||||
+ machineWithNetworkConfig.succeed("grep -F '<string>127.0.0.1</string>' /var/lib/jellyfin/config/network.xml")
|
||||
+ machineWithNetworkConfig.succeed("grep -F '<string>203.0.113.5</string>' /var/lib/jellyfin/config/network.xml")
|
||||
+ machineWithNetworkConfig.succeed("grep -F '<IsRemoteIPFilterBlacklist>true</IsRemoteIPFilterBlacklist>' /var/lib/jellyfin/config/network.xml")
|
||||
+ machineWithNetworkConfig.succeed("grep -F '<EnableIPv6>false</EnableIPv6>' /var/lib/jellyfin/config/network.xml")
|
||||
+ machineWithNetworkConfig.succeed("grep -F '<EnableUPnP>false</EnableUPnP>' /var/lib/jellyfin/config/network.xml")
|
||||
+
|
||||
+ # Stop service before modifying config
|
||||
+ machineWithNetworkConfig.succeed("systemctl stop jellyfin.service")
|
||||
+
|
||||
+ # Plant a marker so we can prove the backup-and-overwrite path runs
|
||||
+ machineWithNetworkConfig.succeed("echo '<!-- NETMARKER -->' > /var/lib/jellyfin/config/network.xml")
|
||||
+
|
||||
+ # Restart the service to trigger the backup
|
||||
+ machineWithNetworkConfig.succeed("systemctl restart jellyfin.service")
|
||||
+ wait_for_jellyfin(machineWithNetworkConfig)
|
||||
+
|
||||
+ # Verify the marked content was preserved as a timestamped backup
|
||||
+ machineWithNetworkConfig.succeed("grep -q 'NETMARKER' /var/lib/jellyfin/config/network.xml.backup-*")
|
||||
+
|
||||
+ # Verify the new network.xml does not have the marker (was overwritten)
|
||||
+ machineWithNetworkConfig.fail("grep -q 'NETMARKER' /var/lib/jellyfin/config/network.xml")
|
||||
+
|
||||
auth_header = 'MediaBrowser Client="NixOS Integration Tests", DeviceId="1337", Device="Apple II", Version="20.09"'
|
||||
|
||||
|
||||
--
|
||||
2.53.0
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
DISK="${1:-}"
|
||||
FLAKE_DIR="$(dirname "$(realpath "$0")")"
|
||||
|
||||
if [[ -z "$DISK" ]]; then
|
||||
echo "Usage: $0 <disk_device>"
|
||||
echo "Example: $0 /dev/nvme0n1"
|
||||
echo " $0 /dev/sda"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -b "$DISK" ]]; then
|
||||
echo "Error: $DISK is not a block device"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Installing NixOS to $DISK using flake at $FLAKE_DIR"
|
||||
|
||||
# Create temporary directories
|
||||
mkdir -p /tmp/secureboot
|
||||
mkdir -p /tmp/persistent
|
||||
|
||||
# Function to cleanup on exit
|
||||
cleanup() {
|
||||
echo "Cleaning up..."
|
||||
rm -rf /tmp/secureboot 2>/dev/null || true
|
||||
rm -rf /tmp/persistent 2>/dev/null || true
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Decrypt secureboot keys using the key in the repo
|
||||
echo "Decrypting secureboot keys..."
|
||||
if [[ ! -f "$FLAKE_DIR/usb-secrets/usb-secrets-key" ]]; then
|
||||
echo "Error: usb-secrets-key not found at $FLAKE_DIR/usb-secrets/usb-secrets-key"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
nix-shell -p age --run "age -d -i '$FLAKE_DIR/usb-secrets/usb-secrets-key' '$FLAKE_DIR/secrets/secureboot.tar.age'" | \
|
||||
tar -x -C /tmp/secureboot
|
||||
|
||||
echo "Secureboot keys extracted"
|
||||
|
||||
# Extract persistent partition secrets
|
||||
echo "Extracting persistent partition contents..."
|
||||
if [[ -f "$FLAKE_DIR/secrets/persistent.tar" ]]; then
|
||||
tar -xzf "$FLAKE_DIR/secrets/persistent.tar" -C /tmp/persistent
|
||||
echo "Persistent partition contents extracted"
|
||||
else
|
||||
echo "Warning: persistent.tar not found, skipping persistent secrets"
|
||||
fi
|
||||
|
||||
# Check if disko-install is available
|
||||
if ! command -v disko-install >/dev/null 2>&1; then
|
||||
echo "Running disko-install via nix..."
|
||||
DISKO_INSTALL="nix run github:nix-community/disko#disko-install --"
|
||||
else
|
||||
DISKO_INSTALL="disko-install"
|
||||
fi
|
||||
|
||||
echo "Running disko-install to partition, format, and install NixOS..."
|
||||
|
||||
# Build the extra-files arguments
|
||||
EXTRA_FILES_ARGS=(
|
||||
--extra-files /tmp/secureboot /etc/secureboot
|
||||
--extra-files "$FLAKE_DIR/usb-secrets/usb-secrets-key" /mnt/usb-secrets/usb-secrets-key
|
||||
)
|
||||
|
||||
# Add each top-level item from persistent separately to avoid nesting
|
||||
# cp -ar creates /dst/src when copying directories, so we need to copy each item
|
||||
#
|
||||
# Also disko-install actually copies the files from extra-files, so we are good here
|
||||
if [[ -d /tmp/persistent ]] && [[ -n "$(ls -A /tmp/persistent 2>/dev/null)" ]]; then
|
||||
for item in /tmp/persistent/*; do
|
||||
if [[ -e "$item" ]]; then
|
||||
basename=$(basename "$item")
|
||||
EXTRA_FILES_ARGS+=(--extra-files "$item" "/persistent/$basename")
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Run disko-install with secureboot keys available
|
||||
sudo $DISKO_INSTALL \
|
||||
--mode format \
|
||||
--flake "$FLAKE_DIR#muffin" \
|
||||
--disk main "$DISK" \
|
||||
"${EXTRA_FILES_ARGS[@]}"
|
||||
@@ -1,115 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
service_configs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
|
||||
sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
|
||||
|
||||
radarrUrl = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
|
||||
sonarrUrl = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
|
||||
|
||||
curl = "${pkgs.curl}/bin/curl";
|
||||
jq = "${pkgs.jq}/bin/jq";
|
||||
|
||||
# Max items to search per cycle per category (missing + cutoff) per app
|
||||
maxPerCycle = 5;
|
||||
|
||||
searchScript = pkgs.writeShellScript "arr-search" ''
|
||||
set -euo pipefail
|
||||
|
||||
RADARR_KEY=$(${lib.extractArrApiKey radarrConfig})
|
||||
SONARR_KEY=$(${lib.extractArrApiKey sonarrConfig})
|
||||
|
||||
search_radarr() {
|
||||
local endpoint="$1"
|
||||
local label="$2"
|
||||
|
||||
local ids
|
||||
ids=$(${curl} -sf --max-time 30 \
|
||||
-H "X-Api-Key: $RADARR_KEY" \
|
||||
"${radarrUrl}/api/v3/wanted/$endpoint?page=1&pageSize=${builtins.toString maxPerCycle}&monitored=true&sortKey=title&sortDirection=ascending" \
|
||||
| ${jq} -r '.records[].id // empty')
|
||||
|
||||
if [ -z "$ids" ]; then
|
||||
echo "radarr: no $label items"
|
||||
return
|
||||
fi
|
||||
|
||||
local id_array
|
||||
id_array=$(echo "$ids" | ${jq} -Rs '[split("\n") | .[] | select(. != "") | tonumber]')
|
||||
echo "radarr: searching $label: $id_array"
|
||||
|
||||
${curl} -sf --max-time 60 \
|
||||
-H "X-Api-Key: $RADARR_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-X POST "${radarrUrl}/api/v3/command" \
|
||||
-d "{\"name\": \"MoviesSearch\", \"movieIds\": $id_array}" > /dev/null
|
||||
}
|
||||
|
||||
search_sonarr() {
|
||||
local endpoint="$1"
|
||||
local label="$2"
|
||||
|
||||
local series_ids
|
||||
series_ids=$(${curl} -sf --max-time 30 \
|
||||
-H "X-Api-Key: $SONARR_KEY" \
|
||||
"${sonarrUrl}/api/v3/wanted/$endpoint?page=1&pageSize=${builtins.toString maxPerCycle}&monitored=true&sortKey=title&sortDirection=ascending&includeSeries=true" \
|
||||
| ${jq} -r '[.records[].seriesId] | unique | .[] // empty')
|
||||
|
||||
if [ -z "$series_ids" ]; then
|
||||
echo "sonarr: no $label items"
|
||||
return
|
||||
fi
|
||||
|
||||
# search per series (sonarr searches by series, not episode)
|
||||
for sid in $series_ids; do
|
||||
echo "sonarr: searching $label series $sid"
|
||||
${curl} -sf --max-time 60 \
|
||||
-H "X-Api-Key: $SONARR_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-X POST "${sonarrUrl}/api/v3/command" \
|
||||
-d "{\"name\": \"SeriesSearch\", \"seriesId\": $sid}" > /dev/null
|
||||
done
|
||||
}
|
||||
|
||||
echo "=== arr-search $(date -Iseconds) ==="
|
||||
|
||||
search_radarr "missing" "missing"
|
||||
search_radarr "cutoff" "cutoff-unmet"
|
||||
|
||||
search_sonarr "missing" "missing"
|
||||
search_sonarr "cutoff" "cutoff-unmet"
|
||||
|
||||
echo "=== done ==="
|
||||
'';
|
||||
in
|
||||
{
|
||||
systemd.services.arr-search = {
|
||||
description = "Search for missing and cutoff-unmet media in Radarr/Sonarr";
|
||||
after = [
|
||||
"network-online.target"
|
||||
"radarr.service"
|
||||
"sonarr.service"
|
||||
];
|
||||
wants = [ "network-online.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = "+${searchScript}"; # + prefix: runs as root to read API keys from config.xml
|
||||
TimeoutSec = 300;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.timers.arr-search = {
|
||||
description = "Periodically search for missing and cutoff-unmet media";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "*-*-* 03:00:00"; # daily at 3 AM
|
||||
Persistent = true; # run on boot if missed
|
||||
RandomizedDelaySec = "30m";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
config,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "bazarr" service_configs.zpool_ssds [
|
||||
service_configs.bazarr.dataDir
|
||||
])
|
||||
(lib.serviceMountWithZpool "bazarr" service_configs.zpool_hdds [
|
||||
service_configs.torrents_path
|
||||
])
|
||||
(lib.serviceFilePerms "bazarr" [
|
||||
"Z ${service_configs.bazarr.dataDir} 0700 ${config.services.bazarr.user} ${config.services.bazarr.group}"
|
||||
])
|
||||
(lib.mkCaddyReverseProxy {
|
||||
subdomain = "bazarr";
|
||||
port = service_configs.ports.private.bazarr.port;
|
||||
auth = true;
|
||||
})
|
||||
];
|
||||
|
||||
services.bazarr = {
|
||||
enable = true;
|
||||
listenPort = service_configs.ports.private.bazarr.port;
|
||||
};
|
||||
|
||||
users.users.${config.services.bazarr.user}.extraGroups = [
|
||||
service_configs.media_group
|
||||
];
|
||||
}
|
||||
@@ -1,153 +0,0 @@
|
||||
{ config, service_configs, ... }:
|
||||
{
|
||||
services.arrInit = {
|
||||
prowlarr = {
|
||||
enable = true;
|
||||
serviceName = "prowlarr";
|
||||
port = service_configs.ports.private.prowlarr.port;
|
||||
dataDir = service_configs.prowlarr.dataDir;
|
||||
apiVersion = "v1";
|
||||
networkNamespacePath = "/run/netns/wg";
|
||||
networkNamespaceService = "wg";
|
||||
# Guarantee critical config.xml elements before startup. Prowlarr has a
|
||||
# history of losing <Port> from config.xml, causing the service to run
|
||||
# without binding any socket. See arr-init's configXml for details.
|
||||
configXml = {
|
||||
Port = service_configs.ports.private.prowlarr.port;
|
||||
BindAddress = "*";
|
||||
EnableSsl = false;
|
||||
};
|
||||
# Prowlarr runs in the wg netns; Sonarr/Radarr in the host netns.
|
||||
# From host netns, Prowlarr is reachable at the wg namespace address,
|
||||
# not at localhost (which resolves to the host's own netns).
|
||||
# Health checks can now run — the reverse-connect is reachable.
|
||||
healthChecks = true;
|
||||
syncedApps = [
|
||||
{
|
||||
name = "Sonarr";
|
||||
implementation = "Sonarr";
|
||||
configContract = "SonarrSettings";
|
||||
prowlarrUrl = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.prowlarr.port}";
|
||||
baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.private.sonarr.port}";
|
||||
apiKeyFrom = "${service_configs.sonarr.dataDir}/config.xml";
|
||||
serviceName = "sonarr";
|
||||
}
|
||||
{
|
||||
name = "Radarr";
|
||||
implementation = "Radarr";
|
||||
configContract = "RadarrSettings";
|
||||
prowlarrUrl = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.prowlarr.port}";
|
||||
baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.private.radarr.port}";
|
||||
apiKeyFrom = "${service_configs.radarr.dataDir}/config.xml";
|
||||
serviceName = "radarr";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
sonarr = {
|
||||
enable = true;
|
||||
serviceName = "sonarr";
|
||||
port = service_configs.ports.private.sonarr.port;
|
||||
dataDir = service_configs.sonarr.dataDir;
|
||||
healthChecks = true;
|
||||
configXml = {
|
||||
Port = service_configs.ports.private.sonarr.port;
|
||||
BindAddress = "*";
|
||||
EnableSsl = false;
|
||||
};
|
||||
rootFolders = [ service_configs.media.tvDir ];
|
||||
naming = {
|
||||
renameEpisodes = true;
|
||||
replaceIllegalCharacters = true;
|
||||
standardEpisodeFormat = "{Series Title} - S{season:00}E{episode:00} - {Episode Title} {Quality Full}";
|
||||
dailyEpisodeFormat = "{Series Title} - {Air-Date} - {Episode Title} {Quality Full}";
|
||||
animeEpisodeFormat = "{Series Title} - S{season:00}E{episode:00} - {Episode Title} {Quality Full}";
|
||||
seasonFolderFormat = "Season {season}";
|
||||
seriesFolderFormat = "{Series Title}";
|
||||
};
|
||||
downloadClients = [
|
||||
{
|
||||
name = "qBittorrent";
|
||||
implementation = "QBittorrent";
|
||||
configContract = "QBittorrentSettings";
|
||||
serviceName = "qbittorrent";
|
||||
fields = {
|
||||
host = config.vpnNamespaces.wg.namespaceAddress;
|
||||
port = service_configs.ports.private.torrent.port;
|
||||
useSsl = false;
|
||||
tvCategory = "tvshows";
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
radarr = {
|
||||
enable = true;
|
||||
serviceName = "radarr";
|
||||
port = service_configs.ports.private.radarr.port;
|
||||
dataDir = service_configs.radarr.dataDir;
|
||||
healthChecks = true;
|
||||
configXml = {
|
||||
Port = service_configs.ports.private.radarr.port;
|
||||
BindAddress = "*";
|
||||
EnableSsl = false;
|
||||
};
|
||||
rootFolders = [ service_configs.media.moviesDir ];
|
||||
naming = {
|
||||
renameMovies = true;
|
||||
replaceIllegalCharacters = true;
|
||||
standardMovieFormat = "{Movie Title} ({Release Year}) {Quality Full}";
|
||||
movieFolderFormat = "{Movie Title} ({Release Year})";
|
||||
};
|
||||
downloadClients = [
|
||||
{
|
||||
name = "qBittorrent";
|
||||
implementation = "QBittorrent";
|
||||
configContract = "QBittorrentSettings";
|
||||
serviceName = "qbittorrent";
|
||||
fields = {
|
||||
host = config.vpnNamespaces.wg.namespaceAddress;
|
||||
port = service_configs.ports.private.torrent.port;
|
||||
useSsl = false;
|
||||
movieCategory = "movies";
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
services.bazarrInit = {
|
||||
enable = true;
|
||||
dataDir = "/var/lib/bazarr";
|
||||
port = service_configs.ports.private.bazarr.port;
|
||||
sonarr = {
|
||||
enable = true;
|
||||
dataDir = service_configs.sonarr.dataDir;
|
||||
port = service_configs.ports.private.sonarr.port;
|
||||
serviceName = "sonarr";
|
||||
};
|
||||
radarr = {
|
||||
enable = true;
|
||||
dataDir = service_configs.radarr.dataDir;
|
||||
port = service_configs.ports.private.radarr.port;
|
||||
serviceName = "radarr";
|
||||
};
|
||||
};
|
||||
|
||||
services.jellyseerrInit = {
|
||||
enable = true;
|
||||
configDir = service_configs.jellyseerr.configDir;
|
||||
radarr = {
|
||||
profileName = "Remux + WEB 2160p";
|
||||
dataDir = service_configs.radarr.dataDir;
|
||||
port = service_configs.ports.private.radarr.port;
|
||||
serviceName = "radarr";
|
||||
};
|
||||
sonarr = {
|
||||
profileName = "WEB-2160p";
|
||||
dataDir = service_configs.sonarr.dataDir;
|
||||
port = service_configs.ports.private.sonarr.port;
|
||||
serviceName = "sonarr";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
config,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "jellyseerr" service_configs.zpool_ssds [
|
||||
service_configs.jellyseerr.configDir
|
||||
])
|
||||
(lib.serviceFilePerms "jellyseerr" [
|
||||
"Z ${service_configs.jellyseerr.configDir} 0700 jellyseerr jellyseerr"
|
||||
])
|
||||
(lib.mkCaddyReverseProxy {
|
||||
subdomain = "jellyseerr";
|
||||
port = service_configs.ports.private.jellyseerr.port;
|
||||
})
|
||||
];
|
||||
|
||||
services.jellyseerr = {
|
||||
enable = true;
|
||||
port = service_configs.ports.private.jellyseerr.port;
|
||||
configDir = service_configs.jellyseerr.configDir;
|
||||
};
|
||||
|
||||
systemd.services.jellyseerr.serviceConfig = {
|
||||
DynamicUser = lib.mkForce false;
|
||||
User = "jellyseerr";
|
||||
Group = "jellyseerr";
|
||||
ReadWritePaths = [ service_configs.jellyseerr.configDir ];
|
||||
};
|
||||
|
||||
users.users.jellyseerr = {
|
||||
isSystemUser = true;
|
||||
group = "jellyseerr";
|
||||
home = service_configs.jellyseerr.configDir;
|
||||
};
|
||||
|
||||
users.groups.jellyseerr = { };
|
||||
|
||||
}
|
||||
@@ -1,60 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
service_configs,
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "prowlarr" service_configs.zpool_ssds [
|
||||
service_configs.prowlarr.dataDir
|
||||
])
|
||||
(lib.vpnNamespaceOpenPort service_configs.ports.private.prowlarr.port "prowlarr")
|
||||
(lib.serviceFilePerms "prowlarr" [
|
||||
"Z ${service_configs.prowlarr.dataDir} 0700 prowlarr prowlarr"
|
||||
])
|
||||
(lib.mkCaddyReverseProxy {
|
||||
subdomain = "prowlarr";
|
||||
port = service_configs.ports.private.prowlarr.port;
|
||||
auth = true;
|
||||
vpn = true;
|
||||
})
|
||||
];
|
||||
|
||||
services.prowlarr = {
|
||||
enable = true;
|
||||
dataDir = service_configs.prowlarr.dataDir;
|
||||
settings.server.port = service_configs.ports.private.prowlarr.port;
|
||||
};
|
||||
|
||||
# The upstream prowlarr module uses DynamicUser=true which is incompatible
|
||||
# with ZFS-backed persistent storage — the dynamic user can't access files
|
||||
# on the ZFS mount. Override with a static user to match sonarr/radarr.
|
||||
users.users.prowlarr = {
|
||||
isSystemUser = true;
|
||||
group = "prowlarr";
|
||||
home = service_configs.prowlarr.dataDir;
|
||||
};
|
||||
users.groups.prowlarr = { };
|
||||
|
||||
# The upstream prowlarr module hardcodes root:root in tmpfiles for custom dataDirs
|
||||
# (systemd.tmpfiles.settings."10-prowlarr"), which gets applied by
|
||||
# systemd-tmpfiles-setup.service on every boot/deploy, resetting the directory
|
||||
# ownership and making Prowlarr unable to access its SQLite databases.
|
||||
# Override to use the correct user as we disable DynamicUser
|
||||
systemd.tmpfiles.settings."10-prowlarr".${service_configs.prowlarr.dataDir}.d = lib.mkForce {
|
||||
user = "prowlarr";
|
||||
group = "prowlarr";
|
||||
mode = "0700";
|
||||
};
|
||||
|
||||
systemd.services.prowlarr.serviceConfig = {
|
||||
DynamicUser = lib.mkForce false;
|
||||
User = "prowlarr";
|
||||
Group = "prowlarr";
|
||||
StateDirectory = lib.mkForce "";
|
||||
ExecStart = lib.mkForce "${lib.getExe pkgs.prowlarr} -nobrowser -data=${service_configs.prowlarr.dataDir}";
|
||||
};
|
||||
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
config,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "radarr" service_configs.zpool_ssds [
|
||||
service_configs.radarr.dataDir
|
||||
])
|
||||
(lib.serviceMountWithZpool "radarr" service_configs.zpool_hdds [
|
||||
service_configs.torrents_path
|
||||
])
|
||||
(lib.serviceFilePerms "radarr" [
|
||||
"Z ${service_configs.radarr.dataDir} 0700 ${config.services.radarr.user} ${config.services.radarr.group}"
|
||||
])
|
||||
(lib.mkCaddyReverseProxy {
|
||||
subdomain = "radarr";
|
||||
port = service_configs.ports.private.radarr.port;
|
||||
auth = true;
|
||||
})
|
||||
];
|
||||
|
||||
services.radarr = {
|
||||
enable = true;
|
||||
dataDir = service_configs.radarr.dataDir;
|
||||
settings.server.port = service_configs.ports.private.radarr.port;
|
||||
settings.update.mechanism = "external";
|
||||
};
|
||||
|
||||
users.users.${config.services.radarr.user}.extraGroups = [
|
||||
service_configs.media_group
|
||||
];
|
||||
}
|
||||
@@ -1,224 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
config,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
|
||||
sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
|
||||
configPath = "/var/lib/recyclarr/config.json";
|
||||
|
||||
# Runs as root (via + prefix) after the NixOS module writes config.json.
|
||||
# Extracts API keys from radarr/sonarr config.xml and injects them via jq.
|
||||
injectApiKeys = pkgs.writeShellScript "recyclarr-inject-api-keys" ''
|
||||
RADARR_KEY=$(${lib.extractArrApiKey radarrConfig})
|
||||
SONARR_KEY=$(${lib.extractArrApiKey sonarrConfig})
|
||||
${pkgs.jq}/bin/jq \
|
||||
--arg rk "$RADARR_KEY" \
|
||||
--arg sk "$SONARR_KEY" \
|
||||
'.radarr.movies.api_key = $rk | .sonarr.series.api_key = $sk' \
|
||||
${configPath} > ${configPath}.tmp
|
||||
mv ${configPath}.tmp ${configPath}
|
||||
chown recyclarr:recyclarr ${configPath}
|
||||
'';
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "recyclarr" service_configs.zpool_ssds [
|
||||
service_configs.recyclarr.dataDir
|
||||
])
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${service_configs.recyclarr.dataDir} 0755 recyclarr recyclarr -"
|
||||
];
|
||||
|
||||
services.recyclarr = {
|
||||
enable = true;
|
||||
command = "sync";
|
||||
schedule = "daily";
|
||||
user = "recyclarr";
|
||||
group = "recyclarr";
|
||||
|
||||
configuration = {
|
||||
radarr.movies = {
|
||||
base_url = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
|
||||
|
||||
# Recyclarr is the sole authority for custom formats and scores.
|
||||
# Overwrite any manually-created CFs and delete stale ones.
|
||||
replace_existing_custom_formats = true;
|
||||
delete_old_custom_formats = true;
|
||||
|
||||
include = [
|
||||
{ template = "radarr-quality-definition-movie"; }
|
||||
{ template = "radarr-quality-profile-remux-web-2160p"; }
|
||||
{ template = "radarr-custom-formats-remux-web-2160p"; }
|
||||
];
|
||||
|
||||
# Group WEB 2160p with 1080p in the same quality tier so custom
|
||||
# format scores -- not quality ranking -- decide the winner.
|
||||
# Native 4K with HDR/DV from good release groups scores high and
|
||||
# wins; AI upscales get -10000 from the Upscaled CF and are
|
||||
# blocked by min_format_score. Untagged upscales from unknown
|
||||
# groups (score ~0) lose to well-scored 1080p (Tier 01 = +1750).
|
||||
quality_profiles = [
|
||||
{
|
||||
name = "Remux + WEB 2160p";
|
||||
min_format_score = 0;
|
||||
reset_unmatched_scores.enabled = true;
|
||||
upgrade = {
|
||||
allowed = true;
|
||||
until_quality = "Remux-2160p";
|
||||
until_score = 10000;
|
||||
};
|
||||
qualities = [
|
||||
{ name = "Remux-2160p"; }
|
||||
{
|
||||
name = "WEB/Bluray";
|
||||
qualities = [
|
||||
"WEBDL-2160p"
|
||||
"WEBRip-2160p"
|
||||
"Remux-1080p"
|
||||
"Bluray-1080p"
|
||||
"WEBDL-1080p"
|
||||
"WEBRip-1080p"
|
||||
];
|
||||
}
|
||||
{ name = "HDTV-1080p"; }
|
||||
{ name = "Bluray-720p"; }
|
||||
{
|
||||
name = "WEB 720p";
|
||||
qualities = [
|
||||
"WEBDL-720p"
|
||||
"WEBRip-720p"
|
||||
];
|
||||
}
|
||||
{ name = "HDTV-720p"; }
|
||||
];
|
||||
}
|
||||
];
|
||||
|
||||
custom_formats = [
|
||||
# DV (w/o HDR fallback) - block releases with DV that lack HDR10 fallback
|
||||
{
|
||||
trash_ids = [ "923b6abef9b17f937fab56cfcf89e1f1" ];
|
||||
assign_scores_to = [
|
||||
{ name = "Remux + WEB 2160p"; }
|
||||
];
|
||||
}
|
||||
# Upscaled - block AI upscales and other upscaled-to-2160p releases
|
||||
{
|
||||
trash_ids = [ "bfd8eb01832d646a0a89c4deb46f8564" ];
|
||||
assign_scores_to = [
|
||||
{
|
||||
name = "Remux + WEB 2160p";
|
||||
score = -10000;
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
sonarr.series = {
|
||||
base_url = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
|
||||
|
||||
# Recyclarr is the sole authority for custom formats and scores.
|
||||
# Overwrite any manually-created CFs and delete stale ones.
|
||||
replace_existing_custom_formats = true;
|
||||
delete_old_custom_formats = true;
|
||||
|
||||
include = [
|
||||
{ template = "sonarr-quality-definition-series"; }
|
||||
{ template = "sonarr-v4-quality-profile-web-2160p"; }
|
||||
{ template = "sonarr-v4-custom-formats-web-2160p"; }
|
||||
];
|
||||
|
||||
# Group WEB 2160p with 1080p in the same quality tier so custom
|
||||
# format scores -- not quality ranking -- decide the winner.
|
||||
# Native 4K with HDR/DV from good release groups scores high and
|
||||
# wins; AI upscales get -10000 from the Upscaled CF and are
|
||||
# blocked by min_format_score. Untagged upscales from unknown
|
||||
# groups (score ~0) lose to well-scored 1080p (Tier 01 = +1750).
|
||||
quality_profiles = [
|
||||
{
|
||||
name = "WEB-2160p";
|
||||
min_format_score = 0;
|
||||
reset_unmatched_scores.enabled = true;
|
||||
upgrade = {
|
||||
allowed = true;
|
||||
until_quality = "WEB/Bluray";
|
||||
until_score = 10000;
|
||||
};
|
||||
qualities = [
|
||||
{
|
||||
name = "WEB/Bluray";
|
||||
qualities = [
|
||||
"WEBDL-2160p"
|
||||
"WEBRip-2160p"
|
||||
"Bluray-1080p Remux"
|
||||
"Bluray-1080p"
|
||||
"WEBDL-1080p"
|
||||
"WEBRip-1080p"
|
||||
];
|
||||
}
|
||||
{ name = "HDTV-1080p"; }
|
||||
{ name = "Bluray-720p"; }
|
||||
{
|
||||
name = "WEB 720p";
|
||||
qualities = [
|
||||
"WEBDL-720p"
|
||||
"WEBRip-720p"
|
||||
];
|
||||
}
|
||||
{ name = "HDTV-720p"; }
|
||||
];
|
||||
}
|
||||
];
|
||||
|
||||
custom_formats = [
|
||||
# DV (w/o HDR fallback) - block releases with DV that lack HDR10 fallback
|
||||
{
|
||||
trash_ids = [ "9b27ab6498ec0f31a3353992e19434ca" ];
|
||||
assign_scores_to = [
|
||||
{ name = "WEB-2160p"; }
|
||||
];
|
||||
}
|
||||
# Upscaled - block AI upscales and other upscaled-to-2160p releases
|
||||
{
|
||||
trash_ids = [ "23297a736ca77c0fc8e70f8edd7ee56c" ];
|
||||
assign_scores_to = [
|
||||
{
|
||||
name = "WEB-2160p";
|
||||
score = -10000;
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Trigger immediate sync on deploy when recyclarr config changes.
|
||||
# restartTriggers on the oneshot service are unreliable (systemd may
|
||||
# no-op a restart of an inactive oneshot). Instead, embed a config
|
||||
# hash in the timer unit -- NixOS restarts changed timers reliably,
|
||||
# and OnActiveSec fires the sync within seconds.
|
||||
systemd.timers.recyclarr = {
|
||||
timerConfig.OnActiveSec = "5s";
|
||||
unitConfig.X-ConfigHash = builtins.hashString "sha256" (
|
||||
builtins.toJSON config.services.recyclarr.configuration
|
||||
);
|
||||
};
|
||||
|
||||
systemd.services.recyclarr = {
|
||||
after = [
|
||||
"network-online.target"
|
||||
"radarr.service"
|
||||
"sonarr.service"
|
||||
];
|
||||
wants = [ "network-online.target" ];
|
||||
serviceConfig.ExecStartPre = [ "+${injectApiKeys}" ];
|
||||
};
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
config,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "sonarr" service_configs.zpool_ssds [
|
||||
service_configs.sonarr.dataDir
|
||||
])
|
||||
(lib.serviceMountWithZpool "sonarr" service_configs.zpool_hdds [
|
||||
service_configs.torrents_path
|
||||
])
|
||||
(lib.serviceFilePerms "sonarr" [
|
||||
"Z ${service_configs.sonarr.dataDir} 0700 ${config.services.sonarr.user} ${config.services.sonarr.group}"
|
||||
])
|
||||
(lib.mkCaddyReverseProxy {
|
||||
subdomain = "sonarr";
|
||||
port = service_configs.ports.private.sonarr.port;
|
||||
auth = true;
|
||||
})
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /torrents/media 2775 root ${service_configs.media_group} -"
|
||||
"d ${service_configs.media.tvDir} 2775 root ${service_configs.media_group} -"
|
||||
"d ${service_configs.media.moviesDir} 2775 root ${service_configs.media_group} -"
|
||||
];
|
||||
|
||||
services.sonarr = {
|
||||
enable = true;
|
||||
dataDir = service_configs.sonarr.dataDir;
|
||||
settings.server.port = service_configs.ports.private.sonarr.port;
|
||||
settings.update.mechanism = "external";
|
||||
};
|
||||
|
||||
users.users.${config.services.sonarr.user}.extraGroups = [
|
||||
service_configs.media_group
|
||||
];
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
config,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
systemd.services.torrent-audit = {
|
||||
description = "Audit qBittorrent for unmanaged and abandoned upgrade torrents";
|
||||
after = [
|
||||
"network-online.target"
|
||||
"sonarr.service"
|
||||
"radarr.service"
|
||||
"qbittorrent.service"
|
||||
];
|
||||
wants = [ "network-online.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = "+${
|
||||
pkgs.python3.withPackages (
|
||||
ps: with ps; [
|
||||
pyarr
|
||||
qbittorrent-api
|
||||
]
|
||||
)
|
||||
}/bin/python ${./torrent-audit.py}";
|
||||
TimeoutSec = 300;
|
||||
};
|
||||
|
||||
environment = {
|
||||
QBITTORRENT_URL = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.torrent.port}";
|
||||
RADARR_URL = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
|
||||
RADARR_CONFIG = "${service_configs.radarr.dataDir}/config.xml";
|
||||
SONARR_URL = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
|
||||
SONARR_CONFIG = "${service_configs.sonarr.dataDir}/config.xml";
|
||||
CATEGORIES = lib.concatStringsSep "," (builtins.attrNames service_configs.torrent.categories);
|
||||
TAG_TORRENTS = "true";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,382 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Audit qBittorrent torrents against Radarr/Sonarr.
|
||||
|
||||
Reports two categories:
|
||||
|
||||
UNMANAGED -- torrents in qBittorrent that no *arr service has ever touched.
|
||||
These were added manually or by some other tool.
|
||||
|
||||
ABANDONED -- torrents that *arr grabbed but later replaced with a better
|
||||
version. The old torrent is still seeding while the library
|
||||
points to the new one.
|
||||
|
||||
Abandoned detection uses API cross-referencing (not filesystem hardlinks) and
|
||||
verifies against the *arr's current file state:
|
||||
|
||||
1. HISTORY -- group imports by content unit (movieId / episodeId); the
|
||||
most recent import is the keeper, older ones are candidates.
|
||||
2. CURRENT -- verify against the *arr's active file mapping.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from xml.etree import ElementTree
|
||||
|
||||
import qbittorrentapi
|
||||
from pyarr import RadarrAPI, SonarrAPI
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s %(levelname)s %(message)s",
|
||||
stream=sys.stderr,
|
||||
)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_api_key(config_path: str) -> str:
|
||||
tree = ElementTree.parse(config_path)
|
||||
return tree.find(".//ApiKey").text
|
||||
|
||||
|
||||
def paginate(arr_client, endpoint: str, page_size: int = 1000):
|
||||
method = getattr(arr_client, f"get_{endpoint}")
|
||||
page = 1
|
||||
while True:
|
||||
data = method(page=page, page_size=page_size)
|
||||
yield from data["records"]
|
||||
if page * page_size >= data["totalRecords"]:
|
||||
break
|
||||
page += 1
|
||||
|
||||
|
||||
def get_qbit_torrents(qbit_client, category: str) -> dict[str, dict]:
|
||||
torrents = qbit_client.torrents_info(category=category)
|
||||
return {t["hash"].upper(): t for t in torrents}
|
||||
|
||||
|
||||
def gib(size_bytes: int) -> str:
|
||||
return f"{size_bytes / 1073741824:.1f}"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Collect all known hashes from *arr history + queue
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def collect_all_known_hashes(arr_client, page_size: int = 1000) -> set[str]:
|
||||
hashes = set()
|
||||
for endpoint in ("queue", "history"):
|
||||
for rec in paginate(arr_client, endpoint, page_size):
|
||||
did = (rec.get("downloadId") or "").upper()
|
||||
if did:
|
||||
hashes.add(did)
|
||||
return hashes
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Unmanaged: torrents with hashes not in any *arr history/queue
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def find_unmanaged(qbit_torrents: dict, known_hashes: set) -> list[dict]:
|
||||
results = []
|
||||
for uhash, torrent in qbit_torrents.items():
|
||||
if uhash not in known_hashes:
|
||||
results.append(torrent)
|
||||
return sorted(results, key=lambda t: t["added_on"])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Abandoned movies: group imports by movieId, older = abandoned
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def find_movie_abandoned(radarr, qbit_movies):
|
||||
log.info("Analysing Radarr import history ...")
|
||||
imports_by_movie = defaultdict(list)
|
||||
for rec in paginate(radarr, "history"):
|
||||
if rec.get("eventType") != "downloadFolderImported":
|
||||
continue
|
||||
did = (rec.get("downloadId") or "").upper()
|
||||
if not did:
|
||||
continue
|
||||
mid = rec.get("movieId")
|
||||
if not mid:
|
||||
continue
|
||||
imports_by_movie[mid].append(
|
||||
{"downloadId": did, "date": rec["date"]}
|
||||
)
|
||||
|
||||
# Identify keeper (latest) and abandoned (older) hashes per movie.
|
||||
abandoned_hashes: set[str] = set()
|
||||
keeper_hashes: set[str] = set()
|
||||
hash_to_movie: dict[str, int] = {}
|
||||
|
||||
for mid, events in imports_by_movie.items():
|
||||
ordered = sorted(events, key=lambda e: e["date"])
|
||||
keeper_hashes.add(ordered[-1]["downloadId"])
|
||||
for e in ordered[:-1]:
|
||||
abandoned_hashes.add(e["downloadId"])
|
||||
hash_to_movie[e["downloadId"]] = mid
|
||||
|
||||
# A hash that is a keeper for *any* movie must not be deleted.
|
||||
abandoned_hashes -= keeper_hashes
|
||||
|
||||
log.info("Fetching Radarr current movie state ...")
|
||||
radarr_movies = {m["id"]: m for m in radarr.get_movie()}
|
||||
|
||||
results = []
|
||||
for ahash in abandoned_hashes:
|
||||
torrent = qbit_movies.get(ahash)
|
||||
if torrent is None:
|
||||
continue
|
||||
|
||||
mid = hash_to_movie.get(ahash)
|
||||
movie = radarr_movies.get(mid) if mid else None
|
||||
mf = (movie or {}).get("movieFile") or {}
|
||||
|
||||
current_quality = (mf.get("quality") or {}).get("quality", {}).get("name", "?")
|
||||
current_size = mf.get("size", 0)
|
||||
|
||||
status = "SAFE"
|
||||
notes = []
|
||||
|
||||
if not movie or not movie.get("hasFile"):
|
||||
notes.append("movie removed or has no file in Radarr")
|
||||
status = "REVIEW"
|
||||
elif torrent["size"] > current_size * 1.05:
|
||||
notes.append(
|
||||
f"abandoned is larger than current "
|
||||
f"({gib(torrent['size'])} > {gib(current_size)} GiB)"
|
||||
)
|
||||
status = "REVIEW"
|
||||
|
||||
results.append(
|
||||
{
|
||||
"name": torrent["name"],
|
||||
"size": torrent["size"],
|
||||
"state": torrent["state"],
|
||||
"hash": torrent["hash"],
|
||||
"added_on": torrent["added_on"],
|
||||
"status": status,
|
||||
"notes": notes,
|
||||
"current_quality": current_quality,
|
||||
}
|
||||
)
|
||||
|
||||
return sorted(results, key=lambda r: r["added_on"])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Abandoned TV: group imports by episodeId, a hash is abandoned only when
|
||||
# it is NOT the latest import for ANY episode it covers.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def find_tv_abandoned(sonarr, qbit_tvshows):
|
||||
log.info("Analysing Sonarr import history ...")
|
||||
episode_imports = defaultdict(list)
|
||||
all_download_ids: set[str] = set()
|
||||
hash_to_series: dict[str, int] = {}
|
||||
|
||||
for rec in paginate(sonarr, "history"):
|
||||
if rec.get("eventType") != "downloadFolderImported":
|
||||
continue
|
||||
did = (rec.get("downloadId") or "").upper()
|
||||
eid = rec.get("episodeId")
|
||||
if not did or not eid:
|
||||
continue
|
||||
episode_imports[eid].append({"downloadId": did, "date": rec["date"]})
|
||||
all_download_ids.add(did)
|
||||
sid = rec.get("seriesId")
|
||||
if sid:
|
||||
hash_to_series[did] = sid
|
||||
|
||||
# A hash is "active" if it is the latest import for *any* episode.
|
||||
active_hashes: set[str] = set()
|
||||
for events in episode_imports.values():
|
||||
latest = max(events, key=lambda e: e["date"])
|
||||
active_hashes.add(latest["downloadId"])
|
||||
|
||||
abandoned_hashes = all_download_ids - active_hashes
|
||||
|
||||
log.info("Fetching Sonarr current series state ...")
|
||||
current_series = {s["id"] for s in sonarr.get_series()}
|
||||
|
||||
results = []
|
||||
for ahash in abandoned_hashes:
|
||||
torrent = qbit_tvshows.get(ahash)
|
||||
if torrent is None:
|
||||
continue
|
||||
|
||||
status = "SAFE"
|
||||
notes = []
|
||||
sid = hash_to_series.get(ahash)
|
||||
if sid and sid not in current_series:
|
||||
notes.append("series removed from Sonarr")
|
||||
status = "REVIEW"
|
||||
|
||||
results.append(
|
||||
{
|
||||
"name": torrent["name"],
|
||||
"size": torrent["size"],
|
||||
"state": torrent["state"],
|
||||
"hash": torrent["hash"],
|
||||
"added_on": torrent["added_on"],
|
||||
"status": status,
|
||||
"notes": notes,
|
||||
}
|
||||
)
|
||||
|
||||
return sorted(results, key=lambda r: r["added_on"])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Report
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def print_section(torrents, show_status=False):
|
||||
if not torrents:
|
||||
print(" (none)\n")
|
||||
return
|
||||
|
||||
total_size = sum(t["size"] for t in torrents)
|
||||
for t in torrents:
|
||||
prefix = f"[{t['status']:6s}] " if show_status else " "
|
||||
print(f" {prefix}{t['name']}")
|
||||
extra = f"{gib(t['size'])} GiB | {t['state']}"
|
||||
print(f" {' ' * len(prefix)}{extra}")
|
||||
for note in t.get("notes", []):
|
||||
print(f" {' ' * len(prefix)}** {note}")
|
||||
print()
|
||||
|
||||
if show_status:
|
||||
safe = [t for t in torrents if t["status"] == "SAFE"]
|
||||
review = [t for t in torrents if t["status"] == "REVIEW"]
|
||||
print(
|
||||
f" total={len(torrents)} ({gib(total_size)} GiB) | "
|
||||
f"safe={len(safe)} | review={len(review)}"
|
||||
)
|
||||
else:
|
||||
print(f" total={len(torrents)} ({gib(total_size)} GiB)")
|
||||
print()
|
||||
|
||||
|
||||
AUDIT_TAGS = {"audit:unmanaged", "audit:abandoned-safe", "audit:abandoned-review"}
|
||||
|
||||
|
||||
def tag_torrents(qbit_client, qbit_torrents, all_known, all_abandoned):
|
||||
log.info("Tagging torrents ...")
|
||||
|
||||
abandoned_by_hash = {t["hash"].upper(): t for t in all_abandoned}
|
||||
|
||||
all_hashes = []
|
||||
for torrents in qbit_torrents.values():
|
||||
all_hashes.extend(torrents.keys())
|
||||
|
||||
for h in all_hashes:
|
||||
current_tags = set()
|
||||
torrent_info = None
|
||||
for torrents in qbit_torrents.values():
|
||||
if h in torrents:
|
||||
torrent_info = torrents[h]
|
||||
break
|
||||
if not torrent_info:
|
||||
continue
|
||||
|
||||
existing_tags = {t.strip() for t in torrent_info.get("tags", "").split(",") if t.strip()}
|
||||
existing_audit_tags = existing_tags & AUDIT_TAGS
|
||||
|
||||
if h in abandoned_by_hash:
|
||||
status = abandoned_by_hash[h]["status"]
|
||||
desired = "audit:abandoned-safe" if status == "SAFE" else "audit:abandoned-review"
|
||||
elif h not in all_known:
|
||||
desired = "audit:unmanaged"
|
||||
else:
|
||||
desired = None
|
||||
|
||||
tags_to_remove = existing_audit_tags - ({desired} if desired else set())
|
||||
tags_to_add = ({desired} if desired else set()) - existing_audit_tags
|
||||
|
||||
low_hash = torrent_info["hash"]
|
||||
for tag in tags_to_remove:
|
||||
qbit_client.torrents_remove_tags(tags=tag, torrent_hashes=low_hash)
|
||||
for tag in tags_to_add:
|
||||
qbit_client.torrents_add_tags(tags=tag, torrent_hashes=low_hash)
|
||||
|
||||
log.info("Tagging complete")
|
||||
|
||||
|
||||
def main():
|
||||
qbit_url = os.environ["QBITTORRENT_URL"]
|
||||
radarr_url = os.environ["RADARR_URL"]
|
||||
radarr_config = os.environ["RADARR_CONFIG"]
|
||||
sonarr_url = os.environ["SONARR_URL"]
|
||||
sonarr_config = os.environ["SONARR_CONFIG"]
|
||||
categories = os.environ.get("CATEGORIES", "tvshows,movies,anime").split(",")
|
||||
|
||||
radarr_key = get_api_key(radarr_config)
|
||||
sonarr_key = get_api_key(sonarr_config)
|
||||
|
||||
radarr = RadarrAPI(radarr_url, radarr_key)
|
||||
sonarr = SonarrAPI(sonarr_url, sonarr_key)
|
||||
qbit = qbittorrentapi.Client(host=qbit_url)
|
||||
|
||||
log.info("Getting qBittorrent state ...")
|
||||
qbit_torrents = {cat: get_qbit_torrents(qbit, cat) for cat in categories}
|
||||
for cat, torrents in qbit_torrents.items():
|
||||
log.info(" %s: %d torrents", cat, len(torrents))
|
||||
|
||||
log.info("Collecting known hashes from Sonarr ...")
|
||||
sonarr_hashes = collect_all_known_hashes(sonarr)
|
||||
log.info(" %d unique hashes", len(sonarr_hashes))
|
||||
|
||||
log.info("Collecting known hashes from Radarr ...")
|
||||
radarr_hashes = collect_all_known_hashes(radarr)
|
||||
log.info(" %d unique hashes", len(radarr_hashes))
|
||||
|
||||
all_known = sonarr_hashes | radarr_hashes
|
||||
|
||||
# -- Unmanaged --
|
||||
print("\n========== UNMANAGED TORRENTS ==========\n")
|
||||
for cat in categories:
|
||||
unmanaged = find_unmanaged(qbit_torrents[cat], all_known)
|
||||
print(f"--- {cat} ({len(unmanaged)} unmanaged / {len(qbit_torrents[cat])} total) ---\n")
|
||||
print_section(unmanaged)
|
||||
|
||||
# -- Abandoned --
|
||||
print("========== ABANDONED UPGRADE LEFTOVERS ==========\n")
|
||||
|
||||
movie_abandoned = find_movie_abandoned(
|
||||
radarr, qbit_torrents.get("movies", {})
|
||||
)
|
||||
print(f"--- movies ({len(movie_abandoned)} abandoned) ---\n")
|
||||
print_section(movie_abandoned, show_status=True)
|
||||
|
||||
tv_abandoned = find_tv_abandoned(
|
||||
sonarr, qbit_torrents.get("tvshows", {})
|
||||
)
|
||||
print(f"--- tvshows ({len(tv_abandoned)} abandoned) ---\n")
|
||||
print_section(tv_abandoned, show_status=True)
|
||||
|
||||
# -- Summary --
|
||||
all_abandoned = movie_abandoned + tv_abandoned
|
||||
safe = [t for t in all_abandoned if t["status"] == "SAFE"]
|
||||
|
||||
print("=" * 50)
|
||||
print(
|
||||
f"ABANDONED: {len(all_abandoned)} total ({len(safe)} safe to delete)"
|
||||
)
|
||||
print(f"SAFE TO RECLAIM: {gib(sum(t['size'] for t in safe))} GiB")
|
||||
|
||||
# -- Tagging --
|
||||
if os.environ.get("TAG_TORRENTS", "").lower() in ("1", "true", "yes"):
|
||||
tag_torrents(qbit, qbit_torrents, all_known, all_abandoned)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,113 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
service_configs,
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
prowlarrPort = toString service_configs.ports.private.prowlarr.port;
|
||||
sonarrPort = toString service_configs.ports.private.sonarr.port;
|
||||
radarrPort = toString service_configs.ports.private.radarr.port;
|
||||
bitmagnetPort = toString service_configs.ports.private.bitmagnet.port;
|
||||
bridgeAddr = config.vpnNamespaces.wg.bridgeAddress;
|
||||
|
||||
prowlarrConfigXml = "${service_configs.prowlarr.dataDir}/config.xml";
|
||||
sonarrConfigXml = "${service_configs.sonarr.dataDir}/config.xml";
|
||||
radarrConfigXml = "${service_configs.radarr.dataDir}/config.xml";
|
||||
|
||||
curl = "${pkgs.curl}/bin/curl";
|
||||
jq = "${pkgs.jq}/bin/jq";
|
||||
|
||||
# Clears the escalating failure backoff for the Bitmagnet indexer across
|
||||
# Prowlarr, Sonarr, and Radarr so searches resume immediately after
|
||||
# Bitmagnet restarts instead of waiting hours for disable timers to expire.
|
||||
recoveryScript = pkgs.writeShellScript "prowlarr-bitmagnet-recovery" ''
|
||||
set -euo pipefail
|
||||
|
||||
wait_for() {
|
||||
for _ in $(seq 1 "$2"); do
|
||||
${curl} -sf --max-time 5 "$1" > /dev/null && return 0
|
||||
sleep 5
|
||||
done
|
||||
echo "$1 not reachable, aborting" >&2; exit 1
|
||||
}
|
||||
|
||||
# Test a Bitmagnet-named indexer to clear its failure status.
|
||||
# A successful test triggers RecordSuccess() which resets the backoff.
|
||||
clear_status() {
|
||||
local key indexer
|
||||
key=$(${lib.extractArrApiKey ''"$3"''}) || return 0
|
||||
indexer=$(${curl} -sf --max-time 10 \
|
||||
-H "X-Api-Key: $key" "$2/api/$1/indexer" | \
|
||||
${jq} 'first(.[] | select(.name | test("Bitmagnet"; "i")))') || return 0
|
||||
[ -n "$indexer" ] && [ "$indexer" != "null" ] || return 0
|
||||
${curl} -sf --max-time 30 \
|
||||
-H "X-Api-Key: $key" -H "Content-Type: application/json" \
|
||||
-X POST "$2/api/$1/indexer/test" -d "$indexer" > /dev/null
|
||||
}
|
||||
|
||||
wait_for "http://localhost:${bitmagnetPort}" 12
|
||||
wait_for "http://localhost:${prowlarrPort}/ping" 6
|
||||
|
||||
# Prowlarr first — downstream apps route searches through it.
|
||||
clear_status v1 "http://localhost:${prowlarrPort}" "${prowlarrConfigXml}" || true
|
||||
clear_status v3 "http://${bridgeAddr}:${sonarrPort}" "${sonarrConfigXml}" || true
|
||||
clear_status v3 "http://${bridgeAddr}:${radarrPort}" "${radarrConfigXml}" || true
|
||||
'';
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
(lib.vpnNamespaceOpenPort service_configs.ports.private.bitmagnet.port "bitmagnet")
|
||||
(lib.mkCaddyReverseProxy {
|
||||
subdomain = "bitmagnet";
|
||||
port = service_configs.ports.private.bitmagnet.port;
|
||||
auth = true;
|
||||
vpn = true;
|
||||
})
|
||||
];
|
||||
|
||||
services.bitmagnet = {
|
||||
enable = true;
|
||||
|
||||
settings = {
|
||||
postgres = {
|
||||
host = service_configs.postgres.socket;
|
||||
};
|
||||
http_server = {
|
||||
# TODO! make issue about this being a string and not a `port` type
|
||||
port = ":" + (toString service_configs.ports.private.bitmagnet.port);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# The upstream default (Restart=on-failure) leaves Bitmagnet dead after
|
||||
# clean exits (e.g. systemd stop during deploy). Always restart it.
|
||||
systemd.services.bitmagnet.serviceConfig = {
|
||||
Restart = lib.mkForce "always";
|
||||
RestartSec = 10;
|
||||
};
|
||||
|
||||
# After Bitmagnet restarts, clear the escalating failure backoff across
|
||||
# Prowlarr, Sonarr, and Radarr so searches resume immediately instead of
|
||||
# waiting hours for the disable timers to expire.
|
||||
systemd.services.prowlarr-bitmagnet-recovery = {
|
||||
description = "Clear Prowlarr/Sonarr/Radarr failure status for Bitmagnet indexer";
|
||||
after = [
|
||||
"bitmagnet.service"
|
||||
"prowlarr.service"
|
||||
"sonarr.service"
|
||||
"radarr.service"
|
||||
];
|
||||
bindsTo = [ "bitmagnet.service" ];
|
||||
wantedBy = [ "bitmagnet.service" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStart = recoveryScript;
|
||||
# Same VPN namespace as Bitmagnet and Prowlarr.
|
||||
NetworkNamespacePath = "/run/netns/wg";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
service_configs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "vaultwarden" service_configs.zpool_ssds [
|
||||
service_configs.vaultwarden.path
|
||||
])
|
||||
(lib.serviceFilePerms "vaultwarden" [
|
||||
"Z ${service_configs.vaultwarden.path} 0700 vaultwarden vaultwarden"
|
||||
])
|
||||
(lib.mkFail2banJail {
|
||||
name = "vaultwarden";
|
||||
failregex = ''^.*Username or password is incorrect\. Try again\. IP: <HOST>\..*$'';
|
||||
})
|
||||
];
|
||||
|
||||
services.vaultwarden = {
|
||||
enable = true;
|
||||
dbBackend = "postgresql";
|
||||
configurePostgres = true;
|
||||
config = {
|
||||
# Refer to https://github.com/dani-garcia/vaultwarden/blob/main/.env.template
|
||||
DOMAIN = "https://bitwarden.${service_configs.https.domain}";
|
||||
SIGNUPS_ALLOWED = false;
|
||||
|
||||
ROCKET_ADDRESS = "127.0.0.1";
|
||||
ROCKET_PORT = service_configs.ports.private.vaultwarden.port;
|
||||
ROCKET_LOG = "critical";
|
||||
};
|
||||
};
|
||||
|
||||
services.caddy.virtualHosts."bitwarden.${service_configs.https.domain}".extraConfig = ''
|
||||
encode zstd gzip
|
||||
|
||||
reverse_proxy :${toString config.services.vaultwarden.config.ROCKET_PORT} {
|
||||
header_up X-Real-IP {remote_host}
|
||||
}
|
||||
'';
|
||||
|
||||
}
|
||||
@@ -1,162 +0,0 @@
|
||||
{
|
||||
config,
|
||||
service_configs,
|
||||
pkgs,
|
||||
lib,
|
||||
inputs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
theme = pkgs.fetchFromGitHub {
|
||||
owner = "kaiiiz";
|
||||
repo = "hugo-theme-monochrome";
|
||||
rev = "d17e05715e91f41a842f2656e6bdd70cba73de91";
|
||||
sha256 = "h9I2ukugVrldIC3SXefS0L3R245oa+TuRChOCJJgF24=";
|
||||
};
|
||||
|
||||
hugo-neko = pkgs.fetchFromGitHub {
|
||||
owner = "ystepanoff";
|
||||
repo = "hugo-neko";
|
||||
rev = "5a50034acbb1ae0cec19775af64e7167ca22725e";
|
||||
sha256 = "VLwr4zEeFQU/b+vj0XTLSuEiosuNFu2du4uud7m8bnw=";
|
||||
};
|
||||
|
||||
hugoWebsite = pkgs.stdenv.mkDerivation {
|
||||
pname = "hugo-site";
|
||||
version = "0.1";
|
||||
|
||||
src = inputs.website;
|
||||
|
||||
nativeBuildInputs = with pkgs; [
|
||||
hugo
|
||||
go
|
||||
git
|
||||
];
|
||||
|
||||
installPhase = ''
|
||||
rm -fr themes/theme modules/hugo-neko
|
||||
cp -r ${theme} themes/theme
|
||||
cp -r ${hugo-neko} modules/hugo-neko
|
||||
hugo --minify -d $out;
|
||||
'';
|
||||
};
|
||||
|
||||
newDomain = service_configs.https.domain;
|
||||
oldDomain = service_configs.https.old_domain;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "caddy" service_configs.zpool_ssds [
|
||||
config.services.caddy.dataDir
|
||||
])
|
||||
];
|
||||
|
||||
services.caddy = {
|
||||
enable = true;
|
||||
email = "titaniumtown@proton.me";
|
||||
|
||||
# Build with Njalla DNS provider for DNS-01 ACME challenges (wildcard certs)
|
||||
package = pkgs.caddy.withPlugins {
|
||||
plugins = [ "github.com/caddy-dns/njalla@v0.0.0-20250823094507-f709141f1fe6" ];
|
||||
hash = "sha256-rrOAR6noTDpV/I/hZXxhz0OXVJKu0mFQRq87RUrpmzw=";
|
||||
};
|
||||
|
||||
globalConfig = ''
|
||||
# Wildcard cert for *.${newDomain} via DNS-01 challenge
|
||||
acme_dns njalla {
|
||||
api_token {env.NJALLA_API_TOKEN}
|
||||
}
|
||||
|
||||
# On-demand TLS for old domain redirects
|
||||
on_demand_tls {
|
||||
ask http://localhost:9123/check
|
||||
}
|
||||
'';
|
||||
|
||||
# Internal endpoint to validate on-demand TLS requests
|
||||
# Only allows certs for *.${oldDomain}
|
||||
extraConfig = ''
|
||||
http://localhost:9123 {
|
||||
@allowed expression {query.domain}.endsWith(".${oldDomain}") || {query.domain} == "${oldDomain}" || {query.domain} == "www.${oldDomain}"
|
||||
respond @allowed 200
|
||||
respond 403
|
||||
}
|
||||
'';
|
||||
|
||||
virtualHosts = {
|
||||
${newDomain} = {
|
||||
extraConfig = ''
|
||||
root * ${hugoWebsite}
|
||||
file_server browse
|
||||
'';
|
||||
|
||||
serverAliases = [ "www.${newDomain}" ];
|
||||
};
|
||||
|
||||
# Redirect old domain (bare + www) to new domain
|
||||
${oldDomain} = {
|
||||
extraConfig = ''
|
||||
redir https://${newDomain}{uri} permanent
|
||||
'';
|
||||
serverAliases = [ "www.${oldDomain}" ];
|
||||
};
|
||||
|
||||
# Wildcard redirect for all old domain subdomains
|
||||
# Uses on-demand TLS - certs issued automatically on first request
|
||||
"*.${oldDomain}" = {
|
||||
extraConfig = ''
|
||||
tls {
|
||||
on_demand
|
||||
}
|
||||
# {labels.2} extracts subdomain from *.gardling.com
|
||||
redir https://{labels.2}.${newDomain}{uri} permanent
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Inject Njalla API token for DNS-01 challenge
|
||||
systemd.services.caddy.serviceConfig.EnvironmentFile = config.age.secrets.njalla-api-token-env.path;
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${config.services.caddy.dataDir} 700 ${config.services.caddy.user} ${config.services.caddy.group}"
|
||||
];
|
||||
|
||||
systemd.packages = with pkgs; [ nssTools ];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
service_configs.ports.public.https.port
|
||||
|
||||
# http (but really acmeCA challenges)
|
||||
service_configs.ports.public.http.port
|
||||
];
|
||||
|
||||
networking.firewall.allowedUDPPorts = [
|
||||
service_configs.ports.public.https.port
|
||||
];
|
||||
|
||||
# Protect Caddy basic auth endpoints from brute force attacks
|
||||
services.fail2ban.jails.caddy-auth = {
|
||||
enabled = true;
|
||||
settings = {
|
||||
backend = "auto";
|
||||
port = "http,https";
|
||||
logpath = "/var/log/caddy/access-*.log";
|
||||
# defaults: maxretry=5, findtime=10m, bantime=10m
|
||||
|
||||
# Ignore local network IPs - NAT hairpinning causes all LAN traffic to
|
||||
# appear from the router IP (192.168.1.1). Banning it blocks all internal access.
|
||||
ignoreip = "127.0.0.1/8 ::1 192.168.1.0/24";
|
||||
};
|
||||
filter.Definition = {
|
||||
# Only match 401s where an Authorization header was actually sent.
|
||||
# Without this, the normal HTTP Basic Auth challenge-response flow
|
||||
# (browser probes without credentials, gets 401, then resends with
|
||||
# credentials) counts every page visit as a "failure."
|
||||
failregex = ''^.*"remote_ip":"<HOST>".*"Authorization":\["REDACTED"\].*"status":401.*$'';
|
||||
ignoreregex = "";
|
||||
datepattern = ''"ts":{Epoch}\.'';
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
service_configs,
|
||||
inputs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
theme = pkgs.fetchFromGitHub {
|
||||
owner = "kaiiiz";
|
||||
repo = "hugo-theme-monochrome";
|
||||
rev = "d17e05715e91f41a842f2656e6bdd70cba73de91";
|
||||
sha256 = "h9I2ukugVrldIC3SXefS0L3R245oa+TuRChOCJJgF24=";
|
||||
};
|
||||
|
||||
hugoWebsite = pkgs.stdenv.mkDerivation {
|
||||
pname = "hugo-site";
|
||||
version = "0.1";
|
||||
|
||||
src = inputs.senior_project-website;
|
||||
|
||||
nativeBuildInputs = with pkgs; [
|
||||
hugo
|
||||
];
|
||||
|
||||
installPhase = ''
|
||||
rm -fr themes/theme
|
||||
cp -rv ${theme} themes/theme
|
||||
hugo --minify -d $out;
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
services.caddy.virtualHosts."senior-project.${service_configs.https.domain}".extraConfig = ''
|
||||
root * ${hugoWebsite}
|
||||
file_server browse
|
||||
'';
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
{
|
||||
imports = [
|
||||
./caddy.nix
|
||||
# KEEP UNTIL 2028
|
||||
./caddy_senior_project.nix
|
||||
];
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
services.ddns-updater = {
|
||||
enable = true;
|
||||
environment = {
|
||||
PERIOD = "5m";
|
||||
# ddns-updater reads config from this path at runtime
|
||||
CONFIG_FILEPATH = config.age.secrets.ddns-updater-config.path;
|
||||
};
|
||||
};
|
||||
|
||||
users.users.ddns-updater = {
|
||||
isSystemUser = true;
|
||||
group = "ddns-updater";
|
||||
};
|
||||
users.groups.ddns-updater = { };
|
||||
|
||||
systemd.services.ddns-updater.serviceConfig = {
|
||||
DynamicUser = lib.mkForce false;
|
||||
User = "ddns-updater";
|
||||
Group = "ddns-updater";
|
||||
};
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
service_configs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.mkCaddyReverseProxy {
|
||||
domain = service_configs.firefox_syncserver.domain;
|
||||
port = service_configs.ports.private.firefox_syncserver.port;
|
||||
})
|
||||
];
|
||||
|
||||
services.firefox-syncserver = {
|
||||
enable = true;
|
||||
database = {
|
||||
type = "postgresql";
|
||||
createLocally = false;
|
||||
user = "firefox_syncserver";
|
||||
};
|
||||
secrets = config.age.secrets.firefox-syncserver-env.path;
|
||||
settings.port = service_configs.ports.private.firefox_syncserver.port;
|
||||
singleNode = {
|
||||
enable = true;
|
||||
hostname = service_configs.firefox_syncserver.domain;
|
||||
url = "https://${service_configs.firefox_syncserver.domain}";
|
||||
capacity = 1;
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
ensureDatabases = [ "firefox_syncserver" ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = "firefox_syncserver";
|
||||
ensureDBOwnership = true;
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
service_configs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
services.gitea-actions-runner.instances.muffin = {
|
||||
enable = true;
|
||||
name = "muffin";
|
||||
url = config.services.gitea.settings.server.ROOT_URL;
|
||||
tokenFile = config.age.secrets.gitea-runner-token.path;
|
||||
labels = [ "nix:host" ];
|
||||
hostPackages = with pkgs; [
|
||||
bash
|
||||
coreutils
|
||||
curl
|
||||
gawk
|
||||
git
|
||||
git-crypt
|
||||
gnugrep
|
||||
gnused
|
||||
jq
|
||||
nix
|
||||
nodejs
|
||||
openssh
|
||||
];
|
||||
settings = {
|
||||
runner = {
|
||||
capacity = 1;
|
||||
timeout = "6h";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Override DynamicUser to use our static gitea-runner user, and ensure
|
||||
# the runner doesn't start before the co-located gitea instance is ready
|
||||
# (upstream can't assume locality, so this dependency is ours to add).
|
||||
systemd.services."gitea-runner-muffin" = {
|
||||
requires = [ "gitea.service" ];
|
||||
after = [ "gitea.service" ];
|
||||
serviceConfig = {
|
||||
DynamicUser = lib.mkForce false;
|
||||
User = "gitea-runner";
|
||||
Group = "gitea-runner";
|
||||
};
|
||||
environment.GIT_SSH_COMMAND = "ssh -i /run/agenix/ci-deploy-key -o StrictHostKeyChecking=yes -o UserKnownHostsFile=/etc/ci-known-hosts";
|
||||
};
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
lib,
|
||||
config,
|
||||
service_configs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "gitea" service_configs.zpool_ssds [ config.services.gitea.stateDir ])
|
||||
(lib.serviceFilePerms "gitea" [
|
||||
"Z ${config.services.gitea.stateDir} 0700 ${config.services.gitea.user} ${config.services.gitea.group}"
|
||||
])
|
||||
(lib.mkCaddyReverseProxy {
|
||||
domain = service_configs.gitea.domain;
|
||||
port = service_configs.ports.private.gitea.port;
|
||||
})
|
||||
(lib.mkFail2banJail {
|
||||
name = "gitea";
|
||||
failregex = "^.*Failed authentication attempt for .* from <HOST>:.*$";
|
||||
})
|
||||
];
|
||||
|
||||
services.gitea = {
|
||||
enable = true;
|
||||
appName = "Simon Gardling's Gitea instance";
|
||||
stateDir = service_configs.gitea.dir;
|
||||
database = {
|
||||
type = "postgres";
|
||||
socket = service_configs.postgres.socket;
|
||||
};
|
||||
|
||||
settings = {
|
||||
server = {
|
||||
SSH_USER = "gitea";
|
||||
DOMAIN = service_configs.gitea.domain;
|
||||
ROOT_URL = "https://" + config.services.gitea.settings.server.DOMAIN;
|
||||
HTTP_PORT = service_configs.ports.private.gitea.port;
|
||||
LANDING_PAGE = "/explore/repos";
|
||||
DISABLE_HTTP_GIT = true;
|
||||
};
|
||||
session = {
|
||||
# https cookies or smth
|
||||
COOKIE_SECURE = true;
|
||||
};
|
||||
# only I shall use gitea
|
||||
service.DISABLE_REGISTRATION = true;
|
||||
actions.ENABLED = true;
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
ensureDatabases = [ config.services.gitea.user ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = config.services.gitea.database.user;
|
||||
ensureDBOwnership = true;
|
||||
ensureClauses.login = true;
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
services.openssh.settings.AllowUsers = [ config.services.gitea.user ];
|
||||
|
||||
}
|
||||
@@ -1,698 +0,0 @@
|
||||
{
|
||||
...
|
||||
}:
|
||||
let
|
||||
promDs = {
|
||||
type = "prometheus";
|
||||
uid = "prometheus";
|
||||
};
|
||||
|
||||
dashboard = {
|
||||
editable = true;
|
||||
graphTooltip = 1;
|
||||
schemaVersion = 39;
|
||||
tags = [
|
||||
"system"
|
||||
"monitoring"
|
||||
];
|
||||
time = {
|
||||
from = "now-6h";
|
||||
to = "now";
|
||||
};
|
||||
timezone = "browser";
|
||||
title = "System Overview";
|
||||
uid = "system-overview";
|
||||
|
||||
annotations.list = [
|
||||
{
|
||||
name = "Jellyfin Streams";
|
||||
datasource = {
|
||||
type = "grafana";
|
||||
uid = "-- Grafana --";
|
||||
};
|
||||
enable = true;
|
||||
iconColor = "green";
|
||||
showIn = 0;
|
||||
type = "tags";
|
||||
tags = [ "jellyfin" ];
|
||||
}
|
||||
{
|
||||
name = "ZFS Scrubs";
|
||||
datasource = {
|
||||
type = "grafana";
|
||||
uid = "-- Grafana --";
|
||||
};
|
||||
enable = true;
|
||||
iconColor = "orange";
|
||||
showIn = 0;
|
||||
type = "tags";
|
||||
tags = [ "zfs-scrub" ];
|
||||
}
|
||||
{
|
||||
name = "LLM Requests";
|
||||
datasource = promDs;
|
||||
enable = true;
|
||||
iconColor = "purple";
|
||||
target = {
|
||||
datasource = promDs;
|
||||
expr = "llamacpp:requests_processing > 0";
|
||||
instant = false;
|
||||
range = true;
|
||||
refId = "A";
|
||||
};
|
||||
titleFormat = "LLM inference";
|
||||
}
|
||||
];
|
||||
|
||||
panels = [
|
||||
# -- Row 1: UPS --
|
||||
{
|
||||
id = 1;
|
||||
type = "timeseries";
|
||||
title = "UPS Power Draw";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 8;
|
||||
x = 0;
|
||||
y = 0;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts";
|
||||
legendFormat = "Power (W)";
|
||||
refId = "A";
|
||||
}
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "avg_over_time((apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts + 4.5)[5m:])";
|
||||
legendFormat = "5m average (W)";
|
||||
refId = "B";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "watt";
|
||||
color.mode = "palette-classic";
|
||||
custom = {
|
||||
lineWidth = 2;
|
||||
fillOpacity = 20;
|
||||
spanNulls = true;
|
||||
};
|
||||
};
|
||||
overrides = [
|
||||
{
|
||||
matcher = {
|
||||
id = "byFrameRefID";
|
||||
options = "A";
|
||||
};
|
||||
properties = [
|
||||
{
|
||||
id = "custom.lineStyle";
|
||||
value = {
|
||||
fill = "dot";
|
||||
};
|
||||
}
|
||||
{
|
||||
id = "custom.fillOpacity";
|
||||
value = 10;
|
||||
}
|
||||
{
|
||||
id = "custom.lineWidth";
|
||||
value = 1;
|
||||
}
|
||||
{
|
||||
id = "custom.pointSize";
|
||||
value = 1;
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
matcher = {
|
||||
id = "byFrameRefID";
|
||||
options = "B";
|
||||
};
|
||||
properties = [
|
||||
{
|
||||
id = "custom.lineWidth";
|
||||
value = 4;
|
||||
}
|
||||
{
|
||||
id = "custom.fillOpacity";
|
||||
value = 0;
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
}
|
||||
{
|
||||
id = 7;
|
||||
type = "stat";
|
||||
title = "Energy Usage (24h)";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 4;
|
||||
x = 8;
|
||||
y = 0;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "avg_over_time((apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts + 4.5)[24h:]) * 24 / 1000";
|
||||
legendFormat = "";
|
||||
refId = "A";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "kwatth";
|
||||
decimals = 2;
|
||||
thresholds = {
|
||||
mode = "absolute";
|
||||
steps = [
|
||||
{
|
||||
color = "green";
|
||||
value = null;
|
||||
}
|
||||
{
|
||||
color = "yellow";
|
||||
value = 5;
|
||||
}
|
||||
{
|
||||
color = "red";
|
||||
value = 10;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
options = {
|
||||
reduceOptions = {
|
||||
calcs = [ "lastNotNull" ];
|
||||
fields = "";
|
||||
values = false;
|
||||
};
|
||||
colorMode = "value";
|
||||
graphMode = "none";
|
||||
};
|
||||
}
|
||||
{
|
||||
id = 2;
|
||||
type = "gauge";
|
||||
title = "UPS Load";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 6;
|
||||
x = 12;
|
||||
y = 0;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "apcupsd_ups_load_percent";
|
||||
refId = "A";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "percent";
|
||||
min = 0;
|
||||
max = 100;
|
||||
thresholds = {
|
||||
mode = "absolute";
|
||||
steps = [
|
||||
{
|
||||
color = "green";
|
||||
value = null;
|
||||
}
|
||||
{
|
||||
color = "yellow";
|
||||
value = 70;
|
||||
}
|
||||
{
|
||||
color = "red";
|
||||
value = 90;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
options.reduceOptions = {
|
||||
calcs = [ "lastNotNull" ];
|
||||
fields = "";
|
||||
values = false;
|
||||
};
|
||||
}
|
||||
{
|
||||
id = 3;
|
||||
type = "gauge";
|
||||
title = "UPS Battery";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 6;
|
||||
x = 18;
|
||||
y = 0;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "apcupsd_battery_charge_percent";
|
||||
refId = "A";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "percent";
|
||||
min = 0;
|
||||
max = 100;
|
||||
thresholds = {
|
||||
mode = "absolute";
|
||||
steps = [
|
||||
{
|
||||
color = "red";
|
||||
value = null;
|
||||
}
|
||||
{
|
||||
color = "yellow";
|
||||
value = 20;
|
||||
}
|
||||
{
|
||||
color = "green";
|
||||
value = 50;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
options.reduceOptions = {
|
||||
calcs = [ "lastNotNull" ];
|
||||
fields = "";
|
||||
values = false;
|
||||
};
|
||||
}
|
||||
|
||||
# -- Row 2: System --
|
||||
{
|
||||
id = 4;
|
||||
type = "timeseries";
|
||||
title = "CPU Temperature";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 12;
|
||||
x = 0;
|
||||
y = 8;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = ''node_hwmon_temp_celsius{chip=~"pci.*"}'';
|
||||
legendFormat = "CPU {{sensor}}";
|
||||
refId = "A";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "celsius";
|
||||
color.mode = "palette-classic";
|
||||
custom = {
|
||||
lineWidth = 2;
|
||||
fillOpacity = 10;
|
||||
spanNulls = true;
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
}
|
||||
{
|
||||
id = 5;
|
||||
type = "stat";
|
||||
title = "System Uptime";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 6;
|
||||
x = 12;
|
||||
y = 8;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "time() - node_boot_time_seconds";
|
||||
refId = "A";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "s";
|
||||
thresholds = {
|
||||
mode = "absolute";
|
||||
steps = [
|
||||
{
|
||||
color = "green";
|
||||
value = null;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
options = {
|
||||
reduceOptions = {
|
||||
calcs = [ "lastNotNull" ];
|
||||
fields = "";
|
||||
values = false;
|
||||
};
|
||||
colorMode = "value";
|
||||
graphMode = "none";
|
||||
};
|
||||
}
|
||||
{
|
||||
id = 6;
|
||||
type = "stat";
|
||||
title = "Jellyfin Active Streams";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 6;
|
||||
x = 18;
|
||||
y = 8;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "count(jellyfin_now_playing_state) or vector(0)";
|
||||
refId = "A";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
thresholds = {
|
||||
mode = "absolute";
|
||||
steps = [
|
||||
{
|
||||
color = "green";
|
||||
value = null;
|
||||
}
|
||||
{
|
||||
color = "yellow";
|
||||
value = 3;
|
||||
}
|
||||
{
|
||||
color = "red";
|
||||
value = 6;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
options = {
|
||||
reduceOptions = {
|
||||
calcs = [ "lastNotNull" ];
|
||||
fields = "";
|
||||
values = false;
|
||||
};
|
||||
colorMode = "value";
|
||||
graphMode = "area";
|
||||
};
|
||||
}
|
||||
|
||||
# -- Row 3: qBittorrent --
|
||||
{
|
||||
id = 11;
|
||||
type = "timeseries";
|
||||
title = "qBittorrent Speed";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 24;
|
||||
x = 0;
|
||||
y = 16;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "sum(qbit_dlspeed) or vector(0)";
|
||||
legendFormat = "Download";
|
||||
refId = "A";
|
||||
}
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "sum(qbit_upspeed) or vector(0)";
|
||||
legendFormat = "Upload";
|
||||
refId = "B";
|
||||
}
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "avg_over_time((sum(qbit_dlspeed) or vector(0))[10m:])";
|
||||
legendFormat = "Download (10m avg)";
|
||||
refId = "C";
|
||||
}
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "avg_over_time((sum(qbit_upspeed) or vector(0))[10m:])";
|
||||
legendFormat = "Upload (10m avg)";
|
||||
refId = "D";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "binBps";
|
||||
min = 0;
|
||||
color.mode = "palette-classic";
|
||||
custom = {
|
||||
lineWidth = 1;
|
||||
fillOpacity = 10;
|
||||
spanNulls = true;
|
||||
};
|
||||
};
|
||||
overrides = [
|
||||
{
|
||||
matcher = {
|
||||
id = "byFrameRefID";
|
||||
options = "A";
|
||||
};
|
||||
properties = [
|
||||
{
|
||||
id = "color";
|
||||
value = {
|
||||
fixedColor = "green";
|
||||
mode = "fixed";
|
||||
};
|
||||
}
|
||||
{
|
||||
id = "custom.fillOpacity";
|
||||
value = 5;
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
matcher = {
|
||||
id = "byFrameRefID";
|
||||
options = "B";
|
||||
};
|
||||
properties = [
|
||||
{
|
||||
id = "color";
|
||||
value = {
|
||||
fixedColor = "blue";
|
||||
mode = "fixed";
|
||||
};
|
||||
}
|
||||
{
|
||||
id = "custom.fillOpacity";
|
||||
value = 5;
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
matcher = {
|
||||
id = "byFrameRefID";
|
||||
options = "C";
|
||||
};
|
||||
properties = [
|
||||
{
|
||||
id = "color";
|
||||
value = {
|
||||
fixedColor = "green";
|
||||
mode = "fixed";
|
||||
};
|
||||
}
|
||||
{
|
||||
id = "custom.lineWidth";
|
||||
value = 3;
|
||||
}
|
||||
{
|
||||
id = "custom.fillOpacity";
|
||||
value = 0;
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
matcher = {
|
||||
id = "byFrameRefID";
|
||||
options = "D";
|
||||
};
|
||||
properties = [
|
||||
{
|
||||
id = "color";
|
||||
value = {
|
||||
fixedColor = "blue";
|
||||
mode = "fixed";
|
||||
};
|
||||
}
|
||||
{
|
||||
id = "custom.lineWidth";
|
||||
value = 3;
|
||||
}
|
||||
{
|
||||
id = "custom.fillOpacity";
|
||||
value = 0;
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
}
|
||||
|
||||
# -- Row 4: Intel GPU --
|
||||
{
|
||||
id = 8;
|
||||
type = "timeseries";
|
||||
title = "Intel GPU Utilization";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 24;
|
||||
x = 0;
|
||||
y = 24;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "igpu_engines_busy_percent";
|
||||
legendFormat = "{{engine}}";
|
||||
refId = "A";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "percent";
|
||||
min = 0;
|
||||
max = 100;
|
||||
color.mode = "palette-classic";
|
||||
custom = {
|
||||
lineWidth = 2;
|
||||
fillOpacity = 10;
|
||||
spanNulls = true;
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
}
|
||||
|
||||
# -- Row 5: Storage --
|
||||
{
|
||||
id = 12;
|
||||
type = "timeseries";
|
||||
title = "ZFS Pool Utilization";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 12;
|
||||
x = 0;
|
||||
y = 32;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "zfs_pool_allocated_bytes{pool=\"tank\"} / zfs_pool_size_bytes{pool=\"tank\"} * 100";
|
||||
legendFormat = "tank";
|
||||
refId = "A";
|
||||
}
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "zfs_pool_allocated_bytes{pool=\"hdds\"} / zfs_pool_size_bytes{pool=\"hdds\"} * 100";
|
||||
legendFormat = "hdds";
|
||||
refId = "B";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "percent";
|
||||
min = 0;
|
||||
max = 100;
|
||||
color.mode = "palette-classic";
|
||||
custom = {
|
||||
lineWidth = 2;
|
||||
fillOpacity = 20;
|
||||
spanNulls = true;
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
}
|
||||
{
|
||||
id = 13;
|
||||
type = "timeseries";
|
||||
title = "Boot Drive Partitions";
|
||||
gridPos = {
|
||||
h = 8;
|
||||
w = 12;
|
||||
x = 12;
|
||||
y = 32;
|
||||
};
|
||||
datasource = promDs;
|
||||
targets = [
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "(node_filesystem_size_bytes{mountpoint=\"/boot\"} - node_filesystem_avail_bytes{mountpoint=\"/boot\"}) / node_filesystem_size_bytes{mountpoint=\"/boot\"} * 100";
|
||||
legendFormat = "/boot";
|
||||
refId = "A";
|
||||
}
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "(node_filesystem_size_bytes{mountpoint=\"/persistent\"} - node_filesystem_avail_bytes{mountpoint=\"/persistent\"}) / node_filesystem_size_bytes{mountpoint=\"/persistent\"} * 100";
|
||||
legendFormat = "/persistent";
|
||||
refId = "B";
|
||||
}
|
||||
{
|
||||
datasource = promDs;
|
||||
expr = "(node_filesystem_size_bytes{mountpoint=\"/nix\"} - node_filesystem_avail_bytes{mountpoint=\"/nix\"}) / node_filesystem_size_bytes{mountpoint=\"/nix\"} * 100";
|
||||
legendFormat = "/nix";
|
||||
refId = "C";
|
||||
}
|
||||
];
|
||||
fieldConfig = {
|
||||
defaults = {
|
||||
unit = "percent";
|
||||
min = 0;
|
||||
max = 100;
|
||||
color.mode = "palette-classic";
|
||||
custom = {
|
||||
lineWidth = 2;
|
||||
fillOpacity = 20;
|
||||
spanNulls = true;
|
||||
};
|
||||
};
|
||||
overrides = [ ];
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
environment.etc."grafana-dashboards/system-overview.json" = {
|
||||
text = builtins.toJSON dashboard;
|
||||
mode = "0444";
|
||||
};
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
{
|
||||
imports = [
|
||||
./grafana.nix
|
||||
./prometheus.nix
|
||||
./dashboard.nix
|
||||
./exporters.nix
|
||||
./jellyfin-annotations.nix
|
||||
./zfs-scrub-annotations.nix
|
||||
];
|
||||
}
|
||||
@@ -1,112 +0,0 @@
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
inputs,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
jellyfinExporterPort = service_configs.ports.private.jellyfin_exporter.port;
|
||||
qbitExporterPort = service_configs.ports.private.qbittorrent_exporter.port;
|
||||
igpuExporterPort = service_configs.ports.private.igpu_exporter.port;
|
||||
in
|
||||
{
|
||||
# -- Jellyfin Prometheus Exporter --
|
||||
# Replaces custom jellyfin-collector.nix textfile timer.
|
||||
# Exposes per-session metrics (jellyfin_now_playing_state) and library stats.
|
||||
systemd.services.jellyfin-exporter =
|
||||
lib.mkIf (config.services.grafana.enable && config.services.jellyfin.enable)
|
||||
{
|
||||
description = "Prometheus exporter for Jellyfin";
|
||||
after = [
|
||||
"network.target"
|
||||
"jellyfin.service"
|
||||
];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = lib.getExe (
|
||||
pkgs.writeShellApplication {
|
||||
name = "jellyfin-exporter-wrapper";
|
||||
runtimeInputs = [ pkgs.jellyfin-exporter ];
|
||||
text = ''
|
||||
exec jellyfin_exporter \
|
||||
--jellyfin.address=http://127.0.0.1:${toString service_configs.ports.private.jellyfin.port} \
|
||||
--jellyfin.token="$(cat "$CREDENTIALS_DIRECTORY/jellyfin-api-key")" \
|
||||
--web.listen-address=127.0.0.1:${toString jellyfinExporterPort}
|
||||
'';
|
||||
}
|
||||
);
|
||||
Restart = "on-failure";
|
||||
RestartSec = "10s";
|
||||
DynamicUser = true;
|
||||
NoNewPrivileges = true;
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = true;
|
||||
PrivateTmp = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
|
||||
};
|
||||
};
|
||||
|
||||
# -- qBittorrent Prometheus Exporter --
|
||||
# Replaces custom qbittorrent-collector.nix textfile timer.
|
||||
# Exposes per-torrent metrics (qbit_dlspeed, qbit_upspeed) and aggregate stats.
|
||||
# qBittorrent runs in a VPN namespace; the exporter reaches it via namespace address.
|
||||
systemd.services.qbittorrent-exporter =
|
||||
lib.mkIf (config.services.grafana.enable && config.services.qbittorrent.enable)
|
||||
{
|
||||
description = "Prometheus exporter for qBittorrent";
|
||||
after = [
|
||||
"network.target"
|
||||
"qbittorrent.service"
|
||||
];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart =
|
||||
lib.getExe' inputs.qbittorrent-metrics-exporter.packages.${pkgs.system}.default
|
||||
"qbittorrent-metrics-exporter";
|
||||
Restart = "on-failure";
|
||||
RestartSec = "10s";
|
||||
DynamicUser = true;
|
||||
NoNewPrivileges = true;
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = true;
|
||||
PrivateTmp = true;
|
||||
};
|
||||
environment = {
|
||||
HOST = "127.0.0.1";
|
||||
PORT = toString qbitExporterPort;
|
||||
SCRAPE_INTERVAL = "15";
|
||||
BACKEND = "in-memory";
|
||||
# qBittorrent has AuthSubnetWhitelist=0.0.0.0/0, so no real password needed.
|
||||
# The exporter still expects the env var to be set.
|
||||
QBITTORRENT_PASSWORD = "unused";
|
||||
QBITTORRENT_USERNAME = "admin";
|
||||
TORRENT_HOSTS = "qbit:main=http://${config.vpnNamespaces.wg.namespaceAddress}:${toString config.services.qbittorrent.webuiPort}|http://${config.vpnNamespaces.wg.namespaceAddress}:${toString config.services.qbittorrent.webuiPort}";
|
||||
RUST_LOG = "warn";
|
||||
};
|
||||
};
|
||||
|
||||
# -- Intel GPU Prometheus Exporter --
|
||||
# Replaces custom intel-gpu-collector.nix + intel-gpu-collector.py textfile timer.
|
||||
# Exposes engine busy%, frequency, and RC6 metrics via /metrics.
|
||||
# Requires privileged access to GPU debug interfaces (intel_gpu_top).
|
||||
systemd.services.igpu-exporter = lib.mkIf config.services.grafana.enable {
|
||||
description = "Prometheus exporter for Intel integrated GPU";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ pkgs.intel-gpu-tools ];
|
||||
serviceConfig = {
|
||||
ExecStart = lib.getExe pkgs.igpu-exporter;
|
||||
Restart = "on-failure";
|
||||
RestartSec = "10s";
|
||||
# intel_gpu_top requires root-level access to GPU debug interfaces
|
||||
ProtectHome = true;
|
||||
PrivateTmp = true;
|
||||
};
|
||||
environment = {
|
||||
PORT = toString igpuExporterPort;
|
||||
REFRESH_PERIOD_MS = "30000";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,103 +0,0 @@
|
||||
{
|
||||
config,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
(lib.serviceMountWithZpool "grafana" service_configs.zpool_ssds [
|
||||
service_configs.grafana.dir
|
||||
])
|
||||
(lib.serviceFilePerms "grafana" [
|
||||
"Z ${service_configs.grafana.dir} 0700 grafana grafana"
|
||||
])
|
||||
(lib.mkCaddyReverseProxy {
|
||||
domain = service_configs.grafana.domain;
|
||||
port = service_configs.ports.private.grafana.port;
|
||||
auth = true;
|
||||
})
|
||||
];
|
||||
|
||||
services.grafana = {
|
||||
enable = true;
|
||||
dataDir = service_configs.grafana.dir;
|
||||
|
||||
settings = {
|
||||
server = {
|
||||
http_addr = "127.0.0.1";
|
||||
http_port = service_configs.ports.private.grafana.port;
|
||||
domain = service_configs.grafana.domain;
|
||||
root_url = "https://${service_configs.grafana.domain}";
|
||||
};
|
||||
|
||||
database = {
|
||||
type = "postgres";
|
||||
host = service_configs.postgres.socket;
|
||||
user = "grafana";
|
||||
};
|
||||
|
||||
"auth.anonymous" = {
|
||||
enabled = true;
|
||||
org_role = "Admin";
|
||||
};
|
||||
"auth.basic".enabled = false;
|
||||
"auth".disable_login_form = true;
|
||||
|
||||
analytics.reporting_enabled = false;
|
||||
|
||||
feature_toggles.enable = "dataConnectionsConsole=false";
|
||||
|
||||
users.default_theme = "dark";
|
||||
|
||||
# Disable unused built-in integrations
|
||||
alerting.enabled = false;
|
||||
"unified_alerting".enabled = false;
|
||||
explore.enabled = false;
|
||||
news.news_feed_enabled = false;
|
||||
|
||||
plugins = {
|
||||
enable_alpha = false;
|
||||
plugin_admin_enabled = false;
|
||||
};
|
||||
};
|
||||
|
||||
provision = {
|
||||
datasources.settings = {
|
||||
apiVersion = 1;
|
||||
datasources = [
|
||||
{
|
||||
name = "Prometheus";
|
||||
type = "prometheus";
|
||||
url = "http://127.0.0.1:${toString service_configs.ports.private.prometheus.port}";
|
||||
access = "proxy";
|
||||
isDefault = true;
|
||||
editable = false;
|
||||
uid = "prometheus";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
dashboards.settings.providers = [
|
||||
{
|
||||
name = "system";
|
||||
type = "file";
|
||||
options.path = "/etc/grafana-dashboards";
|
||||
disableDeletion = true;
|
||||
updateIntervalSeconds = 60;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
ensureDatabases = [ "grafana" ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = "grafana";
|
||||
ensureDBOwnership = true;
|
||||
ensureClauses.login = true;
|
||||
}
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
config,
|
||||
service_configs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
lib.mkIf (config.services.grafana.enable && config.services.jellyfin.enable) (
|
||||
lib.mkGrafanaAnnotationService {
|
||||
name = "jellyfin";
|
||||
description = "Jellyfin stream annotation service for Grafana";
|
||||
script = ./jellyfin-annotations.py;
|
||||
environment = {
|
||||
JELLYFIN_URL = "http://127.0.0.1:${toString service_configs.ports.private.jellyfin.port}";
|
||||
POLL_INTERVAL = "30";
|
||||
};
|
||||
loadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
|
||||
}
|
||||
)
|
||||
@@ -1,233 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
|
||||
JELLYFIN_URL = os.environ.get("JELLYFIN_URL", "http://127.0.0.1:8096")
|
||||
GRAFANA_URL = os.environ.get("GRAFANA_URL", "http://127.0.0.1:3000")
|
||||
STATE_FILE = os.environ.get("STATE_FILE", "/var/lib/jellyfin-annotations/state.json")
|
||||
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "30"))
|
||||
|
||||
|
||||
def get_api_key():
|
||||
cred_dir = os.environ.get("CREDENTIALS_DIRECTORY")
|
||||
if cred_dir:
|
||||
return Path(cred_dir, "jellyfin-api-key").read_text().strip()
|
||||
for p in ["/run/agenix/jellyfin-api-key"]:
|
||||
if Path(p).exists():
|
||||
return Path(p).read_text().strip()
|
||||
sys.exit("ERROR: Cannot find jellyfin-api-key")
|
||||
|
||||
|
||||
def http_json(method, url, body=None):
|
||||
data = json.dumps(body).encode() if body is not None else None
|
||||
req = urllib.request.Request(
|
||||
url,
|
||||
data=data,
|
||||
headers={"Content-Type": "application/json", "Accept": "application/json"},
|
||||
method=method,
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=5) as resp:
|
||||
return json.loads(resp.read())
|
||||
|
||||
|
||||
def get_active_sessions(api_key):
    """Fetch Jellyfin sessions that are currently playing something.

    Returns a list of session dicts, or None when the poll fails -- letting
    the caller distinguish "no streams" from "Jellyfin unreachable".
    """
    try:
        request = urllib.request.Request(
            f"{JELLYFIN_URL}/Sessions?api_key={api_key}",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(request, timeout=5) as response:
            all_sessions = json.loads(response.read())
        return [s for s in all_sessions if s.get("NowPlayingItem")]
    except Exception as e:
        print(f"Error fetching sessions: {e}", file=sys.stderr)
        return None
|
||||
|
||||
|
||||
def _codec(name):
|
||||
if not name:
|
||||
return ""
|
||||
aliases = {"h264": "H.264", "h265": "H.265", "hevc": "H.265", "av1": "AV1",
|
||||
"vp9": "VP9", "vp8": "VP8", "mpeg4": "MPEG-4", "mpeg2video": "MPEG-2",
|
||||
"aac": "AAC", "ac3": "AC3", "eac3": "EAC3", "dts": "DTS",
|
||||
"truehd": "TrueHD", "mp3": "MP3", "opus": "Opus", "flac": "FLAC",
|
||||
"vorbis": "Vorbis"}
|
||||
return aliases.get(name.lower(), name.upper())
|
||||
|
||||
|
||||
def _res(width, height):
|
||||
if not height:
|
||||
return ""
|
||||
common = {2160: "4K", 1440: "1440p", 1080: "1080p", 720: "720p",
|
||||
480: "480p", 360: "360p"}
|
||||
return common.get(height, f"{height}p")
|
||||
|
||||
|
||||
def _channels(n):
|
||||
labels = {1: "Mono", 2: "Stereo", 6: "5.1", 7: "6.1", 8: "7.1"}
|
||||
return labels.get(n, f"{n}ch") if n else ""
|
||||
|
||||
|
||||
def format_label(session):
    """Build a multi-line annotation label describing a Jellyfin session.

    Line 1: "user: title"; line 2: play method / video / audio / bitrate /
    transcode reasons joined with " | "; optional line 3: "client · device".
    """
    user = session.get("UserName", "Unknown")
    item = session.get("NowPlayingItem", {}) or {}
    transcode = session.get("TranscodingInfo") or {}
    play_state = session.get("PlayState") or {}
    client = session.get("Client", "")
    device = session.get("DeviceName", "")

    name = item.get("Name", "Unknown")
    series = item.get("SeriesName", "")
    season = item.get("ParentIndexNumber")
    episode = item.get("IndexNumber")
    media_type = item.get("Type", "")

    # Explicit None checks: season 0 ("Specials") and episode 0 are valid
    # values that a plain truthiness test would silently drop.
    if series and season is not None and episode is not None:
        title = f"{series} S{season:02d}E{episode:02d} \u2013 {name}"
    elif series:
        title = f"{series} \u2013 {name}"
    elif media_type == "Movie":
        title = f"{name} (movie)"
    else:
        title = name

    play_method = play_state.get("PlayMethod", "")
    if play_method == "DirectPlay":
        method = "Direct Play"
    elif play_method == "DirectStream":
        method = "Direct Stream"
    elif play_method == "Transcode" or transcode:
        method = "Transcode"
    else:
        method = "Direct Play"

    # Pick the source video stream and the default (or first) audio stream.
    media_streams = item.get("MediaStreams") or []
    video_streams = [s for s in media_streams if s.get("Type") == "Video"]
    audio_streams = [s for s in media_streams if s.get("Type") == "Audio"]
    default_audio = next((s for s in audio_streams if s.get("IsDefault")), None)
    audio_stream = default_audio or (audio_streams[0] if audio_streams else {})
    video_stream = video_streams[0] if video_streams else {}

    src_vcodec = _codec(video_stream.get("Codec", ""))
    src_res = _res(video_stream.get("Width") or item.get("Width"),
                   video_stream.get("Height") or item.get("Height"))
    src_acodec = _codec(audio_stream.get("Codec", ""))
    src_channels = _channels(audio_stream.get("Channels"))

    # Missing flags default to True so absent TranscodingInfo reads as direct.
    is_video_direct = transcode.get("IsVideoDirect", True)
    is_audio_direct = transcode.get("IsAudioDirect", True)

    # Video: show "SRC→DST res" when the codec actually changes.
    if transcode and not is_video_direct:
        dst_vcodec = _codec(transcode.get("VideoCodec", ""))
        dst_res = _res(transcode.get("Width"), transcode.get("Height")) or src_res
        if src_vcodec and dst_vcodec and src_vcodec != dst_vcodec:
            video_part = f"{src_vcodec}\u2192{dst_vcodec} {dst_res}".strip()
        else:
            video_part = f"{dst_vcodec or src_vcodec} {dst_res}".strip()
    else:
        video_part = f"{src_vcodec} {src_res}".strip()

    # Audio: same arrow notation as video.
    if transcode and not is_audio_direct:
        dst_acodec = _codec(transcode.get("AudioCodec", ""))
        dst_channels = _channels(transcode.get("AudioChannels")) or src_channels
        if src_acodec and dst_acodec and src_acodec != dst_acodec:
            audio_part = f"{src_acodec}\u2192{dst_acodec} {dst_channels}".strip()
        else:
            audio_part = f"{dst_acodec or src_acodec} {dst_channels}".strip()
    else:
        audio_part = f"{src_acodec} {src_channels}".strip()

    bitrate = transcode.get("Bitrate") or item.get("Bitrate")
    bitrate_part = f"{bitrate / 1_000_000:.1f} Mbps" if bitrate else ""

    reasons = transcode.get("TranscodeReasons") or []
    reason_part = f"[{', '.join(reasons)}]" if reasons else ""

    stream_parts = [p for p in [method, video_part, audio_part, bitrate_part, reason_part] if p]
    client_str = " \u00b7 ".join(filter(None, [client, device]))

    lines = [f"{user}: {title}", " | ".join(stream_parts)]
    if client_str:
        lines.append(client_str)

    return "\n".join(lines)
|
||||
|
||||
|
||||
def load_state():
    """Read the persisted session->annotation map; {} if absent or corrupt."""
    state = {}
    try:
        with open(STATE_FILE) as handle:
            state = json.load(handle)
    except (FileNotFoundError, json.JSONDecodeError):
        pass  # first run, or a torn write: start from scratch
    return state
|
||||
|
||||
|
||||
def save_state(state):
    """Persist the annotation state atomically (write temp file, then rename)."""
    os.makedirs(os.path.dirname(STATE_FILE), exist_ok=True)
    scratch = STATE_FILE + ".tmp"
    with open(scratch, "w") as handle:
        json.dump(state, handle)
    # os.replace is atomic on POSIX, so readers never see a half-written file.
    os.replace(scratch, STATE_FILE)
|
||||
|
||||
|
||||
def grafana_post(label, start_ms):
    """Create a Grafana annotation; return its id, or None on any failure."""
    payload = {"time": start_ms, "text": label, "tags": ["jellyfin"]}
    try:
        created = http_json("POST", f"{GRAFANA_URL}/api/annotations", payload)
        return created.get("id")
    except Exception as e:
        print(f"Error posting annotation: {e}", file=sys.stderr)
        return None
|
||||
|
||||
|
||||
def grafana_close(grafana_id, end_ms):
    """Set timeEnd on an annotation, turning it into a region; best-effort."""
    target = f"{GRAFANA_URL}/api/annotations/{grafana_id}"
    try:
        http_json("PATCH", target, {"timeEnd": end_ms})
    except Exception as e:
        print(f"Error closing annotation {grafana_id}: {e}", file=sys.stderr)
|
||||
|
||||
|
||||
def main():
    """Poll Jellyfin and mirror active streams as Grafana annotations.

    Runs forever. Each new session opens an annotation; when the session
    disappears, the annotation is closed at the current time.
    """
    api_key = get_api_key()
    state = load_state()

    while True:
        now_ms = int(time.time() * 1000)
        sessions = get_active_sessions(api_key)

        # None means the poll failed; keep annotations open and retry later.
        if sessions is not None:
            current_ids = {s["Id"] for s in sessions}

            # Open an annotation for every session not yet tracked.
            for s in sessions:
                sid = s["Id"]
                if sid not in state:
                    label = format_label(s)
                    grafana_id = grafana_post(label, now_ms)
                    if grafana_id is not None:
                        state[sid] = {
                            "grafana_id": grafana_id,
                            "label": label,
                            "start_ms": now_ms,
                        }
                        # Persist immediately so a crash doesn't leak annotations.
                        save_state(state)

            # Close annotations whose sessions have ended.
            for sid in [k for k in state if k not in current_ids]:
                info = state.pop(sid)
                grafana_close(info["grafana_id"], now_ms)
                save_state(state)

        time.sleep(POLL_INTERVAL)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Loops forever; expected to be supervised externally (systemd unit).
    main()
|
||||
@@ -1,110 +0,0 @@
|
||||
# Prometheus server plus local exporters (node, apcupsd, zfs).
{
  service_configs,
  lib,
  ...
}:
let
  # node_exporter textfile collector dir; other units drop *.prom files here.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";
in
{
  imports = [
    # Keep the TSDB on the SSD pool and order startup after the mounts.
    (lib.serviceMountWithZpool "prometheus" service_configs.zpool_ssds [
      "/var/lib/prometheus"
    ])
    (lib.serviceFilePerms "prometheus" [
      "Z /var/lib/prometheus 0700 prometheus prometheus"
    ])
  ];

  services.prometheus = {
    enable = true;
    port = service_configs.ports.private.prometheus.port;
    listenAddress = "127.0.0.1";
    stateDir = "prometheus";
    retentionTime = "0d"; # 0 disables time-based retention (keep forever)

    exporters = {
      node = {
        enable = true;
        port = service_configs.ports.private.prometheus_node.port;
        listenAddress = "127.0.0.1";
        enabledCollectors = [
          "hwmon"
          "systemd"
          "textfile"
        ];
        extraFlags = [
          "--collector.textfile.directory=${textfileDir}"
        ];
      };

      # UPS metrics via the local apcupsd daemon (default NIS port 3551).
      apcupsd = {
        enable = true;
        port = service_configs.ports.private.prometheus_apcupsd.port;
        listenAddress = "127.0.0.1";
        apcupsdAddress = "127.0.0.1:3551";
      };

      zfs = {
        enable = true;
        port = service_configs.ports.private.prometheus_zfs.port;
        listenAddress = "127.0.0.1";
      };
    };

    # Every target is loopback-only; exporter ports come from service_configs.
    scrapeConfigs = [
      {
        job_name = "prometheus";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus.port}" ]; }
        ];
      }
      {
        job_name = "node";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus_node.port}" ]; }
        ];
      }
      {
        job_name = "apcupsd";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus_apcupsd.port}" ]; }
        ];
      }
      {
        job_name = "llama-cpp";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.llama_cpp.port}" ]; }
        ];
      }
      {
        job_name = "jellyfin";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.jellyfin_exporter.port}" ]; }
        ];
      }
      {
        job_name = "qbittorrent";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.qbittorrent_exporter.port}" ]; }
        ];
      }
      {
        job_name = "igpu";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.igpu_exporter.port}" ]; }
        ];
      }
      {
        job_name = "zfs";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus_zfs.port}" ]; }
        ];
      }
    ];
  };

  # Create the textfile directory; writers need root or their own perms rule.
  systemd.tmpfiles.rules = [
    "d ${textfileDir} 0755 root root -"
  ];
}
|
||||
@@ -1,36 +0,0 @@
|
||||
# Posts Grafana region annotations spanning each ZFS scrub run by hooking
# the zfs-scrub unit's ExecStartPre / ExecStopPost.
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
let
  # Grafana listens on loopback only; annotations are posted locally.
  grafanaUrl = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";

  # Wraps ./zfs-scrub-annotations.sh together with every tool it shells out to.
  script = pkgs.writeShellApplication {
    name = "zfs-scrub-annotations";
    runtimeInputs = with pkgs; [
      curl
      jq
      coreutils
      gnugrep
      gnused
      config.boot.zfs.package
    ];
    text = builtins.readFile ./zfs-scrub-annotations.sh;
  };
in
lib.mkIf (config.services.grafana.enable && config.services.zfs.autoScrub.enable) {
  systemd.services.zfs-scrub = {
    environment = {
      GRAFANA_URL = grafanaUrl;
      # Matches RuntimeDirectory below (systemd mounts it under /run).
      STATE_DIR = "/run/zfs-scrub-annotations";
    };
    serviceConfig = {
      RuntimeDirectory = "zfs-scrub-annotations";
      # "-" prefix: a failed annotation POST must not abort the scrub itself.
      ExecStartPre = [ "-${lib.getExe script} start" ];
      ExecStopPost = [ "${lib.getExe script} stop" ];
    };
  };
}
|
||||
@@ -1,55 +0,0 @@
|
||||
#!/usr/bin/env bash
# ZFS scrub annotation script for Grafana
# Usage: zfs-scrub-annotations.sh {start|stop}
# Required env: GRAFANA_URL, STATE_DIR
# Required on PATH: zpool, curl, jq, paste, date, grep, sed

set -euo pipefail

ACTION="${1:-}"
GRAFANA_URL="${GRAFANA_URL:?GRAFANA_URL required}"
STATE_DIR="${STATE_DIR:?STATE_DIR required}"

case "$ACTION" in
  start)
    # One annotation covering all pools, e.g. "ZFS scrub: tank, backup".
    POOLS=$(zpool list -H -o name | paste -sd ', ')
    NOW_MS=$(date +%s%3N)  # Grafana's annotation API takes epoch milliseconds

    # Best-effort: if Grafana is unreachable, skip annotating (exit 0) rather
    # than fail; the annotation id is saved so `stop` can close the region.
    RESPONSE=$(curl -sf --max-time 5 \
      -X POST "$GRAFANA_URL/api/annotations" \
      -H "Content-Type: application/json" \
      -d "$(jq -n --arg text "ZFS scrub: $POOLS" --argjson time "$NOW_MS" \
        '{time: $time, text: $text, tags: ["zfs-scrub"]}')" \
    ) || exit 0

    echo "$RESPONSE" | jq -r '.id' > "$STATE_DIR/annotation-id"
    ;;

  stop)
    # No recorded annotation means `start` was skipped; nothing to close.
    ANN_ID=$(cat "$STATE_DIR/annotation-id" 2>/dev/null) || exit 0
    [ -z "$ANN_ID" ] && exit 0

    NOW_MS=$(date +%s%3N)

    # Collect each pool's final "scan:" status line for the annotation body.
    RESULTS=""
    while IFS= read -r pool; do
      scan_line=$(zpool status "$pool" | grep "scan:" | sed 's/^[[:space:]]*//')
      RESULTS="${RESULTS}${pool}: ${scan_line}"$'\n'
    done < <(zpool list -H -o name)

    TEXT=$(printf "ZFS scrub completed\n%s" "$RESULTS")

    # PATCH adds timeEnd, turning the point annotation into a region.
    curl -sf --max-time 5 \
      -X PATCH "$GRAFANA_URL/api/annotations/$ANN_ID" \
      -H "Content-Type: application/json" \
      -d "$(jq -n --arg text "$TEXT" --argjson timeEnd "$NOW_MS" \
        '{timeEnd: $timeEnd, text: $text}')" || true

    rm -f "$STATE_DIR/annotation-id"
    ;;

  *)
    echo "Usage: $0 {start|stop}" >&2
    exit 1
    ;;
esac
|
||||
@@ -1,16 +0,0 @@
|
||||
# Serves the graphing-calculator web build behind Caddy.
{
  service_configs,
  inputs,
  pkgs,
  ...
}:
let
  # Static web build provided by the ytbn-graphing-software flake input.
  graphing-calculator =
    inputs.ytbn-graphing-software.packages.${pkgs.stdenv.targetPlatform.system}.web;
in
{
  # Serve the prebuilt site directly out of the nix store.
  services.caddy.virtualHosts."graphing.${service_configs.https.domain}".extraConfig = ''
    root * ${graphing-calculator}
    file_server browse
  '';
}
|
||||
@@ -1,38 +0,0 @@
|
||||
# Harmonia binary cache, fronted by Caddy with basic auth.
{
  config,
  lib,
  service_configs,
  ...
}:
{
  imports = [
    # The sign key is provisioned by agenix; restrict it to the harmonia user.
    (lib.serviceFilePerms "harmonia" [
      "Z /run/agenix/harmonia-sign-key 0400 harmonia harmonia"
    ])
  ];

  services.harmonia = {
    enable = true;
    signKeyPaths = [ config.age.secrets.harmonia-sign-key.path ];
    # Loopback-only; external access goes through the Caddy vhost below.
    settings.bind = "127.0.0.1:${toString service_configs.ports.private.harmonia.port}";
  };

  # serve latest deploy store paths (unauthenticated — just a path string)
  # CI writes to /var/lib/dotfiles-deploy/<hostname> after building
  services.caddy.virtualHosts."nix-cache.${service_configs.https.domain}".extraConfig = ''
    handle_path /deploy/* {
      root * /var/lib/dotfiles-deploy
      file_server
    }

    handle {
      import ${config.age.secrets.nix-cache-auth.path}
      reverse_proxy :${toString service_configs.ports.private.harmonia.port}
    }
  '';

  # directory for CI to record latest deploy store paths
  systemd.tmpfiles.rules = [
    "d /var/lib/dotfiles-deploy 0755 gitea-runner gitea-runner"
  ];
}
|
||||
@@ -1,50 +0,0 @@
|
||||
# Immich photo server: SSD-pool media mount, Caddy reverse proxy, fail2ban.
{
  service_configs,
  pkgs,
  config,
  lib,
  ...
}:
{
  imports = [
    # Both the server and the ML worker need the media location mounted first.
    (lib.serviceMountWithZpool "immich-server" service_configs.zpool_ssds [
      config.services.immich.mediaLocation
    ])
    (lib.serviceMountWithZpool "immich-machine-learning" service_configs.zpool_ssds [
      config.services.immich.mediaLocation
    ])
    (lib.serviceFilePerms "immich-server" [
      "Z ${config.services.immich.mediaLocation} 0770 ${config.services.immich.user} ${config.services.immich.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "immich";
      port = service_configs.ports.private.immich.port;
    })
    # Ban IPs with repeated failed logins, matched from the server journal.
    (lib.mkFail2banJail {
      name = "immich";
      unitName = "immich-server.service";
      failregex = "^.*Failed login attempt for user .* from ip address <HOST>.*$";
    })
  ];

  services.immich = {
    enable = true;
    mediaLocation = service_configs.immich.dir;
    port = service_configs.ports.private.immich.port;
    # openFirewall = true;
    host = "0.0.0.0";
    database = {
      # NOTE(review): database is expected to be created elsewhere — confirm
      # which module provisions it before changing this.
      createDB = false;
    };
  };

  # immich-go: CLI bulk-upload tool.
  environment.systemPackages = with pkgs; [
    immich-go
  ];

  # NOTE(review): presumably grants GPU device access (transcode/ML) — confirm.
  users.users.${config.services.immich.user}.extraGroups = [
    "video"
    "render"
  ];

}
|
||||
@@ -1,6 +0,0 @@
|
||||
# Jellyfin service group: the server plus the qBittorrent bandwidth monitor
# that reacts to its playback events.
{
  imports = [
    ./jellyfin.nix
    ./jellyfin-qbittorrent-monitor.nix
  ];
}
|
||||
@@ -1,127 +0,0 @@
|
||||
# Installs/configures the Jellyfin Webhook plugin and runs the Python monitor
# that throttles qBittorrent while remote streams are playing.
{
  pkgs,
  service_configs,
  config,
  lib,
  ...
}:
let
  webhookPlugin = import ./jellyfin-webhook-plugin.nix { inherit pkgs lib; };
  jellyfinPort = service_configs.ports.private.jellyfin.port;
  webhookPort = service_configs.ports.private.jellyfin_qbittorrent_monitor_webhook.port;
in
lib.mkIf config.services.jellyfin.enable {
  # Materialise the Jellyfin Webhook plugin into Jellyfin's plugins dir before
  # Jellyfin starts. Jellyfin rewrites meta.json at runtime, so a read-only
  # nix-store symlink would EACCES -- we copy instead.
  #
  # `wantedBy = [ "jellyfin.service" ]` alone is insufficient on initial rollout:
  # if jellyfin is already running at activation time, systemd won't start the
  # oneshot until the next jellyfin restart. `restartTriggers` on jellyfin pinned
  # to the plugin package + install script forces that restart whenever either
  # changes, which invokes this unit via the `before`/`wantedBy` chain.
  systemd.services.jellyfin-webhook-install = {
    before = [ "jellyfin.service" ];
    wantedBy = [ "jellyfin.service" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      # Run as the jellyfin user so the copied plugin stays writable by it.
      User = config.services.jellyfin.user;
      Group = config.services.jellyfin.group;
      ExecStart = webhookPlugin.mkInstallScript {
        pluginsDir = "${config.services.jellyfin.dataDir}/plugins";
      };
    };
  };

  systemd.services.jellyfin.restartTriggers = [
    webhookPlugin.package
    (webhookPlugin.mkInstallScript {
      pluginsDir = "${config.services.jellyfin.dataDir}/plugins";
    })
  ];

  # After Jellyfin starts, POST the plugin configuration so the webhook
  # targets the monitor's receiver. Idempotent; runs on every boot.
  systemd.services.jellyfin-webhook-configure = {
    after = [ "jellyfin.service" ];
    wants = [ "jellyfin.service" ];
    before = [ "jellyfin-qbittorrent-monitor.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      DynamicUser = true;
      LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
      ExecStart = webhookPlugin.mkConfigureScript {
        jellyfinUrl = "http://127.0.0.1:${toString jellyfinPort}";
        webhooks = [
          {
            name = "qBittorrent Monitor";
            uri = "http://127.0.0.1:${toString webhookPort}/";
            notificationTypes = [
              "PlaybackStart"
              "PlaybackProgress"
              "PlaybackStop"
            ];
          }
        ];
      };
    };
  };

  systemd.services."jellyfin-qbittorrent-monitor" = {
    description = "Monitor Jellyfin streaming and control qBittorrent rate limits";
    after = [
      "network.target"
      "jellyfin.service"
      "qbittorrent.service"
      "jellyfin-webhook-configure.service"
    ];
    wants = [ "jellyfin-webhook-configure.service" ];
    wantedBy = [ "multi-user.target" ];

    serviceConfig = {
      Type = "simple";
      # Export the API key from the systemd credential, then exec the monitor.
      ExecStart = pkgs.writeShellScript "jellyfin-monitor-start" ''
        export JELLYFIN_API_KEY=$(cat $CREDENTIALS_DIRECTORY/jellyfin-api-key)
        exec ${
          pkgs.python3.withPackages (ps: with ps; [ requests ])
        }/bin/python ${./jellyfin-qbittorrent-monitor.py}
      '';
      Restart = "always";
      RestartSec = "10s";

      # Security hardening
      DynamicUser = true;
      NoNewPrivileges = true;
      ProtectSystem = "strict";
      ProtectHome = true;
      ProtectKernelTunables = true;
      ProtectKernelModules = true;
      ProtectControlGroups = true;
      MemoryDenyWriteExecute = true;
      RestrictRealtime = true;
      RestrictSUIDSGID = true;
      RemoveIPC = true;

      # Load credentials from agenix secrets
      LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
    };

    environment = {
      JELLYFIN_URL = "http://localhost:${builtins.toString jellyfinPort}";
      # qBittorrent runs inside the `wg` VPN network namespace.
      QBITTORRENT_URL = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.torrent.port}";
      CHECK_INTERVAL = "30";
      # Bandwidth budget configuration
      TOTAL_BANDWIDTH_BUDGET = "30000000"; # 30 Mbps in bits per second
      SERVICE_BUFFER = "5000000"; # 5 Mbps reserved for other services (bps)
      DEFAULT_STREAM_BITRATE = "10000000"; # 10 Mbps fallback when bitrate unknown (bps)
      MIN_TORRENT_SPEED = "100"; # KB/s - below this, pause torrents instead
      STREAM_BITRATE_HEADROOM = "1.1"; # multiplier per stream for bitrate fluctuations
      # Webhook receiver: Jellyfin Webhook plugin POSTs events here to throttle immediately.
      WEBHOOK_BIND = "127.0.0.1";
      WEBHOOK_PORT = toString webhookPort;
    };
  };
}
|
||||
@@ -1,504 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import requests
|
||||
import time
|
||||
import logging
|
||||
import sys
|
||||
import signal
|
||||
import json
|
||||
import ipaddress
|
||||
import threading
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
|
||||
# Log to stdout/stderr with timestamps; the supervising service captures it.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ServiceUnavailable(Exception):
    """Raised when a monitored service is temporarily unavailable."""
|
||||
|
||||
|
||||
class JellyfinQBittorrentMonitor:
|
||||
    def __init__(
        self,
        jellyfin_url="http://localhost:8096",
        qbittorrent_url="http://localhost:8080",
        check_interval=30,
        jellyfin_api_key=None,
        streaming_start_delay=10,
        streaming_stop_delay=60,
        total_bandwidth_budget=30000000,
        service_buffer=5000000,
        default_stream_bitrate=10000000,
        min_torrent_speed=100,
        stream_bitrate_headroom=1.1,
        webhook_port=0,
        webhook_bind="127.0.0.1",
    ):
        """Configure the monitor.

        Bandwidth figures (total_bandwidth_budget, service_buffer,
        default_stream_bitrate) are in bits per second; min_torrent_speed is
        in KB/s; delays/intervals are in seconds. webhook_port=0 disables the
        webhook receiver.
        """
        self.jellyfin_url = jellyfin_url
        self.qbittorrent_url = qbittorrent_url
        self.check_interval = check_interval
        self.jellyfin_api_key = jellyfin_api_key
        self.total_bandwidth_budget = total_bandwidth_budget
        self.service_buffer = service_buffer
        self.default_stream_bitrate = default_stream_bitrate
        self.min_torrent_speed = min_torrent_speed
        self.stream_bitrate_headroom = stream_bitrate_headroom
        # State machine bookkeeping; current_state takes the values
        # "unlimited" / "throttled" / "paused" (see sync_qbittorrent_state).
        self.last_streaming_state = None
        self.current_state = "unlimited"
        self.torrents_paused = False
        self.last_alt_limits = None
        self.running = True
        self.session = requests.Session()  # Use session for cookies
        self.last_active_streams = []

        # Hysteresis settings to prevent rapid switching
        self.streaming_start_delay = streaming_start_delay
        self.streaming_stop_delay = streaming_stop_delay
        self.last_state_change = 0

        # Webhook receiver: allows Jellyfin to push events instead of waiting for the poll
        self.webhook_port = webhook_port
        self.webhook_bind = webhook_bind
        self.wake_event = threading.Event()
        self.webhook_server = None

        # Local network ranges (RFC 1918 private networks + localhost)
        self.local_networks = [
            ipaddress.ip_network("10.0.0.0/8"),
            ipaddress.ip_network("172.16.0.0/12"),
            ipaddress.ip_network("192.168.0.0/16"),
            ipaddress.ip_network("127.0.0.0/8"),
            ipaddress.ip_network("::1/128"),  # IPv6 localhost
            ipaddress.ip_network("fe80::/10"),  # IPv6 link-local
        ]
|
||||
|
||||
def is_local_ip(self, ip_address: str) -> bool:
|
||||
"""Check if an IP address is from a local network"""
|
||||
try:
|
||||
ip = ipaddress.ip_address(ip_address)
|
||||
return any(ip in network for network in self.local_networks)
|
||||
except ValueError:
|
||||
logger.warning(f"Invalid IP address format: {ip_address}")
|
||||
return True # Treat invalid IPs as local for safety
|
||||
|
||||
def signal_handler(self, signum, frame):
|
||||
logger.info("Received shutdown signal, cleaning up...")
|
||||
self.running = False
|
||||
if self.webhook_server is not None:
|
||||
# shutdown() blocks until serve_forever returns; run from a thread so we don't deadlock
|
||||
threading.Thread(target=self.webhook_server.shutdown, daemon=True).start()
|
||||
self.restore_normal_limits()
|
||||
sys.exit(0)
|
||||
|
||||
    def wake(self) -> None:
        """Signal the main loop to re-evaluate state immediately."""
        # sleep_or_wake() is blocked on this event and clears it after waking.
        self.wake_event.set()
|
||||
|
||||
    def sleep_or_wake(self, seconds: float) -> None:
        """Wait up to `seconds`, returning early if a webhook wakes the loop."""
        self.wake_event.wait(seconds)
        # Clear immediately so the next wait blocks again.
        self.wake_event.clear()
|
||||
|
||||
    def start_webhook_server(self) -> None:
        """Start a background HTTP server that wakes the monitor on any POST.

        No-op when webhook_port is 0/unset. The handler only reads the
        NotificationType for logging; every POST gets a 204 and a wake().
        """
        if not self.webhook_port:
            return

        # Closure handle for the nested handler class.
        monitor = self

        class WebhookHandler(BaseHTTPRequestHandler):
            def do_POST(self):  # noqa: N802
                # Cap the read at 64 KiB so a bogus Content-Length cannot
                # make us buffer unbounded data.
                length = int(self.headers.get("Content-Length", "0") or "0")
                body = self.rfile.read(min(length, 65536)) if length else b""
                event = "unknown"
                try:
                    if body:
                        event = json.loads(body).get("NotificationType", "unknown")
                except (json.JSONDecodeError, ValueError):
                    pass  # event stays "unknown"; the wake-up still happens
                logger.info(f"Webhook received: {event}")
                self.send_response(204)
                self.end_headers()
                monitor.wake()

            def log_message(self, format, *args):
                return  # suppress default access log

        self.webhook_server = HTTPServer(
            (self.webhook_bind, self.webhook_port), WebhookHandler
        )
        # Daemon thread: must not keep the process alive past shutdown.
        threading.Thread(
            target=self.webhook_server.serve_forever, daemon=True, name="webhook-server"
        ).start()
        logger.info(
            f"Webhook receiver listening on http://{self.webhook_bind}:{self.webhook_port}"
        )
|
||||
|
||||
def check_jellyfin_sessions(self) -> list[dict]:
|
||||
headers = (
|
||||
{"X-Emby-Token": self.jellyfin_api_key} if self.jellyfin_api_key else {}
|
||||
)
|
||||
|
||||
try:
|
||||
response = requests.get(
|
||||
f"{self.jellyfin_url}/Sessions", headers=headers, timeout=10
|
||||
)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Failed to check Jellyfin sessions: {e}")
|
||||
raise ServiceUnavailable(f"Jellyfin unavailable: {e}") from e
|
||||
|
||||
try:
|
||||
sessions = response.json()
|
||||
except json.JSONDecodeError as e:
|
||||
logger.error(f"Failed to parse Jellyfin response: {e}")
|
||||
raise ServiceUnavailable(f"Jellyfin returned invalid JSON: {e}") from e
|
||||
|
||||
active_streams = []
|
||||
for session in sessions:
|
||||
if (
|
||||
"NowPlayingItem" in session
|
||||
and not session.get("PlayState", {}).get("IsPaused", True)
|
||||
and not self.is_local_ip(session.get("RemoteEndPoint", ""))
|
||||
):
|
||||
item = session["NowPlayingItem"]
|
||||
item_type = item.get("Type", "").lower()
|
||||
if item_type in ["movie", "episode", "video"]:
|
||||
user = session.get("UserName", "Unknown")
|
||||
stream_name = f"{user}: {item.get('Name', 'Unknown')}"
|
||||
if session.get("TranscodingInfo") and session[
|
||||
"TranscodingInfo"
|
||||
].get("Bitrate"):
|
||||
bitrate = session["TranscodingInfo"]["Bitrate"]
|
||||
elif item.get("Bitrate"):
|
||||
bitrate = item["Bitrate"]
|
||||
elif item.get("MediaSources", [{}])[0].get("Bitrate"):
|
||||
bitrate = item["MediaSources"][0]["Bitrate"]
|
||||
else:
|
||||
bitrate = self.default_stream_bitrate
|
||||
|
||||
bitrate = min(int(bitrate), 100_000_000)
|
||||
# Add headroom to account for bitrate fluctuations
|
||||
bitrate = int(bitrate * self.stream_bitrate_headroom)
|
||||
active_streams.append({"name": stream_name, "bitrate_bps": bitrate})
|
||||
|
||||
return active_streams
|
||||
|
||||
def check_qbittorrent_alternate_limits(self) -> bool:
|
||||
try:
|
||||
response = self.session.get(
|
||||
f"{self.qbittorrent_url}/api/v2/transfer/speedLimitsMode", timeout=10
|
||||
)
|
||||
if response.status_code == 200:
|
||||
return response.text.strip() == "1"
|
||||
else:
|
||||
logger.warning(
|
||||
f"SpeedLimitsMode endpoint returned HTTP {response.status_code}"
|
||||
)
|
||||
raise ServiceUnavailable(
|
||||
f"qBittorrent returned HTTP {response.status_code}"
|
||||
)
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"SpeedLimitsMode endpoint failed: {e}")
|
||||
raise ServiceUnavailable(f"qBittorrent unavailable: {e}") from e
|
||||
|
||||
    def use_alt_limits(self, enable: bool) -> None:
        """Switch qBittorrent's alternate speed limit mode on or off.

        The API only exposes a toggle endpoint, so the current mode is read
        first (no-op when it already matches) and verified afterwards.
        Failures are logged, never raised.
        """
        action = "enabled" if enable else "disabled"
        try:
            current_throttle = self.check_qbittorrent_alternate_limits()

            if current_throttle == enable:
                logger.debug(
                    f"Alternate speed limits already {action}, no action needed"
                )
                return

            response = self.session.post(
                f"{self.qbittorrent_url}/api/v2/transfer/toggleSpeedLimitsMode",
                timeout=10,
            )
            response.raise_for_status()
            # Read back the mode to confirm the toggle landed.
            new_state = self.check_qbittorrent_alternate_limits()
            if new_state == enable:
                logger.info(f"Alternate speed limits {action}")
            else:
                logger.warning(
                    f"Toggle may have failed: expected {enable}, got {new_state}"
                )

        except ServiceUnavailable:
            logger.warning(
                f"qBittorrent unavailable, cannot {action} alternate speed limits"
            )
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to {action} alternate speed limits: {e}")
|
||||
|
||||
def pause_all_torrents(self) -> None:
|
||||
try:
|
||||
response = self.session.post(
|
||||
f"{self.qbittorrent_url}/api/v2/torrents/stop",
|
||||
data={"hashes": "all"},
|
||||
timeout=10,
|
||||
)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Failed to pause torrents: {e}")
|
||||
|
||||
def resume_all_torrents(self) -> None:
|
||||
try:
|
||||
response = self.session.post(
|
||||
f"{self.qbittorrent_url}/api/v2/torrents/start",
|
||||
data={"hashes": "all"},
|
||||
timeout=10,
|
||||
)
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Failed to resume torrents: {e}")
|
||||
|
||||
def set_alt_speed_limits(self, dl_kbs: float, ul_kbs: float) -> None:
|
||||
try:
|
||||
payload = {
|
||||
"alt_dl_limit": int(dl_kbs * 1024),
|
||||
"alt_up_limit": int(ul_kbs * 1024),
|
||||
}
|
||||
response = self.session.post(
|
||||
f"{self.qbittorrent_url}/api/v2/app/setPreferences",
|
||||
data={"json": json.dumps(payload)},
|
||||
timeout=10,
|
||||
)
|
||||
response.raise_for_status()
|
||||
self.last_alt_limits = (dl_kbs, ul_kbs)
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.error(f"Failed to set alternate speed limits: {e}")
|
||||
|
||||
    def restore_normal_limits(self) -> None:
        """Undo pausing/throttling (shutdown path) so torrents run unrestricted."""
        if self.torrents_paused:
            logger.info("Resuming all torrents before shutdown...")
            self.resume_all_torrents()
            self.torrents_paused = False

        if self.current_state != "unlimited":
            logger.info("Restoring normal speed limits before shutdown...")
            self.use_alt_limits(False)
            self.current_state = "unlimited"
|
||||
|
||||
def sync_qbittorrent_state(self) -> None:
    """Re-assert the monitor's desired state onto qBittorrent.

    qBittorrent may be restarted or changed by hand between checks; this
    pushes self.current_state back onto it every loop iteration.  A
    ServiceUnavailable from the API is swallowed — re-sync is best-effort.
    """
    try:
        if self.current_state == "unlimited":
            # Expect alt-speed mode OFF; flip it back off if someone enabled it.
            actual_state = self.check_qbittorrent_alternate_limits()
            if actual_state:
                logger.warning(
                    "qBittorrent state mismatch detected: expected alt speed OFF, got ON. Re-syncing..."
                )
                self.use_alt_limits(False)
        elif self.current_state == "throttled":
            # Re-apply the last limit values first (a restarted qBittorrent
            # may have lost them), then make sure alt-speed mode is ON.
            if self.last_alt_limits:
                self.set_alt_speed_limits(*self.last_alt_limits)
            actual_state = self.check_qbittorrent_alternate_limits()
            if not actual_state:
                logger.warning(
                    "qBittorrent state mismatch detected: expected alt speed ON, got OFF. Re-syncing..."
                )
                self.use_alt_limits(True)
        elif self.current_state == "paused":
            # NOTE(review): re-issues the pause request on every call rather
            # than checking actual torrent state — presumably deliberate
            # enforcement; confirm the extra API calls are acceptable.
            self.pause_all_torrents()
            self.torrents_paused = True
    except ServiceUnavailable:
        # qBittorrent is down; nothing to sync. The next iteration retries.
        pass
def should_change_state(self, new_streaming_state: bool) -> bool:
    """Apply hysteresis to prevent rapid state changes.

    Returns True (and records the change time) when the streaming state is
    allowed to flip; returns False while the state is unchanged or still
    inside the debounce window.

    NOTE(review): the delay is measured from the LAST ACTUAL state change
    (self.last_state_change), not from when the new condition was first
    observed. After a long quiet period time_since_change is large, so a
    flip passes immediately with no delay — this acts as a minimum gap
    between changes rather than the "wait N s after streaming starts"
    the log messages suggest. Confirm this is intended.
    """
    now = time.time()

    # No flip requested — nothing to debounce.
    if new_streaming_state == self.last_streaming_state:
        return False

    time_since_change = now - self.last_state_change

    if new_streaming_state and not self.last_streaming_state:
        # Idle -> streaming: gate on the (shorter) start delay.
        if time_since_change >= self.streaming_start_delay:
            self.last_state_change = now
            return True
        else:
            remaining = self.streaming_start_delay - time_since_change
            # Logged every check interval while waiting.
            logger.info(
                f"Streaming started - waiting {remaining:.1f}s before enforcing limits"
            )

    elif not new_streaming_state and self.last_streaming_state:
        # Streaming -> idle: gate on the (longer) stop delay.
        if time_since_change >= self.streaming_stop_delay:
            self.last_state_change = now
            return True
        else:
            remaining = self.streaming_stop_delay - time_since_change
            logger.info(
                f"Streaming stopped - waiting {remaining:.1f}s before restoring unlimited mode"
            )

    return False
def run(self):
|
||||
logger.info("Starting Jellyfin-qBittorrent monitor")
|
||||
logger.info(f"Jellyfin URL: {self.jellyfin_url}")
|
||||
logger.info(f"qBittorrent URL: {self.qbittorrent_url}")
|
||||
logger.info(f"Check interval: {self.check_interval}s")
|
||||
logger.info(f"Streaming start delay: {self.streaming_start_delay}s")
|
||||
logger.info(f"Streaming stop delay: {self.streaming_stop_delay}s")
|
||||
logger.info(f"Total bandwidth budget: {self.total_bandwidth_budget} bps")
|
||||
logger.info(f"Service buffer: {self.service_buffer} bps")
|
||||
logger.info(f"Default stream bitrate: {self.default_stream_bitrate} bps")
|
||||
logger.info(f"Minimum torrent speed: {self.min_torrent_speed} KB/s")
|
||||
logger.info(f"Stream bitrate headroom: {self.stream_bitrate_headroom}x")
|
||||
if self.webhook_port:
|
||||
logger.info(f"Webhook receiver: {self.webhook_bind}:{self.webhook_port}")
|
||||
|
||||
signal.signal(signal.SIGINT, self.signal_handler)
|
||||
signal.signal(signal.SIGTERM, self.signal_handler)
|
||||
|
||||
self.start_webhook_server()
|
||||
|
||||
while self.running:
|
||||
try:
|
||||
self.sync_qbittorrent_state()
|
||||
|
||||
try:
|
||||
active_streams = self.check_jellyfin_sessions()
|
||||
except ServiceUnavailable:
|
||||
logger.warning("Jellyfin unavailable, maintaining current state")
|
||||
self.sleep_or_wake(self.check_interval)
|
||||
continue
|
||||
|
||||
streaming_active = len(active_streams) > 0
|
||||
|
||||
if active_streams:
|
||||
for stream in active_streams:
|
||||
logger.debug(
|
||||
f"Active stream: {stream['name']} ({stream['bitrate_bps']} bps)"
|
||||
)
|
||||
|
||||
if active_streams != self.last_active_streams:
|
||||
if streaming_active:
|
||||
stream_names = ", ".join(
|
||||
stream["name"] for stream in active_streams
|
||||
)
|
||||
logger.info(
|
||||
f"Active streams ({len(active_streams)}): {stream_names}"
|
||||
)
|
||||
elif len(active_streams) == 0 and self.last_streaming_state:
|
||||
logger.info("No active streaming sessions")
|
||||
|
||||
if self.should_change_state(streaming_active):
|
||||
self.last_streaming_state = streaming_active
|
||||
|
||||
streaming_state = bool(self.last_streaming_state)
|
||||
total_streaming_bps = sum(
|
||||
stream["bitrate_bps"] for stream in active_streams
|
||||
)
|
||||
remaining_bps = (
|
||||
self.total_bandwidth_budget
|
||||
- self.service_buffer
|
||||
- total_streaming_bps
|
||||
)
|
||||
remaining_kbs = max(0, remaining_bps) / 8 / 1024
|
||||
|
||||
if not streaming_state:
|
||||
desired_state = "unlimited"
|
||||
elif streaming_active:
|
||||
if remaining_kbs >= self.min_torrent_speed:
|
||||
desired_state = "throttled"
|
||||
else:
|
||||
desired_state = "paused"
|
||||
else:
|
||||
desired_state = self.current_state
|
||||
|
||||
if desired_state != self.current_state:
|
||||
if desired_state == "unlimited":
|
||||
action = "resume torrents, disable alt speed"
|
||||
elif desired_state == "throttled":
|
||||
action = (
|
||||
"set alt limits "
|
||||
f"dl={int(remaining_kbs)}KB/s ul={int(remaining_kbs)}KB/s, enable alt speed"
|
||||
)
|
||||
else:
|
||||
action = "pause torrents"
|
||||
|
||||
logger.info(
|
||||
"State change %s -> %s | streams=%d total_bps=%d remaining_bps=%d action=%s",
|
||||
self.current_state,
|
||||
desired_state,
|
||||
len(active_streams),
|
||||
total_streaming_bps,
|
||||
remaining_bps,
|
||||
action,
|
||||
)
|
||||
|
||||
if desired_state == "unlimited":
|
||||
if self.torrents_paused:
|
||||
self.resume_all_torrents()
|
||||
self.torrents_paused = False
|
||||
self.use_alt_limits(False)
|
||||
elif desired_state == "throttled":
|
||||
if self.torrents_paused:
|
||||
self.resume_all_torrents()
|
||||
self.torrents_paused = False
|
||||
self.set_alt_speed_limits(remaining_kbs, remaining_kbs)
|
||||
self.use_alt_limits(True)
|
||||
else:
|
||||
if not self.torrents_paused:
|
||||
self.pause_all_torrents()
|
||||
self.torrents_paused = True
|
||||
|
||||
self.current_state = desired_state
|
||||
self.last_active_streams = active_streams
|
||||
self.sleep_or_wake(self.check_interval)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error in monitoring loop: {e}")
|
||||
self.sleep_or_wake(self.check_interval)
|
||||
|
||||
self.restore_normal_limits()
|
||||
logger.info("Monitor stopped")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import os

    # All tunables come from the environment (systemd EnvironmentFile
    # friendly); defaults match a single-host setup.
    env = os.getenv

    monitor = JellyfinQBittorrentMonitor(
        jellyfin_url=env("JELLYFIN_URL", "http://localhost:8096"),
        qbittorrent_url=env("QBITTORRENT_URL", "http://localhost:8080"),
        check_interval=int(env("CHECK_INTERVAL", "30")),
        jellyfin_api_key=env("JELLYFIN_API_KEY"),
        streaming_start_delay=int(env("STREAMING_START_DELAY", "10")),
        streaming_stop_delay=int(env("STREAMING_STOP_DELAY", "60")),
        total_bandwidth_budget=int(env("TOTAL_BANDWIDTH_BUDGET", "30000000")),
        service_buffer=int(env("SERVICE_BUFFER", "5000000")),
        default_stream_bitrate=int(env("DEFAULT_STREAM_BITRATE", "10000000")),
        min_torrent_speed=int(env("MIN_TORRENT_SPEED", "100")),
        stream_bitrate_headroom=float(env("STREAM_BITRATE_HEADROOM", "1.1")),
        webhook_port=int(env("WEBHOOK_PORT", "0")),  # 0 disables the receiver
        webhook_bind=env("WEBHOOK_BIND", "127.0.0.1"),
    )

    monitor.run()
@@ -1,105 +0,0 @@
|
||||
# Helper library for the Jellyfin Webhook plugin: packages the upstream zip,
# and builds scripts that install it into a writable plugins dir and push a
# PluginConfiguration through Jellyfin's HTTP API.
{ pkgs, lib }:
let
  pluginVersion = "18.0.0.0";
  # GUID from the plugin's meta.json; addresses it on /Plugins/<guid>/Configuration.
  pluginGuid = "71552a5a-5c5c-4350-a2ae-ebe451a30173";

  # The plugin as shipped upstream: just the .dll assemblies plus meta.json.
  package = pkgs.stdenvNoCC.mkDerivation {
    pname = "jellyfin-plugin-webhook";
    version = pluginVersion;
    src = pkgs.fetchurl {
      url = "https://repo.jellyfin.org/files/plugin/webhook/webhook_${pluginVersion}.zip";
      hash = "sha256-LFFojiPnBGl9KJ0xVyPBnCmatcaeVbllRwRkz5Z3dqI=";
    };
    nativeBuildInputs = [ pkgs.unzip ];
    unpackPhase = ''unzip "$src"'';
    installPhase = ''
      mkdir -p "$out"
      cp *.dll meta.json "$out/"
    '';
    dontFixup = true; # managed .NET assemblies must not be patched
  };

  # Minimal Handlebars template, base64 encoded. The monitor only needs the POST;
  # NotificationType is parsed for the debug log line.
  # Decoded: {"NotificationType":"{{NotificationType}}"}
  templateB64 = "eyJOb3RpZmljYXRpb25UeXBlIjoie3tOb3RpZmljYXRpb25UeXBlfX0ifQ==";

  # Build a PluginConfiguration payload accepted by Jellyfin's JSON deserializer.
  # Each webhook is `{ name, uri, notificationTypes }`.
  mkConfigJson =
    webhooks:
    builtins.toJSON {
      ServerUrl = "";
      GenericOptions = map (w: {
        NotificationTypes = w.notificationTypes;
        WebhookName = w.name;
        WebhookUri = w.uri;
        EnableMovies = true;
        EnableEpisodes = true;
        EnableVideos = true;
        EnableWebhook = true;
        Template = templateB64;
        Headers = [
          {
            Key = "Content-Type";
            Value = "application/json";
          }
        ];
      }) webhooks;
    };

  # Oneshot that POSTs the plugin configuration. Retries past the window
  # between Jellyfin API health and plugin registration.
  mkConfigureScript =
    { jellyfinUrl, webhooks }:
    pkgs.writeShellScript "jellyfin-webhook-configure" ''
      set -euo pipefail
      export PATH=${
        lib.makeBinPath [
          pkgs.coreutils
          pkgs.curl
        ]
      }

      URL=${lib.escapeShellArg jellyfinUrl}
      # API key is provided via systemd LoadCredential as "jellyfin-api-key".
      AUTH="Authorization: MediaBrowser Token=\"$(cat "$CREDENTIALS_DIRECTORY/jellyfin-api-key")\""
      CONFIG=${lib.escapeShellArg (mkConfigJson webhooks)}

      # Wait up to 120 s for the API, then fail hard if still unhealthy.
      for _ in $(seq 1 120); do curl -sf -o /dev/null "$URL/health" && break; sleep 1; done
      curl -sf -o /dev/null "$URL/health"

      # Plugin registration can lag behind /health; retry the POST for 60 s.
      for _ in $(seq 1 60); do
        if printf '%s' "$CONFIG" | curl -sf -X POST \
          -H "$AUTH" -H "Content-Type: application/json" --data-binary @- \
          "$URL/Plugins/${pluginGuid}/Configuration"; then
          echo "Jellyfin webhook plugin configured"; exit 0
        fi
        sleep 1
      done
      echo "Failed to configure webhook plugin" >&2; exit 1
    '';

  # Materialise a writable copy of the plugin. Jellyfin rewrites meta.json at
  # runtime, so a read-only nix-store symlink would EACCES.
  mkInstallScript =
    { pluginsDir }:
    pkgs.writeShellScript "jellyfin-webhook-install" ''
      set -euo pipefail
      export PATH=${lib.makeBinPath [ pkgs.coreutils ]}
      dst=${lib.escapeShellArg "${pluginsDir}/Webhook_${pluginVersion}"}
      mkdir -p ${lib.escapeShellArg pluginsDir}
      rm -rf "$dst" && mkdir -p "$dst"
      cp ${package}/*.dll ${package}/meta.json "$dst/"
      chmod u+rw "$dst"/*
    '';
in
{
  inherit
    package
    pluginVersion
    pluginGuid
    mkConfigureScript
    mkInstallScript
    ;
}
@@ -1,66 +0,0 @@
|
||||
# Jellyfin media server: ZFS-backed state dirs, Caddy reverse proxy tuned for
# streaming, hardware-accel groups, and a fail2ban jail for its login log.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Ensure the zpool is imported/mounted before jellyfin starts.
    (lib.serviceMountWithZpool "jellyfin" service_configs.zpool_ssds [
      config.services.jellyfin.dataDir
      config.services.jellyfin.cacheDir
    ])
    # tmpfiles "Z": recursively enforce owner/mode on the state dirs.
    (lib.serviceFilePerms "jellyfin" [
      "Z ${config.services.jellyfin.dataDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
      "Z ${config.services.jellyfin.cacheDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
    ])
  ];

  services.jellyfin = {
    enable = true;
    # optimizePackage rebuilds jellyfin-ffmpeg with -O3/-march (see lib).
    package = pkgs.jellyfin.override { jellyfin-ffmpeg = (lib.optimizePackage pkgs.jellyfin-ffmpeg); };

    inherit (service_configs.jellyfin) dataDir cacheDir;
  };

  services.caddy.virtualHosts."jellyfin.${service_configs.https.domain}".extraConfig = ''
    reverse_proxy :${builtins.toString service_configs.ports.private.jellyfin.port} {
      # Disable response buffering for streaming. Caddy's default partial
      # buffering delays fMP4-HLS segments and direct-play responses where
      # Content-Length is known (so auto-flush doesn't trigger).
      flush_interval -1
      transport http {
        # Localhost: compression wastes CPU re-encoding already-compressed media.
        compression off
      }
      header_up X-Real-IP {remote_host}
      header_up X-Forwarded-For {remote_host}
      header_up X-Forwarded-Proto {scheme}
    }
    request_body {
      max_size 4096MB
    }
  '';

  # video/render for GPU transcode device nodes; media group for library access.
  users.users.${config.services.jellyfin.user}.extraGroups = [
    "video"
    "render"
    service_configs.media_group
  ];

  # Protect Jellyfin login from brute force attacks
  services.fail2ban.jails.jellyfin = {
    enabled = true;
    settings = {
      backend = "auto";
      port = "http,https";
      logpath = "${config.services.jellyfin.dataDir}/log/log_*.log";
      # defaults: maxretry=5, findtime=10m, bantime=10m
    };
    filter.Definition = {
      # Matches Jellyfin's "Authentication request ... denied (IP: ...)" lines.
      failregex = ''^.*Authentication request for .* has been denied \(IP: "<ADDR>"\)\..*$'';
      ignoreregex = "";
    };
  };
}
@@ -1,103 +0,0 @@
|
||||
# llama.cpp server (Vulkan build) serving one GGUF model behind Caddy, with
# several systemd workarounds for Intel Arc (ANV/i915) quirks.
{
  pkgs,
  service_configs,
  config,
  inputs,
  lib,
  utils,
  ...
}:
let
  cfg = config.services.llama-cpp;
  modelUrl = "https://huggingface.co/bartowski/google_gemma-4-E2B-it-GGUF/resolve/main/google_gemma-4-E2B-it-IQ2_M.gguf";
  # Alias shown to API clients: the model filename without ".gguf".
  modelAlias = lib.removeSuffix ".gguf" (baseNameOf modelUrl);
in
{
  imports = [
    (lib.mkCaddyReverseProxy {
      subdomain = "llm";
      port = service_configs.ports.private.llama_cpp.port;
    })
  ];

  services.llama-cpp = {
    enable = true;
    # Model is fetched into the nix store at build time, pinned by hash.
    model = toString (
      pkgs.fetchurl {
        url = modelUrl;
        sha256 = "17e869ac54d0e59faa884d5319fc55ad84cd866f50f0b3073fbb25accc875a23";
      }
    );
    port = service_configs.ports.private.llama_cpp.port;
    host = "0.0.0.0";
    # Vulkan build from the llamacpp flake, CPU-tuned via optimizePackage.
    # (patches list kept as an empty extension point.)
    package = lib.optimizePackage (
      inputs.llamacpp.packages.${pkgs.system}.vulkan.overrideAttrs (old: {
        patches = (old.patches or [ ]) ++ [
        ];
      })
    );
    extraFlags = [
      "-ngl"
      "999"
      "-c"
      "65536"
      "-ctk"
      "turbo3"
      "-ctv"
      "turbo3"
      "-fa"
      "on"
      "--api-key-file"
      config.age.secrets.llama-cpp-api-key.path
      "--metrics"
      "--alias"
      modelAlias
      "-b"
      "4096"
      "-ub"
      "4096"
      "--parallel"
      "2"
    ];
  };

  # have to do this in order to get vulkan to work
  systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;

  # ANV driver's turbo3 shader compilation exceeds the default 8 MB thread stack.
  systemd.services.llama-cpp.serviceConfig.LimitSTACK = lib.mkForce "67108864"; # 64 MB soft+hard

  # llama-server tries to create ~/.cache; ProtectSystem=strict + impermanent
  # root make /root read-only. Give it a writable cache dir and point HOME there.
  systemd.services.llama-cpp.serviceConfig.CacheDirectory = "llama-cpp";
  systemd.services.llama-cpp.environment.HOME = "/var/cache/llama-cpp";

  # turbo3 KV cache quantization runs a 14-barrier WHT butterfly per 128-element
  # workgroup in SET_ROWS. With 4 concurrent slots and batch=4096, the combined
  # GPU dispatch can exceed the default i915 CCS engine preempt timeout (7.5s),
  # causing GPU HANG -> ErrorDeviceLost. Increase compute engine timeouts.
  # Note: batch<4096 is not viable -- GDN chunked mode needs a larger compute
  # buffer at smaller batch sizes, exceeding the A380's 6 GB VRAM.
  # '+' prefix runs as root regardless of service User=.
  systemd.services.llama-cpp.serviceConfig.ExecStartPre = [
    "+${pkgs.writeShellScript "set-gpu-compute-timeout" ''
      for f in /sys/class/drm/card*/engine/ccs*/preempt_timeout_ms; do
        [ -w "$f" ] && echo 30000 > "$f"
      done
      for f in /sys/class/drm/card*/engine/ccs*/heartbeat_interval_ms; do
        [ -w "$f" ] && echo 10000 > "$f"
      done
    ''}"
  ];

  # upstream module hardcodes --log-disable; override ExecStart to keep logs
  # so we can see prompt processing progress via journalctl
  systemd.services.llama-cpp.serviceConfig.ExecStart = lib.mkForce (
    "${cfg.package}/bin/llama-server"
    + " --host ${cfg.host}"
    + " --port ${toString cfg.port}"
    + " -m ${cfg.model}"
    + " ${utils.escapeSystemdExecArgs cfg.extraFlags}"
  );

}
@@ -1,59 +0,0 @@
|
||||
# coturn TURN/STUN server (used by Matrix calls); relay abuse is limited by
# denying private/reserved peer ranges, per Synapse's deployment docs.
{
  config,
  lib,
  service_configs,
  ...
}:
{
  services.coturn = {
    enable = true;
    realm = service_configs.https.domain;
    # Shared-secret auth (matches Matrix's turn_secret_file on the other side).
    use-auth-secret = true;
    static-auth-secret-file = config.age.secrets.coturn-auth-secret.path;
    listening-port = service_configs.ports.public.coturn.port;
    tls-listening-port = service_configs.ports.public.coturn_tls.port;
    no-cli = true;

    # recommended security settings from Synapse's coturn docs
    extraConfig = ''
      denied-peer-ip=10.0.0.0-10.255.255.255
      denied-peer-ip=192.168.0.0-192.168.255.255
      denied-peer-ip=172.16.0.0-172.31.255.255
      denied-peer-ip=0.0.0.0-0.255.255.255
      denied-peer-ip=100.64.0.0-100.127.255.255
      denied-peer-ip=169.254.0.0-169.254.255.255
      denied-peer-ip=192.0.0.0-192.0.0.255
      denied-peer-ip=198.18.0.0-198.19.255.255
      denied-peer-ip=198.51.100.0-198.51.100.255
      denied-peer-ip=203.0.113.0-203.0.113.255
      denied-peer-ip=240.0.0.0-255.255.255.255
      denied-peer-ip=::1
      denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
      denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
      denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
      denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
    '';
  };

  # coturn needs these ports open
  networking.firewall = {
    allowedTCPPorts = [
      service_configs.ports.public.coturn.port
      service_configs.ports.public.coturn_tls.port
    ];
    allowedUDPPorts = [
      service_configs.ports.public.coturn.port
      service_configs.ports.public.coturn_tls.port
    ];
    # relay port range
    allowedUDPPortRanges = [
      {
        from = config.services.coturn.min-port;
        to = config.services.coturn.max-port;
      }
    ];
  };
}
@@ -1,7 +0,0 @@
|
||||
# Aggregator module for the Matrix stack: homeserver, TURN, and LiveKit SFU.
{
  imports = [
    ./matrix.nix
    ./coturn.nix
    ./livekit.nix
  ];
}
@@ -1,51 +0,0 @@
|
||||
# LiveKit SFU + lk-jwt-service for Element Call; Caddy routes JWT endpoints
# to the token service and everything else to LiveKit itself.
{
  service_configs,
  ...
}:
let
  # Shared API key file used by both livekit and lk-jwt-service.
  keyFile = ../../secrets/livekit_keys;
in
{
  services.livekit = {
    enable = true;
    inherit keyFile;
    openFirewall = true;

    settings = {
      port = service_configs.ports.public.livekit.port;
      # HTTP API only on loopback; media goes over the RTC port range.
      bind_addresses = [ "127.0.0.1" ];

      rtc = {
        port_range_start = 50100;
        port_range_end = 50200;
        use_external_ip = true;
      };

      # Disable LiveKit's built-in TURN; coturn is already running
      turn = {
        enabled = false;
      };

      logging = {
        level = "info";
      };
    };
  };

  # Issues LiveKit JWTs for Matrix clients (MSC4143 rtc_foci).
  services.lk-jwt-service = {
    enable = true;
    inherit keyFile;
    livekitUrl = "wss://${service_configs.livekit.domain}";
    port = service_configs.ports.private.lk_jwt.port;
  };

  services.caddy.virtualHosts."${service_configs.livekit.domain}".extraConfig = ''
    @jwt path /sfu/get /healthz
    handle @jwt {
      reverse_proxy :${builtins.toString service_configs.ports.private.lk_jwt.port}
    }
    handle {
      reverse_proxy :${builtins.toString service_configs.ports.public.livekit.port}
    }
  '';
}
@@ -1,73 +0,0 @@
|
||||
# Matrix homeserver (continuwuity) with coturn integration, .well-known
# discovery documents, and a duplicate vhost on the federation port.
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "continuwuity" service_configs.zpool_ssds [
      "/var/lib/private/continuwuity"
    ])
    (lib.serviceFilePerms "continuwuity" [
      "Z /var/lib/private/continuwuity 0770 ${config.services.matrix-continuwuity.user} ${config.services.matrix-continuwuity.group}"
    ])
    (lib.mkCaddyReverseProxy {
      domain = service_configs.matrix.domain;
      port = service_configs.ports.private.matrix.port;
    })
  ];

  services.matrix-continuwuity = {
    enable = true;

    settings.global = {
      port = [ service_configs.ports.private.matrix.port ];
      server_name = service_configs.https.domain;
      # Registration is open but gated behind a token.
      allow_registration = true;
      registration_token_file = config.age.secrets.matrix-reg-token.path;

      new_user_displayname_suffix = "";

      trusted_servers = [
        "matrix.org"
        "constellatory.net"
        "tchncs.de"
        "envs.net"
      ];

      address = [
        "0.0.0.0"
      ];

      # TURN server config (coturn)
      turn_secret_file = config.age.secrets.matrix-turn-secret.path;
      turn_uris = [
        "turn:${service_configs.https.domain}?transport=udp"
        "turn:${service_configs.https.domain}?transport=tcp"
      ];
      turn_ttl = 86400;
    };
  };

  # Serve the .well-known discovery docs from the apex domain; mkBefore so
  # they match before any catch-all handlers on that vhost.
  services.caddy.virtualHosts.${service_configs.https.domain}.extraConfig = lib.mkBefore ''
    header /.well-known/matrix/* Content-Type application/json
    header /.well-known/matrix/* Access-Control-Allow-Origin *
    respond /.well-known/matrix/server `{"m.server": "${service_configs.matrix.domain}:${builtins.toString service_configs.ports.public.https.port}"}`
    respond /.well-known/matrix/client `{"m.server":{"base_url":"https://${service_configs.matrix.domain}"},"m.homeserver":{"base_url":"https://${service_configs.matrix.domain}"},"org.matrix.msc3575.proxy":{"base_url":"https://${config.services.matrix-continuwuity.settings.global.server_name}"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://${service_configs.livekit.domain}"}]}`
  '';

  # Exact duplicate for federation port
  services.caddy.virtualHosts."${service_configs.matrix.domain}:${builtins.toString service_configs.ports.public.matrix_federation.port}".extraConfig =
    config.services.caddy.virtualHosts."${service_configs.matrix.domain}".extraConfig;

  # for federation
  networking.firewall.allowedTCPPorts = [
    service_configs.ports.public.matrix_federation.port
  ];

  # for federation
  networking.firewall.allowedUDPPorts = [
    service_configs.ports.public.matrix_federation.port
  ];
}
@@ -1,192 +0,0 @@
|
||||
# Fabric Minecraft server (nix-minecraft) with tuned JVM flags, pinned mods,
# and a Caddy-served squaremap web map.
{
  pkgs,
  service_configs,
  lib,
  config,
  inputs,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "minecraft-server-${service_configs.minecraft.server_name}"
      service_configs.zpool_ssds
      [
        "${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}"
      ]
    )
    inputs.nix-minecraft.nixosModules.minecraft-servers
    # tmpfiles: "Z" enforces recursively; "z" non-recursively (traversal bits).
    (lib.serviceFilePerms "minecraft-server-${service_configs.minecraft.server_name}" [
      "Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 700 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
      "Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web 750 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
      # Allow caddy (in minecraft group) to traverse to squaremap/web for map.gardling.com
      "z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 710 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
      "z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap 710 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
    ])
  ];

  boot.kernel.sysctl = {
    # Disable autogroup for better scheduling of game server threads
    "kernel.sched_autogroup_enabled" = 0;
  };

  services.minecraft-servers = {
    enable = true;
    eula = true;
    dataDir = service_configs.minecraft.parent_dir;
    openFirewall = true;

    servers.${service_configs.minecraft.server_name} = {
      enable = true;
      package = pkgs.fabricServers.fabric-26_1_2.override { jre_headless = pkgs.openjdk25_headless; };

      jvmOpts = lib.concatStringsSep " " [
        # Memory
        "-Xmx${builtins.toString service_configs.minecraft.memory.heap_size_m}M"
        "-Xms${builtins.toString service_configs.minecraft.memory.heap_size_m}M"

        # GC
        "-XX:+UseZGC"
        "-XX:+ZGenerational"

        # added in new minecraft version
        "-XX:+UseCompactObjectHeaders"
        "-XX:+UseStringDeduplication"

        # Base JVM optimizations (brucethemoose/Minecraft-Performance-Flags-Benchmarks)
        "-XX:+UnlockExperimentalVMOptions"
        "-XX:+UnlockDiagnosticVMOptions"
        "-XX:+AlwaysActAsServerClassMachine"
        "-XX:+AlwaysPreTouch"
        "-XX:+DisableExplicitGC"
        "-XX:+UseNUMA"
        "-XX:+PerfDisableSharedMem"
        "-XX:+UseFastUnorderedTimeStamps"
        "-XX:+UseCriticalJavaThreadPriority"
        "-XX:ThreadPriorityPolicy=1"
        "-XX:AllocatePrefetchStyle=3"
        "-XX:-DontCompileHugeMethods"
        "-XX:MaxNodeLimit=240000"
        "-XX:NodeLimitFudgeFactor=8000"
        "-XX:ReservedCodeCacheSize=400M"
        "-XX:NonNMethodCodeHeapSize=12M"
        "-XX:ProfiledCodeHeapSize=194M"
        "-XX:NonProfiledCodeHeapSize=194M"
        "-XX:NmethodSweepActivity=1"
        "-XX:+UseVectorCmov"

        # Large pages (requires vm.nr_hugepages sysctl)
        "-XX:+UseLargePages"
        "-XX:LargePageSizeInBytes=${builtins.toString service_configs.minecraft.memory.large_page_size_m}M"
      ];

      serverProperties = {
        server-port = service_configs.ports.public.minecraft.port;
        enforce-whitelist = true;
        gamemode = "survival";
        white-list = true;
        difficulty = "easy";
        motd = "A Minecraft Server";
        view-distance = 10;
        simulation-distance = 6;
        sync-chunk-writes = false;
        spawn-protection = 0;
      };

      # Player whitelist kept out of the public tree.
      whitelist = import ../secrets/minecraft-whitelist.nix;

      # Mods are pinned by URL + sha512 and symlinked into the server dir.
      symlinks = {
        "mods" = pkgs.linkFarmFromDrvs "mods" (
          with pkgs;
          builtins.attrValues {
            FabricApi = fetchurl {
              url = "https://cdn.modrinth.com/data/P7dR8mSH/versions/fm7UYECV/fabric-api-0.145.4%2B26.1.2.jar";
              sha512 = "ffd5ef62a745f76cd2e5481252cb7bc67006c809b4f436827d05ea22c01d19279e94a3b24df3d57e127af1cd08440b5de6a92a4ea8f39b2dcbbe1681275564c3";
            };

            # No 26.1.2 version available
            # FerriteCore = fetchurl {
            #   url = "https://cdn.modrinth.com/data/uXXizFIs/versions/d5ddUdiB/ferritecore-9.0.0-fabric.jar";
            #   sha512 = "d81fa97e11784c19d42f89c2f433831d007603dd7193cee45fa177e4a6a9c52b384b198586e04a0f7f63cd996fed713322578bde9a8db57e1188854ae5cbe584";
            # };

            Lithium = fetchurl {
              url = "https://cdn.modrinth.com/data/gvQqBUqZ/versions/v2xoRvRP/lithium-fabric-0.24.1%2Bmc26.1.2.jar";
              sha512 = "8711bc8c6f39be4c8511becb7a68e573ced56777bd691639f2fc62299b35bb4ccd2efe4a39bd9c308084b523be86a5f5c4bf921ab85f7a22bf075d8ea2359621";
            };

            NoChatReports = fetchurl {
              url = "https://cdn.modrinth.com/data/qQyHxfxd/versions/2yrLNE3S/NoChatReports-FABRIC-26.1-v2.19.0.jar";
              sha512 = "94d58a1a4cde4e3b1750bdf724e65c5f4ff3436c2532f36a465d497d26bf59f5ac996cddbff8ecdfed770c319aa2f2dcc9c7b2d19a35651c2a7735c5b2124dad";
            };

            squaremap = fetchurl {
              url = "https://cdn.modrinth.com/data/PFb7ZqK6/versions/UBN6MFvH/squaremap-fabric-mc26.1.2-1.3.13.jar";
              sha512 = "97bc130184b5d0ddc4ff98a15acef6203459d982e0e2afbd49a2976d546c55a86ef22b841378b51dd782be9b2cfbe4cfa197717f2b7f6800fd8b4ff4df6e564f";
            };

            scalablelux = fetchurl {
              url = "https://cdn.modrinth.com/data/Ps1zyz6x/versions/gYbHVCz8/ScalableLux-0.2.0%2Bfabric.2b63825-all.jar";
              sha512 = "48565a4d8a1cbd623f0044086d971f2c0cf1c40e1d0b6636a61d41512f4c1c1ddff35879d9dba24b088a670ee254e2d5842d13a30b6d76df23706fa94ea4a58b";
            };

            c2me = fetchurl {
              url = "https://cdn.modrinth.com/data/VSNURh3q/versions/yrNQQ1AQ/c2me-fabric-mc26.1.2-0.3.7%2Balpha.0.65.jar";
              sha512 = "6666ebaa3bfa403e386776590fc845b7c306107d37ebc7b1be3b057893fbf9f933abb2314c171d7fe19c177cf8823cb47fdc32040d34a9704f5ab656dd5d93f8";
            };

            # No 26.1 version available
            # krypton = fetchurl {
            #   url = "https://cdn.modrinth.com/data/fQEb0iXm/versions/O9LmWYR7/krypton-0.2.10.jar";
            #   sha512 = "4dcd7228d1890ddfc78c99ff284b45f9cf40aae77ef6359308e26d06fa0d938365255696af4cc12d524c46c4886cdcd19268c165a2bf0a2835202fe857da5cab";
            # };

            # No 26.1.2 version available
            # disconnect-packet-fix = fetchurl {
            #   url = "https://cdn.modrinth.com/data/rd9rKuJT/versions/x9gVeaTU/disconnect-packet-fix-fabric-2.1.0.jar";
            #   sha512 = "bf84d02bdcd737706df123e452dd31ef535580fa4ced6af1e4ceea022fef94e4764775253e970b8caa1292e2fa00eb470557f70b290fafdb444479fa801b07a1";
            # };

            packet-fixer = fetchurl {
              url = "https://cdn.modrinth.com/data/c7m1mi73/versions/M8PqPQr4/packetfixer-fabric-3.3.4-26.1.2.jar";
              sha512 = "698020edba2a1fd80bb282bfd4832a00d6447b08eaafbc2e16a8f3bf89e187fc9a622c92dfe94ae140dd485fc0220a86890f12158ec08054e473fef8337829bc";
            };

            # mVUS fork: upstream ModernFix no longer ships Fabric builds
            modernfix = fetchurl {
              url = "https://cdn.modrinth.com/data/TjSm1wrD/versions/dqQ7mabN/modernfix-5.26.2-build.1.jar";
              sha512 = "fbef93c2dabf7bcd0ccd670226dfc4958f7ebe5d8c2b1158e88a65e6954a40f595efd58401d2a3dbb224660dca5952199cf64df29100e7bd39b1b1941290b57b";
            };

            debugify = fetchurl {
              url = "https://cdn.modrinth.com/data/QwxR6Gcd/versions/mfTTfiKn/debugify-26.1.2%2B1.0.jar";
              sha512 = "63db82f2163b9f7fc27ebea999ffcd7a961054435b3ed7d8bf32d905b5f60ce81715916b7fd4e9509dd23703d5492059f3ce7e5f176402f8ed4f985a415553f4";
            };
          }
        );
      };
    };
  };

  # NOTE(review): unit name "minecraft-server-main" is hard-coded — assumes
  # service_configs.minecraft.server_name == "main"; confirm.
  systemd.services.minecraft-server-main = {
    serviceConfig = {
      Nice = -5;
      IOSchedulingPriority = 0;
      LimitMEMLOCK = "infinity"; # Required for large pages
    };
  };

  services.caddy.virtualHosts = lib.mkIf (config.services.caddy.enable) {
    "map.${service_configs.https.domain}".extraConfig = ''
      root * ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web
      file_server browse
    '';
  };

  users.users = lib.mkIf (config.services.caddy.enable) {
    ${config.services.caddy.user}.extraGroups = [
      # for `map.gardling.com`
      config.services.minecraft-servers.group
    ];
  };

}
@@ -1,36 +0,0 @@
|
||||
{
  config,
  service_configs,
  lib,
  ...
}:
let
  stateDir = "/var/lib/private/mollysocket";
  webPort = service_configs.ports.private.mollysocket.port;
in
{
  imports = [
    (lib.serviceMountWithZpool "mollysocket" service_configs.zpool_ssds [ stateDir ])
    (lib.serviceFilePerms "mollysocket" [ "Z ${stateDir} 0700 root root" ])
  ];

  services.mollysocket = {
    enable = true;

    # Secrets (including MOLLY_ALLOWED_UUIDS) come from the age-encrypted env file.
    environmentFile = config.age.secrets.mollysocket-env.path;

    settings = {
      host = "127.0.0.1";
      port = webPort;

      # Local-network endpoints are denied by default for security, so the
      # self-hosted ntfy instance must be allow-listed explicitly.
      allowed_endpoints = [ "https://${service_configs.ntfy.domain}" ];
      # allowed_uuids set via MOLLY_ALLOWED_UUIDS in environmentFile
    };
  };

  # h2c: Caddy terminates TLS and proxies HTTP/2 cleartext to the backend.
  services.caddy.virtualHosts."${service_configs.mollysocket.domain}".extraConfig = ''
    reverse_proxy h2c://127.0.0.1:${toString webPort}
  '';
}
|
||||
@@ -1,8 +0,0 @@
|
||||
{
  # Monero stack: daemon (monerod), p2pool sidechain node, xmrig miner,
  # and the CPU-load-based auto-pause controller for the miner.
  imports = [
    ./monero.nix
    ./p2pool.nix
    ./xmrig.nix
    ./xmrig-auto-pause.nix
  ];
}
|
||||
@@ -1,37 +0,0 @@
|
||||
{
  service_configs,
  lib,
  ...
}:
let
  dataDir = service_configs.monero.dataDir;
  p2pPort = service_configs.ports.public.monero.port;
  rpcPort = service_configs.ports.public.monero_rpc.port;
  zmqPort = service_configs.ports.private.monero_zmq.port;
in
{
  imports = [
    (lib.serviceMountWithZpool "monero" service_configs.zpool_ssds [ dataDir ])
    (lib.serviceFilePerms "monero" [ "Z ${dataDir} 0700 monero monero" ])
  ];

  services.monero = {
    enable = true;
    inherit dataDir;

    # Restricted RPC is safe to expose publicly (read-only subset).
    rpc = {
      address = "0.0.0.0";
      port = rpcPort;
      restricted = true;
    };

    # ZMQ pub socket stays loopback-only; p2pool consumes it locally.
    extraConfig = ''
      p2p-bind-port=${toString p2pPort}
      zmq-pub=tcp://127.0.0.1:${toString zmqPort}
      db-sync-mode=fast:async:1000000000bytes
      public-node=1
      confirm-external-bind=1
    '';
  };

  networking.firewall.allowedTCPPorts = [
    p2pPort
    rpcPort
  ];
}
|
||||
@@ -1,39 +0,0 @@
|
||||
{
  config,
  service_configs,
  lib,
  ...
}:
let
  p2poolCfg = service_configs.p2pool;
  ports = service_configs.ports;
in
{
  imports = [
    (lib.serviceMountWithZpool "p2pool" service_configs.zpool_ssds [
      p2poolCfg.dataDir
    ])
    (lib.serviceFilePerms "p2pool" [
      "Z ${p2poolCfg.dataDir} 0700 p2pool p2pool"
    ])
  ];

  # p2pool: decentralized Monero mining pool. Talks to the local monerod
  # over RPC/ZMQ and exposes a stratum endpoint for the local xmrig miner.
  services.p2pool = {
    enable = true;
    dataDir = p2poolCfg.dataDir;
    walletAddress = p2poolCfg.walletAddress;
    sidechain = "nano";
    host = "127.0.0.1";
    rpcPort = ports.public.monero_rpc.port;
    zmqPort = ports.private.monero_zmq.port;
    # FIX: pass the flag and its value as separate argv elements. The old
    # form was a single list element " --stratum 0.0.0.0:PORT" with a stray
    # leading space — that only works if the module joins extraArgs with
    # spaces; treated as a real argv list it becomes one malformed argument.
    extraArgs = [
      "--stratum"
      "0.0.0.0:${toString ports.private.p2pool_stratum.port}"
    ];
  };

  # Ensure p2pool starts after monero is ready
  systemd.services.p2pool = {
    after = [ "monero.service" ];
    wants = [ "monero.service" ];
  };

  networking.firewall.allowedTCPPorts = [
    ports.public.p2pool_p2p.port
  ];
}
|
||||
@@ -1,39 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
lib.mkIf config.services.xmrig.enable {
  systemd.services.xmrig-auto-pause = {
    description = "Auto-pause xmrig when other services need CPU";
    after = [ "xmrig.service" ];
    wantedBy = [ "multi-user.target" ];

    # Tuning knobs consumed by xmrig-auto-pause.py via the environment.
    environment = {
      POLL_INTERVAL = "3";
      GRACE_PERIOD = "15";
      # Background services (qbittorrent, bitmagnet, postgresql, etc.) produce
      # 15-25% non-nice CPU during normal operation. The stop threshold must
      # sit above transient spikes; the resume threshold must be below the
      # steady-state floor to avoid restarting xmrig while services are active.
      CPU_STOP_THRESHOLD = "40";
      CPU_RESUME_THRESHOLD = "10";
      STARTUP_COOLDOWN = "10";
      STATE_DIR = "/var/lib/xmrig-auto-pause";
    };

    serviceConfig = {
      ExecStart = "${pkgs.python3}/bin/python3 ${./xmrig-auto-pause.py}";
      Restart = "always";
      RestartSec = "10s";

      # Writable state dir for the pause marker file.
      StateDirectory = "xmrig-auto-pause";

      # Sandboxing: the script only reads /proc/stat and drives systemctl.
      NoNewPrivileges = true;
      ProtectHome = true;
      ProtectSystem = "strict";
      PrivateTmp = true;
      MemoryDenyWriteExecute = true;
      RestrictAddressFamilies = [
        "AF_UNIX" # systemctl talks to systemd over D-Bus unix socket
      ];
    };
  };
}
|
||||
@@ -1,210 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Auto-pause xmrig when other services need CPU.
|
||||
|
||||
Monitors non-nice CPU usage from /proc/stat. Since xmrig runs at Nice=19,
|
||||
its CPU time lands in the 'nice' column and is excluded from the metric.
|
||||
When real workload (user + system + irq + softirq) exceeds the stop
|
||||
threshold, stops xmrig. When it drops below the resume threshold for
|
||||
GRACE_PERIOD seconds, restarts xmrig.
|
||||
|
||||
This replaces per-service pause scripts with a single general-purpose
|
||||
monitor that handles any CPU-intensive workload (gitea workers, llama-cpp
|
||||
inference, etc.) without needing to know about specific processes.
|
||||
|
||||
Why scheduler priority alone isn't enough:
|
||||
Nice=19 / SCHED_IDLE only affects which thread gets the next time slice.
|
||||
RandomX's 2MB-per-thread scratchpad (24MB across 12 threads) pollutes
|
||||
the shared 32MB L3 cache, and its memory access pattern saturates DRAM
|
||||
bandwidth. Other services run slower even though they aren't denied CPU
|
||||
time. The only fix is to stop xmrig entirely when real work is happening.
|
||||
|
||||
Hysteresis:
|
||||
The stop threshold is set higher than the resume threshold to prevent
|
||||
oscillation. When xmrig runs, its L3 cache pressure makes other processes
|
||||
appear ~3-8% busier. A single threshold trips on this indirect effect,
|
||||
causing stop/start thrashing. Separate thresholds break the cycle: the
|
||||
resume threshold confirms the system is truly idle, while the stop
|
||||
threshold requires genuine workload above xmrig's indirect pressure.
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
# Seconds between /proc/stat samples.
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "3"))
# Seconds the system must stay below the resume threshold before xmrig is
# restarted.
GRACE_PERIOD = float(os.environ.get("GRACE_PERIOD", "15"))
# Percentage of total CPU ticks that non-nice processes must use to trigger
# a pause. On a 12-thread system, one fully loaded core ≈ 8.3% of total.
# Default 15% requires roughly two busy cores, which avoids false positives
# from xmrig's L3 cache pressure inflating other processes' apparent CPU.
CPU_STOP_THRESHOLD = float(os.environ.get("CPU_STOP_THRESHOLD", "15"))
# Percentage below which the system is considered idle enough to resume
# mining. Lower than the stop threshold to provide hysteresis.
CPU_RESUME_THRESHOLD = float(os.environ.get("CPU_RESUME_THRESHOLD", "5"))
# After starting xmrig, ignore CPU spikes for this many seconds to let
# RandomX dataset initialization complete (~4s on the target hardware)
# without retriggering a stop.
STARTUP_COOLDOWN = float(os.environ.get("STARTUP_COOLDOWN", "10"))
# Directory for persisting pause state across script restarts. Without
# this, a restart while xmrig is paused loses the paused_by_us flag and
# xmrig stays stopped permanently.
STATE_DIR = os.environ.get("STATE_DIR", "")
# Empty string disables persistence entirely (all state-file helpers no-op).
_PAUSE_FILE = os.path.join(STATE_DIR, "paused") if STATE_DIR else ""
|
||||
|
||||
|
||||
def log(msg):
    """Write a tagged message to stderr, flushed immediately for journald."""
    sys.stderr.write(f"[xmrig-auto-pause] {msg}\n")
    sys.stderr.flush()
|
||||
|
||||
|
||||
def read_cpu_ticks():
    """Sample the aggregate CPU counters from /proc/stat.

    Returns:
        (total_ticks, real_work_ticks) — real_work excludes the 'nice'
        column (where xmrig's Nice=19 time lands) as well as idle/iowait.
    """
    with open("/proc/stat") as stat:
        fields = stat.readline().split()
    # Column order: cpu user nice system idle iowait irq softirq steal
    user, nice, system, idle, iowait, irq, softirq, steal = map(int, fields[1:9])
    busy_real = user + system + irq + softirq
    grand_total = busy_real + nice + idle + iowait + steal
    return grand_total, busy_real
|
||||
|
||||
|
||||
def is_active(unit):
    """Return True when the given systemd unit is in the 'active' state."""
    # `systemctl is-active` exits 0 only for an active unit; output is discarded.
    probe = subprocess.run(
        ["systemctl", "is-active", "--quiet", unit],
        capture_output=True,
    )
    return probe.returncode == 0
|
||||
|
||||
|
||||
def systemctl(action, unit):
    """Run `systemctl <action> <unit>`; log failures, return success as bool."""
    proc = subprocess.run(
        ["systemctl", action, unit],
        capture_output=True,
        text=True,
    )
    ok = proc.returncode == 0
    if not ok:
        log(f"systemctl {action} {unit} failed (rc={proc.returncode}): {proc.stderr.strip()}")
    return ok
|
||||
|
||||
|
||||
def _save_paused(paused):
    """Persist the pause flag so a restarted script resumes where it left off."""
    if not _PAUSE_FILE:
        return  # persistence disabled (no STATE_DIR configured)
    try:
        if paused:
            # Touch the marker file; its contents are irrelevant.
            with open(_PAUSE_FILE, "w"):
                pass
        else:
            os.remove(_PAUSE_FILE)
    except OSError:
        # Best-effort: a missing file on remove or an unwritable dir is fine.
        pass
|
||||
|
||||
|
||||
def _load_paused():
    """Return True when a previous instance left a pause marker behind."""
    return bool(_PAUSE_FILE) and os.path.isfile(_PAUSE_FILE)
|
||||
|
||||
|
||||
def main():
    """Poll /proc/stat forever; stop xmrig under real load, restart when idle.

    State machine per poll tick:
      - compute non-nice CPU % over the last interval,
      - above CPU_STOP_THRESHOLD: stop xmrig (claiming ownership only if it
        was actually running),
      - below CPU_RESUME_THRESHOLD for GRACE_PERIOD while we own the pause:
        start xmrig again, then honor STARTUP_COOLDOWN.
    Ownership is persisted via _save_paused/_load_paused so a restart of this
    script does not strand xmrig in the stopped state.
    """
    paused_by_us = _load_paused()
    idle_since = None
    started_at = None  # monotonic time when we last started xmrig
    prev_total = None
    prev_work = None

    if paused_by_us:
        log("Recovered pause state from previous instance")

    log(
        f"Starting: poll={POLL_INTERVAL}s grace={GRACE_PERIOD}s "
        f"stop={CPU_STOP_THRESHOLD}% resume={CPU_RESUME_THRESHOLD}% "
        f"cooldown={STARTUP_COOLDOWN}s"
    )

    while True:
        total, work = read_cpu_ticks()

        # First sample only establishes a baseline; no percentage yet.
        if prev_total is None:
            prev_total = total
            prev_work = work
            time.sleep(POLL_INTERVAL)
            continue

        dt = total - prev_total
        # Guard against a zero/negative tick delta (e.g. counter anomaly).
        if dt <= 0:
            prev_total = total
            prev_work = work
            time.sleep(POLL_INTERVAL)
            continue

        real_work_pct = ((work - prev_work) / dt) * 100
        prev_total = total
        prev_work = work

        # Don't act during startup cooldown — RandomX dataset init causes
        # a transient CPU spike that would immediately retrigger a stop.
        if started_at is not None:
            if time.monotonic() - started_at < STARTUP_COOLDOWN:
                time.sleep(POLL_INTERVAL)
                continue
            # Cooldown expired — verify xmrig survived startup. If it
            # crashed during init (hugepage failure, pool unreachable, etc.),
            # re-enter the pause/retry cycle rather than silently leaving
            # xmrig dead.
            if not is_active("xmrig.service"):
                log("xmrig died during startup cooldown — will retry")
                paused_by_us = True
                _save_paused(True)
            started_at = None

        above_stop = real_work_pct > CPU_STOP_THRESHOLD
        below_resume = real_work_pct <= CPU_RESUME_THRESHOLD

        if above_stop:
            idle_since = None
            if paused_by_us and is_active("xmrig.service"):
                # Something else restarted xmrig (deploy, manual start, etc.)
                # while we thought it was stopped. Reset ownership so we can
                # manage it again.
                log("xmrig was restarted externally while paused — reclaiming")
                paused_by_us = False
                _save_paused(False)
            if not paused_by_us:
                # Only claim ownership if xmrig is actually running.
                # If something else stopped it (e.g. UPS battery hook),
                # don't interfere — we'd wrongly restart it later.
                if is_active("xmrig.service"):
                    log(f"Real workload detected ({real_work_pct:.1f}% CPU) — stopping xmrig")
                    if systemctl("stop", "xmrig.service"):
                        paused_by_us = True
                        _save_paused(True)
        elif paused_by_us:
            if below_resume:
                if idle_since is None:
                    idle_since = time.monotonic()
                elif time.monotonic() - idle_since >= GRACE_PERIOD:
                    log(f"Workload ended ({real_work_pct:.1f}% CPU) past grace period — starting xmrig")
                    if systemctl("start", "xmrig.service"):
                        paused_by_us = False
                        _save_paused(False)
                        started_at = time.monotonic()
                        idle_since = None
            else:
                # Between thresholds — not idle enough to resume.
                idle_since = None

        time.sleep(POLL_INTERVAL)


if __name__ == "__main__":
    main()
|
||||
@@ -1,59 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
let
  # Number of mining threads == logical CPUs dedicated to RandomX.
  minerThreads = 12;
  stratumUrl = "127.0.0.1:${toString service_configs.ports.private.p2pool_stratum.port}";
in
{
  services.xmrig = {
    enable = true;
    package = lib.optimizePackage pkgs.xmrig;

    settings = {
      autosave = true;

      cpu = {
        enabled = true;
        huge-pages = true;
        hw-aes = true;
        # Pin one RandomX thread per logical CPU 0..minerThreads-1.
        rx = lib.range 0 (minerThreads - 1);
      };

      randomx."1gb-pages" = true;

      # CPU mining only — no GPU backends.
      opencl = false;
      cuda = false;

      # Mine against the local p2pool stratum endpoint (loopback, no TLS).
      pools = [
        {
          url = stratumUrl;
          tls = false;
        }
      ];
    };
  };

  # Keep the miner at the lowest scheduling priority so it only consumes
  # otherwise-idle CPU and disk time.
  systemd.services.xmrig.serviceConfig = {
    Nice = 19;
    CPUSchedulingPolicy = "idle";
    IOSchedulingClass = "idle";
  };

  # Stop mining on UPS battery to conserve power
  services.apcupsd.hooks = lib.mkIf config.services.apcupsd.enable {
    onbattery = "systemctl stop xmrig";
    offbattery = "systemctl start xmrig";
  };

  # Reserve 1GB huge pages for RandomX (dataset is ~2GB)
  boot.kernelParams = [
    "hugepagesz=1G"
    "hugepages=3"
  ];
}
|
||||
@@ -1,6 +0,0 @@
|
||||
{
  # ntfy push-notification stack: the server itself plus alert wiring.
  imports = [
    ./ntfy.nix
    ./ntfy-alerts.nix
  ];
}
|
||||
@@ -1,15 +0,0 @@
|
||||
{
  config,
  lib,
  service_configs,
  ...
}:
# Only wire up alert publishing when the local ntfy server is enabled.
lib.mkIf config.services.ntfy-sh.enable {
  services.ntfyAlerts = {
    enable = true;
    serverUrl = "https://${service_configs.ntfy.domain}";
    # Topic and auth token are age-encrypted secrets, read at runtime.
    topicFile = config.age.secrets.ntfy-alerts-topic.path;
    tokenFile = config.age.secrets.ntfy-alerts-token.path;
  };
}
|
||||
@@ -1,34 +0,0 @@
|
||||
{
  config,
  service_configs,
  lib,
  ...
}:
let
  stateDir = "/var/lib/private/ntfy-sh";
  ntfyDomain = service_configs.ntfy.domain;
  ntfyPort = service_configs.ports.private.ntfy.port;
in
{
  imports = [
    (lib.serviceMountWithZpool "ntfy-sh" service_configs.zpool_ssds [ stateDir ])
    (lib.serviceFilePerms "ntfy-sh" [
      "Z ${stateDir} 0700 ${config.services.ntfy-sh.user} ${config.services.ntfy-sh.group}"
    ])
    (lib.mkCaddyReverseProxy {
      domain = ntfyDomain;
      port = ntfyPort;
    })
  ];

  services.ntfy-sh = {
    enable = true;

    settings = {
      base-url = "https://${ntfyDomain}";
      # Bind loopback only; Caddy terminates TLS in front.
      listen-http = "127.0.0.1:${toString ntfyPort}";
      behind-proxy = true;
      # Lock down the instance: login required, no self-service signups.
      auth-default-access = "deny-all";
      enable-login = true;
      enable-signup = false;
    };
  };
}
|
||||
@@ -1,62 +0,0 @@
|
||||
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  # Oneshot helper invoked by sanoid (below) to force a CHECKPOINT before
  # each ZFS snapshot. Runs psql as the postgres OS user via runuser.
  pgCheckpoint = pkgs.writeShellScript "pg-checkpoint" ''
    # Flush PostgreSQL dirty buffers to disk before ZFS snapshot so the
    # on-disk state is consistent and the snapshot is recoverable.
    # On failure: log a warning but exit 0 so sanoid still takes the
    # snapshot (an inconsistent snapshot beats no snapshot).
    if ! ${pkgs.systemd}/bin/systemctl is-active --quiet postgresql.service; then
      echo "postgresql is not running, skipping checkpoint" >&2
      exit 0
    fi

    if ${pkgs.coreutils}/bin/timeout 120 \
      ${pkgs.util-linux}/bin/runuser -u postgres -- \
      ${lib.getExe' config.services.postgresql.package "psql"} \
      -v ON_ERROR_STOP=1 -c "CHECKPOINT" 2>&1; then
      echo "postgresql checkpoint completed"
    else
      echo "WARNING: postgresql checkpoint failed, snapshot may be inconsistent" >&2
    fi

    # Always exit 0 — sanoid must run regardless
    exit 0
  '';
in
{
  imports = [
    (lib.serviceMountWithZpool "postgresql" service_configs.zpool_ssds [
      config.services.postgresql.dataDir
    ])
    (lib.serviceFilePerms "postgresql" [
      "Z ${config.services.postgresql.dataDir} 0700 postgres postgres"
    ])
  ];

  services.postgresql = {
    enable = true;
    package = pkgs.postgresql_16;
    dataDir = service_configs.postgres.dataDir;
    settings = {
      # ZFS provides checksumming and atomic writes, making PostgreSQL's
      # full_page_writes redundant. Disabling reduces write amplification
      # and SSD wear on the zpool.
      # Did this in conjunction with setting recordsize=8k
      # on the zvolume this is on
      full_page_writes = false;
    };
  };

  # Run a PostgreSQL CHECKPOINT before sanoid snapshots so the on-disk
  # state is consistent (required since full_page_writes = false).
  # The "+" prefix runs the ExecStartPre step with full privileges even if
  # the unit itself is sandboxed.
  systemd.services.sanoid.serviceConfig = {
    ExecStartPre = lib.mkAfter [ "+${pgCheckpoint}" ];
    TimeoutStartSec = lib.mkForce 300; # checkpoint can be slow with large txg_timeout
  };
}
|
||||
@@ -1,199 +0,0 @@
|
||||
{
  pkgs,
  config,
  service_configs,
  lib,
  inputs,
  ...
}:
let
  # JSON mapping of category name -> { save_path } consumed by qBittorrent's
  # categories.json; generated from service_configs.torrent.categories.
  categoriesFile = pkgs.writeText "categories.json" (
    builtins.toJSON (lib.mapAttrs (_: path: { save_path = path; }) service_configs.torrent.categories)
  );
in
{
  imports = [
    # Torrent payload data lives on the HDD pool; the profile (config,
    # resume data, SQLite DB) on the SSD pool.
    (lib.serviceMountWithZpool "qbittorrent" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    (lib.serviceMountWithZpool "qbittorrent" service_configs.zpool_ssds [
      "${config.services.qbittorrent.profileDir}/qBittorrent"
    ])
    # WebUI port is reachable through the VPN network namespace.
    (lib.vpnNamespaceOpenPort config.services.qbittorrent.webuiPort "qbittorrent")
    (lib.serviceFilePerms "qbittorrent" [
      # 0770: group (media) needs write to delete files during upgrades —
      # Radarr/Sonarr must unlink the old file before placing the new one.
      # Non-recursive (z not Z): UMask=0007 ensures new files get correct perms.
      # A recursive Z rule would walk millions of files on the HDD pool at every boot.
      "z ${config.services.qbittorrent.serverConfig.Preferences.Downloads.SavePath} 0770 ${config.services.qbittorrent.user} ${service_configs.media_group}"
      "z ${config.services.qbittorrent.serverConfig.Preferences.Downloads.TempPath} 0700 ${config.services.qbittorrent.user} ${config.services.qbittorrent.group}"
      "Z ${config.services.qbittorrent.profileDir} 0700 ${config.services.qbittorrent.user} ${config.services.qbittorrent.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "torrent";
      port = service_configs.ports.private.torrent.port;
      auth = true;
      vpn = true;
    })
  ];

  services.qbittorrent = {
    enable = true;
    webuiPort = service_configs.ports.private.torrent.port;
    profileDir = "/var/lib/qBittorrent";
    # Set the service group to 'media' so the systemd unit runs with media as
    # the primary GID. Linux assigns new file ownership from the process's GID
    # (set by systemd's Group= directive), not from /etc/passwd. Without this,
    # downloads land as qbittorrent:qbittorrent (0700), blocking Radarr/Sonarr.
    group = service_configs.media_group;

    serverConfig.LegalNotice.Accepted = true;

    serverConfig.Preferences = {
      WebUI = {
        AlternativeUIEnabled = true;
        RootFolder = "${pkgs.vuetorrent}/share/vuetorrent";

        # disable auth because we use caddy for auth
        AuthSubnetWhitelist = "0.0.0.0/0";
        AuthSubnetWhitelistEnabled = true;
      };

      Downloads = {
        inherit (service_configs.torrent) SavePath TempPath;
      };
    };

    serverConfig.BitTorrent = {
      Session = {
        MaxConnectionsPerTorrent = 100;
        MaxUploadsPerTorrent = 50;
        MaxConnections = -1;
        MaxUploads = -1;

        MaxActiveCheckingTorrents = 2;

        # queueing
        QueueingSystemEnabled = true;
        MaxActiveDownloads = 15;
        MaxActiveUploads = -1;
        MaxActiveTorrents = -1;
        IgnoreSlowTorrentsForQueueing = true;

        # 0 = unlimited global rate limits.
        GlobalUPSpeedLimit = 0;
        GlobalDLSpeedLimit = 0;

        # Alternate speed limits for when Jellyfin is streaming
        AlternativeGlobalUPSpeedLimit = 500; # 500 KB/s when throttled
        AlternativeGlobalDLSpeedLimit = 800; # 800 KB/s when throttled
        IncludeOverheadInLimits = true;

        GlobalMaxRatio = 7.0;

        # Inject the community tracker list (from the trackerlist flake
        # input) into every torrent; blank lines are filtered out.
        AddTrackersEnabled = true;
        AdditionalTrackers = lib.concatStringsSep "\\n" (
          lib.lists.filter (x: x != "") (
            lib.strings.splitString "\n" (builtins.readFile "${inputs.trackerlist}/trackers_all.txt")
          )
        );
        AnnounceToAllTrackers = true;

        # idk why it also has to be specified here too?
        inherit (config.services.qbittorrent.serverConfig.Preferences.Downloads) TempPath;
        TempPathEnabled = true;

        ConnectionSpeed = 200; # half-open connections/s; faster peer discovery

        SaveResumeDataInterval = 300; # save resume data every 5 min (default 60s)
        ResumeDataStorageType = "SQLite"; # SQLite is more efficient than legacy per-file .fastresume storage

        # Automatic Torrent Management: use category save paths for new torrents
        DisableAutoTMMByDefault = false;
        DisableAutoTMMTriggers.CategorySavePathChanged = false;
        DisableAutoTMMTriggers.DefaultSavePathChanged = false;

        ChokingAlgorithm = "RateBased";
        SeedChokingAlgorithm = "FastestUpload"; # unchoke peers we upload to fastest
        PieceExtentAffinity = true;
        SuggestMode = true;

        # POSIX-compliant disk I/O: uses pread/pwrite instead of mmap.
        # On ZFS, mmap forces data into BOTH ARC and Linux page cache (double-caching),
        # wasting RAM. pread/pwrite goes only through ARC, maximizing its effectiveness.
        DiskIOType = "Posix";

        FilePoolSize = 500; # keep more files open to reduce open/close overhead
        AioThreads = 24; # 6 cores * 4; better disk I/O parallelism

        SendBufferLowWatermark = 512; # 512 KiB -- trigger reads sooner to prevent upload stalls
        SendBufferWatermark = 3072; # 3 MiB -- matches high_performance_seed
        SendBufferWatermarkFactor = 150; # percent -- matches high_performance_seed
      };

      Network = {
        # traffic is routed through a vpn, we don't need
        # port forwarding
        PortForwardingEnabled = false;
      };

      Session.UseUPnP = false;
    };
  };

  systemd.services.qbittorrent.serviceConfig = {
    TimeoutStopSec = lib.mkForce 10;
    # Default UMask=0022 creates files as 0644 (group read-only). With 0007,
    # new files get 0660/0770 so the media group has read+write immediately
    # instead of relying on the tmpfiles Z rule to fix permissions at restart.
    UMask = lib.mkForce "0007";
  };

  # Pre-define qBittorrent categories with explicit save paths so every
  # torrent routes to its category directory instead of the SavePath root.
  # "L+" creates/replaces a symlink to the generated categories.json.
  systemd.tmpfiles.settings.qbittorrent-categories = {
    "${config.services.qbittorrent.profileDir}/qBittorrent/config/categories.json"."L+" = {
      argument = "${categoriesFile}";
      user = config.services.qbittorrent.user;
      group = config.services.qbittorrent.group;
      mode = "1400";
    };
  };

  # Ensure category directories exist with correct ownership before first use.
  systemd.tmpfiles.rules = lib.mapAttrsToList (
    _: path: "d ${path} 0770 ${config.services.qbittorrent.user} ${service_configs.media_group} -"
  ) service_configs.torrent.categories;

  # Periodically checkpoint qBittorrent's SQLite WAL (Write-Ahead Log).
  # qBittorrent holds a read transaction open for its entire lifetime,
  # preventing SQLite's auto-checkpoint from running. The WAL grows
  # unbounded (observed: 405 MB) and must be replayed on next startup,
  # causing 10+ minute "internal preparations" hangs.
  # A second sqlite3 connection can checkpoint concurrently and safely.
  # See: https://github.com/qbittorrent/qBittorrent/issues/20433
  systemd.services.qbittorrent-wal-checkpoint = {
    description = "Checkpoint qBittorrent SQLite WAL";
    after = [ "qbittorrent.service" ];
    requires = [ "qbittorrent.service" ];
    serviceConfig = {
      Type = "oneshot";
      ExecStart = "${pkgs.sqlite}/bin/sqlite3 ${config.services.qbittorrent.profileDir}/qBittorrent/data/torrents.db 'PRAGMA wal_checkpoint(TRUNCATE);'";
      User = config.services.qbittorrent.user;
      Group = config.services.qbittorrent.group;
    };
  };

  systemd.timers.qbittorrent-wal-checkpoint = {
    description = "Periodically checkpoint qBittorrent SQLite WAL";
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnUnitActiveSec = "4h";
      OnBootSec = "30min";
      RandomizedDelaySec = "10min";
    };
  };

  users.users.${config.services.qbittorrent.user}.extraGroups = [
    service_configs.media_group
  ];
}
|
||||
@@ -1,68 +0,0 @@
|
||||
{
  pkgs,
  config,
  lib,
  service_configs,
  username,
  ...
}:
let
  slskdCfg = service_configs.slskd;
  webPort = service_configs.ports.private.soulseek_web.port;
  listenPort = service_configs.ports.public.soulseek_listen.port;
in
{
  imports = [
    (lib.serviceMountWithZpool "slskd" "" [
      slskdCfg.base
      slskdCfg.downloads
      slskdCfg.incomplete
    ])
    (lib.serviceFilePerms "slskd" [
      "Z ${service_configs.music_dir} 0750 ${username} music"
      "Z ${slskdCfg.base} 0750 ${config.services.slskd.user} ${config.services.slskd.group}"
      "Z ${slskdCfg.downloads} 0750 ${config.services.slskd.user} music"
      "Z ${slskdCfg.incomplete} 0750 ${config.services.slskd.user} music"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "soulseek";
      port = webPort;
    })
  ];

  # Shared group giving slskd, jellyfin and the admin user access to music.
  users.groups."music" = { };

  services.slskd = {
    enable = true;
    domain = null; # null so we don't use nginx reverse proxy
    environmentFile = config.age.secrets.slskd_env.path;

    settings = {
      web.port = webPort;
      soulseek = {
        # description = "smth idk";
        listen_port = listenPort;
      };

      shares.directories = [ service_configs.music_dir ];

      global = {
        # -1 means unlimited.
        download = {
          slots = -1;
          speed_limit = -1;
        };
        upload = {
          slots = 4;
          speed_limit = 2000;
        };
      };
    };
  };

  users.users.${config.services.slskd.user}.extraGroups = [ "music" ];
  users.users.${config.services.jellyfin.user}.extraGroups = [ "music" ];
  users.users.${username}.extraGroups = [ "music" ];

  networking.firewall.allowedTCPPorts = [ listenPort ];
}
|
||||
@@ -1,38 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  username,
  ...
}:
{
  # OpenSSH: key-only authentication; root login kept for config deploys.
  services.openssh = {
    enable = true;
    settings = {
      AllowUsers = [
        username
        "root"
      ];
      PasswordAuthentication = false;
      PermitRootLogin = "yes"; # for deploying configs
    };
  };

  # Normalize ownership/permissions on host keys at boot.
  systemd.tmpfiles.rules = [
    "Z /etc/ssh 755 root root"
    "Z /etc/ssh/ssh_host_* 600 root root"
  ];

  users.users.${username}.openssh.authorizedKeys.keys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO4jL6gYOunUlUtPvGdML0cpbKSsPNqQ1jit4E7U1RyH" # laptop
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBJjT5QZ3zRDb+V6Em20EYpSEgPW5e/U+06uQGJdraxi" # desktop
  ];

  # used for deploying configs to server: root accepts every key the admin
  # user accepts (read from the merged config, so keys added by other
  # modules are included), plus the CI deploy key.
  users.users.root.openssh.authorizedKeys.keys =
    config.users.users.${username}.openssh.authorizedKeys.keys
    ++ [
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC5ZYN6idL/w/mUIfPOH1i+Q/SQXuzAMQUEuWpipx1Pc ci-deploy@muffin"
    ];
}
|
||||
@@ -1,57 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
let
  st = service_configs.syncthing;
  guiPort = service_configs.ports.private.syncthing_gui.port;
  # All syncthing-managed directories share the same pool, owner and mode.
  syncedDirs = [
    st.dataDir
    st.signalBackupDir
    st.grayjayBackupDir
  ];
in
{
  imports = [
    (lib.serviceMountWithZpool "syncthing" service_configs.zpool_ssds syncedDirs)
    (lib.serviceFilePerms "syncthing" (
      map (dir: "Z ${dir} 0750 ${config.services.syncthing.user} ${config.services.syncthing.group}") syncedDirs
    ))
    (lib.mkCaddyReverseProxy {
      subdomain = "syncthing";
      port = guiPort;
      auth = true;
    })
  ];

  services.syncthing = {
    enable = true;

    dataDir = st.dataDir;

    guiAddress = "127.0.0.1:${toString guiPort}";

    # Keep devices/folders managed through the GUI, not declaratively.
    overrideDevices = false;
    overrideFolders = false;

    settings = {
      gui.insecureSkipHostcheck = true; # Allow access via reverse proxy
      options = {
        urAccepted = 1; # enable usage reporting
        relaysEnabled = true;
      };
    };
  };

  # Open firewall ports for syncthing protocol
  networking.firewall = {
    allowedTCPPorts = [ service_configs.ports.public.syncthing_protocol.port ];
    allowedUDPPorts = [
      service_configs.ports.public.syncthing_discovery.port
      service_configs.ports.public.syncthing_protocol.port
    ];
  };
}
|
||||
@@ -1,27 +0,0 @@
|
||||
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
let
  triliumPort = service_configs.ports.private.trilium.port;
in
{
  imports = [
    # NOTE(review): the mounted path (services_dir + "/trilium") and
    # service_configs.trilium.dataDir below are presumably the same
    # directory — confirm they agree in service_configs.
    (lib.serviceMountWithZpool "trilium-server" service_configs.zpool_ssds [
      (service_configs.services_dir + "/trilium")
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "notes";
      port = triliumPort;
      auth = true;
    })
  ];

  services.trilium-server = {
    enable = true;
    host = "127.0.0.1";
    port = triliumPort;
    dataDir = service_configs.trilium.dataDir;
  };
}
|
||||
@@ -1,22 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
{
  # Monitor the UPS over USB and shut the host down cleanly before the
  # battery is exhausted.
  services.apcupsd = {
    enable = true;

    # Shutdown triggers: whichever of BATTERYLEVEL / MINUTES is hit first.
    configText = ''
      UPSTYPE usb
      NISIP 127.0.0.1
      BATTERYLEVEL 5 # shutdown after reaching 5% battery
      MINUTES 5 # shutdown if estimated runtime on battery reaches 5 minutes
    '';

    # command to run when shutdown condition is met
    hooks.doshutdown = "systemctl poweroff";
  };
}
|
||||
@@ -1,52 +0,0 @@
|
||||
{
  pkgs,
  config,
  inputs,
  ...
}:
{
  imports = [
    inputs.vpn-confinement.nixosModules.default
  ];

  # network namespace that is proxied through mullvad
  vpnNamespaces.wg = {
    enable = true;
    wireguardConfigFile = config.age.secrets.wg0-conf.path;
    accessibleFrom = [
      # "192.168.0.0/24"
    ];
  };

  boot = {
    # BBR congestion control handles variable-latency VPN connections much
    # better than CUBIC by probing bandwidth continuously rather than
    # reacting to packet loss.
    kernelModules = [ "tcp_bbr" ];

    kernel.sysctl = {
      # Use BBR + fair queuing for smooth throughput through the WireGuard VPN
      "net.core.default_qdisc" = "fq";
      "net.ipv4.tcp_congestion_control" = "bbr";

      # Disable slow-start after idle: prevents TCP from resetting window
      # size on each burst cycle (the primary cause of the 0 -> 40 MB/s spikes)
      "net.ipv4.tcp_slow_start_after_idle" = 0;

      # Larger socket buffers to accommodate the VPN bandwidth-delay product
      # (22ms RTT * target throughput). Current 2.5MB max is too small.
      "net.core.rmem_max" = 16777216;
      "net.core.wmem_max" = 16777216;
      "net.ipv4.tcp_rmem" = "4096 87380 16777216";
      "net.ipv4.tcp_wmem" = "4096 65536 16777216";

      # Higher backlog for the large number of concurrent torrent connections
      "net.core.netdev_max_backlog" = 5000;
      # Faster cleanup of dead connections from torrent peer churn
      "net.ipv4.tcp_fin_timeout" = 15; # default 60
      "net.ipv4.tcp_tw_reuse" = 1;
    };
  };

  # Traffic on the namespace bridge is trusted so confined services can be
  # reached from the host side.
  networking.firewall.trustedInterfaces = [ "wg-br" ];
}
|
||||
@@ -1,124 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
pkgs.testers.runNixOSTest {
  name = "fail2ban-caddy";

  nodes = {
    server =
      {
        config,
        pkgs,
        lib,
        ...
      }:
      {
        imports = [ ../modules/security.nix ];

        # Minimal Caddy vhost with basic auth; the bcrypt hash was produced
        # with: caddy hash-password --plaintext testpass
        services.caddy = {
          enable = true;
          virtualHosts.":80".extraConfig = ''
            log {
              output file /var/log/caddy/access-server.log
              format json
            }
            basic_auth {
              testuser $2a$14$XqaQlGTdmofswciqrLlMz.rv0/jiGQq8aU.fP6mh6gCGiLf6Cl3.a
            }
            respond "Authenticated!" 200
          '';
        };

        # Same caddy-auth jail as services/caddy.nix, with a lower maxretry
        # so the ban triggers quickly in the test.
        services.fail2ban.jails.caddy-auth = {
          enabled = true;
          settings = {
            backend = "auto";
            port = "http,https";
            logpath = "/var/log/caddy/access-*.log";
            maxretry = 3; # Lower for testing
          };
          filter.Definition = {
            # Only match 401s where an Authorization header was actually sent
            failregex = ''^.*"remote_ip":"<HOST>".*"Authorization":\["REDACTED"\].*"status":401.*$'';
            ignoreregex = "";
            datepattern = ''"ts":{Epoch}\.'';
          };
        };

        # Pre-create the log directory/file so fail2ban can start its watcher.
        systemd.tmpfiles.rules = [
          "d /var/log/caddy 755 caddy caddy"
          "f /var/log/caddy/access-server.log 644 caddy caddy"
        ];

        networking.firewall.allowedTCPPorts = [ 80 ];
      };

    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };

  testScript = ''
    import time
    import re

    start_all()
    server.wait_for_unit("caddy.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(80)
    time.sleep(2)

    with subtest("Verify caddy-auth jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "caddy-auth" in status, f"caddy-auth jail not found in: {status}"

    with subtest("Verify correct password works"):
        # Use -4 to force IPv4 for consistency
        result = client.succeed("curl -4 -s -u testuser:testpass http://server/")
        print(f"Curl result: {result}")
        assert "Authenticated" in result, f"Auth should succeed: {result}"

    with subtest("Unauthenticated requests (browser probes) should not trigger ban"):
        # Simulate browser probe requests - no Authorization header sent
        # This is the normal HTTP Basic Auth challenge-response flow:
        # browser sends request without credentials, gets 401, then resends with credentials
        for i in range(5):
            client.execute("curl -4 -s http://server/ || true")
            time.sleep(0.5)
        time.sleep(3)
        status = server.succeed("fail2ban-client status caddy-auth")
        print(f"caddy-auth jail status after unauthenticated requests: {status}")
        match = re.search(r"Currently banned:\s*(\d+)", status)
        banned = int(match.group(1)) if match else 0
        assert banned == 0, f"Unauthenticated 401s should NOT trigger ban, but {banned} IPs were banned: {status}"

    with subtest("Generate failed basic auth attempts (wrong password)"):
        # Use -4 to force IPv4 for consistent IP tracking
        # These send an Authorization header with wrong credentials
        for i in range(4):
            client.execute("curl -4 -s -u testuser:wrongpass http://server/ || true")
            time.sleep(1)

    with subtest("Verify IP is banned after wrong password attempts"):
        time.sleep(5)
        status = server.succeed("fail2ban-client status caddy-auth")
        print(f"caddy-auth jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"

    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}
|
||||
@@ -1,122 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Production service configs with test-friendly overrides layered on top.
  base = import ../service-configs.nix;
  cfgOverrides = lib.recursiveUpdate base {
    zpool_ssds = "";
    gitea = {
      dir = "/var/lib/gitea";
      domain = "git.test.local";
    };
    ports.private.gitea = {
      port = 3000;
      proto = "tcp";
    };
  };

  # Lib extension with no-op stand-ins for the host-only helpers.
  stubbedLib = lib.extend (
    final: prev: {
      serviceMountWithZpool = serviceName: zpool: dirs: { ... }: { };
      serviceFilePerms = serviceName: tmpfilesRules: { ... }: { };
    }
  );

  # The real gitea service module, evaluated with the stubbed lib/configs.
  giteaModule =
    { config, pkgs, ... }:
    {
      imports = [
        (import ../services/gitea.nix {
          inherit config pkgs;
          lib = stubbedLib;
          service_configs = cfgOverrides;
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-gitea";

  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
          giteaModule
        ];

        # Enable postgres for gitea
        services.postgresql.enable = true;

        # Disable ZFS mount dependency
        systemd.services."gitea-mounts".enable = lib.mkForce false;
        systemd.services.gitea = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ "postgresql.service" ];
          requires = lib.mkForce [ ];
        };

        # Override for faster testing and correct port
        services.fail2ban.jails.gitea.settings = {
          maxretry = lib.mkForce 3;
          # In test, we connect directly to Gitea port, not via Caddy
          port = lib.mkForce "3000";
        };

        networking.firewall.allowedTCPPorts = [ 3000 ];
      };

    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };

  testScript = ''
    import time
    import re

    start_all()
    server.wait_for_unit("postgresql.service")
    server.wait_for_unit("gitea.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(3000)
    time.sleep(3)

    with subtest("Verify gitea jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "gitea" in status, f"gitea jail not found in: {status}"

    with subtest("Generate failed login attempts"):
        # Use -4 to force IPv4 for consistent IP tracking
        for i in range(4):
            client.execute(
                "curl -4 -s -X POST http://server:3000/user/login -d 'user_name=baduser&password=badpass' || true"
            )
            time.sleep(0.5)

    with subtest("Verify IP is banned"):
        time.sleep(3)
        status = server.succeed("fail2ban-client status gitea")
        print(f"gitea jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"

    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server:3000/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}
|
||||
@@ -1,133 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Production service configs with test-friendly overrides layered on top.
  base = import ../service-configs.nix;
  cfgOverrides = lib.recursiveUpdate base {
    zpool_ssds = "";
    https.domain = "test.local";
    ports.private.immich = {
      port = 2283;
      proto = "tcp";
    };
    immich.dir = "/var/lib/immich";
  };

  # Lib extension with no-op stand-ins for the host-only helpers.
  stubbedLib = lib.extend (
    final: prev: {
      serviceMountWithZpool = serviceName: zpool: dirs: { ... }: { };
      serviceFilePerms = serviceName: tmpfilesRules: { ... }: { };
    }
  );

  # The real immich service module, evaluated with the stubbed lib/configs.
  immichModule =
    { config, pkgs, ... }:
    {
      imports = [
        (import ../services/immich.nix {
          inherit config pkgs;
          lib = stubbedLib;
          service_configs = cfgOverrides;
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-immich";

  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
          immichModule
        ];

        # Immich needs postgres
        services.postgresql.enable = true;

        # Let immich create its own DB for testing
        services.immich.database.createDB = lib.mkForce true;

        # Disable ZFS mount dependencies
        systemd.services."immich-server-mounts".enable = lib.mkForce false;
        systemd.services."immich-machine-learning-mounts".enable = lib.mkForce false;
        systemd.services.immich-server = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ "postgresql.service" ];
          requires = lib.mkForce [ ];
        };
        systemd.services.immich-machine-learning = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ ];
          requires = lib.mkForce [ ];
        };

        # Override for faster testing and correct port
        services.fail2ban.jails.immich.settings = {
          maxretry = lib.mkForce 3;
          # In test, we connect directly to Immich port, not via Caddy
          port = lib.mkForce "2283";
        };

        networking.firewall.allowedTCPPorts = [ 2283 ];

        # Immich needs more resources
        virtualisation.diskSize = 4 * 1024;
        virtualisation.memorySize = 4 * 1024; # 4GB RAM for Immich
      };

    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };

  testScript = ''
    import time
    import re

    start_all()
    server.wait_for_unit("postgresql.service")
    server.wait_for_unit("immich-server.service", timeout=120)
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(2283, timeout=60)
    time.sleep(3)

    with subtest("Verify immich jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "immich" in status, f"immich jail not found in: {status}"

    with subtest("Generate failed login attempts"):
        # Use -4 to force IPv4 for consistent IP tracking
        for i in range(4):
            client.execute(
                "curl -4 -s -X POST http://server:2283/api/auth/login -H 'Content-Type: application/json' -d '{\"email\":\"bad@user.com\",\"password\":\"badpass\"}' || true"
            )
            time.sleep(0.5)

    with subtest("Verify IP is banned"):
        time.sleep(3)
        status = server.succeed("fail2ban-client status immich")
        print(f"immich jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"

    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server:2283/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}
|
||||
@@ -1,145 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Production service configs with test-friendly overrides layered on top.
  base = import ../service-configs.nix;
  cfgOverrides = lib.recursiveUpdate base {
    zpool_ssds = "";
    https.domain = "test.local";
    jellyfin = {
      dataDir = "/var/lib/jellyfin";
      cacheDir = "/var/cache/jellyfin";
    };
  };

  # Lib extension with no-op stand-ins for the host-only helpers.
  stubbedLib = lib.extend (
    final: prev: {
      serviceMountWithZpool = serviceName: zpool: dirs: { ... }: { };
      serviceFilePerms = serviceName: tmpfilesRules: { ... }: { };
      optimizePackage = pkg: pkg; # No-op for testing
    }
  );

  # The real jellyfin service module, evaluated with the stubbed lib/configs.
  jellyfinModule =
    { config, pkgs, ... }:
    {
      imports = [
        (import ../services/jellyfin/jellyfin.nix {
          inherit config pkgs;
          lib = stubbedLib;
          service_configs = cfgOverrides;
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-jellyfin";

  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
          jellyfinModule
        ];

        # needed for testing
        services.jellyfin.openFirewall = true;

        # Create the media group
        users.groups.media = { };

        # Disable ZFS mount dependency
        systemd.services."jellyfin-mounts".enable = lib.mkForce false;
        systemd.services.jellyfin = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ ];
          requires = lib.mkForce [ ];
        };

        # Override for faster testing and correct port
        services.fail2ban.jails.jellyfin.settings = {
          maxretry = lib.mkForce 3;
          # In test, we connect directly to Jellyfin port, not via Caddy
          port = lib.mkForce "8096";
        };

        # Create log directory and placeholder log file for fail2ban
        # Jellyfin logs to files, not systemd journal
        systemd.tmpfiles.rules = [
          "d /var/lib/jellyfin/log 0755 jellyfin jellyfin"
          "f /var/lib/jellyfin/log/log_placeholder.log 0644 jellyfin jellyfin"
        ];

        # Make fail2ban start after Jellyfin
        systemd.services.fail2ban = {
          wants = [ "jellyfin.service" ];
          after = [ "jellyfin.service" ];
        };

        # Give jellyfin more disk space and memory
        virtualisation.diskSize = 3 * 1024;
        virtualisation.memorySize = 2 * 1024;
      };

    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };

  testScript = ''
    import time
    import re

    start_all()
    server.wait_for_unit("jellyfin.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(8096)
    server.wait_until_succeeds("curl -sf http://localhost:8096/health | grep -q Healthy", timeout=120)
    time.sleep(2)

    # Wait for Jellyfin to create real log files and reload fail2ban
    server.wait_until_succeeds("ls /var/lib/jellyfin/log/log_2*.log", timeout=30)
    server.succeed("fail2ban-client reload jellyfin")

    with subtest("Verify jellyfin jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "jellyfin" in status, f"jellyfin jail not found in: {status}"

    with subtest("Generate failed login attempts"):
        # Use -4 to force IPv4 for consistent IP tracking
        for i in range(4):
            client.execute("""
                curl -4 -s -X POST http://server:8096/Users/authenticatebyname \
                  -H 'Content-Type: application/json' \
                  -H 'X-Emby-Authorization: MediaBrowser Client="test", Device="test", DeviceId="test", Version="1.0"' \
                  -d '{"Username":"baduser","Pw":"badpass"}' || true
            """)
            time.sleep(0.5)

    with subtest("Verify IP is banned"):
        time.sleep(3)
        status = server.succeed("fail2ban-client status jellyfin")
        print(f"jellyfin jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"

    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server:8096/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}
|
||||
@@ -1,99 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
let
  securityModule = import ../modules/security.nix;

  # The real ssh service module, parameterized with the test user.
  sshModule =
    {
      config,
      lib,
      pkgs,
      ...
    }:
    {
      imports = [
        (import ../services/ssh.nix {
          inherit config lib pkgs;
          username = "testuser";
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-ssh";

  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          securityModule
          sshModule
        ];

        # Override for testing - enable password auth
        services.openssh.settings.PasswordAuthentication = lib.mkForce true;

        users.users.testuser = {
          isNormalUser = true;
          password = "correctpassword";
        };

        networking.firewall.allowedTCPPorts = [ 22 ];
      };

    client = {
      environment.systemPackages = with pkgs; [
        sshpass
        openssh
      ];
    };
  };

  testScript = ''
    import time

    start_all()
    server.wait_for_unit("sshd.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(22)
    time.sleep(2)

    with subtest("Verify sshd jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "sshd" in status, f"sshd jail not found in: {status}"

    with subtest("Generate failed SSH login attempts"):
        # Use -4 to force IPv4, timeout and NumberOfPasswordPrompts=1 to ensure quick failure
        # maxRetry is 3 in our config, so 4 attempts should trigger a ban
        for i in range(4):
            client.execute(
                "timeout 5 sshpass -p 'wrongpassword' ssh -4 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=3 -o NumberOfPasswordPrompts=1 testuser@server echo test 2>/dev/null || true"
            )
            time.sleep(1)

    with subtest("Verify IP is banned"):
        # Wait for fail2ban to process the logs and apply the ban
        time.sleep(5)
        status = server.succeed("fail2ban-client status sshd")
        print(f"sshd jail status: {status}")
        # Check that at least 1 IP is banned
        import re
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"

    with subtest("Verify banned client cannot connect"):
        # FIX: the client image does not install netcat, so the previous
        # "nc" probe exited 127 (command not found) and passed vacuously.
        # Probe the port with bash's /dev/tcp redirection instead, which
        # needs no extra package: exit 0 if the TCP connect succeeds,
        # non-zero if the ban blocks it.
        exit_code = client.execute("timeout 3 bash -c '</dev/tcp/server/22'")[0]
        assert exit_code != 0, "Connection should be blocked for banned IP"
  '';
}
|
||||
@@ -1,130 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Production service configs with test-friendly overrides layered on top.
  base = import ../service-configs.nix;
  cfgOverrides = lib.recursiveUpdate base {
    zpool_ssds = "";
    https.domain = "test.local";
  };

  # Lib extension with no-op stand-ins for the host-only helpers.
  stubbedLib = lib.extend (
    final: prev: {
      serviceMountWithZpool = serviceName: zpool: dirs: { ... }: { };
      serviceFilePerms = serviceName: tmpfilesRules: { ... }: { };
    }
  );

  # The real vaultwarden service module, evaluated with the stubbed lib/configs.
  vaultwardenModule =
    { config, pkgs, ... }:
    {
      imports = [
        (import ../services/bitwarden.nix {
          inherit config pkgs;
          lib = stubbedLib;
          service_configs = cfgOverrides;
        })
      ];
    };
in
pkgs.testers.runNixOSTest {
  name = "fail2ban-vaultwarden";

  nodes = {
    server =
      {
        config,
        lib,
        pkgs,
        ...
      }:
      {
        imports = [
          ../modules/security.nix
          vaultwardenModule
        ];

        # Disable ZFS mount dependencies
        systemd.services."vaultwarden-mounts".enable = lib.mkForce false;
        systemd.services."backup-vaultwarden-mounts".enable = lib.mkForce false;
        systemd.services.vaultwarden = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ ];
          requires = lib.mkForce [ ];
        };
        systemd.services.backup-vaultwarden = {
          wants = lib.mkForce [ ];
          after = lib.mkForce [ ];
          requires = lib.mkForce [ ];
        };

        # Override Vaultwarden settings for testing
        # - Listen on all interfaces (not just localhost)
        # - Enable logging at info level to capture failed login attempts
        services.vaultwarden.config = {
          ROCKET_ADDRESS = lib.mkForce "0.0.0.0";
          ROCKET_LOG = lib.mkForce "info";
        };

        # Override for faster testing and correct port
        services.fail2ban.jails.vaultwarden.settings = {
          maxretry = lib.mkForce 3;
          # In test, we connect directly to Vaultwarden port, not via Caddy
          port = lib.mkForce "8222";
        };

        networking.firewall.allowedTCPPorts = [ 8222 ];
      };

    client = {
      environment.systemPackages = [ pkgs.curl ];
    };
  };

  testScript = ''
    import time
    import re

    start_all()
    server.wait_for_unit("vaultwarden.service")
    server.wait_for_unit("fail2ban.service")
    server.wait_for_open_port(8222)
    time.sleep(2)

    with subtest("Verify vaultwarden jail is active"):
        status = server.succeed("fail2ban-client status")
        assert "vaultwarden" in status, f"vaultwarden jail not found in: {status}"

    with subtest("Generate failed login attempts"):
        # Use -4 to force IPv4 for consistent IP tracking
        for i in range(4):
            client.execute("""
                curl -4 -s -X POST 'http://server:8222/identity/connect/token' \
                  -H 'Content-Type: application/x-www-form-urlencoded' \
                  -H 'Bitwarden-Client-Name: web' \
                  -H 'Bitwarden-Client-Version: 2024.1.0' \
                  -d 'grant_type=password&username=bad@user.com&password=badpass&scope=api+offline_access&client_id=web&deviceType=10&deviceIdentifier=test&deviceName=test' \
                  || true
            """)
            time.sleep(0.5)

    with subtest("Verify IP is banned"):
        time.sleep(3)
        status = server.succeed("fail2ban-client status vaultwarden")
        print(f"vaultwarden jail status: {status}")
        # Check that at least 1 IP is banned
        match = re.search(r"Currently banned:\s*(\d+)", status)
        assert match and int(match.group(1)) >= 1, f"Expected at least 1 banned IP, got: {status}"

    with subtest("Verify banned client cannot connect"):
        # Use -4 to test with same IP that was banned
        exit_code = client.execute("curl -4 -s --max-time 3 http://server:8222/ 2>&1")[0]
        assert exit_code != 0, "Connection should be blocked"
  '';
}
|
||||
@@ -1,53 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
let
  testPkgs = pkgs.appendOverlays [ (import ../modules/overlays.nix) ];
in
testPkgs.testers.runNixOSTest {
  # FIX: the name previously contained a space ("file-perms test"), which is
  # not a valid character in a Nix derivation name and fails evaluation.
  name = "file-perms";

  nodes.machine =
    { pkgs, ... }:
    {
      imports = [
        # Pull in a tmpfiles-based perms-fixer unit for the test service.
        (lib.serviceFilePerms "test-service" [
          "Z /tmp/test-perms-dir 0750 nobody nogroup"
        ])
      ];

      # Minimal oneshot service to hang the file-perms dependency off.
      systemd.services."test-service" = {
        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
          ExecStart = lib.getExe pkgs.bash;
        };
      };
    };

  testScript = ''
    start_all()
    machine.wait_for_unit("multi-user.target")

    # Create test directory with wrong permissions
    machine.succeed("mkdir -p /tmp/test-perms-dir")
    machine.succeed("chown root:root /tmp/test-perms-dir")
    machine.succeed("chmod 700 /tmp/test-perms-dir")

    # Start service -- this should pull in test-service-file-perms
    machine.succeed("systemctl start test-service")

    # Verify file-perms service ran and is active
    machine.succeed("systemctl is-active test-service-file-perms.service")

    # Verify permissions were fixed by tmpfiles
    result = machine.succeed("stat -c '%U:%G' /tmp/test-perms-dir").strip()
    assert result == "nobody:nogroup", f"Expected nobody:nogroup, got {result}"

    result = machine.succeed("stat -c '%a' /tmp/test-perms-dir").strip()
    assert result == "750", f"Expected 750, got {result}"
  '';
}
|
||||
@@ -1,60 +0,0 @@
|
||||
{
  config,
  lib,
  pkgs,
  ...
}:
pkgs.testers.runNixOSTest {
  name = "gitea-runner";

  nodes.machine =
    { pkgs, ... }:
    {
      services.gitea = {
        enable = true;
        database.type = "sqlite3";
        settings = {
          server = {
            HTTP_PORT = 3000;
            ROOT_URL = "http://localhost:3000";
            DOMAIN = "localhost";
          };
          actions.ENABLED = true;
          service.DISABLE_REGISTRATION = true;
        };
      };

      # The runner lives in a specialisation so the base system can boot,
      # generate a registration token, then switch the runner on.
      specialisation.runner = {
        inheritParentConfig = true;
        configuration.services.gitea-actions-runner.instances.test = {
          enable = true;
          name = "ci";
          url = "http://localhost:3000";
          labels = [ "native:host" ];
          tokenFile = "/var/lib/gitea/runner_token";
        };
      };
    };

  testScript = ''
    start_all()

    machine.wait_for_unit("gitea.service")
    machine.wait_for_open_port(3000)

    # Generate runner token
    machine.succeed(
        "su -l gitea -s /bin/sh -c '${pkgs.gitea}/bin/gitea actions generate-runner-token --work-path /var/lib/gitea' | tail -1 | sed 's/^/TOKEN=/' > /var/lib/gitea/runner_token"
    )

    # Switch to runner specialisation
    machine.succeed(
        "/run/current-system/specialisation/runner/bin/switch-to-configuration test"
    )

    # Start the runner (specialisation switch doesn't auto-start new services)
    machine.succeed("systemctl start gitea-runner-test.service")
    machine.wait_for_unit("gitea-runner-test.service")
    # FIX: poll for the registration file instead of a fixed "sleep 5",
    # which raced against runner registration and made the test flaky.
    machine.wait_until_succeeds("test -f /var/lib/gitea-runner/test/.runner", timeout=60)
  '';
}
|
||||
@@ -1,190 +0,0 @@
|
||||
{
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
jfLib = import ./jellyfin-test-lib.nix { inherit pkgs lib; };
|
||||
mockGrafana = ./mock-grafana-server.py;
|
||||
script = ../services/grafana/jellyfin-annotations.py;
|
||||
python = pkgs.python3;
|
||||
in
|
||||
pkgs.testers.runNixOSTest {
|
||||
name = "jellyfin-annotations";
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [ jfLib.jellyfinTestConfig ];
|
||||
environment.systemPackages = [ pkgs.python3 ];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
import time
|
||||
|
||||
import importlib.util
|
||||
_spec = importlib.util.spec_from_file_location("jf_helpers", "${jfLib.helpers}")
|
||||
assert _spec and _spec.loader
|
||||
_jf = importlib.util.module_from_spec(_spec)
|
||||
_spec.loader.exec_module(_jf)
|
||||
setup_jellyfin = _jf.setup_jellyfin
|
||||
jellyfin_api = _jf.jellyfin_api
|
||||
|
||||
GRAFANA_PORT = 13000
|
||||
ANNOTS_FILE = "/tmp/annotations.json"
|
||||
STATE_FILE = "/tmp/annotations-state.json"
|
||||
CREDS_DIR = "/tmp/test-creds"
|
||||
PYTHON = "${python}/bin/python3"
|
||||
MOCK_GRAFANA = "${mockGrafana}"
|
||||
SCRIPT = "${script}"
|
||||
|
||||
auth_header = 'MediaBrowser Client="Infuse", DeviceId="test-dev-1", Device="iPhone", Version="1.0"'
|
||||
auth_header2 = 'MediaBrowser Client="Jellyfin Web", DeviceId="test-dev-2", Device="Chrome", Version="1.0"'
|
||||
|
||||
def read_annotations():
|
||||
out = machine.succeed(f"cat {ANNOTS_FILE} 2>/dev/null || echo '[]'")
|
||||
return json.loads(out.strip())
|
||||
|
||||
start_all()
|
||||
token, user_id, movie_id, media_source_id = setup_jellyfin(
|
||||
machine, retry, auth_header,
|
||||
"${jfLib.payloads.auth}", "${jfLib.payloads.empty}",
|
||||
)
|
||||
|
||||
with subtest("Setup mock Grafana and credentials"):
|
||||
machine.succeed(f"mkdir -p {CREDS_DIR}")
|
||||
machine.succeed(f"echo '{token}' > {CREDS_DIR}/jellyfin-api-key")
|
||||
machine.succeed(f"echo '[]' > {ANNOTS_FILE}")
|
||||
machine.succeed(
|
||||
f"systemd-run --unit=mock-grafana {PYTHON} {MOCK_GRAFANA} {GRAFANA_PORT} {ANNOTS_FILE}"
|
||||
)
|
||||
machine.wait_until_succeeds(
|
||||
f"curl -sf -X POST http://127.0.0.1:{GRAFANA_PORT}/api/annotations "
|
||||
f"-H 'Content-Type: application/json' -d '{{\"text\":\"ping\",\"tags\":[]}}' | grep -q id",
|
||||
timeout=10,
|
||||
)
|
||||
machine.succeed(f"echo '[]' > {ANNOTS_FILE}")
|
||||
|
||||
with subtest("Start annotation service"):
|
||||
machine.succeed(
|
||||
f"systemd-run --unit=annotations-svc "
|
||||
f"--setenv=JELLYFIN_URL=http://127.0.0.1:8096 "
|
||||
f"--setenv=GRAFANA_URL=http://127.0.0.1:{GRAFANA_PORT} "
|
||||
f"--setenv=CREDENTIALS_DIRECTORY={CREDS_DIR} "
|
||||
f"--setenv=STATE_FILE={STATE_FILE} "
|
||||
f"--setenv=POLL_INTERVAL=3 "
|
||||
f"{PYTHON} {SCRIPT}"
|
||||
)
|
||||
time.sleep(2)
|
||||
|
||||
with subtest("No annotations when no streams active"):
|
||||
time.sleep(4)
|
||||
annots = read_annotations()
|
||||
assert annots == [], f"Expected no annotations, got: {annots}"
|
||||
|
||||
with subtest("Annotation created when playback starts"):
|
||||
playback_start = json.dumps({
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-1",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
})
|
||||
machine.succeed(
|
||||
f"curl -sf -X POST 'http://localhost:8096/Sessions/Playing' "
|
||||
f"-d '{playback_start}' -H 'Content-Type:application/json' "
|
||||
f"-H 'X-Emby-Authorization:{auth_header}, Token={token}'"
|
||||
)
|
||||
machine.wait_until_succeeds(
|
||||
f"cat {ANNOTS_FILE} | python3 -c \"import sys,json; a=json.load(sys.stdin); exit(0 if a else 1)\"",
|
||||
timeout=15,
|
||||
)
|
||||
annots = read_annotations()
|
||||
assert len(annots) == 1, f"Expected 1 annotation, got: {annots}"
|
||||
text = annots[0]["text"]
|
||||
assert "jellyfin" in annots[0].get("tags", []), f"Missing jellyfin tag: {annots[0]}"
|
||||
assert "Test Movie" in text, f"Missing title in: {text}"
|
||||
assert "Infuse" in text, f"Missing client in: {text}"
|
||||
assert "iPhone" in text, f"Missing device in: {text}"
|
||||
assert "timeEnd" not in annots[0], f"timeEnd should not be set yet: {annots[0]}"
|
||||
|
||||
with subtest("Annotation closed when playback stops"):
|
||||
playback_stop = json.dumps({
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-1",
|
||||
"PositionTicks": 50000000,
|
||||
})
|
||||
machine.succeed(
|
||||
f"curl -sf -X POST 'http://localhost:8096/Sessions/Playing/Stopped' "
|
||||
f"-d '{playback_stop}' -H 'Content-Type:application/json' "
|
||||
f"-H 'X-Emby-Authorization:{auth_header}, Token={token}'"
|
||||
)
|
||||
machine.wait_until_succeeds(
|
||||
f"cat {ANNOTS_FILE} | python3 -c \"import sys,json; a=json.load(sys.stdin); exit(0 if a and 'timeEnd' in a[0] else 1)\"",
|
||||
timeout=15,
|
||||
)
|
||||
annots = read_annotations()
|
||||
assert len(annots) == 1, f"Expected 1 annotation, got: {annots}"
|
||||
assert "timeEnd" in annots[0], f"timeEnd should be set: {annots[0]}"
|
||||
assert annots[0]["timeEnd"] > annots[0]["time"], "timeEnd should be after time"
|
||||
|
||||
with subtest("Multiple concurrent streams each get their own annotation"):
|
||||
machine.succeed(f"echo '[]' > {ANNOTS_FILE}")
|
||||
|
||||
auth_result2 = json.loads(machine.succeed(
|
||||
f"curl -sf -X POST 'http://localhost:8096/Users/AuthenticateByName' "
|
||||
f"-d '@${jfLib.payloads.auth}' -H 'Content-Type:application/json' "
|
||||
f"-H 'X-Emby-Authorization:{auth_header2}'"
|
||||
))
|
||||
token2 = auth_result2["AccessToken"]
|
||||
|
||||
playback1 = json.dumps({
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-multi-1",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
})
|
||||
machine.succeed(
|
||||
f"curl -sf -X POST 'http://localhost:8096/Sessions/Playing' "
|
||||
f"-d '{playback1}' -H 'Content-Type:application/json' "
|
||||
f"-H 'X-Emby-Authorization:{auth_header}, Token={token}'"
|
||||
)
|
||||
playback2 = json.dumps({
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-multi-2",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
})
|
||||
machine.succeed(
|
||||
f"curl -sf -X POST 'http://localhost:8096/Sessions/Playing' "
|
||||
f"-d '{playback2}' -H 'Content-Type:application/json' "
|
||||
f"-H 'X-Emby-Authorization:{auth_header2}, Token={token2}'"
|
||||
)
|
||||
machine.wait_until_succeeds(
|
||||
f"cat {ANNOTS_FILE} | python3 -c \"import sys,json; a=json.load(sys.stdin); exit(0 if len(a)==2 else 1)\"",
|
||||
timeout=15,
|
||||
)
|
||||
annots = read_annotations()
|
||||
assert len(annots) == 2, f"Expected 2 annotations, got: {annots}"
|
||||
|
||||
with subtest("State survives service restart (no duplicate annotations)"):
|
||||
machine.succeed("systemctl stop annotations-svc || true")
|
||||
time.sleep(1)
|
||||
machine.succeed(
|
||||
f"systemd-run --unit=annotations-svc-2 "
|
||||
f"--setenv=JELLYFIN_URL=http://127.0.0.1:8096 "
|
||||
f"--setenv=GRAFANA_URL=http://127.0.0.1:{GRAFANA_PORT} "
|
||||
f"--setenv=CREDENTIALS_DIRECTORY={CREDS_DIR} "
|
||||
f"--setenv=STATE_FILE={STATE_FILE} "
|
||||
f"--setenv=POLL_INTERVAL=3 "
|
||||
f"{PYTHON} {SCRIPT}"
|
||||
)
|
||||
time.sleep(6)
|
||||
annots = read_annotations()
|
||||
assert len(annots) == 2, f"Restart should not create duplicates, got: {annots}"
|
||||
'';
|
||||
}
|
||||
@@ -1,654 +0,0 @@
|
||||
{
|
||||
lib,
|
||||
pkgs,
|
||||
inputs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
jfLib = import ./jellyfin-test-lib.nix { inherit pkgs lib; };
|
||||
webhookPlugin = import ../services/jellyfin/jellyfin-webhook-plugin.nix { inherit pkgs lib; };
|
||||
configureWebhook = webhookPlugin.mkConfigureScript {
|
||||
jellyfinUrl = "http://localhost:8096";
|
||||
webhooks = [
|
||||
{
|
||||
name = "qBittorrent Monitor";
|
||||
uri = "http://127.0.0.1:9898/";
|
||||
notificationTypes = [
|
||||
"PlaybackStart"
|
||||
"PlaybackProgress"
|
||||
"PlaybackStop"
|
||||
];
|
||||
}
|
||||
];
|
||||
};
|
||||
in
|
||||
pkgs.testers.runNixOSTest {
|
||||
name = "jellyfin-qbittorrent-monitor";
|
||||
|
||||
nodes = {
|
||||
server =
|
||||
{ ... }:
|
||||
{
|
||||
imports = [
|
||||
jfLib.jellyfinTestConfig
|
||||
inputs.vpn-confinement.nixosModules.default
|
||||
];
|
||||
|
||||
# Real qBittorrent service
|
||||
services.qbittorrent = {
|
||||
enable = true;
|
||||
webuiPort = 8080;
|
||||
openFirewall = true;
|
||||
|
||||
serverConfig.LegalNotice.Accepted = true;
|
||||
|
||||
serverConfig.Preferences = {
|
||||
WebUI = {
|
||||
# Disable authentication for testing
|
||||
AuthSubnetWhitelist = "0.0.0.0/0,::/0";
|
||||
AuthSubnetWhitelistEnabled = true;
|
||||
LocalHostAuth = false;
|
||||
};
|
||||
|
||||
Downloads = {
|
||||
SavePath = "/var/lib/qbittorrent/downloads";
|
||||
TempPath = "/var/lib/qbittorrent/incomplete";
|
||||
};
|
||||
};
|
||||
|
||||
serverConfig.BitTorrent.Session = {
|
||||
# Normal speed - unlimited
|
||||
GlobalUPSpeedLimit = 0;
|
||||
GlobalDLSpeedLimit = 0;
|
||||
|
||||
# Alternate speed limits for when Jellyfin is streaming
|
||||
AlternativeGlobalUPSpeedLimit = 100;
|
||||
AlternativeGlobalDLSpeedLimit = 100;
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
8096
|
||||
8080
|
||||
];
|
||||
networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
|
||||
{
|
||||
address = "192.168.1.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.interfaces.eth1.ipv4.routes = [
|
||||
{
|
||||
address = "203.0.113.0";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
# Create directories for qBittorrent.
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /var/lib/qbittorrent/downloads 0755 qbittorrent qbittorrent"
|
||||
"d /var/lib/qbittorrent/incomplete 0755 qbittorrent qbittorrent"
|
||||
];
|
||||
|
||||
# Install the Jellyfin Webhook plugin before Jellyfin starts, mirroring
|
||||
# the production module. Jellyfin rewrites meta.json at runtime so a
|
||||
# read-only nix-store symlink would fail — we materialise a writable copy.
|
||||
systemd.services."jellyfin-webhook-install" = {
|
||||
description = "Install Jellyfin Webhook plugin files";
|
||||
before = [ "jellyfin.service" ];
|
||||
wantedBy = [ "jellyfin.service" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
User = "jellyfin";
|
||||
Group = "jellyfin";
|
||||
UMask = "0077";
|
||||
ExecStart = webhookPlugin.mkInstallScript {
|
||||
pluginsDir = "/var/lib/jellyfin/plugins";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Public test IP (RFC 5737 TEST-NET-3) so Jellyfin sees it as external
|
||||
client = {
|
||||
environment.systemPackages = [ pkgs.curl ];
|
||||
networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
|
||||
{
|
||||
address = "203.0.113.10";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
networking.interfaces.eth1.ipv4.routes = [
|
||||
{
|
||||
address = "192.168.1.0";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
import time
|
||||
|
||||
import importlib.util
|
||||
_spec = importlib.util.spec_from_file_location("jf_helpers", "${jfLib.helpers}")
|
||||
assert _spec and _spec.loader
|
||||
_jf = importlib.util.module_from_spec(_spec)
|
||||
_spec.loader.exec_module(_jf)
|
||||
setup_jellyfin = _jf.setup_jellyfin
|
||||
jellyfin_api = _jf.jellyfin_api
|
||||
|
||||
auth_header = 'MediaBrowser Client="NixOS Test", DeviceId="test-1337", Device="TestDevice", Version="1.0"'
|
||||
|
||||
def is_throttled():
|
||||
return server.succeed("curl -s http://localhost:8080/api/v2/transfer/speedLimitsMode").strip() == "1"
|
||||
|
||||
def get_alt_dl_limit():
|
||||
prefs = json.loads(server.succeed("curl -s http://localhost:8080/api/v2/app/preferences"))
|
||||
return prefs["alt_dl_limit"]
|
||||
|
||||
def get_alt_up_limit():
|
||||
prefs = json.loads(server.succeed("curl -s http://localhost:8080/api/v2/app/preferences"))
|
||||
return prefs["alt_up_limit"]
|
||||
|
||||
def are_torrents_paused():
|
||||
torrents = json.loads(server.succeed("curl -s 'http://localhost:8080/api/v2/torrents/info'"))
|
||||
if not torrents:
|
||||
return False
|
||||
return all(t["state"].startswith("stopped") for t in torrents)
|
||||
|
||||
start_all()
|
||||
server.wait_for_unit("qbittorrent.service")
|
||||
server.wait_for_open_port(8080)
|
||||
server.wait_until_succeeds("curl -sf http://localhost:8080/api/v2/app/version", timeout=30)
|
||||
|
||||
token, user_id, movie_id, media_source_id = setup_jellyfin(
|
||||
server, retry, auth_header,
|
||||
"${jfLib.payloads.auth}", "${jfLib.payloads.empty}",
|
||||
)
|
||||
|
||||
with subtest("Start monitor service"):
|
||||
python = "${pkgs.python3.withPackages (ps: [ ps.requests ])}/bin/python"
|
||||
monitor = "${../services/jellyfin/jellyfin-qbittorrent-monitor.py}"
|
||||
server.succeed(f"""
|
||||
systemd-run --unit=monitor-test \
|
||||
--setenv=JELLYFIN_URL=http://localhost:8096 \
|
||||
--setenv=JELLYFIN_API_KEY={token} \
|
||||
--setenv=QBITTORRENT_URL=http://localhost:8080 \
|
||||
--setenv=CHECK_INTERVAL=1 \
|
||||
--setenv=STREAMING_START_DELAY=1 \
|
||||
--setenv=STREAMING_STOP_DELAY=1 \
|
||||
--setenv=TOTAL_BANDWIDTH_BUDGET=50000000 \
|
||||
--setenv=SERVICE_BUFFER=2000000 \
|
||||
--setenv=DEFAULT_STREAM_BITRATE=10000000 \
|
||||
--setenv=MIN_TORRENT_SPEED=100 \
|
||||
{python} {monitor}
|
||||
""")
|
||||
time.sleep(2)
|
||||
assert not is_throttled(), "Should start unthrottled"
|
||||
|
||||
client_auth = 'MediaBrowser Client="External Client", DeviceId="external-9999", Device="ExternalDevice", Version="1.0"'
|
||||
client_auth2 = 'MediaBrowser Client="External Client 2", DeviceId="external-8888", Device="ExternalDevice2", Version="1.0"'
|
||||
server_ip = "192.168.1.1"
|
||||
|
||||
with subtest("Client authenticates from external network"):
|
||||
auth_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${jfLib.payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}'"
|
||||
client_auth_result = json.loads(client.succeed(auth_cmd))
|
||||
client_token = client_auth_result["AccessToken"]
|
||||
|
||||
with subtest("Second client authenticates from external network"):
|
||||
auth_cmd2 = f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${jfLib.payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}'"
|
||||
client_auth_result2 = json.loads(client.succeed(auth_cmd2))
|
||||
client_token2 = client_auth_result2["AccessToken"]
|
||||
|
||||
with subtest("External video playback triggers throttling"):
|
||||
playback_start = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-1",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(start_cmd)
|
||||
time.sleep(2)
|
||||
assert is_throttled(), "Should throttle for external video playback"
|
||||
|
||||
with subtest("Pausing disables throttling"):
|
||||
playback_progress = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-1",
|
||||
"IsPaused": True,
|
||||
"PositionTicks": 10000000,
|
||||
}
|
||||
progress_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Progress' -d '{json.dumps(playback_progress)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(progress_cmd)
|
||||
time.sleep(2)
|
||||
|
||||
assert not is_throttled(), "Should unthrottle when paused"
|
||||
|
||||
with subtest("Resuming re-enables throttling"):
|
||||
playback_progress["IsPaused"] = False
|
||||
playback_progress["PositionTicks"] = 20000000
|
||||
progress_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Progress' -d '{json.dumps(playback_progress)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(progress_cmd)
|
||||
time.sleep(2)
|
||||
|
||||
assert is_throttled(), "Should re-throttle when resumed"
|
||||
|
||||
with subtest("Stopping playback disables throttling"):
|
||||
playback_stop = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-1",
|
||||
"PositionTicks": 50000000,
|
||||
}
|
||||
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(stop_cmd)
|
||||
time.sleep(2)
|
||||
|
||||
assert not is_throttled(), "Should unthrottle when playback stops"
|
||||
|
||||
with subtest("Single stream sets proportional alt speed limits"):
|
||||
playback_start = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-proportional",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(start_cmd)
|
||||
time.sleep(3)
|
||||
|
||||
assert is_throttled(), "Should be in alt speed mode during streaming"
|
||||
dl_limit = get_alt_dl_limit()
|
||||
ul_limit = get_alt_up_limit()
|
||||
# Both upload and download should get remaining bandwidth (proportional)
|
||||
assert dl_limit > 0, f"Download limit should be > 0, got {dl_limit}"
|
||||
assert ul_limit == dl_limit, f"Upload limit ({ul_limit}) should equal download limit ({dl_limit})"
|
||||
|
||||
# Stop playback
|
||||
playback_stop = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-proportional",
|
||||
"PositionTicks": 50000000,
|
||||
}
|
||||
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(stop_cmd)
|
||||
time.sleep(3)
|
||||
|
||||
with subtest("Multiple streams reduce available bandwidth"):
|
||||
# Start first stream
|
||||
playback1 = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-multi-1",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd1 = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback1)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(start_cmd1)
|
||||
time.sleep(3)
|
||||
|
||||
single_dl_limit = get_alt_dl_limit()
|
||||
|
||||
# Start second stream with different client identity
|
||||
playback2 = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-multi-2",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd2 = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback2)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}, Token={client_token2}'"
|
||||
client.succeed(start_cmd2)
|
||||
time.sleep(3)
|
||||
|
||||
dual_dl_limit = get_alt_dl_limit()
|
||||
# Two streams should leave less bandwidth than one stream
|
||||
assert dual_dl_limit < single_dl_limit, f"Two streams ({dual_dl_limit}) should have lower limit than one ({single_dl_limit})"
|
||||
|
||||
# Stop both streams
|
||||
stop1 = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-multi-1",
|
||||
"PositionTicks": 50000000,
|
||||
}
|
||||
stop_cmd1 = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(stop1)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(stop_cmd1)
|
||||
|
||||
stop2 = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-multi-2",
|
||||
"PositionTicks": 50000000,
|
||||
}
|
||||
stop_cmd2 = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(stop2)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}, Token={client_token2}'"
|
||||
client.succeed(stop_cmd2)
|
||||
time.sleep(3)
|
||||
|
||||
with subtest("Budget exhaustion pauses all torrents"):
|
||||
# Stop current monitor
|
||||
server.succeed("systemctl stop monitor-test || true")
|
||||
time.sleep(1)
|
||||
|
||||
# Add a dummy torrent so we can check pause state
|
||||
server.succeed("curl -sf -X POST 'http://localhost:8080/api/v2/torrents/add' -d 'urls=magnet:?xt=urn:btih:0000000000000000000000000000000000000001%26dn=test-torrent'")
|
||||
time.sleep(2)
|
||||
|
||||
# Start monitor with impossibly low budget
|
||||
server.succeed(f"""
|
||||
systemd-run --unit=monitor-exhaust \
|
||||
--setenv=JELLYFIN_URL=http://localhost:8096 \
|
||||
--setenv=JELLYFIN_API_KEY={token} \
|
||||
--setenv=QBITTORRENT_URL=http://localhost:8080 \
|
||||
--setenv=CHECK_INTERVAL=1 \
|
||||
--setenv=STREAMING_START_DELAY=1 \
|
||||
--setenv=STREAMING_STOP_DELAY=1 \
|
||||
--setenv=TOTAL_BANDWIDTH_BUDGET=1000 \
|
||||
--setenv=SERVICE_BUFFER=500 \
|
||||
--setenv=DEFAULT_STREAM_BITRATE=10000000 \
|
||||
--setenv=MIN_TORRENT_SPEED=100 \
|
||||
{python} {monitor}
|
||||
""")
|
||||
time.sleep(2)
|
||||
|
||||
# Start a stream - this will exceed the tiny budget
|
||||
playback_start = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-exhaust",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(start_cmd)
|
||||
time.sleep(3)
|
||||
|
||||
assert are_torrents_paused(), "Torrents should be paused when budget is exhausted"
|
||||
|
||||
with subtest("Recovery from pause restores unlimited"):
|
||||
# Stop the stream
|
||||
playback_stop = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-exhaust",
|
||||
"PositionTicks": 50000000,
|
||||
}
|
||||
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(stop_cmd)
|
||||
time.sleep(3)
|
||||
|
||||
assert not is_throttled(), "Should return to unlimited after streams stop"
|
||||
assert not are_torrents_paused(), "Torrents should be resumed after streams stop"
|
||||
|
||||
# Clean up: stop exhaust monitor, restart normal monitor
|
||||
server.succeed("systemctl stop monitor-exhaust || true")
|
||||
time.sleep(1)
|
||||
server.succeed(f"""
|
||||
systemd-run --unit=monitor-test \
|
||||
--setenv=JELLYFIN_URL=http://localhost:8096 \
|
||||
--setenv=JELLYFIN_API_KEY={token} \
|
||||
--setenv=QBITTORRENT_URL=http://localhost:8080 \
|
||||
--setenv=CHECK_INTERVAL=1 \
|
||||
--setenv=STREAMING_START_DELAY=1 \
|
||||
--setenv=STREAMING_STOP_DELAY=1 \
|
||||
--setenv=TOTAL_BANDWIDTH_BUDGET=50000000 \
|
||||
--setenv=SERVICE_BUFFER=2000000 \
|
||||
--setenv=DEFAULT_STREAM_BITRATE=10000000 \
|
||||
--setenv=MIN_TORRENT_SPEED=100 \
|
||||
{python} {monitor}
|
||||
""")
|
||||
time.sleep(2)
|
||||
|
||||
with subtest("Local playback does NOT trigger throttling"):
|
||||
local_auth = 'MediaBrowser Client="Local Client", DeviceId="local-1111", Device="LocalDevice", Version="1.0"'
|
||||
local_auth_result = json.loads(server.succeed(
|
||||
f"curl -sf -X POST 'http://localhost:8096/Users/AuthenticateByName' -d '@${jfLib.payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{local_auth}'"
|
||||
))
|
||||
local_token = local_auth_result["AccessToken"]
|
||||
|
||||
local_playback = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-local",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
server.succeed(f"curl -sf -X POST 'http://localhost:8096/Sessions/Playing' -d '{json.dumps(local_playback)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{local_auth}, Token={local_token}'")
|
||||
time.sleep(2)
|
||||
assert not is_throttled(), "Should NOT throttle for local playback"
|
||||
|
||||
local_playback["PositionTicks"] = 50000000
|
||||
server.succeed(f"curl -sf -X POST 'http://localhost:8096/Sessions/Playing/Stopped' -d '{json.dumps(local_playback)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{local_auth}, Token={local_token}'")
|
||||
|
||||
# === WEBHOOK TESTS ===
|
||||
#
|
||||
# Configure the Jellyfin Webhook plugin to target the monitor, then verify
|
||||
# the real Jellyfin → plugin → monitor path reacts faster than any possible
|
||||
# poll. CHECK_INTERVAL=30 rules out polling as the cause.
|
||||
|
||||
WEBHOOK_PORT = 9898
|
||||
WEBHOOK_CREDS = "/tmp/webhook-creds"
|
||||
|
||||
# Start a webhook-enabled monitor with long poll interval.
|
||||
server.succeed("systemctl stop monitor-test || true")
|
||||
time.sleep(1)
|
||||
server.succeed(f"""
|
||||
systemd-run --unit=monitor-webhook \
|
||||
--setenv=JELLYFIN_URL=http://localhost:8096 \
|
||||
--setenv=JELLYFIN_API_KEY={token} \
|
||||
--setenv=QBITTORRENT_URL=http://localhost:8080 \
|
||||
--setenv=CHECK_INTERVAL=30 \
|
||||
--setenv=STREAMING_START_DELAY=1 \
|
||||
--setenv=STREAMING_STOP_DELAY=1 \
|
||||
--setenv=TOTAL_BANDWIDTH_BUDGET=50000000 \
|
||||
--setenv=SERVICE_BUFFER=2000000 \
|
||||
--setenv=DEFAULT_STREAM_BITRATE=10000000 \
|
||||
--setenv=MIN_TORRENT_SPEED=100 \
|
||||
--setenv=WEBHOOK_PORT={WEBHOOK_PORT} \
|
||||
--setenv=WEBHOOK_BIND=127.0.0.1 \
|
||||
{python} {monitor}
|
||||
""")
|
||||
server.wait_until_succeeds(f"ss -ltn | grep -q ':{WEBHOOK_PORT}'", timeout=15)
|
||||
time.sleep(2)
|
||||
assert not is_throttled(), "Should start unthrottled"
|
||||
|
||||
# Drop the admin token where the configure script expects it (production uses agenix).
|
||||
server.succeed(f"mkdir -p {WEBHOOK_CREDS} && echo '{token}' > {WEBHOOK_CREDS}/jellyfin-api-key")
|
||||
server.succeed(
|
||||
f"systemd-run --wait --unit=webhook-configure-test "
|
||||
f"--setenv=CREDENTIALS_DIRECTORY={WEBHOOK_CREDS} "
|
||||
f"${configureWebhook}"
|
||||
)
|
||||
|
||||
with subtest("Real PlaybackStart event throttles via the plugin"):
|
||||
playback_start = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-plugin-start",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(start_cmd)
|
||||
server.wait_until_succeeds(
|
||||
"curl -sf http://localhost:8080/api/v2/transfer/speedLimitsMode | grep -q '^1$'",
|
||||
timeout=5,
|
||||
)
|
||||
# Let STREAMING_STOP_DELAY (1s) elapse so the upcoming stop is not swallowed by hysteresis.
|
||||
time.sleep(2)
|
||||
|
||||
with subtest("Real PlaybackStop event unthrottles via the plugin"):
|
||||
playback_stop = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-plugin-start",
|
||||
"PositionTicks": 50000000,
|
||||
}
|
||||
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(stop_cmd)
|
||||
server.wait_until_succeeds(
|
||||
"curl -sf http://localhost:8080/api/v2/transfer/speedLimitsMode | grep -q '^0$'",
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
# Restore fast-polling monitor for the service-restart tests below.
|
||||
server.succeed("systemctl stop monitor-webhook || true")
|
||||
time.sleep(1)
|
||||
server.succeed(f"""
|
||||
systemd-run --unit=monitor-test \
|
||||
--setenv=JELLYFIN_URL=http://localhost:8096 \
|
||||
--setenv=JELLYFIN_API_KEY={token} \
|
||||
--setenv=QBITTORRENT_URL=http://localhost:8080 \
|
||||
--setenv=CHECK_INTERVAL=1 \
|
||||
--setenv=STREAMING_START_DELAY=1 \
|
||||
--setenv=STREAMING_STOP_DELAY=1 \
|
||||
--setenv=TOTAL_BANDWIDTH_BUDGET=50000000 \
|
||||
--setenv=SERVICE_BUFFER=2000000 \
|
||||
--setenv=DEFAULT_STREAM_BITRATE=10000000 \
|
||||
--setenv=MIN_TORRENT_SPEED=100 \
|
||||
{python} {monitor}
|
||||
""")
|
||||
time.sleep(2)
|
||||
|
||||
|
||||
# === SERVICE RESTART TESTS ===
|
||||
|
||||
with subtest("qBittorrent restart during throttled state re-applies throttling"):
|
||||
# Start external playback to trigger throttling
|
||||
playback_start = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-restart-1",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(start_cmd)
|
||||
time.sleep(2)
|
||||
assert is_throttled(), "Should be throttled before qBittorrent restart"
|
||||
|
||||
# Restart qBittorrent (this resets alt_speed to its config default - disabled)
|
||||
server.succeed("systemctl restart qbittorrent.service")
|
||||
server.wait_for_unit("qbittorrent.service")
|
||||
server.wait_for_open_port(8080)
|
||||
server.wait_until_succeeds("curl -sf http://localhost:8080/api/v2/app/version", timeout=30)
|
||||
|
||||
# qBittorrent restarted - alt_speed is now False (default on startup)
|
||||
# The monitor should detect this and re-apply throttling
|
||||
time.sleep(3) # Give monitor time to detect and re-apply
|
||||
assert is_throttled(), "Monitor should re-apply throttling after qBittorrent restart"
|
||||
|
||||
# Stop playback to clean up
|
||||
playback_stop = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-restart-1",
|
||||
"PositionTicks": 50000000,
|
||||
}
|
||||
stop_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing/Stopped' -d '{json.dumps(playback_stop)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(stop_cmd)
|
||||
time.sleep(2)
|
||||
|
||||
with subtest("qBittorrent restart during unthrottled state stays unthrottled"):
|
||||
# Verify we're unthrottled (no active streams)
|
||||
assert not is_throttled(), "Should be unthrottled before test"
|
||||
|
||||
# Restart qBittorrent
|
||||
server.succeed("systemctl restart qbittorrent.service")
|
||||
server.wait_for_unit("qbittorrent.service")
|
||||
server.wait_for_open_port(8080)
|
||||
server.wait_until_succeeds("curl -sf http://localhost:8080/api/v2/app/version", timeout=30)
|
||||
|
||||
# Give monitor time to check state
|
||||
time.sleep(3)
|
||||
assert not is_throttled(), "Should remain unthrottled after qBittorrent restart with no streams"
|
||||
|
||||
with subtest("Jellyfin restart during throttled state maintains throttling"):
|
||||
# Start external playback to trigger throttling
|
||||
playback_start = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-restart-2",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(start_cmd)
|
||||
time.sleep(2)
|
||||
assert is_throttled(), "Should be throttled before Jellyfin restart"
|
||||
|
||||
# Restart Jellyfin
|
||||
server.succeed("systemctl restart jellyfin.service")
|
||||
server.wait_for_unit("jellyfin.service")
|
||||
server.wait_for_open_port(8096)
|
||||
server.wait_until_succeeds("curl -sf http://localhost:8096/health | grep -q Healthy", timeout=60)
|
||||
|
||||
# During Jellyfin restart, monitor can't reach Jellyfin
|
||||
# After restart, sessions are cleared - monitor should eventually unthrottle
|
||||
# But during the unavailability window, throttling should be maintained (fail-safe)
|
||||
time.sleep(3)
|
||||
|
||||
# Re-authenticate (old token invalid after restart)
|
||||
client_auth_result = json.loads(client.succeed(
|
||||
f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${jfLib.payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}'"
|
||||
))
|
||||
client_token = client_auth_result["AccessToken"]
|
||||
client_auth_result2 = json.loads(client.succeed(
|
||||
f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${jfLib.payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}'"
|
||||
))
|
||||
client_token2 = client_auth_result2["AccessToken"]
|
||||
|
||||
# No active streams after Jellyfin restart, should eventually unthrottle
|
||||
time.sleep(3)
|
||||
assert not is_throttled(), "Should unthrottle after Jellyfin restart clears sessions"
|
||||
|
||||
with subtest("Monitor recovers after Jellyfin temporary unavailability"):
|
||||
# Re-authenticate with fresh token
|
||||
client_auth_result = json.loads(client.succeed(
|
||||
f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${jfLib.payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}'"
|
||||
))
|
||||
client_token = client_auth_result["AccessToken"]
|
||||
client_auth_result2 = json.loads(client.succeed(
|
||||
f"curl -sf -X POST 'http://{server_ip}:8096/Users/AuthenticateByName' -d '@${jfLib.payloads.auth}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth2}'"
|
||||
))
|
||||
client_token2 = client_auth_result2["AccessToken"]
|
||||
|
||||
# Start playback
|
||||
playback_start = {
|
||||
"ItemId": movie_id,
|
||||
"MediaSourceId": media_source_id,
|
||||
"PlaySessionId": "test-play-session-restart-3",
|
||||
"CanSeek": True,
|
||||
"IsPaused": False,
|
||||
}
|
||||
start_cmd = f"curl -sf -X POST 'http://{server_ip}:8096/Sessions/Playing' -d '{json.dumps(playback_start)}' -H 'Content-Type:application/json' -H 'X-Emby-Authorization:{client_auth}, Token={client_token}'"
|
||||
client.succeed(start_cmd)
|
||||
time.sleep(2)
|
||||
assert is_throttled(), "Should be throttled"
|
||||
|
||||
# Stop Jellyfin briefly (simulating temporary unavailability)
|
||||
server.succeed("systemctl stop jellyfin.service")
|
||||
time.sleep(2)
|
||||
|
||||
# During unavailability, throttle state should be maintained (fail-safe)
|
||||
assert is_throttled(), "Should maintain throttle during Jellyfin unavailability"
|
||||
|
||||
# Bring Jellyfin back
|
||||
server.succeed("systemctl start jellyfin.service")
|
||||
server.wait_for_unit("jellyfin.service")
|
||||
server.wait_for_open_port(8096)
|
||||
server.wait_until_succeeds("curl -sf http://localhost:8096/health | grep -q Healthy", timeout=60)
|
||||
|
||||
# After Jellyfin comes back, sessions are gone - should unthrottle
|
||||
time.sleep(3)
|
||||
assert not is_throttled(), "Should unthrottle after Jellyfin returns with no sessions"
|
||||
'';
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
{ pkgs, lib }:
|
||||
{
|
||||
payloads = {
|
||||
auth = pkgs.writeText "auth.json" (builtins.toJSON { Username = "jellyfin"; });
|
||||
empty = pkgs.writeText "empty.json" (builtins.toJSON { });
|
||||
};
|
||||
|
||||
helpers = ./jellyfin-test-lib.py;
|
||||
|
||||
jellyfinTestConfig =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
services.jellyfin.enable = true;
|
||||
environment.systemPackages = with pkgs; [
|
||||
curl
|
||||
ffmpeg
|
||||
];
|
||||
virtualisation.diskSize = lib.mkDefault (3 * 1024);
|
||||
};
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
import json
|
||||
from urllib.parse import urlencode
|
||||
|
||||
|
||||
def jellyfin_api(machine, method, path, auth_header, token=None, data_file=None, data=None):
|
||||
hdr = auth_header + (f", Token={token}" if token else "")
|
||||
cmd = f"curl -sf -X {method} 'http://localhost:8096{path}'"
|
||||
if data_file:
|
||||
cmd += f" -d '@{data_file}' -H 'Content-Type:application/json'"
|
||||
elif data:
|
||||
payload = json.dumps(data) if isinstance(data, dict) else data
|
||||
cmd += f" -d '{payload}' -H 'Content-Type:application/json'"
|
||||
cmd += f" -H 'X-Emby-Authorization:{hdr}'"
|
||||
return machine.succeed(cmd)
|
||||
|
||||
|
||||
def setup_jellyfin(machine, retry, auth_header, auth_payload, empty_payload):
|
||||
machine.wait_for_unit("jellyfin.service")
|
||||
machine.wait_for_open_port(8096)
|
||||
machine.wait_until_succeeds(
|
||||
"curl -sf http://localhost:8096/health | grep -q Healthy", timeout=120
|
||||
)
|
||||
|
||||
machine.wait_until_succeeds(
|
||||
f"curl -sf 'http://localhost:8096/Startup/Configuration' "
|
||||
f"-H 'X-Emby-Authorization:{auth_header}'"
|
||||
)
|
||||
jellyfin_api(machine, "GET", "/Startup/FirstUser", auth_header)
|
||||
jellyfin_api(machine, "POST", "/Startup/Complete", auth_header)
|
||||
|
||||
result = json.loads(
|
||||
jellyfin_api(
|
||||
machine, "POST", "/Users/AuthenticateByName",
|
||||
auth_header, data_file=auth_payload,
|
||||
)
|
||||
)
|
||||
token = result["AccessToken"]
|
||||
user_id = result["User"]["Id"]
|
||||
|
||||
tempdir = machine.succeed("mktemp -d -p /var/lib/jellyfin").strip()
|
||||
machine.succeed(f"chmod 755 '{tempdir}'")
|
||||
machine.succeed(
|
||||
f"ffmpeg -f lavfi -i testsrc2=duration=5 -f lavfi -i sine=frequency=440:duration=5 "
|
||||
f"-c:v libx264 -c:a aac '{tempdir}/Test Movie (2024).mkv'"
|
||||
)
|
||||
|
||||
query = urlencode({
|
||||
"name": "Test Library",
|
||||
"collectionType": "Movies",
|
||||
"paths": tempdir,
|
||||
"refreshLibrary": "true",
|
||||
})
|
||||
jellyfin_api(
|
||||
machine, "POST", f"/Library/VirtualFolders?{query}",
|
||||
auth_header, token=token, data_file=empty_payload,
|
||||
)
|
||||
|
||||
def is_ready(_):
|
||||
folders = json.loads(
|
||||
jellyfin_api(machine, "GET", "/Library/VirtualFolders", auth_header, token=token)
|
||||
)
|
||||
return all(f.get("RefreshStatus") == "Idle" for f in folders)
|
||||
retry(is_ready, timeout=60)
|
||||
|
||||
movie_id = None
|
||||
media_source_id = None
|
||||
|
||||
def get_movie(_):
|
||||
nonlocal movie_id, media_source_id
|
||||
items = json.loads(
|
||||
jellyfin_api(
|
||||
machine, "GET",
|
||||
f"/Users/{user_id}/Items?IncludeItemTypes=Movie&Recursive=true",
|
||||
auth_header, token=token,
|
||||
)
|
||||
)
|
||||
if items["TotalRecordCount"] > 0:
|
||||
movie_id = items["Items"][0]["Id"]
|
||||
info = json.loads(
|
||||
jellyfin_api(
|
||||
machine, "GET", f"/Users/{user_id}/Items/{movie_id}",
|
||||
auth_header, token=token,
|
||||
)
|
||||
)
|
||||
media_source_id = info["MediaSources"][0]["Id"]
|
||||
return True
|
||||
return False
|
||||
retry(get_movie, timeout=60)
|
||||
|
||||
return token, user_id, movie_id, media_source_id
|
||||
@@ -1,97 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
inputs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
baseServiceConfigs = import ../service-configs.nix;
|
||||
testServiceConfigs = lib.recursiveUpdate baseServiceConfigs {
|
||||
zpool_ssds = "";
|
||||
https.domain = "test.local";
|
||||
minecraft.parent_dir = "/var/lib/minecraft";
|
||||
minecraft.memory = rec {
|
||||
heap_size_m = 1000;
|
||||
};
|
||||
};
|
||||
|
||||
# Create pkgs with nix-minecraft overlay and unfree packages allowed
|
||||
testPkgs = import inputs.nixpkgs {
|
||||
system = pkgs.stdenv.targetPlatform.system;
|
||||
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "minecraft-server" ];
|
||||
overlays = [
|
||||
inputs.nix-minecraft.overlay
|
||||
(import ../modules/overlays.nix)
|
||||
];
|
||||
};
|
||||
in
|
||||
testPkgs.testers.runNixOSTest {
|
||||
name = "minecraft server startup test";
|
||||
|
||||
node.specialArgs = {
|
||||
inherit inputs lib;
|
||||
service_configs = testServiceConfigs;
|
||||
username = "testuser";
|
||||
};
|
||||
|
||||
nodes.machine =
|
||||
{ lib, ... }:
|
||||
{
|
||||
imports = [
|
||||
../services/minecraft.nix
|
||||
];
|
||||
|
||||
# Force to 0 because no huge pages in vms ?
|
||||
boot.kernel.sysctl."vm.nr_hugepages" = lib.mkForce 0;
|
||||
|
||||
# Enable caddy service (required by minecraft service)
|
||||
services.caddy.enable = true;
|
||||
|
||||
# Enable networking for the test (needed for minecraft mods to download mappings)
|
||||
networking.dhcpcd.enable = true;
|
||||
|
||||
# Disable the ZFS mount dependency service in test environment
|
||||
systemd.services."minecraft-server-main_mounts".enable = lib.mkForce false;
|
||||
|
||||
# Remove service dependencies that require ZFS
|
||||
systemd.services.minecraft-server-main = {
|
||||
wants = lib.mkForce [ ];
|
||||
after = lib.mkForce [ ];
|
||||
requires = lib.mkForce [ ];
|
||||
serviceConfig = {
|
||||
Nice = lib.mkForce 0;
|
||||
LimitMEMLOCK = lib.mkForce "infinity";
|
||||
};
|
||||
};
|
||||
|
||||
# Test-specific overrides only - reduce memory for testing
|
||||
services.minecraft-servers.servers.main.jvmOpts = lib.mkForce "-Xmx1G -Xms1G";
|
||||
|
||||
# Create test user
|
||||
users.users.testuser = {
|
||||
isNormalUser = true;
|
||||
uid = 1000;
|
||||
extraGroups = [ "minecraft" ];
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_for_unit("multi-user.target")
|
||||
|
||||
# Wait for minecraft service to be available
|
||||
machine.wait_for_unit("minecraft-server-main.service")
|
||||
|
||||
# Wait up to 60 seconds for the server to complete startup
|
||||
with machine.nested("Waiting for minecraft server startup completion"):
|
||||
try:
|
||||
machine.wait_until_succeeds(
|
||||
"grep -Eq '\\[[0-9]+:[0-9]+:[0-9]+\\] \\[Server thread/INFO\\]: Done \\([0-9]+\\.[0-9]+s\\)! For help, type \"help\"' /var/lib/minecraft/main/logs/latest.log",
|
||||
timeout=120
|
||||
)
|
||||
except Exception:
|
||||
print(machine.succeed("cat /var/lib/minecraft/main/logs/latest.log"))
|
||||
raise
|
||||
'';
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
import http.server, json, sys
|
||||
|
||||
PORT = int(sys.argv[1])
|
||||
DATA_FILE = sys.argv[2]
|
||||
|
||||
class Handler(http.server.BaseHTTPRequestHandler):
|
||||
def log_message(self, fmt, *args):
|
||||
pass
|
||||
|
||||
def _read_body(self):
|
||||
length = int(self.headers.get("Content-Length", 0))
|
||||
return json.loads(self.rfile.read(length)) if length else {}
|
||||
|
||||
def _json(self, code, body):
|
||||
data = json.dumps(body).encode()
|
||||
self.send_response(code)
|
||||
self.send_header("Content-Type", "application/json")
|
||||
self.end_headers()
|
||||
self.wfile.write(data)
|
||||
|
||||
def do_POST(self):
|
||||
if self.path == "/api/annotations":
|
||||
body = self._read_body()
|
||||
try:
|
||||
with open(DATA_FILE) as f:
|
||||
annotations = json.load(f)
|
||||
except Exception:
|
||||
annotations = []
|
||||
aid = len(annotations) + 1
|
||||
body["id"] = aid
|
||||
annotations.append(body)
|
||||
with open(DATA_FILE, "w") as f:
|
||||
json.dump(annotations, f)
|
||||
self._json(200, {"id": aid, "message": "Annotation added"})
|
||||
else:
|
||||
self.send_response(404)
|
||||
self.end_headers()
|
||||
|
||||
def do_PATCH(self):
|
||||
if self.path.startswith("/api/annotations/"):
|
||||
aid = int(self.path.rsplit("/", 1)[-1])
|
||||
body = self._read_body()
|
||||
try:
|
||||
with open(DATA_FILE) as f:
|
||||
annotations = json.load(f)
|
||||
except Exception:
|
||||
annotations = []
|
||||
for a in annotations:
|
||||
if a["id"] == aid:
|
||||
a.update(body)
|
||||
with open(DATA_FILE, "w") as f:
|
||||
json.dump(annotations, f)
|
||||
self._json(200, {"message": "Annotation patched"})
|
||||
else:
|
||||
self.send_response(404)
|
||||
self.end_headers()
|
||||
|
||||
http.server.HTTPServer(("127.0.0.1", PORT), Handler).serve_forever()
|
||||
@@ -1,174 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
testPkgs = pkgs.appendOverlays [ (import ../modules/overlays.nix) ];
|
||||
in
|
||||
testPkgs.testers.runNixOSTest {
|
||||
name = "ntfy-alerts";
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [
|
||||
../modules/ntfy-alerts.nix
|
||||
];
|
||||
|
||||
system.stateVersion = config.system.stateVersion;
|
||||
|
||||
virtualisation.memorySize = 2048;
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
curl
|
||||
jq
|
||||
];
|
||||
|
||||
# Create test topic file
|
||||
systemd.tmpfiles.rules = [
|
||||
"f /run/ntfy-test-topic 0644 root root - test-alerts"
|
||||
];
|
||||
|
||||
# Mock ntfy server that records POST requests
|
||||
systemd.services.mock-ntfy =
|
||||
let
|
||||
mockNtfyScript = pkgs.writeScript "mock-ntfy.py" ''
|
||||
import json
|
||||
import os
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
from datetime import datetime
|
||||
|
||||
REQUESTS_FILE = "/tmp/ntfy-requests.json"
|
||||
|
||||
class MockNtfy(BaseHTTPRequestHandler):
|
||||
def _respond(self, code=200, body=b"Ok"):
|
||||
self.send_response(code)
|
||||
self.send_header("Content-Type", "application/json")
|
||||
self.end_headers()
|
||||
self.wfile.write(body if isinstance(body, bytes) else body.encode())
|
||||
|
||||
def do_GET(self):
|
||||
self._respond()
|
||||
|
||||
def do_POST(self):
|
||||
content_length = int(self.headers.get("Content-Length", 0))
|
||||
body = self.rfile.read(content_length).decode() if content_length > 0 else ""
|
||||
|
||||
request_data = {
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"path": self.path,
|
||||
"headers": dict(self.headers),
|
||||
"body": body,
|
||||
}
|
||||
|
||||
# Load existing requests or start new list
|
||||
requests = []
|
||||
if os.path.exists(REQUESTS_FILE):
|
||||
try:
|
||||
with open(REQUESTS_FILE, "r") as f:
|
||||
requests = json.load(f)
|
||||
except:
|
||||
requests = []
|
||||
|
||||
requests.append(request_data)
|
||||
|
||||
with open(REQUESTS_FILE, "w") as f:
|
||||
json.dump(requests, f, indent=2)
|
||||
|
||||
self._respond()
|
||||
|
||||
def log_message(self, format, *args):
|
||||
pass
|
||||
|
||||
HTTPServer(("0.0.0.0", 8080), MockNtfy).serve_forever()
|
||||
'';
|
||||
in
|
||||
{
|
||||
description = "Mock ntfy server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
before = [ "ntfy-alert@test-fail.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.python3}/bin/python3 ${mockNtfyScript}";
|
||||
Type = "simple";
|
||||
};
|
||||
};
|
||||
|
||||
# Test service that will fail
|
||||
systemd.services.test-fail = {
|
||||
description = "Test service that fails";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = "${pkgs.coreutils}/bin/false";
|
||||
};
|
||||
};
|
||||
|
||||
# Configure ntfy-alerts to use mock server
|
||||
services.ntfyAlerts = {
|
||||
enable = true;
|
||||
serverUrl = "http://localhost:8080";
|
||||
topicFile = "/run/ntfy-test-topic";
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
import time
|
||||
|
||||
start_all()
|
||||
|
||||
# Wait for mock ntfy server to be ready
|
||||
machine.wait_for_unit("mock-ntfy.service")
|
||||
machine.wait_until_succeeds("curl -sf http://localhost:8080/", timeout=30)
|
||||
|
||||
# Verify the ntfy-alert@ template service exists
|
||||
machine.succeed("systemctl list-unit-files | grep ntfy-alert@")
|
||||
|
||||
# Verify the global OnFailure drop-in is configured
|
||||
machine.succeed("cat /etc/systemd/system/service.d/onfailure.conf | grep -q 'OnFailure=ntfy-alert@%p.service'")
|
||||
|
||||
# Trigger the test-fail service
|
||||
machine.succeed("systemctl start test-fail.service || true")
|
||||
|
||||
# Wait a moment for the failure notification to be sent
|
||||
time.sleep(2)
|
||||
|
||||
# Verify the ntfy-alert@test-fail service ran
|
||||
machine.succeed("systemctl is-active ntfy-alert@test-fail.service || systemctl is-failed ntfy-alert@test-fail.service || true")
|
||||
|
||||
# Check that the mock server received a POST request
|
||||
machine.wait_until_succeeds("test -f /tmp/ntfy-requests.json", timeout=30)
|
||||
|
||||
# Verify the request content
|
||||
result = machine.succeed("cat /tmp/ntfy-requests.json")
|
||||
requests = json.loads(result)
|
||||
|
||||
assert len(requests) >= 1, f"Expected at least 1 request, got {len(requests)}"
|
||||
|
||||
# Check the first request
|
||||
req = requests[0]
|
||||
assert "/test-alerts" in req["path"], f"Expected path to contain /test-alerts, got {req['path']}"
|
||||
assert "Title" in req["headers"], "Expected Title header"
|
||||
assert "test-fail" in req["headers"]["Title"], f"Expected Title to contain 'test-fail', got {req['headers']['Title']}"
|
||||
assert req["headers"]["Priority"] == "high", f"Expected Priority 'high', got {req['headers'].get('Priority')}"
|
||||
assert req["headers"]["Tags"] == "warning", f"Expected Tags 'warning', got {req['headers'].get('Tags')}"
|
||||
|
||||
print(f"Received notification: Title={req['headers']['Title']}, Body={req['body'][:100]}...")
|
||||
|
||||
# Idempotency test: trigger failure again
|
||||
machine.succeed("rm /tmp/ntfy-requests.json")
|
||||
machine.succeed("systemctl reset-failed test-fail.service || true")
|
||||
machine.succeed("systemctl start test-fail.service || true")
|
||||
time.sleep(2)
|
||||
|
||||
# Verify another notification was sent
|
||||
machine.wait_until_succeeds("test -f /tmp/ntfy-requests.json", timeout=30)
|
||||
result = machine.succeed("cat /tmp/ntfy-requests.json")
|
||||
requests = json.loads(result)
|
||||
assert len(requests) >= 1, f"Expected at least 1 request after second failure, got {len(requests)}"
|
||||
|
||||
print("All tests passed!")
|
||||
'';
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
pkgs.testers.runNixOSTest {
|
||||
name = "test of tests";
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_for_unit("multi-user.target")
|
||||
machine.succeed("echo hello!")
|
||||
'';
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}@args:
|
||||
let
|
||||
handleTest = file: import file (args);
|
||||
in
|
||||
{
|
||||
zfsTest = handleTest ./zfs.nix;
|
||||
testTest = handleTest ./testTest.nix;
|
||||
minecraftTest = handleTest ./minecraft.nix;
|
||||
jellyfinQbittorrentMonitorTest = handleTest ./jellyfin-qbittorrent-monitor.nix;
|
||||
filePermsTest = handleTest ./file-perms.nix;
|
||||
|
||||
# fail2ban tests
|
||||
fail2banSshTest = handleTest ./fail2ban-ssh.nix;
|
||||
fail2banCaddyTest = handleTest ./fail2ban-caddy.nix;
|
||||
fail2banGiteaTest = handleTest ./fail2ban-gitea.nix;
|
||||
fail2banVaultwardenTest = handleTest ./fail2ban-vaultwarden.nix;
|
||||
fail2banImmichTest = handleTest ./fail2ban-immich.nix;
|
||||
fail2banJellyfinTest = handleTest ./fail2ban-jellyfin.nix;
|
||||
|
||||
# jellyfin annotation service test
|
||||
jellyfinAnnotationsTest = handleTest ./jellyfin-annotations.nix;
|
||||
|
||||
# zfs scrub annotations test
|
||||
zfsScrubAnnotationsTest = handleTest ./zfs-scrub-annotations.nix;
|
||||
|
||||
# xmrig auto-pause test
|
||||
xmrigAutoPauseTest = handleTest ./xmrig-auto-pause.nix;
|
||||
# ntfy alerts test
|
||||
ntfyAlertsTest = handleTest ./ntfy-alerts.nix;
|
||||
|
||||
# torrent audit test
|
||||
torrentAuditTest = handleTest ./torrent-audit.nix;
|
||||
|
||||
# gitea runner test
|
||||
giteaRunnerTest = handleTest ./gitea-runner.nix;
|
||||
}
|
||||
@@ -1,422 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
qbitPort = 18080;
|
||||
radarrPort = 17878;
|
||||
sonarrPort = 18989;
|
||||
|
||||
radarrConfig = pkgs.writeText "radarr-config.xml" ''
|
||||
<Config><ApiKey>test-radarr-key</ApiKey></Config>
|
||||
'';
|
||||
|
||||
sonarrConfig = pkgs.writeText "sonarr-config.xml" ''
|
||||
<Config><ApiKey>test-sonarr-key</ApiKey></Config>
|
||||
'';
|
||||
|
||||
python = "${
|
||||
pkgs.python3.withPackages (ps: [
|
||||
ps.pyarr
|
||||
ps.qbittorrent-api
|
||||
])
|
||||
}/bin/python3";
|
||||
auditScript = ../services/arr/torrent-audit.py;
|
||||
|
||||
# Single mock API server script -- accepts SERVICE and PORT as CLI args.
|
||||
# Routes responses based on SERVICE type (qbit / radarr / sonarr).
|
||||
mockScript = pkgs.writeText "mock-api-server.py" ''
|
||||
import json
|
||||
import sys
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
from urllib.parse import urlparse, parse_qs
|
||||
|
||||
SERVICE = sys.argv[1]
|
||||
PORT = int(sys.argv[2])
|
||||
|
||||
# ── Hash constants (uppercase, 40 hex chars) ──────────────────────────
|
||||
# Movies
|
||||
UNMANAGED_MOV = "A" * 38 + "01"
|
||||
MANAGED_MOV = "A" * 38 + "02"
|
||||
OLD_MOV = "A" * 38 + "03" # movieId=2, older import → abandoned SAFE
|
||||
NEW_MOV = "A" * 38 + "04" # movieId=2, newer import → keeper
|
||||
KEEPER_CROSS = "A" * 38 + "05" # keeper for movieId=3, old for movieId=4
|
||||
KEEPER3_OLD = "A" * 38 + "0B" # movieId=3, older import (not in qBit)
|
||||
KEEPER4_NEW = "A" * 38 + "06" # movieId=4, newer import → keeper
|
||||
REMOVED_OLD = "A" * 38 + "07" # movieId=5, older import (movie removed)
|
||||
REMOVED_NEW = "A" * 38 + "08" # movieId=5, newer import → keeper (not in qBit)
|
||||
LARGER_OLD = "A" * 38 + "09" # movieId=6, older import (larger than current)
|
||||
LARGER_NEW = "A" * 38 + "0A" # movieId=6, newer import → keeper
|
||||
SINGLE_CROSS = "A" * 38 + "0C" # movieId=7 single import AND older import for movieId=8
|
||||
SINGLE8_NEW = "A" * 38 + "0D" # movieId=8, newer import → keeper (not in qBit)
|
||||
QUEUED_MOV = "A" * 38 + "0E" # in Radarr queue, not in history
|
||||
|
||||
# TV
|
||||
UNMANAGED_TV = "B" * 38 + "01"
|
||||
MANAGED_TV = "B" * 38 + "02" # episodeId=100, single import
|
||||
OLD_TV = "B" * 38 + "03" # episodeId=200, older import → abandoned SAFE
|
||||
NEW_TV = "B" * 38 + "04" # episodeId=200, newer import → active
|
||||
SEASON_PACK = "B" * 38 + "05" # episodeIds 300,301,302 (still active for 301,302)
|
||||
REPACK = "B" * 38 + "06" # episodeId=300, newer import → active
|
||||
REMOVED_TV = "B" * 38 + "07" # episodeId=400, older import (series removed)
|
||||
REMOVED_TV_NEW = "B" * 38 + "08" # episodeId=400, newer import (not in qBit)
|
||||
|
||||
def make_torrent(h, name, size, added_on, state="uploading"):
|
||||
return {
|
||||
"hash": h.lower(),
|
||||
"name": name,
|
||||
"size": size,
|
||||
"state": state,
|
||||
"added_on": added_on,
|
||||
"content_path": f"/downloads/{name}",
|
||||
}
|
||||
|
||||
QBIT_DATA = {
|
||||
"movies": [
|
||||
make_torrent(UNMANAGED_MOV, "Unmanaged.Movie.2024", 5_000_000_000, 1704067200),
|
||||
make_torrent(MANAGED_MOV, "Managed.Movie.2024", 4_000_000_000, 1704067201),
|
||||
make_torrent(OLD_MOV, "Old.Movie.Quality.2024", 3_000_000_000, 1704067202),
|
||||
make_torrent(NEW_MOV, "New.Movie.Quality.2024", 6_000_000_000, 1704067203),
|
||||
make_torrent(KEEPER_CROSS, "CrossRef.Movie.2024", 4_500_000_000, 1704067204),
|
||||
make_torrent(REMOVED_OLD, "Removed.Movie.2024", 3_500_000_000, 1704067205),
|
||||
make_torrent(LARGER_OLD, "Larger.Movie.2024", 10_737_418_240, 1704067206),
|
||||
make_torrent(SINGLE_CROSS, "SingleCross.Movie.2024", 4_000_000_000, 1704067207),
|
||||
make_torrent(QUEUED_MOV, "Queued.Movie.2024", 2_000_000_000, 1704067208),
|
||||
],
|
||||
"tvshows": [
|
||||
make_torrent(UNMANAGED_TV, "Unmanaged.Show.S01E01", 1_000_000_000, 1704067200),
|
||||
make_torrent(MANAGED_TV, "Managed.Show.S01E01", 800_000_000, 1704067201),
|
||||
make_torrent(OLD_TV, "Old.Show.S01E01", 700_000_000, 1704067202),
|
||||
make_torrent(NEW_TV, "New.Show.S01E01", 1_200_000_000, 1704067203),
|
||||
make_torrent(SEASON_PACK, "Season.Pack.S02", 5_000_000_000, 1704067204),
|
||||
make_torrent(REMOVED_TV, "Removed.Show.S01E01", 900_000_000, 1704067205),
|
||||
],
|
||||
}
|
||||
|
||||
# ── Radarr mock data ──────────────────────────────────────────────────
|
||||
RADARR_HISTORY = [
|
||||
{"movieId": 1, "downloadId": MANAGED_MOV, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"movieId": 2, "downloadId": OLD_MOV, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"movieId": 2, "downloadId": NEW_MOV, "eventType": "downloadFolderImported", "date": "2024-06-01T00:00:00Z"},
|
||||
{"movieId": 3, "downloadId": KEEPER3_OLD, "eventType": "downloadFolderImported", "date": "2023-01-01T00:00:00Z"},
|
||||
{"movieId": 3, "downloadId": KEEPER_CROSS, "eventType": "downloadFolderImported", "date": "2024-03-01T00:00:00Z"},
|
||||
{"movieId": 4, "downloadId": KEEPER_CROSS, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"movieId": 4, "downloadId": KEEPER4_NEW, "eventType": "downloadFolderImported", "date": "2024-06-01T00:00:00Z"},
|
||||
{"movieId": 5, "downloadId": REMOVED_OLD, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"movieId": 5, "downloadId": REMOVED_NEW, "eventType": "downloadFolderImported", "date": "2024-06-01T00:00:00Z"},
|
||||
{"movieId": 6, "downloadId": LARGER_OLD, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"movieId": 6, "downloadId": LARGER_NEW, "eventType": "downloadFolderImported", "date": "2024-06-01T00:00:00Z"},
|
||||
# Non-import event (should be ignored by abandoned detection)
|
||||
{"movieId": 2, "downloadId": NEW_MOV, "eventType": "grabbed", "date": "2024-05-31T00:00:00Z"},
|
||||
# Single-import keeper test (Fix 13): SINGLE_CROSS is only import for movieId=7
|
||||
# AND an older import for movieId=8 (SINGLE8_NEW is newer for movieId=8)
|
||||
{"movieId": 7, "downloadId": SINGLE_CROSS, "eventType": "downloadFolderImported", "date": "2024-03-01T00:00:00Z"},
|
||||
{"movieId": 8, "downloadId": SINGLE_CROSS, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"movieId": 8, "downloadId": SINGLE8_NEW, "eventType": "downloadFolderImported", "date": "2024-06-01T00:00:00Z"},
|
||||
]
|
||||
|
||||
RADARR_MOVIES = [
|
||||
{"id": 1, "hasFile": True, "movieFile": {"size": 4_000_000_000, "quality": {"quality": {"name": "Bluray-1080p"}}}},
|
||||
{"id": 2, "hasFile": True, "movieFile": {"size": 6_000_000_000, "quality": {"quality": {"name": "Remux-1080p"}}}},
|
||||
{"id": 3, "hasFile": True, "movieFile": {"size": 4_500_000_000, "quality": {"quality": {"name": "Bluray-1080p"}}}},
|
||||
{"id": 4, "hasFile": True, "movieFile": {"size": 5_000_000_000, "quality": {"quality": {"name": "Remux-1080p"}}}},
|
||||
# id=5 intentionally MISSING -- movie removed from Radarr
|
||||
{"id": 6, "hasFile": True, "movieFile": {"size": 5_368_709_120, "quality": {"quality": {"name": "Bluray-720p"}}}},
|
||||
{"id": 7, "hasFile": True, "movieFile": {"size": 4_000_000_000, "quality": {"quality": {"name": "Bluray-1080p"}}}},
|
||||
{"id": 8, "hasFile": True, "movieFile": {"size": 5_000_000_000, "quality": {"quality": {"name": "Remux-1080p"}}}},
|
||||
]
|
||||
|
||||
# ── Sonarr mock data ──────────────────────────────────────────────────
|
||||
# Page 1 records (returned on page=1, with totalRecords=1001 to force pagination)
|
||||
SONARR_HISTORY_PAGE1 = [
|
||||
{"episodeId": 100, "seriesId": 1, "downloadId": MANAGED_TV, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"episodeId": 200, "seriesId": 1, "downloadId": OLD_TV, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"episodeId": 200, "seriesId": 1, "downloadId": NEW_TV, "eventType": "downloadFolderImported", "date": "2024-06-01T00:00:00Z"},
|
||||
# Season pack covers 3 episodes
|
||||
{"episodeId": 300, "seriesId": 2, "downloadId": SEASON_PACK, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"episodeId": 301, "seriesId": 2, "downloadId": SEASON_PACK, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"episodeId": 302, "seriesId": 2, "downloadId": SEASON_PACK, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
# Non-import event (should be ignored)
|
||||
{"episodeId": 200, "seriesId": 1, "downloadId": NEW_TV, "eventType": "grabbed", "date": "2024-05-31T00:00:00Z"},
|
||||
]
|
||||
# Page 2 records (critical data only available via pagination)
|
||||
SONARR_HISTORY_PAGE2 = [
|
||||
# Episode 300 re-imported from a repack -- but 301,302 still reference SEASON_PACK
|
||||
{"episodeId": 300, "seriesId": 2, "downloadId": REPACK, "eventType": "downloadFolderImported", "date": "2024-06-01T00:00:00Z"},
|
||||
# Removed series scenario
|
||||
{"episodeId": 400, "seriesId": 99, "downloadId": REMOVED_TV, "eventType": "downloadFolderImported", "date": "2024-01-01T00:00:00Z"},
|
||||
{"episodeId": 400, "seriesId": 99, "downloadId": REMOVED_TV_NEW,"eventType": "downloadFolderImported", "date": "2024-06-01T00:00:00Z"},
|
||||
]
|
||||
SONARR_HISTORY_ALL = SONARR_HISTORY_PAGE1 + SONARR_HISTORY_PAGE2
|
||||
|
||||
# seriesId=99 intentionally MISSING -- series removed from Sonarr
|
||||
SONARR_SERIES = [
|
||||
{"id": 1, "title": "Managed Show"},
|
||||
{"id": 2, "title": "Season Pack Show"},
|
||||
]
|
||||
|
||||
class Handler(BaseHTTPRequestHandler):
|
||||
def do_POST(self):
|
||||
if self.path.startswith("/api/v2/auth/login"):
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "text/plain")
|
||||
self.send_header("Set-Cookie", "SID=test; path=/")
|
||||
self.end_headers()
|
||||
self.wfile.write(b"Ok.")
|
||||
else:
|
||||
self._handle_json()
|
||||
|
||||
def do_GET(self):
|
||||
self._handle_json()
|
||||
|
||||
def _handle_json(self):
|
||||
parsed = urlparse(self.path)
|
||||
path = parsed.path
|
||||
params = parse_qs(parsed.query)
|
||||
|
||||
content_length = int(self.headers.get("Content-Length", 0))
|
||||
if content_length:
|
||||
body = self.rfile.read(content_length).decode()
|
||||
params.update(parse_qs(body))
|
||||
|
||||
response = self._route(path, params)
|
||||
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "application/json")
|
||||
self.end_headers()
|
||||
self.wfile.write(json.dumps(response).encode())
|
||||
|
||||
def _route(self, path, params):
|
||||
if SERVICE == "qbit":
|
||||
category = params.get("category", [""])[0]
|
||||
return QBIT_DATA.get(category, [])
|
||||
|
||||
elif SERVICE == "radarr":
|
||||
if path == "/api/v3/history":
|
||||
return {"records": RADARR_HISTORY, "totalRecords": len(RADARR_HISTORY)}
|
||||
elif path == "/api/v3/queue":
|
||||
return {"records": [{"downloadId": QUEUED_MOV}], "totalRecords": 1}
|
||||
elif path == "/api/v3/movie":
|
||||
return RADARR_MOVIES
|
||||
return {}
|
||||
|
||||
elif SERVICE == "sonarr":
|
||||
if path == "/api/v3/history":
|
||||
page = int(params.get("page", ["1"])[0])
|
||||
if page == 1:
|
||||
return {"records": SONARR_HISTORY_PAGE1, "totalRecords": 1001}
|
||||
else:
|
||||
return {"records": SONARR_HISTORY_PAGE2, "totalRecords": 1001}
|
||||
elif path == "/api/v3/queue":
|
||||
return {"records": [], "totalRecords": 0}
|
||||
elif path == "/api/v3/series":
|
||||
return SONARR_SERIES
|
||||
return {}
|
||||
|
||||
return {}
|
||||
|
||||
def log_message(self, fmt, *args):
|
||||
pass
|
||||
|
||||
HTTPServer(("0.0.0.0", PORT), Handler).serve_forever()
|
||||
'';
|
||||
in
|
||||
pkgs.testers.runNixOSTest {
|
||||
name = "torrent-audit";
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = [ pkgs.curl ];
|
||||
|
||||
systemd.services.mock-qbittorrent = {
|
||||
description = "Mock qBittorrent API";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.python3}/bin/python3 ${mockScript} qbit ${toString qbitPort}";
|
||||
Type = "simple";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.mock-radarr = {
|
||||
description = "Mock Radarr API";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.python3}/bin/python3 ${mockScript} radarr ${toString radarrPort}";
|
||||
Type = "simple";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.mock-sonarr = {
|
||||
description = "Mock Sonarr API";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.python3}/bin/python3 ${mockScript} sonarr ${toString sonarrPort}";
|
||||
Type = "simple";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_for_unit("multi-user.target")
|
||||
|
||||
# Wait for all mock services to be responsive
|
||||
machine.wait_for_unit("mock-qbittorrent.service")
|
||||
machine.wait_for_unit("mock-radarr.service")
|
||||
machine.wait_for_unit("mock-sonarr.service")
|
||||
machine.wait_until_succeeds(
|
||||
"curl -sf http://localhost:${toString qbitPort}/api/v2/torrents/info?category=movies",
|
||||
timeout=30,
|
||||
)
|
||||
machine.wait_until_succeeds(
|
||||
"curl -sf http://localhost:${toString radarrPort}/api/v3/movie",
|
||||
timeout=30,
|
||||
)
|
||||
machine.wait_until_succeeds(
|
||||
"curl -sf http://localhost:${toString sonarrPort}/api/v3/queue",
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
# Run the audit script and capture stdout
|
||||
output = machine.succeed(
|
||||
"QBITTORRENT_URL=http://localhost:${toString qbitPort} "
|
||||
"RADARR_URL=http://localhost:${toString radarrPort} "
|
||||
"RADARR_CONFIG=${radarrConfig} "
|
||||
"SONARR_URL=http://localhost:${toString sonarrPort} "
|
||||
"SONARR_CONFIG=${sonarrConfig} "
|
||||
"CATEGORIES=movies,tvshows,anime "
|
||||
"${python} ${auditScript}"
|
||||
)
|
||||
|
||||
print("=== SCRIPT OUTPUT ===")
|
||||
print(output)
|
||||
print("=== END OUTPUT ===")
|
||||
|
||||
# Fix 10: Assert section heading exists before splitting
|
||||
assert "ABANDONED UPGRADE LEFTOVERS" in output, \
|
||||
"Output must contain ABANDONED UPGRADE LEFTOVERS heading"
|
||||
|
||||
# Split output into sections for targeted assertions
|
||||
unmanaged_section = output.split("ABANDONED UPGRADE LEFTOVERS")[0]
|
||||
abandoned_section = output.split("ABANDONED UPGRADE LEFTOVERS")[1]
|
||||
|
||||
# Helper: find a torrent name line and check nearby lines (within 3) for a note
|
||||
def assert_note_near(section, torrent_name, note_text):
|
||||
lines = section.splitlines()
|
||||
found_idx = None
|
||||
for i, line in enumerate(lines):
|
||||
if torrent_name in line:
|
||||
found_idx = i
|
||||
break
|
||||
assert found_idx is not None, f"{torrent_name} not found in section"
|
||||
nearby = "\n".join(lines[max(0, found_idx):found_idx + 4])
|
||||
assert note_text in nearby, \
|
||||
f"Expected '{note_text}' near '{torrent_name}', got:\n{nearby}"
|
||||
|
||||
with subtest("Detects unmanaged movie torrent"):
|
||||
assert "Unmanaged.Movie.2024" in unmanaged_section, \
|
||||
"Should detect unmanaged movie"
|
||||
assert "1 unmanaged / 9 total" in unmanaged_section, \
|
||||
"Should show 1 unmanaged movie out of 9"
|
||||
|
||||
with subtest("Detects unmanaged TV torrent"):
|
||||
assert "Unmanaged.Show.S01E01" in unmanaged_section, \
|
||||
"Should detect unmanaged TV show"
|
||||
assert "1 unmanaged / 6 total" in unmanaged_section, \
|
||||
"Should show 1 unmanaged TV show out of 6"
|
||||
|
||||
with subtest("Empty category shows zero counts"):
|
||||
assert "0 unmanaged / 0 total" in unmanaged_section, \
|
||||
"anime category should show 0 unmanaged / 0 total"
|
||||
|
||||
with subtest("Managed torrents are NOT listed as unmanaged"):
|
||||
assert "Managed.Movie.2024" not in unmanaged_section, \
|
||||
"Managed movie should not appear in unmanaged section"
|
||||
assert "Managed.Show.S01E01" not in unmanaged_section, \
|
||||
"Managed TV show should not appear in unmanaged section"
|
||||
|
||||
with subtest("Queue-known hash is NOT listed as unmanaged"):
|
||||
assert "Queued.Movie.2024" not in unmanaged_section, \
|
||||
"Torrent in Radarr queue should not appear as unmanaged"
|
||||
|
||||
with subtest("Detects abandoned movie upgrade as SAFE"):
|
||||
assert "Old.Movie.Quality.2024" in abandoned_section, \
|
||||
"Should detect abandoned movie"
|
||||
for line in abandoned_section.splitlines():
|
||||
if "Old.Movie.Quality.2024" in line:
|
||||
assert "SAFE" in line, f"Old movie should be SAFE, got: {line}"
|
||||
break
|
||||
|
||||
with subtest("Detects abandoned TV episode as SAFE"):
|
||||
assert "Old.Show.S01E01" in abandoned_section, \
|
||||
"Should detect abandoned TV episode"
|
||||
for line in abandoned_section.splitlines():
|
||||
if "Old.Show.S01E01" in line:
|
||||
assert "SAFE" in line, f"Old TV should be SAFE, got: {line}"
|
||||
break
|
||||
|
||||
with subtest("Keeper-also-abandoned hash is NOT listed as abandoned"):
|
||||
assert "CrossRef.Movie.2024" not in abandoned_section, \
|
||||
"Hash that is keeper for another movie must not appear as abandoned"
|
||||
|
||||
with subtest("Season pack NOT abandoned when still active for other episodes"):
|
||||
assert "Season.Pack.S02" not in abandoned_section, \
|
||||
"Season pack still active for episodes 301/302 must not be abandoned"
|
||||
|
||||
with subtest("Negative assertions for keepers"):
|
||||
assert "New.Movie.Quality.2024" not in abandoned_section, \
|
||||
"Keeper for movieId=2 must not appear as abandoned"
|
||||
assert "New.Show.S01E01" not in abandoned_section, \
|
||||
"Keeper for episodeId=200 must not appear as abandoned"
|
||||
assert "Managed.Movie.2024" not in abandoned_section, \
|
||||
"Single-import movie must not appear as abandoned"
|
||||
assert "Managed.Show.S01E01" not in abandoned_section, \
|
||||
"Single-import TV show must not appear as abandoned"
|
||||
|
||||
with subtest("Single-import keeper not abandoned (Bug 1 regression)"):
|
||||
assert "SingleCross.Movie.2024" not in abandoned_section, \
|
||||
"Hash that is sole import for movieId=7 must be in keeper set, not abandoned"
|
||||
|
||||
with subtest("Removed movie triggers REVIEW status"):
|
||||
assert "Removed.Movie.2024" in abandoned_section, \
|
||||
"Should detect abandoned torrent for removed movie"
|
||||
assert_note_near(abandoned_section, "Removed.Movie.2024", "movie removed")
|
||||
for line in abandoned_section.splitlines():
|
||||
if "Removed.Movie.2024" in line:
|
||||
assert "REVIEW" in line, f"Removed movie should be REVIEW, got: {line}"
|
||||
break
|
||||
|
||||
with subtest("Abandoned larger than current triggers REVIEW"):
|
||||
assert "Larger.Movie.2024" in abandoned_section, \
|
||||
"Should detect larger abandoned torrent"
|
||||
assert_note_near(abandoned_section, "Larger.Movie.2024", "abandoned is larger")
|
||||
for line in abandoned_section.splitlines():
|
||||
if "Larger.Movie.2024" in line:
|
||||
assert "REVIEW" in line, f"Larger abandoned should be REVIEW, got: {line}"
|
||||
break
|
||||
|
||||
with subtest("Removed series triggers REVIEW status for TV"):
|
||||
assert "Removed.Show.S01E01" in abandoned_section, \
|
||||
"Should detect abandoned torrent for removed series"
|
||||
assert_note_near(abandoned_section, "Removed.Show.S01E01", "series removed")
|
||||
for line in abandoned_section.splitlines():
|
||||
if "Removed.Show.S01E01" in line:
|
||||
assert "REVIEW" in line, f"Removed series should be REVIEW, got: {line}"
|
||||
break
|
||||
|
||||
with subtest("Correct abandoned counts per category"):
|
||||
assert "movies (3 abandoned)" in abandoned_section, \
|
||||
"Should show 3 abandoned movies"
|
||||
assert "tvshows (2 abandoned)" in abandoned_section, \
|
||||
"Should show 2 abandoned TV shows"
|
||||
|
||||
with subtest("Correct summary totals"):
|
||||
assert "ABANDONED: 5 total (2 safe to delete)" in output, \
|
||||
"Summary should show 5 total abandoned, 2 safe to delete"
|
||||
assert "SAFE TO RECLAIM: 3.4 GiB" in output, \
|
||||
"Should report 3.4 GiB reclaimable (2.8 GiB movie + 0.7 GiB TV)"
|
||||
'';
|
||||
}
|
||||
@@ -1,206 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
script = ../services/monero/xmrig-auto-pause.py;
|
||||
python = pkgs.python3;
|
||||
in
|
||||
pkgs.testers.runNixOSTest {
|
||||
name = "xmrig-auto-pause";
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = [
|
||||
pkgs.python3
|
||||
pkgs.procps
|
||||
];
|
||||
|
||||
# Mock xmrig as a nice'd sleep process that can be stopped/started.
|
||||
systemd.services.xmrig = {
|
||||
description = "Mock xmrig miner";
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.coreutils}/bin/sleep infinity";
|
||||
Type = "simple";
|
||||
Nice = 19;
|
||||
};
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import time
|
||||
|
||||
PYTHON = "${python}/bin/python3"
|
||||
SCRIPT = "${script}"
|
||||
|
||||
# Tuned for test VMs (1-2 cores).
|
||||
# POLL_INTERVAL=1 keeps detection latency low.
|
||||
# GRACE_PERIOD=5 is long enough to verify "stays stopped" but short
|
||||
# enough that the full test completes in reasonable time.
|
||||
# CPU_STOP_THRESHOLD=20 catches a busy-loop on a 1-2 core VM (50-100%)
|
||||
# without triggering from normal VM noise.
|
||||
# CPU_RESUME_THRESHOLD=10 is the idle cutoff for a 1-2 core VM.
|
||||
POLL_INTERVAL = "1"
|
||||
GRACE_PERIOD = "5"
|
||||
CPU_STOP_THRESHOLD = "20"
|
||||
CPU_RESUME_THRESHOLD = "10"
|
||||
STARTUP_COOLDOWN = "4"
|
||||
STATE_DIR = "/tmp/xap-state"
|
||||
def start_cpu_load(name):
|
||||
"""Start a non-nice CPU burn as a transient systemd unit."""
|
||||
machine.succeed(
|
||||
f"systemd-run --unit={name} --property=Type=exec "
|
||||
f"bash -c 'while true; do :; done'"
|
||||
)
|
||||
|
||||
def stop_cpu_load(name):
|
||||
machine.succeed(f"systemctl stop {name}")
|
||||
|
||||
def start_monitor(unit_name):
|
||||
"""Start the auto-pause monitor as a transient unit."""
|
||||
machine.succeed(
|
||||
f"systemd-run --unit={unit_name} "
|
||||
f"--setenv=POLL_INTERVAL={POLL_INTERVAL} "
|
||||
f"--setenv=GRACE_PERIOD={GRACE_PERIOD} "
|
||||
f"--setenv=CPU_STOP_THRESHOLD={CPU_STOP_THRESHOLD} "
|
||||
f"--setenv=CPU_RESUME_THRESHOLD={CPU_RESUME_THRESHOLD} "
|
||||
f"--setenv=STARTUP_COOLDOWN={STARTUP_COOLDOWN} "
|
||||
f"--setenv=STATE_DIR={STATE_DIR} "
|
||||
f"{PYTHON} {SCRIPT}"
|
||||
)
|
||||
# Monitor needs two consecutive polls to compute a CPU delta.
|
||||
time.sleep(3)
|
||||
# Monitor needs two consecutive polls to compute a CPU delta.
|
||||
time.sleep(3)
|
||||
|
||||
start_all()
|
||||
machine.wait_for_unit("multi-user.target")
|
||||
machine.wait_for_unit("xmrig.service")
|
||||
machine.succeed(f"mkdir -p {STATE_DIR}")
|
||||
|
||||
with subtest("Start auto-pause monitor"):
|
||||
start_monitor("xmrig-auto-pause")
|
||||
|
||||
with subtest("xmrig stays running while system is idle"):
|
||||
machine.succeed("systemctl is-active xmrig")
|
||||
|
||||
with subtest("xmrig stopped when CPU load appears"):
|
||||
start_cpu_load("cpu-load")
|
||||
machine.wait_until_fails("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
with subtest("xmrig remains stopped during grace period after load ends"):
|
||||
stop_cpu_load("cpu-load")
|
||||
# Load just stopped. Grace period is 5s. Check at 2s — well within.
|
||||
time.sleep(2)
|
||||
machine.fail("systemctl is-active xmrig")
|
||||
|
||||
with subtest("xmrig resumes after grace period expires"):
|
||||
# Already idle since previous subtest. Grace period (5s) plus
|
||||
# detection delay (~2 polls) plus startup cooldown (4s) means
|
||||
# xmrig should restart within ~12s.
|
||||
machine.wait_until_succeeds("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
with subtest("Intermittent load does not cause flapping"):
|
||||
# First load — stop xmrig
|
||||
start_cpu_load("cpu-load-1")
|
||||
machine.wait_until_fails("systemctl is-active xmrig", timeout=20)
|
||||
stop_cpu_load("cpu-load-1")
|
||||
|
||||
# Brief idle gap — shorter than grace period
|
||||
time.sleep(2)
|
||||
|
||||
# Second load arrives before grace period expires
|
||||
start_cpu_load("cpu-load-2")
|
||||
time.sleep(3)
|
||||
|
||||
# xmrig must still be stopped
|
||||
machine.fail("systemctl is-active xmrig")
|
||||
|
||||
stop_cpu_load("cpu-load-2")
|
||||
machine.wait_until_succeeds("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
with subtest("Sustained load keeps xmrig stopped"):
|
||||
start_cpu_load("cpu-load-3")
|
||||
machine.wait_until_fails("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
# Stay busy longer than the grace period to prove continuous
|
||||
# activity keeps xmrig stopped indefinitely.
|
||||
time.sleep(8)
|
||||
machine.fail("systemctl is-active xmrig")
|
||||
|
||||
stop_cpu_load("cpu-load-3")
|
||||
machine.wait_until_succeeds("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
with subtest("External restart detected and re-stopped under load"):
|
||||
# Put system under load so auto-pause stops xmrig.
|
||||
start_cpu_load("cpu-load-4")
|
||||
machine.wait_until_fails("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
# Something external starts xmrig while load is active.
|
||||
# The script should detect this and re-stop it.
|
||||
machine.succeed("systemctl start xmrig")
|
||||
machine.succeed("systemctl is-active xmrig")
|
||||
machine.wait_until_fails("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
stop_cpu_load("cpu-load-4")
|
||||
machine.wait_until_succeeds("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
# --- State persistence and crash recovery ---
|
||||
machine.succeed("systemctl stop xmrig-auto-pause")
|
||||
|
||||
with subtest("xmrig recovers after crash during startup cooldown"):
|
||||
machine.succeed(f"rm -rf {STATE_DIR} && mkdir -p {STATE_DIR}")
|
||||
start_monitor("xmrig-auto-pause-crash")
|
||||
|
||||
# Load -> xmrig stops
|
||||
start_cpu_load("cpu-crash")
|
||||
machine.wait_until_fails("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
# End load -> xmrig restarts after grace period
|
||||
stop_cpu_load("cpu-crash")
|
||||
machine.wait_until_succeeds("systemctl is-active xmrig", timeout=30)
|
||||
|
||||
# Kill xmrig immediately — simulates crash during startup cooldown.
|
||||
# The script should detect the failure when cooldown expires and
|
||||
# re-enter the retry cycle.
|
||||
machine.succeed("systemctl kill --signal=KILL xmrig")
|
||||
machine.wait_until_fails("systemctl is-active xmrig", timeout=5)
|
||||
|
||||
# After cooldown + grace period + restart, xmrig should be back.
|
||||
machine.wait_until_succeeds("systemctl is-active xmrig", timeout=30)
|
||||
|
||||
machine.succeed("systemctl stop xmrig-auto-pause-crash")
|
||||
machine.succeed("systemctl reset-failed xmrig.service || true")
|
||||
machine.succeed("systemctl start xmrig")
|
||||
machine.wait_for_unit("xmrig.service")
|
||||
|
||||
with subtest("Script restart preserves pause state"):
|
||||
machine.succeed(f"rm -rf {STATE_DIR} && mkdir -p {STATE_DIR}")
|
||||
start_monitor("xmrig-auto-pause-persist")
|
||||
|
||||
# Load -> xmrig stops
|
||||
start_cpu_load("cpu-persist")
|
||||
machine.wait_until_fails("systemctl is-active xmrig", timeout=20)
|
||||
|
||||
# Kill the monitor while xmrig is paused (simulates script crash)
|
||||
machine.succeed("systemctl stop xmrig-auto-pause-persist")
|
||||
|
||||
# State file must exist — the monitor persisted the pause flag
|
||||
machine.succeed(f"test -f {STATE_DIR}/paused")
|
||||
|
||||
# Start a fresh monitor instance (reads state file on startup)
|
||||
start_monitor("xmrig-auto-pause-persist2")
|
||||
|
||||
# End load — the new monitor should pick up the paused state
|
||||
# and restart xmrig after the grace period
|
||||
stop_cpu_load("cpu-persist")
|
||||
machine.wait_until_succeeds("systemctl is-active xmrig", timeout=30)
|
||||
|
||||
# State file should be cleaned up after successful restart
|
||||
machine.fail(f"test -f {STATE_DIR}/paused")
|
||||
|
||||
machine.succeed("systemctl stop xmrig-auto-pause-persist2")
|
||||
'';
|
||||
}
|
||||
@@ -1,123 +0,0 @@
|
||||
{
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
mockServer = ./mock-grafana-server.py;
|
||||
|
||||
mockZpool = pkgs.writeShellScript "zpool" ''
|
||||
case "$1" in
|
||||
list)
|
||||
echo "tank"
|
||||
echo "hdds"
|
||||
;;
|
||||
status)
|
||||
pool="$2"
|
||||
if [ "$pool" = "tank" ]; then
|
||||
echo " scan: scrub repaired 0B in 00:24:39 with 0 errors on Mon Jan 1 02:24:39 2024"
|
||||
elif [ "$pool" = "hdds" ]; then
|
||||
echo " scan: scrub repaired 0B in 04:12:33 with 0 errors on Mon Jan 1 06:12:33 2024"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
'';
|
||||
|
||||
script = ../services/grafana/zfs-scrub-annotations.sh;
|
||||
python = pkgs.python3;
|
||||
in
|
||||
pkgs.testers.runNixOSTest {
|
||||
name = "zfs-scrub-annotations";
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
python3
|
||||
curl
|
||||
jq
|
||||
];
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
|
||||
GRAFANA_PORT = 13000
|
||||
ANNOTS_FILE = "/tmp/annotations.json"
|
||||
STATE_DIR = "/tmp/scrub-state"
|
||||
PYTHON = "${python}/bin/python3"
|
||||
MOCK = "${mockServer}"
|
||||
SCRIPT = "${script}"
|
||||
MOCK_ZPOOL = "${mockZpool}"
|
||||
|
||||
MOCK_BIN = "/tmp/mock-bin"
|
||||
ENV_PREFIX = (
|
||||
f"GRAFANA_URL=http://127.0.0.1:{GRAFANA_PORT} "
|
||||
f"STATE_DIR={STATE_DIR} "
|
||||
f"PATH={MOCK_BIN}:$PATH "
|
||||
)
|
||||
|
||||
def read_annotations():
|
||||
out = machine.succeed(f"cat {ANNOTS_FILE} 2>/dev/null || echo '[]'")
|
||||
return json.loads(out.strip())
|
||||
|
||||
start_all()
|
||||
machine.wait_for_unit("multi-user.target")
|
||||
|
||||
with subtest("Setup state directory and mock zpool"):
|
||||
machine.succeed(f"mkdir -p {STATE_DIR}")
|
||||
machine.succeed(f"mkdir -p {MOCK_BIN} && cp {MOCK_ZPOOL} {MOCK_BIN}/zpool && chmod +x {MOCK_BIN}/zpool")
|
||||
|
||||
with subtest("Start mock Grafana server"):
|
||||
machine.succeed(f"echo '[]' > {ANNOTS_FILE}")
|
||||
machine.succeed(
|
||||
f"systemd-run --unit=mock-grafana {PYTHON} {MOCK} {GRAFANA_PORT} {ANNOTS_FILE}"
|
||||
)
|
||||
machine.wait_until_succeeds(
|
||||
f"curl -sf -X POST http://127.0.0.1:{GRAFANA_PORT}/api/annotations "
|
||||
f"-H 'Content-Type: application/json' -d '{{\"text\":\"ping\",\"tags\":[]}}' | grep -q id",
|
||||
timeout=10,
|
||||
)
|
||||
machine.succeed(f"echo '[]' > {ANNOTS_FILE}")
|
||||
|
||||
with subtest("Start action creates annotation with pool names and zfs-scrub tag"):
|
||||
machine.succeed(f"{ENV_PREFIX} bash {SCRIPT} start")
|
||||
annots = read_annotations()
|
||||
assert len(annots) == 1, f"Expected 1 annotation, got: {annots}"
|
||||
assert "zfs-scrub" in annots[0].get("tags", []), f"Missing zfs-scrub tag: {annots[0]}"
|
||||
assert "tank" in annots[0]["text"], f"Missing tank in text: {annots[0]['text']}"
|
||||
assert "hdds" in annots[0]["text"], f"Missing hdds in text: {annots[0]['text']}"
|
||||
assert "time" in annots[0], f"Missing time field: {annots[0]}"
|
||||
assert "timeEnd" not in annots[0], f"timeEnd should not be set yet: {annots[0]}"
|
||||
|
||||
with subtest("State file contains annotation ID"):
|
||||
ann_id = machine.succeed(f"cat {STATE_DIR}/annotation-id").strip()
|
||||
assert ann_id == "1", f"Expected annotation ID 1, got: {ann_id}"
|
||||
|
||||
with subtest("Stop action closes annotation with per-pool scrub results"):
|
||||
machine.succeed(f"{ENV_PREFIX} bash {SCRIPT} stop")
|
||||
annots = read_annotations()
|
||||
assert len(annots) == 1, f"Expected 1 annotation, got: {annots}"
|
||||
assert "timeEnd" in annots[0], f"timeEnd should be set: {annots[0]}"
|
||||
assert annots[0]["timeEnd"] > annots[0]["time"], "timeEnd should be after time"
|
||||
text = annots[0]["text"]
|
||||
assert "ZFS scrub completed" in text, f"Missing completed text: {text}"
|
||||
assert "tank:" in text, f"Missing tank results: {text}"
|
||||
assert "hdds:" in text, f"Missing hdds results: {text}"
|
||||
assert "00:24:39" in text, f"Missing tank scrub duration: {text}"
|
||||
assert "04:12:33" in text, f"Missing hdds scrub duration: {text}"
|
||||
|
||||
with subtest("State file cleaned up after stop"):
|
||||
machine.fail(f"test -f {STATE_DIR}/annotation-id")
|
||||
|
||||
with subtest("Stop action handles missing state file gracefully"):
|
||||
machine.succeed(f"{ENV_PREFIX} bash {SCRIPT} stop")
|
||||
annots = read_annotations()
|
||||
assert len(annots) == 1, f"Expected no new annotations, got: {annots}"
|
||||
|
||||
with subtest("Start action handles Grafana being down gracefully"):
|
||||
machine.succeed("systemctl stop mock-grafana")
|
||||
machine.succeed(f"{ENV_PREFIX} bash {SCRIPT} start")
|
||||
machine.fail(f"test -f {STATE_DIR}/annotation-id")
|
||||
'';
|
||||
}
|
||||
@@ -1,153 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
inputs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
# Create pkgs with ensureZfsMounts overlay
|
||||
testPkgs = pkgs.appendOverlays [ (import ../modules/overlays.nix) ];
|
||||
in
|
||||
testPkgs.testers.runNixOSTest {
|
||||
name = "zfs test";
|
||||
|
||||
nodes.machine =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
imports = [
|
||||
# Test valid paths within zpool
|
||||
(lib.serviceMountWithZpool "test-service" "rpool" [ "/mnt/rpool_data" ])
|
||||
|
||||
# Test service with paths outside zpool (should fail assertion)
|
||||
(lib.serviceMountWithZpool "invalid-service" "rpool2" [ "/mnt/rpool_data" ])
|
||||
|
||||
# Test multi-command logic: service with multiple serviceMountWithZpool calls
|
||||
(lib.serviceMountWithZpool "multi-service" "rpool" [ "/mnt/rpool_data" ])
|
||||
(lib.serviceMountWithZpool "multi-service" "rpool2" [ "/mnt/rpool2_data" ])
|
||||
|
||||
# Test multi-command logic: service with multiple serviceMountWithZpool calls
|
||||
# BUT this one should fail as `/mnt/rpool_moar_data` is not on rpool2
|
||||
(lib.serviceMountWithZpool "multi-service-fail" "rpool" [ "/mnt/rpool_data" ])
|
||||
(lib.serviceMountWithZpool "multi-service-fail" "rpool2" [ "/mnt/rpool_moar_data" ])
|
||||
];
|
||||
|
||||
virtualisation = {
|
||||
emptyDiskImages = [
|
||||
4096
|
||||
4096
|
||||
];
|
||||
# Add this to avoid ZFS hanging issues
|
||||
additionalPaths = [ pkgs.zfs ];
|
||||
};
|
||||
networking.hostId = "deadbeef";
|
||||
boot.kernelPackages = config.boot.kernelPackages;
|
||||
boot.zfs.package = config.boot.zfs.package;
|
||||
boot.supportedFilesystems = [ "zfs" ];
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
parted
|
||||
ensureZfsMounts
|
||||
];
|
||||
|
||||
systemd.services."test-service" = {
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStart = lib.getExe pkgs.bash;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services."invalid-service" = {
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStart = lib.getExe pkgs.bash;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services."multi-service" = {
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStart = lib.getExe pkgs.bash;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services."multi-service-fail" = {
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStart = lib.getExe pkgs.bash;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
testScript = ''
|
||||
start_all()
|
||||
machine.wait_for_unit("multi-user.target")
|
||||
|
||||
# Setup ZFS pool
|
||||
machine.succeed(
|
||||
"parted --script /dev/vdb mklabel msdos",
|
||||
"parted --script /dev/vdb -- mkpart primary 1024M -1s",
|
||||
"zpool create rpool /dev/vdb1"
|
||||
)
|
||||
|
||||
# Setup ZFS pool 2
|
||||
machine.succeed(
|
||||
"parted --script /dev/vdc mklabel msdos",
|
||||
"parted --script /dev/vdc -- mkpart primary 1024M -1s",
|
||||
"zpool create rpool2 /dev/vdc1"
|
||||
)
|
||||
|
||||
machine.succeed("zfs create -o mountpoint=/mnt/rpool_data rpool/data")
|
||||
|
||||
machine.succeed("zfs create -o mountpoint=/mnt/rpool2_data rpool2/data")
|
||||
|
||||
machine.succeed("zfs create -o mountpoint=/mnt/rpool_moar_data rpool/moar_data")
|
||||
|
||||
# Test that valid service starts successfully
|
||||
machine.succeed("systemctl start test-service")
|
||||
|
||||
# Manually test our validation logic by checking the debug output
|
||||
zfs_output = machine.succeed("zfs list -H -o name,mountpoint")
|
||||
print("ZFS LIST OUTPUT:")
|
||||
print(zfs_output)
|
||||
|
||||
dataset = machine.succeed("zfs list -H -o name,mountpoint | awk '/\\/mnt\\/rpool_data/ { print $1 }'")
|
||||
print("DATASET FOR /mnt/rpool_data:")
|
||||
print(dataset)
|
||||
|
||||
# Test that invalid-service mount service fails validation
|
||||
machine.fail("systemctl start invalid-service.service")
|
||||
|
||||
# Check the journal for our detailed validation error message
|
||||
journal_output = machine.succeed("journalctl -u invalid-service-mounts.service --no-pager")
|
||||
print("JOURNAL OUTPUT:")
|
||||
print(journal_output)
|
||||
|
||||
# Verify our validation error is in the journal using Python string matching
|
||||
assert "ERROR: ZFS pool mismatch for /mnt/rpool_data" in journal_output
|
||||
assert "Expected pool: rpool2" in journal_output
|
||||
assert "Actual pool: rpool" in journal_output
|
||||
|
||||
|
||||
# Test that invalid-service mount service fails validation
|
||||
machine.fail("systemctl start multi-service-fail.service")
|
||||
|
||||
# Check the journal for our detailed validation error message
|
||||
journal_output = machine.succeed("journalctl -u multi-service-fail-mounts.service --no-pager")
|
||||
print("JOURNAL OUTPUT:")
|
||||
print(journal_output)
|
||||
|
||||
# Verify our validation error is in the journal using Python string matching
|
||||
assert "ERROR: ZFS pool mismatch for /mnt/rpool_moar_data" in journal_output, "no zfs pool mismatch found (1)"
|
||||
assert "Expected pool: rpool2" in journal_output, "no zfs pool mismatch found (2)"
|
||||
assert "Actual pool: rpool" in journal_output, "no zfs pool mismatch found (3)"
|
||||
|
||||
|
||||
machine.succeed("systemctl start multi-service")
|
||||
machine.succeed("systemctl is-active multi-service-mounts.service")
|
||||
'';
|
||||
}
|
||||
Reference in New Issue
Block a user