phase 2: promote services/, tests/, patches/, lib/, scripts/

This commit is contained in:
primary
2026-04-18 00:47:39 -04:00
parent 99e98e39b7
commit 999ed05d9f
86 changed files with 0 additions and 0 deletions

115
services/arr/arr-search.nix Normal file
View File

@@ -0,0 +1,115 @@
{
  pkgs,
  lib,
  service_configs,
  ...
}:
let
  # config.xml files that hold each app's API key; read at runtime as root
  # (the service runs ExecStart with the "+" prefix — see below).
  radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
  sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
  # Both apps listen on localhost in the host network namespace.
  radarrUrl = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
  sonarrUrl = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
  curl = "${pkgs.curl}/bin/curl";
  jq = "${pkgs.jq}/bin/jq";
  # Max items to search per cycle per category (missing + cutoff) per app
  maxPerCycle = 5;
  # Queries each app's wanted/missing and wanted/cutoff endpoints and kicks
  # off indexer searches for a bounded batch per run.  NOTE(review): with
  # `set -e`, a failed curl in any command substitution aborts the whole
  # script — so one app being down skips the remaining searches; confirm
  # this is the intended behavior.
  searchScript = pkgs.writeShellScript "arr-search" ''
    set -euo pipefail
    RADARR_KEY=$(${lib.extractArrApiKey radarrConfig})
    SONARR_KEY=$(${lib.extractArrApiKey sonarrConfig})
    search_radarr() {
      local endpoint="$1"
      local label="$2"
      local ids
      ids=$(${curl} -sf --max-time 30 \
        -H "X-Api-Key: $RADARR_KEY" \
        "${radarrUrl}/api/v3/wanted/$endpoint?page=1&pageSize=${builtins.toString maxPerCycle}&monitored=true&sortKey=title&sortDirection=ascending" \
        | ${jq} -r '.records[].id // empty')
      if [ -z "$ids" ]; then
        echo "radarr: no $label items"
        return
      fi
      local id_array
      id_array=$(echo "$ids" | ${jq} -Rs '[split("\n") | .[] | select(. != "") | tonumber]')
      echo "radarr: searching $label: $id_array"
      ${curl} -sf --max-time 60 \
        -H "X-Api-Key: $RADARR_KEY" \
        -H "Content-Type: application/json" \
        -X POST "${radarrUrl}/api/v3/command" \
        -d "{\"name\": \"MoviesSearch\", \"movieIds\": $id_array}" > /dev/null
    }
    search_sonarr() {
      local endpoint="$1"
      local label="$2"
      local series_ids
      series_ids=$(${curl} -sf --max-time 30 \
        -H "X-Api-Key: $SONARR_KEY" \
        "${sonarrUrl}/api/v3/wanted/$endpoint?page=1&pageSize=${builtins.toString maxPerCycle}&monitored=true&sortKey=title&sortDirection=ascending&includeSeries=true" \
        | ${jq} -r '[.records[].seriesId] | unique | .[] // empty')
      if [ -z "$series_ids" ]; then
        echo "sonarr: no $label items"
        return
      fi
      # search per series (sonarr searches by series, not episode)
      for sid in $series_ids; do
        echo "sonarr: searching $label series $sid"
        ${curl} -sf --max-time 60 \
          -H "X-Api-Key: $SONARR_KEY" \
          -H "Content-Type: application/json" \
          -X POST "${sonarrUrl}/api/v3/command" \
          -d "{\"name\": \"SeriesSearch\", \"seriesId\": $sid}" > /dev/null
      done
    }
    echo "=== arr-search $(date -Iseconds) ==="
    search_radarr "missing" "missing"
    search_radarr "cutoff" "cutoff-unmet"
    search_sonarr "missing" "missing"
    search_sonarr "cutoff" "cutoff-unmet"
    echo "=== done ==="
  '';
in
{
  # Oneshot service; ordered after the apps it queries so a boot-time run
  # does not race their startup.
  systemd.services.arr-search = {
    description = "Search for missing and cutoff-unmet media in Radarr/Sonarr";
    after = [
      "network-online.target"
      "radarr.service"
      "sonarr.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      Type = "oneshot";
      ExecStart = "+${searchScript}"; # + prefix: runs as root to read API keys from config.xml
      TimeoutSec = 300;
    };
  };
  # Daily timer with catch-up on boot and jitter to spread indexer load.
  systemd.timers.arr-search = {
    description = "Periodically search for missing and cutoff-unmet media";
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "*-*-* 03:00:00"; # daily at 3 AM
      Persistent = true; # run on boot if missed
      RandomizedDelaySec = "30m";
    };
  };
}

34
services/arr/bazarr.nix Normal file
View File

@@ -0,0 +1,34 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  # Shorthand for values used more than once below.
  bazarrUser = config.services.bazarr.user;
  bazarrGroup = config.services.bazarr.group;
  bazarrPort = service_configs.ports.private.bazarr.port;
  bazarrData = service_configs.bazarr.dataDir;
in
{
  imports = [
    # Data dir lives on the SSD pool, torrents on the HDD pool.
    (lib.serviceMountWithZpool "bazarr" service_configs.zpool_ssds [ bazarrData ])
    (lib.serviceMountWithZpool "bazarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    # Recursively enforce ownership/mode of the state directory.
    (lib.serviceFilePerms "bazarr" [
      "Z ${bazarrData} 0700 ${bazarrUser} ${bazarrGroup}"
    ])
    # Expose behind Caddy with forward auth enabled.
    (lib.mkCaddyReverseProxy {
      subdomain = "bazarr";
      port = bazarrPort;
      auth = true;
    })
  ];

  services.bazarr.enable = true;
  services.bazarr.listenPort = bazarrPort;

  # Membership in the media group grants access to the shared library files.
  users.users.${bazarrUser}.extraGroups = [ service_configs.media_group ];
}

153
services/arr/init.nix Normal file
View File

@@ -0,0 +1,153 @@
{ config, service_configs, ... }:
{
  # Declarative bootstrap for the *arr stack via the repo's custom arrInit
  # module: ensures config.xml invariants, registers download clients, root
  # folders, naming schemes, and wires Prowlarr to its downstream apps.
  services.arrInit = {
    prowlarr = {
      enable = true;
      serviceName = "prowlarr";
      port = service_configs.ports.private.prowlarr.port;
      dataDir = service_configs.prowlarr.dataDir;
      apiVersion = "v1";
      networkNamespacePath = "/run/netns/wg";
      networkNamespaceService = "wg";
      # Guarantee critical config.xml elements before startup. Prowlarr has a
      # history of losing <Port> from config.xml, causing the service to run
      # without binding any socket. See arr-init's configXml for details.
      configXml = {
        Port = service_configs.ports.private.prowlarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      # Prowlarr runs in the wg netns; Sonarr/Radarr in the host netns.
      # From the host netns, Prowlarr is reachable at the wg namespace
      # address, not at localhost (which resolves to the host's own netns);
      # conversely Prowlarr reaches the apps via the bridge address below.
      healthChecks = true;
      syncedApps = [
        {
          name = "Sonarr";
          implementation = "Sonarr";
          configContract = "SonarrSettings";
          prowlarrUrl = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.prowlarr.port}";
          baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.private.sonarr.port}";
          apiKeyFrom = "${service_configs.sonarr.dataDir}/config.xml";
          serviceName = "sonarr";
        }
        {
          name = "Radarr";
          implementation = "Radarr";
          configContract = "RadarrSettings";
          prowlarrUrl = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.prowlarr.port}";
          baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.private.radarr.port}";
          apiKeyFrom = "${service_configs.radarr.dataDir}/config.xml";
          serviceName = "radarr";
        }
      ];
    };
    sonarr = {
      enable = true;
      serviceName = "sonarr";
      port = service_configs.ports.private.sonarr.port;
      dataDir = service_configs.sonarr.dataDir;
      healthChecks = true;
      configXml = {
        Port = service_configs.ports.private.sonarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      rootFolders = [ service_configs.media.tvDir ];
      # File/folder naming templates applied inside Sonarr.
      naming = {
        renameEpisodes = true;
        replaceIllegalCharacters = true;
        standardEpisodeFormat = "{Series Title} - S{season:00}E{episode:00} - {Episode Title} {Quality Full}";
        dailyEpisodeFormat = "{Series Title} - {Air-Date} - {Episode Title} {Quality Full}";
        animeEpisodeFormat = "{Series Title} - S{season:00}E{episode:00} - {Episode Title} {Quality Full}";
        seasonFolderFormat = "Season {season}";
        seriesFolderFormat = "{Series Title}";
      };
      # qBittorrent lives inside the wg netns, hence the namespace address.
      downloadClients = [
        {
          name = "qBittorrent";
          implementation = "QBittorrent";
          configContract = "QBittorrentSettings";
          serviceName = "qbittorrent";
          fields = {
            host = config.vpnNamespaces.wg.namespaceAddress;
            port = service_configs.ports.private.torrent.port;
            useSsl = false;
            tvCategory = "tvshows";
          };
        }
      ];
    };
    radarr = {
      enable = true;
      serviceName = "radarr";
      port = service_configs.ports.private.radarr.port;
      dataDir = service_configs.radarr.dataDir;
      healthChecks = true;
      configXml = {
        Port = service_configs.ports.private.radarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      rootFolders = [ service_configs.media.moviesDir ];
      naming = {
        renameMovies = true;
        replaceIllegalCharacters = true;
        standardMovieFormat = "{Movie Title} ({Release Year}) {Quality Full}";
        movieFolderFormat = "{Movie Title} ({Release Year})";
      };
      downloadClients = [
        {
          name = "qBittorrent";
          implementation = "QBittorrent";
          configContract = "QBittorrentSettings";
          serviceName = "qbittorrent";
          fields = {
            host = config.vpnNamespaces.wg.namespaceAddress;
            port = service_configs.ports.private.torrent.port;
            useSsl = false;
            movieCategory = "movies";
          };
        }
      ];
    };
  };
  # Bazarr bootstrap: point it at Sonarr/Radarr (API keys read from their
  # dataDir config.xml files by the init module).
  services.bazarrInit = {
    enable = true;
    dataDir = "/var/lib/bazarr";
    port = service_configs.ports.private.bazarr.port;
    sonarr = {
      enable = true;
      dataDir = service_configs.sonarr.dataDir;
      port = service_configs.ports.private.sonarr.port;
      serviceName = "sonarr";
    };
    radarr = {
      enable = true;
      dataDir = service_configs.radarr.dataDir;
      port = service_configs.ports.private.radarr.port;
      serviceName = "radarr";
    };
  };
  # Jellyseerr bootstrap: profile names must match the quality profiles
  # managed by recyclarr (see services/arr/recyclarr.nix).
  services.jellyseerrInit = {
    enable = true;
    configDir = service_configs.jellyseerr.configDir;
    radarr = {
      profileName = "Remux + WEB 2160p";
      dataDir = service_configs.radarr.dataDir;
      port = service_configs.ports.private.radarr.port;
      serviceName = "radarr";
    };
    sonarr = {
      profileName = "WEB-2160p";
      dataDir = service_configs.sonarr.dataDir;
      port = service_configs.ports.private.sonarr.port;
      serviceName = "sonarr";
    };
  };
}

View File

@@ -0,0 +1,43 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  cfgDir = service_configs.jellyseerr.configDir;
  webPort = service_configs.ports.private.jellyseerr.port;
in
{
  imports = [
    # Persist config on the SSD pool and keep it private to the service user.
    (lib.serviceMountWithZpool "jellyseerr" service_configs.zpool_ssds [ cfgDir ])
    (lib.serviceFilePerms "jellyseerr" [
      "Z ${cfgDir} 0700 jellyseerr jellyseerr"
    ])
    # Public reverse proxy (no forward auth — Jellyseerr has its own login).
    (lib.mkCaddyReverseProxy {
      subdomain = "jellyseerr";
      port = webPort;
    })
  ];

  services.jellyseerr = {
    enable = true;
    port = webPort;
    configDir = cfgDir;
  };

  # Replace the upstream DynamicUser with a static account so the persistent
  # config directory keeps stable ownership.
  systemd.services.jellyseerr.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "jellyseerr";
    Group = "jellyseerr";
    ReadWritePaths = [ cfgDir ];
  };

  users.users.jellyseerr = {
    isSystemUser = true;
    group = "jellyseerr";
    home = cfgDir;
  };
  users.groups.jellyseerr = { };
}

60
services/arr/prowlarr.nix Normal file
View File

@@ -0,0 +1,60 @@
{
  pkgs,
  service_configs,
  config,
  lib,
  ...
}:
{
  imports = [
    # State lives on the SSD pool.
    (lib.serviceMountWithZpool "prowlarr" service_configs.zpool_ssds [
      service_configs.prowlarr.dataDir
    ])
    # Prowlarr runs inside the wg VPN namespace; open its port there.
    (lib.vpnNamespaceOpenPort service_configs.ports.private.prowlarr.port "prowlarr")
    (lib.serviceFilePerms "prowlarr" [
      "Z ${service_configs.prowlarr.dataDir} 0700 prowlarr prowlarr"
    ])
    # Reverse proxy with auth, routed into the VPN namespace.
    (lib.mkCaddyReverseProxy {
      subdomain = "prowlarr";
      port = service_configs.ports.private.prowlarr.port;
      auth = true;
      vpn = true;
    })
  ];
  services.prowlarr = {
    enable = true;
    dataDir = service_configs.prowlarr.dataDir;
    settings.server.port = service_configs.ports.private.prowlarr.port;
  };
  # The upstream prowlarr module uses DynamicUser=true which is incompatible
  # with ZFS-backed persistent storage — the dynamic user can't access files
  # on the ZFS mount. Override with a static user to match sonarr/radarr.
  users.users.prowlarr = {
    isSystemUser = true;
    group = "prowlarr";
    home = service_configs.prowlarr.dataDir;
  };
  users.groups.prowlarr = { };
  # The upstream prowlarr module hardcodes root:root in tmpfiles for custom dataDirs
  # (systemd.tmpfiles.settings."10-prowlarr"), which gets applied by
  # systemd-tmpfiles-setup.service on every boot/deploy, resetting the directory
  # ownership and making Prowlarr unable to access its SQLite databases.
  # Override to use the correct user as we disable DynamicUser.
  systemd.tmpfiles.settings."10-prowlarr".${service_configs.prowlarr.dataDir}.d = lib.mkForce {
    user = "prowlarr";
    group = "prowlarr";
    mode = "0700";
  };
  # Static-user overrides for the unit itself: clear StateDirectory (we
  # manage the dataDir ourselves) and point ExecStart at the custom dataDir.
  systemd.services.prowlarr.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "prowlarr";
    Group = "prowlarr";
    StateDirectory = lib.mkForce "";
    ExecStart = lib.mkForce "${lib.getExe pkgs.prowlarr} -nobrowser -data=${service_configs.prowlarr.dataDir}";
  };
}

36
services/arr/radarr.nix Normal file
View File

@@ -0,0 +1,36 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  radarrPort = service_configs.ports.private.radarr.port;
  radarrData = service_configs.radarr.dataDir;
in
{
  imports = [
    # State on the SSD pool, torrent payloads on the HDD pool.
    (lib.serviceMountWithZpool "radarr" service_configs.zpool_ssds [ radarrData ])
    (lib.serviceMountWithZpool "radarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    (lib.serviceFilePerms "radarr" [
      "Z ${radarrData} 0700 ${config.services.radarr.user} ${config.services.radarr.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "radarr";
      port = radarrPort;
      auth = true;
    })
  ];

  services.radarr = {
    enable = true;
    dataDir = radarrData;
    settings = {
      server.port = radarrPort;
      # Updates come from nixpkgs, not Radarr's self-updater.
      update.mechanism = "external";
    };
  };

  # Media-group membership for access to the shared library.
  users.users.${config.services.radarr.user}.extraGroups = [
    service_configs.media_group
  ];
}

224
services/arr/recyclarr.nix Normal file
View File

@@ -0,0 +1,224 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
  sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
  # Path where the NixOS recyclarr module renders its configuration.
  configPath = "/var/lib/recyclarr/config.json";
  # Runs as root (via + prefix) after the NixOS module writes config.json.
  # Extracts API keys from radarr/sonarr config.xml and injects them via jq.
  injectApiKeys = pkgs.writeShellScript "recyclarr-inject-api-keys" ''
    RADARR_KEY=$(${lib.extractArrApiKey radarrConfig})
    SONARR_KEY=$(${lib.extractArrApiKey sonarrConfig})
    ${pkgs.jq}/bin/jq \
      --arg rk "$RADARR_KEY" \
      --arg sk "$SONARR_KEY" \
      '.radarr.movies.api_key = $rk | .sonarr.series.api_key = $sk' \
      ${configPath} > ${configPath}.tmp
    mv ${configPath}.tmp ${configPath}
    chown recyclarr:recyclarr ${configPath}
  '';
in
{
  imports = [
    (lib.serviceMountWithZpool "recyclarr" service_configs.zpool_ssds [
      service_configs.recyclarr.dataDir
    ])
  ];
  systemd.tmpfiles.rules = [
    "d ${service_configs.recyclarr.dataDir} 0755 recyclarr recyclarr -"
  ];
  services.recyclarr = {
    enable = true;
    command = "sync";
    schedule = "daily";
    user = "recyclarr";
    group = "recyclarr";
    # TRaSH-Guides templates + custom quality profiles; api_key fields are
    # injected at runtime by injectApiKeys (see ExecStartPre below).
    configuration = {
      radarr.movies = {
        base_url = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
        # Recyclarr is the sole authority for custom formats and scores.
        # Overwrite any manually-created CFs and delete stale ones.
        replace_existing_custom_formats = true;
        delete_old_custom_formats = true;
        include = [
          { template = "radarr-quality-definition-movie"; }
          { template = "radarr-quality-profile-remux-web-2160p"; }
          { template = "radarr-custom-formats-remux-web-2160p"; }
        ];
        # Group WEB 2160p with 1080p in the same quality tier so custom
        # format scores -- not quality ranking -- decide the winner.
        # Native 4K with HDR/DV from good release groups scores high and
        # wins; AI upscales get -10000 from the Upscaled CF and are
        # blocked by min_format_score. Untagged upscales from unknown
        # groups (score ~0) lose to well-scored 1080p (Tier 01 = +1750).
        quality_profiles = [
          {
            name = "Remux + WEB 2160p";
            min_format_score = 0;
            reset_unmatched_scores.enabled = true;
            upgrade = {
              allowed = true;
              until_quality = "Remux-2160p";
              until_score = 10000;
            };
            qualities = [
              { name = "Remux-2160p"; }
              {
                name = "WEB/Bluray";
                qualities = [
                  "WEBDL-2160p"
                  "WEBRip-2160p"
                  "Remux-1080p"
                  "Bluray-1080p"
                  "WEBDL-1080p"
                  "WEBRip-1080p"
                ];
              }
              { name = "HDTV-1080p"; }
              { name = "Bluray-720p"; }
              {
                name = "WEB 720p";
                qualities = [
                  "WEBDL-720p"
                  "WEBRip-720p"
                ];
              }
              { name = "HDTV-720p"; }
            ];
          }
        ];
        custom_formats = [
          # DV (w/o HDR fallback) - block releases with DV that lack HDR10 fallback
          {
            trash_ids = [ "923b6abef9b17f937fab56cfcf89e1f1" ];
            assign_scores_to = [
              { name = "Remux + WEB 2160p"; }
            ];
          }
          # Upscaled - block AI upscales and other upscaled-to-2160p releases
          {
            trash_ids = [ "bfd8eb01832d646a0a89c4deb46f8564" ];
            assign_scores_to = [
              {
                name = "Remux + WEB 2160p";
                score = -10000;
              }
            ];
          }
        ];
      };
      sonarr.series = {
        base_url = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
        # Recyclarr is the sole authority for custom formats and scores.
        # Overwrite any manually-created CFs and delete stale ones.
        replace_existing_custom_formats = true;
        delete_old_custom_formats = true;
        include = [
          { template = "sonarr-quality-definition-series"; }
          { template = "sonarr-v4-quality-profile-web-2160p"; }
          { template = "sonarr-v4-custom-formats-web-2160p"; }
        ];
        # Group WEB 2160p with 1080p in the same quality tier so custom
        # format scores -- not quality ranking -- decide the winner.
        # Native 4K with HDR/DV from good release groups scores high and
        # wins; AI upscales get -10000 from the Upscaled CF and are
        # blocked by min_format_score. Untagged upscales from unknown
        # groups (score ~0) lose to well-scored 1080p (Tier 01 = +1750).
        quality_profiles = [
          {
            name = "WEB-2160p";
            min_format_score = 0;
            reset_unmatched_scores.enabled = true;
            upgrade = {
              allowed = true;
              until_quality = "WEB/Bluray";
              until_score = 10000;
            };
            qualities = [
              {
                name = "WEB/Bluray";
                qualities = [
                  "WEBDL-2160p"
                  "WEBRip-2160p"
                  "Bluray-1080p Remux"
                  "Bluray-1080p"
                  "WEBDL-1080p"
                  "WEBRip-1080p"
                ];
              }
              { name = "HDTV-1080p"; }
              { name = "Bluray-720p"; }
              {
                name = "WEB 720p";
                qualities = [
                  "WEBDL-720p"
                  "WEBRip-720p"
                ];
              }
              { name = "HDTV-720p"; }
            ];
          }
        ];
        custom_formats = [
          # DV (w/o HDR fallback) - block releases with DV that lack HDR10 fallback
          {
            trash_ids = [ "9b27ab6498ec0f31a3353992e19434ca" ];
            assign_scores_to = [
              { name = "WEB-2160p"; }
            ];
          }
          # Upscaled - block AI upscales and other upscaled-to-2160p releases
          {
            trash_ids = [ "23297a736ca77c0fc8e70f8edd7ee56c" ];
            assign_scores_to = [
              {
                name = "WEB-2160p";
                score = -10000;
              }
            ];
          }
        ];
      };
    };
  };
  # Trigger immediate sync on deploy when recyclarr config changes.
  # restartTriggers on the oneshot service are unreliable (systemd may
  # no-op a restart of an inactive oneshot). Instead, embed a config
  # hash in the timer unit -- NixOS restarts changed timers reliably,
  # and OnActiveSec fires the sync within seconds.
  systemd.timers.recyclarr = {
    timerConfig.OnActiveSec = "5s";
    unitConfig.X-ConfigHash = builtins.hashString "sha256" (
      builtins.toJSON config.services.recyclarr.configuration
    );
  };
  # Order after the apps being configured; inject API keys as root before
  # recyclarr (unprivileged) starts.
  systemd.services.recyclarr = {
    after = [
      "network-online.target"
      "radarr.service"
      "sonarr.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig.ExecStartPre = [ "+${injectApiKeys}" ];
  };
}

42
services/arr/sonarr.nix Normal file
View File

@@ -0,0 +1,42 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  sonarrPort = service_configs.ports.private.sonarr.port;
  sonarrData = service_configs.sonarr.dataDir;
  mediaGroup = service_configs.media_group;
in
{
  imports = [
    # State on the SSD pool, torrent payloads on the HDD pool.
    (lib.serviceMountWithZpool "sonarr" service_configs.zpool_ssds [ sonarrData ])
    (lib.serviceMountWithZpool "sonarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    (lib.serviceFilePerms "sonarr" [
      "Z ${sonarrData} 0700 ${config.services.sonarr.user} ${config.services.sonarr.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "sonarr";
      port = sonarrPort;
      auth = true;
    })
  ];

  # Shared media directories: setgid + group-writable so every member of the
  # media group can manage library files.
  systemd.tmpfiles.rules = [
    "d /torrents/media 2775 root ${mediaGroup} -"
    "d ${service_configs.media.tvDir} 2775 root ${mediaGroup} -"
    "d ${service_configs.media.moviesDir} 2775 root ${mediaGroup} -"
  ];

  services.sonarr = {
    enable = true;
    dataDir = sonarrData;
    settings = {
      server.port = sonarrPort;
      # Updates come from nixpkgs, not Sonarr's self-updater.
      update.mechanism = "external";
    };
  };

  users.users.${config.services.sonarr.user}.extraGroups = [ mediaGroup ];
}

View File

@@ -0,0 +1,42 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
{
  # Oneshot audit that cross-references qBittorrent with Radarr/Sonarr; the
  # actual logic lives in the adjacent torrent-audit.py, configured entirely
  # through the environment block below.
  systemd.services.torrent-audit = {
    description = "Audit qBittorrent for unmanaged and abandoned upgrade torrents";
    after = [
      "network-online.target"
      "sonarr.service"
      "radarr.service"
      "qbittorrent.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      Type = "oneshot";
      # + prefix: run as root so the script can read the *arr config.xml
      # files (0700) to extract API keys.
      ExecStart = "+${
        pkgs.python3.withPackages (
          ps: with ps; [
            pyarr
            qbittorrent-api
          ]
        )
      }/bin/python ${./torrent-audit.py}";
      TimeoutSec = 300;
    };
    environment = {
      # qBittorrent lives in the wg netns; the *arr apps are on localhost.
      QBITTORRENT_URL = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.torrent.port}";
      RADARR_URL = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
      RADARR_CONFIG = "${service_configs.radarr.dataDir}/config.xml";
      SONARR_URL = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
      SONARR_CONFIG = "${service_configs.sonarr.dataDir}/config.xml";
      # Comma-separated qBittorrent categories to audit.
      CATEGORIES = lib.concatStringsSep "," (builtins.attrNames service_configs.torrent.categories);
      # When "true", apply audit:* tags in qBittorrent (see tag_torrents).
      TAG_TORRENTS = "true";
    };
  };
}

View File

@@ -0,0 +1,382 @@
#!/usr/bin/env python3
"""
Audit qBittorrent torrents against Radarr/Sonarr.
Reports two categories:
UNMANAGED -- torrents in qBittorrent that no *arr service has ever touched.
These were added manually or by some other tool.
ABANDONED -- torrents that *arr grabbed but later replaced with a better
version. The old torrent is still seeding while the library
points to the new one.
Abandoned detection uses API cross-referencing (not filesystem hardlinks) and
verifies against the *arr's current file state:
1. HISTORY -- group imports by content unit (movieId / episodeId); the
most recent import is the keeper, older ones are candidates.
2. CURRENT -- verify against the *arr's active file mapping.
"""
import logging
import os
import sys
from collections import defaultdict
from xml.etree import ElementTree
import qbittorrentapi
from pyarr import RadarrAPI, SonarrAPI
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
stream=sys.stderr,
)
log = logging.getLogger(__name__)
def get_api_key(config_path: str) -> str:
    """Return the <ApiKey> text from an *arr config.xml file."""
    document = ElementTree.parse(config_path)
    key_element = document.find(".//ApiKey")
    return key_element.text
def paginate(arr_client, endpoint: str, page_size: int = 1000):
    """Yield every record from a paged pyarr endpoint (get_<endpoint>)."""
    fetch = getattr(arr_client, f"get_{endpoint}")
    page_number = 1
    while True:
        payload = fetch(page=page_number, page_size=page_size)
        for record in payload["records"]:
            yield record
        # Stop once the pages seen so far cover the reported total.
        if page_number * page_size >= payload["totalRecords"]:
            break
        page_number += 1
def get_qbit_torrents(qbit_client, category: str) -> dict[str, dict]:
    """Map uppercased info-hash -> torrent dict for one qBittorrent category."""
    by_hash: dict[str, dict] = {}
    for torrent in qbit_client.torrents_info(category=category):
        by_hash[torrent["hash"].upper()] = torrent
    return by_hash
def gib(size_bytes: int) -> str:
    """Format a byte count as GiB with one decimal place (no unit suffix)."""
    gib_value = size_bytes / (1 << 30)
    return f"{gib_value:.1f}"
# ---------------------------------------------------------------------------
# Collect all known hashes from *arr history + queue
# ---------------------------------------------------------------------------
def collect_all_known_hashes(arr_client, page_size: int = 1000) -> set[str]:
    """Every downloadId (uppercased) this *arr has in its queue or history."""
    known: set[str] = set()
    for source in ("queue", "history"):
        for record in paginate(arr_client, source, page_size):
            download_id = (record.get("downloadId") or "").upper()
            if download_id:
                known.add(download_id)
    return known
# ---------------------------------------------------------------------------
# Unmanaged: torrents with hashes not in any *arr history/queue
# ---------------------------------------------------------------------------
def find_unmanaged(qbit_torrents: dict, known_hashes: set) -> list[dict]:
    """Torrents whose hash no *arr has ever seen, oldest-added first."""
    unmanaged = [
        torrent
        for torrent_hash, torrent in qbit_torrents.items()
        if torrent_hash not in known_hashes
    ]
    unmanaged.sort(key=lambda t: t["added_on"])
    return unmanaged
# ---------------------------------------------------------------------------
# Abandoned movies: group imports by movieId, older = abandoned
# ---------------------------------------------------------------------------
def find_movie_abandoned(radarr, qbit_movies):
    """Find Radarr upgrade leftovers still present in qBittorrent.

    Groups downloadFolderImported history events by movieId; per movie the
    most recent import is the keeper and all older imports are abandoned
    candidates.  Each candidate still in qBittorrent is reported with a
    SAFE/REVIEW status cross-checked against Radarr's current file state.

    Returns a list of report dicts sorted by torrent added_on.
    """
    log.info("Analysing Radarr import history ...")
    imports_by_movie = defaultdict(list)
    for rec in paginate(radarr, "history"):
        if rec.get("eventType") != "downloadFolderImported":
            continue
        did = (rec.get("downloadId") or "").upper()
        if not did:
            continue
        mid = rec.get("movieId")
        if not mid:
            continue
        imports_by_movie[mid].append(
            {"downloadId": did, "date": rec["date"]}
        )
    # Identify keeper (latest) and abandoned (older) hashes per movie.
    abandoned_hashes: set[str] = set()
    keeper_hashes: set[str] = set()
    hash_to_movie: dict[str, int] = {}
    for mid, events in imports_by_movie.items():
        ordered = sorted(events, key=lambda e: e["date"])
        keeper_hashes.add(ordered[-1]["downloadId"])
        for e in ordered[:-1]:
            abandoned_hashes.add(e["downloadId"])
            hash_to_movie[e["downloadId"]] = mid
    # A hash that is a keeper for *any* movie must not be deleted.
    abandoned_hashes -= keeper_hashes
    log.info("Fetching Radarr current movie state ...")
    radarr_movies = {m["id"]: m for m in radarr.get_movie()}
    results = []
    for ahash in abandoned_hashes:
        # Only report hashes still present in the qBittorrent movies category.
        torrent = qbit_movies.get(ahash)
        if torrent is None:
            continue
        mid = hash_to_movie.get(ahash)
        movie = radarr_movies.get(mid) if mid else None
        mf = (movie or {}).get("movieFile") or {}
        current_quality = (mf.get("quality") or {}).get("quality", {}).get("name", "?")
        current_size = mf.get("size", 0)
        status = "SAFE"
        notes = []
        # Downgrade to REVIEW when Radarr no longer tracks a file, or the
        # abandoned torrent is >5% larger than the current file (suggesting
        # the "upgrade" may not actually be better).
        if not movie or not movie.get("hasFile"):
            notes.append("movie removed or has no file in Radarr")
            status = "REVIEW"
        elif torrent["size"] > current_size * 1.05:
            notes.append(
                f"abandoned is larger than current "
                f"({gib(torrent['size'])} > {gib(current_size)} GiB)"
            )
            status = "REVIEW"
        results.append(
            {
                "name": torrent["name"],
                "size": torrent["size"],
                "state": torrent["state"],
                "hash": torrent["hash"],
                "added_on": torrent["added_on"],
                "status": status,
                "notes": notes,
                "current_quality": current_quality,
            }
        )
    return sorted(results, key=lambda r: r["added_on"])
# ---------------------------------------------------------------------------
# Abandoned TV: group imports by episodeId, a hash is abandoned only when
# it is NOT the latest import for ANY episode it covers.
# ---------------------------------------------------------------------------
def find_tv_abandoned(sonarr, qbit_tvshows):
    """Find Sonarr upgrade leftovers still present in qBittorrent."""
    log.info("Analysing Sonarr import history ...")
    imports_per_episode = defaultdict(list)
    seen_download_ids: set[str] = set()
    series_for_hash: dict[str, int] = {}
    for record in paginate(sonarr, "history"):
        if record.get("eventType") != "downloadFolderImported":
            continue
        download_id = (record.get("downloadId") or "").upper()
        episode_id = record.get("episodeId")
        if not download_id or not episode_id:
            continue
        imports_per_episode[episode_id].append(
            {"downloadId": download_id, "date": record["date"]}
        )
        seen_download_ids.add(download_id)
        series_id = record.get("seriesId")
        if series_id:
            series_for_hash[download_id] = series_id
    # A hash stays "active" while it is the newest import of any episode.
    active_hashes: set[str] = set()
    for episode_events in imports_per_episode.values():
        newest = max(episode_events, key=lambda e: e["date"])
        active_hashes.add(newest["downloadId"])
    abandoned = seen_download_ids - active_hashes
    log.info("Fetching Sonarr current series state ...")
    known_series = {s["id"] for s in sonarr.get_series()}
    report = []
    for torrent_hash in abandoned:
        torrent = qbit_tvshows.get(torrent_hash)
        if torrent is None:
            continue
        verdict = "SAFE"
        remarks = []
        series_id = series_for_hash.get(torrent_hash)
        if series_id and series_id not in known_series:
            remarks.append("series removed from Sonarr")
            verdict = "REVIEW"
        report.append(
            {
                "name": torrent["name"],
                "size": torrent["size"],
                "state": torrent["state"],
                "hash": torrent["hash"],
                "added_on": torrent["added_on"],
                "status": verdict,
                "notes": remarks,
            }
        )
    return sorted(report, key=lambda r: r["added_on"])
# ---------------------------------------------------------------------------
# Report
# ---------------------------------------------------------------------------
def print_section(torrents, show_status=False):
    """Pretty-print one report section followed by a count/size summary."""
    if not torrents:
        print(" (none)\n")
        return
    combined = sum(entry["size"] for entry in torrents)
    for entry in torrents:
        prefix = f"[{entry['status']:6s}] " if show_status else " "
        pad = " " * len(prefix)
        print(f" {prefix}{entry['name']}")
        extra = f"{gib(entry['size'])} GiB | {entry['state']}"
        print(f" {pad}{extra}")
        for note in entry.get("notes", []):
            print(f" {pad}** {note}")
        print()
    if not show_status:
        print(f" total={len(torrents)} ({gib(combined)} GiB)")
    else:
        safe = [entry for entry in torrents if entry["status"] == "SAFE"]
        review = [entry for entry in torrents if entry["status"] == "REVIEW"]
        print(
            f" total={len(torrents)} ({gib(combined)} GiB) | "
            f"safe={len(safe)} | review={len(review)}"
        )
    print()
# The full set of tags this audit manages; anything else is left alone.
AUDIT_TAGS = {"audit:unmanaged", "audit:abandoned-safe", "audit:abandoned-review"}


def tag_torrents(qbit_client, qbit_torrents, all_known, all_abandoned):
    """Reconcile audit:* tags in qBittorrent with this run's findings.

    Each torrent ends up with at most one audit tag (or none); stale audit
    tags from earlier runs are removed.  Non-audit tags are never touched.
    """
    log.info("Tagging torrents ...")
    abandoned_by_hash = {t["hash"].upper(): t for t in all_abandoned}
    all_hashes = []
    for torrents in qbit_torrents.values():
        all_hashes.extend(torrents.keys())
    for h in all_hashes:
        current_tags = set()  # NOTE(review): unused — candidate for removal
        torrent_info = None
        # Locate the torrent dict regardless of which category holds it.
        for torrents in qbit_torrents.values():
            if h in torrents:
                torrent_info = torrents[h]
                break
        if not torrent_info:
            continue
        # qBittorrent reports tags as one comma-separated string.
        existing_tags = {t.strip() for t in torrent_info.get("tags", "").split(",") if t.strip()}
        existing_audit_tags = existing_tags & AUDIT_TAGS
        # Decide the single desired audit tag (or None) for this torrent.
        if h in abandoned_by_hash:
            status = abandoned_by_hash[h]["status"]
            desired = "audit:abandoned-safe" if status == "SAFE" else "audit:abandoned-review"
        elif h not in all_known:
            desired = "audit:unmanaged"
        else:
            desired = None
        tags_to_remove = existing_audit_tags - ({desired} if desired else set())
        tags_to_add = ({desired} if desired else set()) - existing_audit_tags
        # The API expects the torrent's original hash, not the uppercased key.
        low_hash = torrent_info["hash"]
        for tag in tags_to_remove:
            qbit_client.torrents_remove_tags(tags=tag, torrent_hashes=low_hash)
        for tag in tags_to_add:
            qbit_client.torrents_add_tags(tags=tag, torrent_hashes=low_hash)
    log.info("Tagging complete")
def main():
    """Entry point: audit qBittorrent against Radarr/Sonarr and print a report.

    All connection settings come from environment variables (set by the
    systemd unit): QBITTORRENT_URL, RADARR_URL, RADARR_CONFIG, SONARR_URL,
    SONARR_CONFIG, optional CATEGORIES (comma-separated) and TAG_TORRENTS.
    """
    qbit_url = os.environ["QBITTORRENT_URL"]
    radarr_url = os.environ["RADARR_URL"]
    radarr_config = os.environ["RADARR_CONFIG"]
    sonarr_url = os.environ["SONARR_URL"]
    sonarr_config = os.environ["SONARR_CONFIG"]
    categories = os.environ.get("CATEGORIES", "tvshows,movies,anime").split(",")
    # API keys are read straight from each app's config.xml on disk.
    radarr_key = get_api_key(radarr_config)
    sonarr_key = get_api_key(sonarr_config)
    radarr = RadarrAPI(radarr_url, radarr_key)
    sonarr = SonarrAPI(sonarr_url, sonarr_key)
    qbit = qbittorrentapi.Client(host=qbit_url)
    log.info("Getting qBittorrent state ...")
    qbit_torrents = {cat: get_qbit_torrents(qbit, cat) for cat in categories}
    for cat, torrents in qbit_torrents.items():
        log.info("  %s: %d torrents", cat, len(torrents))
    log.info("Collecting known hashes from Sonarr ...")
    sonarr_hashes = collect_all_known_hashes(sonarr)
    log.info("  %d unique hashes", len(sonarr_hashes))
    log.info("Collecting known hashes from Radarr ...")
    radarr_hashes = collect_all_known_hashes(radarr)
    log.info("  %d unique hashes", len(radarr_hashes))
    all_known = sonarr_hashes | radarr_hashes
    # -- Unmanaged --
    print("\n========== UNMANAGED TORRENTS ==========\n")
    for cat in categories:
        unmanaged = find_unmanaged(qbit_torrents[cat], all_known)
        print(f"--- {cat} ({len(unmanaged)} unmanaged / {len(qbit_torrents[cat])} total) ---\n")
        print_section(unmanaged)
    # -- Abandoned --
    print("========== ABANDONED UPGRADE LEFTOVERS ==========\n")
    movie_abandoned = find_movie_abandoned(
        radarr, qbit_torrents.get("movies", {})
    )
    print(f"--- movies ({len(movie_abandoned)} abandoned) ---\n")
    print_section(movie_abandoned, show_status=True)
    tv_abandoned = find_tv_abandoned(
        sonarr, qbit_torrents.get("tvshows", {})
    )
    print(f"--- tvshows ({len(tv_abandoned)} abandoned) ---\n")
    print_section(tv_abandoned, show_status=True)
    # -- Summary --
    all_abandoned = movie_abandoned + tv_abandoned
    safe = [t for t in all_abandoned if t["status"] == "SAFE"]
    print("=" * 50)
    print(
        f"ABANDONED: {len(all_abandoned)} total ({len(safe)} safe to delete)"
    )
    print(f"SAFE TO RECLAIM: {gib(sum(t['size'] for t in safe))} GiB")
    # -- Tagging (opt-in via TAG_TORRENTS) --
    if os.environ.get("TAG_TORRENTS", "").lower() in ("1", "true", "yes"):
        tag_torrents(qbit, qbit_torrents, all_known, all_abandoned)


if __name__ == "__main__":
    main()

113
services/bitmagnet.nix Normal file
View File

@@ -0,0 +1,113 @@
# Bitmagnet DHT indexer, confined to the "wg" VPN namespace, plus a oneshot
# recovery unit that clears the *arr apps' indexer failure backoff whenever
# Bitmagnet (re)starts.
{
  pkgs,
  service_configs,
  config,
  lib,
  ...
}:
let
  # Ports are stored as integers in service_configs; string interpolation
  # below needs them as strings.
  prowlarrPort = toString service_configs.ports.private.prowlarr.port;
  sonarrPort = toString service_configs.ports.private.sonarr.port;
  radarrPort = toString service_configs.ports.private.radarr.port;
  bitmagnetPort = toString service_configs.ports.private.bitmagnet.port;
  # Host-side bridge address of the VPN namespace; Sonarr/Radarr are reached
  # through it from inside the namespace.
  bridgeAddr = config.vpnNamespaces.wg.bridgeAddress;
  # config.xml files from which the per-app API keys are extracted at runtime.
  prowlarrConfigXml = "${service_configs.prowlarr.dataDir}/config.xml";
  sonarrConfigXml = "${service_configs.sonarr.dataDir}/config.xml";
  radarrConfigXml = "${service_configs.radarr.dataDir}/config.xml";
  curl = "${pkgs.curl}/bin/curl";
  jq = "${pkgs.jq}/bin/jq";
  # Clears the escalating failure backoff for the Bitmagnet indexer across
  # Prowlarr, Sonarr, and Radarr so searches resume immediately after
  # Bitmagnet restarts instead of waiting hours for disable timers to expire.
  recoveryScript = pkgs.writeShellScript "prowlarr-bitmagnet-recovery" ''
    set -euo pipefail
    wait_for() {
      for _ in $(seq 1 "$2"); do
        ${curl} -sf --max-time 5 "$1" > /dev/null && return 0
        sleep 5
      done
      echo "$1 not reachable, aborting" >&2; exit 1
    }
    # Test a Bitmagnet-named indexer to clear its failure status.
    # A successful test triggers RecordSuccess() which resets the backoff.
    clear_status() {
      local key indexer
      key=$(${lib.extractArrApiKey ''"$3"''}) || return 0
      indexer=$(${curl} -sf --max-time 10 \
        -H "X-Api-Key: $key" "$2/api/$1/indexer" | \
        ${jq} 'first(.[] | select(.name | test("Bitmagnet"; "i")))') || return 0
      [ -n "$indexer" ] && [ "$indexer" != "null" ] || return 0
      ${curl} -sf --max-time 30 \
        -H "X-Api-Key: $key" -H "Content-Type: application/json" \
        -X POST "$2/api/$1/indexer/test" -d "$indexer" > /dev/null
    }
    wait_for "http://localhost:${bitmagnetPort}" 12
    wait_for "http://localhost:${prowlarrPort}/ping" 6
    # Prowlarr first downstream apps route searches through it.
    clear_status v1 "http://localhost:${prowlarrPort}" "${prowlarrConfigXml}" || true
    clear_status v3 "http://${bridgeAddr}:${sonarrPort}" "${sonarrConfigXml}" || true
    clear_status v3 "http://${bridgeAddr}:${radarrPort}" "${radarrConfigXml}" || true
  '';
in
{
  imports = [
    # Open the Bitmagnet port inside the VPN namespace and expose the web UI
    # through Caddy (authenticated, routed via the namespace).
    (lib.vpnNamespaceOpenPort service_configs.ports.private.bitmagnet.port "bitmagnet")
    (lib.mkCaddyReverseProxy {
      subdomain = "bitmagnet";
      port = service_configs.ports.private.bitmagnet.port;
      auth = true;
      vpn = true;
    })
  ];

  services.bitmagnet = {
    enable = true;
    settings = {
      postgres = {
        # Connect over the local UNIX socket (peer auth), not TCP.
        host = service_configs.postgres.socket;
      };
      http_server = {
        # TODO! make issue about this being a string and not a `port` type
        port = ":" + (toString service_configs.ports.private.bitmagnet.port);
      };
    };
  };

  # The upstream default (Restart=on-failure) leaves Bitmagnet dead after
  # clean exits (e.g. systemd stop during deploy). Always restart it.
  systemd.services.bitmagnet.serviceConfig = {
    Restart = lib.mkForce "always";
    RestartSec = 10;
  };

  # After Bitmagnet restarts, clear the escalating failure backoff across
  # Prowlarr, Sonarr, and Radarr so searches resume immediately instead of
  # waiting hours for the disable timers to expire.
  systemd.services.prowlarr-bitmagnet-recovery = {
    description = "Clear Prowlarr/Sonarr/Radarr failure status for Bitmagnet indexer";
    after = [
      "bitmagnet.service"
      "prowlarr.service"
      "sonarr.service"
      "radarr.service"
    ];
    # bindsTo + wantedBy on bitmagnet.service: this oneshot is started and
    # stopped together with Bitmagnet itself.
    bindsTo = [ "bitmagnet.service" ];
    wantedBy = [ "bitmagnet.service" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      ExecStart = recoveryScript;
      # Same VPN namespace as Bitmagnet and Prowlarr.
      NetworkNamespacePath = "/run/netns/wg";
    };
  };
}

45
services/bitwarden.nix Normal file
View File

@@ -0,0 +1,45 @@
# Vaultwarden (Bitwarden-compatible server) behind Caddy, with fail2ban
# protection on failed logins.
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
{
  imports = [
    # Helpers from lib: place the vaultwarden state dir on the SSD zpool and
    # enforce 0700 ownership on it.
    (lib.serviceMountWithZpool "vaultwarden" service_configs.zpool_ssds [
      service_configs.vaultwarden.path
    ])
    (lib.serviceFilePerms "vaultwarden" [
      "Z ${service_configs.vaultwarden.path} 0700 vaultwarden vaultwarden"
    ])
    # Ban IPs after repeated failed logins; regex matches vaultwarden's
    # own "Username or password is incorrect" log line.
    (lib.mkFail2banJail {
      name = "vaultwarden";
      failregex = ''^.*Username or password is incorrect\. Try again\. IP: <HOST>\..*$'';
    })
  ];

  services.vaultwarden = {
    enable = true;
    dbBackend = "postgresql";
    configurePostgres = true;
    config = {
      # Refer to https://github.com/dani-garcia/vaultwarden/blob/main/.env.template
      DOMAIN = "https://bitwarden.${service_configs.https.domain}";
      # Private instance: no public account registration.
      SIGNUPS_ALLOWED = false;
      # Bind to loopback only; Caddy terminates TLS and proxies in.
      ROCKET_ADDRESS = "127.0.0.1";
      ROCKET_PORT = service_configs.ports.private.vaultwarden.port;
      ROCKET_LOG = "critical";
    };
  };

  # Reverse proxy; port is read back from the vaultwarden config so the two
  # can never drift apart.
  services.caddy.virtualHosts."bitwarden.${service_configs.https.domain}".extraConfig = ''
    encode zstd gzip
    reverse_proxy :${toString config.services.vaultwarden.config.ROCKET_PORT} {
      header_up X-Real-IP {remote_host}
    }
  '';
}

162
services/caddy/caddy.nix Normal file
View File

@@ -0,0 +1,162 @@
# Caddy: serves the personal Hugo site, redirects the old domain (with
# on-demand TLS), and provides wildcard certificates via Njalla DNS-01.
{
  config,
  service_configs,
  pkgs,
  lib,
  inputs,
  ...
}:
let
  # Hugo themes pinned by commit + hash for reproducible builds.
  theme = pkgs.fetchFromGitHub {
    owner = "kaiiiz";
    repo = "hugo-theme-monochrome";
    rev = "d17e05715e91f41a842f2656e6bdd70cba73de91";
    sha256 = "h9I2ukugVrldIC3SXefS0L3R245oa+TuRChOCJJgF24=";
  };
  hugo-neko = pkgs.fetchFromGitHub {
    owner = "ystepanoff";
    repo = "hugo-neko";
    rev = "5a50034acbb1ae0cec19775af64e7167ca22725e";
    sha256 = "VLwr4zEeFQU/b+vj0XTLSuEiosuNFu2du4uud7m8bnw=";
  };
  # The site is rendered at build time; $out is the static tree Caddy serves.
  hugoWebsite = pkgs.stdenv.mkDerivation {
    pname = "hugo-site";
    version = "0.1";
    src = inputs.website;
    nativeBuildInputs = with pkgs; [
      hugo
      go
      git
    ];
    installPhase = ''
      rm -fr themes/theme modules/hugo-neko
      cp -r ${theme} themes/theme
      cp -r ${hugo-neko} modules/hugo-neko
      hugo --minify -d $out;
    '';
  };
  newDomain = service_configs.https.domain;
  oldDomain = service_configs.https.old_domain;
in
{
  imports = [
    # Keep Caddy's state (certs etc.) on the SSD zpool.
    (lib.serviceMountWithZpool "caddy" service_configs.zpool_ssds [
      config.services.caddy.dataDir
    ])
  ];

  services.caddy = {
    enable = true;
    email = "titaniumtown@proton.me";
    # Build with Njalla DNS provider for DNS-01 ACME challenges (wildcard certs)
    package = pkgs.caddy.withPlugins {
      plugins = [ "github.com/caddy-dns/njalla@v0.0.0-20250823094507-f709141f1fe6" ];
      hash = "sha256-rrOAR6noTDpV/I/hZXxhz0OXVJKu0mFQRq87RUrpmzw=";
    };
    globalConfig = ''
      # Wildcard cert for *.${newDomain} via DNS-01 challenge
      acme_dns njalla {
        api_token {env.NJALLA_API_TOKEN}
      }
      # On-demand TLS for old domain redirects
      on_demand_tls {
        ask http://localhost:9123/check
      }
    '';
    # Internal endpoint to validate on-demand TLS requests
    # Only allows certs for *.${oldDomain}
    extraConfig = ''
      http://localhost:9123 {
        @allowed expression {query.domain}.endsWith(".${oldDomain}") || {query.domain} == "${oldDomain}" || {query.domain} == "www.${oldDomain}"
        respond @allowed 200
        respond 403
      }
    '';
    virtualHosts = {
      # Main site: static file server over the built Hugo tree.
      ${newDomain} = {
        extraConfig = ''
          root * ${hugoWebsite}
          file_server browse
        '';
        serverAliases = [ "www.${newDomain}" ];
      };
      # Redirect old domain (bare + www) to new domain
      ${oldDomain} = {
        extraConfig = ''
          redir https://${newDomain}{uri} permanent
        '';
        serverAliases = [ "www.${oldDomain}" ];
      };
      # Wildcard redirect for all old domain subdomains
      # Uses on-demand TLS - certs issued automatically on first request
      "*.${oldDomain}" = {
        extraConfig = ''
          tls {
            on_demand
          }
          # {labels.2} extracts subdomain from *.gardling.com
          redir https://{labels.2}.${newDomain}{uri} permanent
        '';
      };
    };
  };

  # Inject Njalla API token for DNS-01 challenge
  systemd.services.caddy.serviceConfig.EnvironmentFile = config.age.secrets.njalla-api-token-env.path;

  # Ensure the data dir exists with Caddy-only permissions.
  systemd.tmpfiles.rules = [
    "d ${config.services.caddy.dataDir} 700 ${config.services.caddy.user} ${config.services.caddy.group}"
  ];
  systemd.packages = with pkgs; [ nssTools ];

  networking.firewall.allowedTCPPorts = [
    service_configs.ports.public.https.port
    # http (but really acmeCA challenges)
    service_configs.ports.public.http.port
  ];
  # UDP on the HTTPS port is needed for HTTP/3 (QUIC).
  networking.firewall.allowedUDPPorts = [
    service_configs.ports.public.https.port
  ];

  # Protect Caddy basic auth endpoints from brute force attacks
  services.fail2ban.jails.caddy-auth = {
    enabled = true;
    settings = {
      backend = "auto";
      port = "http,https";
      logpath = "/var/log/caddy/access-*.log";
      # defaults: maxretry=5, findtime=10m, bantime=10m
      # Ignore local network IPs - NAT hairpinning causes all LAN traffic to
      # appear from the router IP (192.168.1.1). Banning it blocks all internal access.
      ignoreip = "127.0.0.1/8 ::1 192.168.1.0/24";
    };
    filter.Definition = {
      # Only match 401s where an Authorization header was actually sent.
      # Without this, the normal HTTP Basic Auth challenge-response flow
      # (browser probes without credentials, gets 401, then resends with
      # credentials) counts every page visit as a "failure."
      failregex = ''^.*"remote_ip":"<HOST>".*"Authorization":\["REDACTED"\].*"status":401.*$'';
      ignoreregex = "";
      datepattern = ''"ts":{Epoch}\.'';
    };
  };
}

View File

@@ -0,0 +1,39 @@
# Static Hugo site for the senior project, served by Caddy at
# senior-project.<domain>.
#
# Fix: dropped the unused `config` and `lib` module arguments; the module
# function still accepts the full argument set via `...`, so callers are
# unaffected.
{
  pkgs,
  service_configs,
  inputs,
  ...
}:
let
  # Hugo theme pinned by commit + hash (same theme/rev as caddy.nix).
  theme = pkgs.fetchFromGitHub {
    owner = "kaiiiz";
    repo = "hugo-theme-monochrome";
    rev = "d17e05715e91f41a842f2656e6bdd70cba73de91";
    sha256 = "h9I2ukugVrldIC3SXefS0L3R245oa+TuRChOCJJgF24=";
  };
  # Render the site at build time; $out is the static tree Caddy serves.
  hugoWebsite = pkgs.stdenv.mkDerivation {
    pname = "hugo-site";
    version = "0.1";
    src = inputs.senior_project-website;
    nativeBuildInputs = with pkgs; [
      hugo
    ];
    installPhase = ''
      rm -fr themes/theme
      cp -rv ${theme} themes/theme
      hugo --minify -d $out;
    '';
  };
in
{
  services.caddy.virtualHosts."senior-project.${service_configs.https.domain}".extraConfig = ''
    root * ${hugoWebsite}
    file_server browse
  '';
}

View File

@@ -0,0 +1,7 @@
# Caddy entry point: the main reverse proxy/site config plus the legacy
# senior-project site.
{
  imports = [
    ./caddy.nix
    # KEEP UNTIL 2028
    ./caddy_senior_project.nix
  ];
}

27
services/ddns-updater.nix Normal file
View File

@@ -0,0 +1,27 @@
# Dynamic DNS updater; refreshes records every 5 minutes using an
# agenix-managed configuration file.
{
  config,
  lib,
  ...
}:
{
  services.ddns-updater = {
    enable = true;
    environment = {
      PERIOD = "5m";
      # ddns-updater reads config from this path at runtime
      CONFIG_FILEPATH = config.age.secrets.ddns-updater-config.path;
    };
  };

  # Replace the module's DynamicUser with a static user/group — presumably so
  # the agenix secret can be made readable by a stable uid (TODO confirm).
  users.users.ddns-updater = {
    isSystemUser = true;
    group = "ddns-updater";
  };
  users.groups.ddns-updater = { };
  systemd.services.ddns-updater.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "ddns-updater";
    Group = "ddns-updater";
  };
}

View File

@@ -0,0 +1,43 @@
# Firefox sync server (single-node) behind Caddy, backed by the shared
# PostgreSQL instance.
#
# Fix: dropped the unused `pkgs` module argument; the module function still
# accepts the full argument set via `...`, so callers are unaffected.
{
  config,
  lib,
  service_configs,
  ...
}:
{
  imports = [
    (lib.mkCaddyReverseProxy {
      domain = service_configs.firefox_syncserver.domain;
      port = service_configs.ports.private.firefox_syncserver.port;
    })
  ];

  services.firefox-syncserver = {
    enable = true;
    database = {
      type = "postgresql";
      # The database/role are created by services.postgresql below, not by
      # the syncserver module itself.
      createLocally = false;
      user = "firefox_syncserver";
    };
    # Agenix-managed environment file with the server secrets.
    secrets = config.age.secrets.firefox-syncserver-env.path;
    settings.port = service_configs.ports.private.firefox_syncserver.port;
    # Single-node deployment: this host serves its own token + storage node.
    singleNode = {
      enable = true;
      hostname = service_configs.firefox_syncserver.domain;
      url = "https://${service_configs.firefox_syncserver.domain}";
      capacity = 1;
    };
  };

  services.postgresql = {
    ensureDatabases = [ "firefox_syncserver" ];
    ensureUsers = [
      {
        name = "firefox_syncserver";
        ensureDBOwnership = true;
      }
    ];
  };
}

View File

@@ -0,0 +1,50 @@
# Gitea Actions runner "muffin", registered against the co-located Gitea
# instance and running jobs directly on the host (label "nix:host").
# NOTE(review): `service_configs` is accepted but currently unused here.
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
{
  services.gitea-actions-runner.instances.muffin = {
    enable = true;
    name = "muffin";
    # Register against the local Gitea (its configured ROOT_URL).
    url = config.services.gitea.settings.server.ROOT_URL;
    tokenFile = config.age.secrets.gitea-runner-token.path;
    labels = [ "nix:host" ];
    # Tools made available to jobs that run on the host.
    hostPackages = with pkgs; [
      bash
      coreutils
      curl
      gawk
      git
      git-crypt
      gnugrep
      gnused
      jq
      nix
      nodejs
      openssh
    ];
    settings = {
      runner = {
        # One concurrent job; generous timeout for long Nix builds.
        capacity = 1;
        timeout = "6h";
      };
    };
  };

  # Override DynamicUser to use our static gitea-runner user, and ensure
  # the runner doesn't start before the co-located gitea instance is ready
  # (upstream can't assume locality, so this dependency is ours to add).
  systemd.services."gitea-runner-muffin" = {
    requires = [ "gitea.service" ];
    after = [ "gitea.service" ];
    serviceConfig = {
      DynamicUser = lib.mkForce false;
      User = "gitea-runner";
      Group = "gitea-runner";
    };
    # CI jobs SSH out with a pinned deploy key and a pre-seeded known_hosts
    # file (strict host key checking).
    environment.GIT_SSH_COMMAND = "ssh -i /run/agenix/ci-deploy-key -o StrictHostKeyChecking=yes -o UserKnownHostsFile=/etc/ci-known-hosts";
  };
}

65
services/gitea.nix Normal file
View File

@@ -0,0 +1,65 @@
# Personal Gitea instance: SSD-backed state, Caddy reverse proxy, fail2ban
# on failed logins, PostgreSQL over the local socket, SSH-only git transport.
{
  pkgs,
  lib,
  config,
  service_configs,
  ...
}:
{
  imports = [
    # Helpers from lib: state dir on the SSD zpool with 0700 ownership.
    (lib.serviceMountWithZpool "gitea" service_configs.zpool_ssds [ config.services.gitea.stateDir ])
    (lib.serviceFilePerms "gitea" [
      "Z ${config.services.gitea.stateDir} 0700 ${config.services.gitea.user} ${config.services.gitea.group}"
    ])
    (lib.mkCaddyReverseProxy {
      domain = service_configs.gitea.domain;
      port = service_configs.ports.private.gitea.port;
    })
    # Ban IPs that repeatedly fail authentication; regex matches Gitea's log line.
    (lib.mkFail2banJail {
      name = "gitea";
      failregex = "^.*Failed authentication attempt for .* from <HOST>:.*$";
    })
  ];

  services.gitea = {
    enable = true;
    appName = "Simon Gardling's Gitea instance";
    stateDir = service_configs.gitea.dir;
    database = {
      type = "postgres";
      # Connect over the local UNIX socket (peer auth), not TCP.
      socket = service_configs.postgres.socket;
    };
    settings = {
      server = {
        SSH_USER = "gitea";
        DOMAIN = service_configs.gitea.domain;
        ROOT_URL = "https://" + config.services.gitea.settings.server.DOMAIN;
        HTTP_PORT = service_configs.ports.private.gitea.port;
        LANDING_PAGE = "/explore/repos";
        # Git over HTTP disabled; clone/push go through SSH only.
        DISABLE_HTTP_GIT = true;
      };
      session = {
        # Only send session cookies over HTTPS.
        COOKIE_SECURE = true;
      };
      # only I shall use gitea
      service.DISABLE_REGISTRATION = true;
      actions.ENABLED = true;
    };
  };

  services.postgresql = {
    ensureDatabases = [ config.services.gitea.user ];
    ensureUsers = [
      {
        name = config.services.gitea.database.user;
        ensureDBOwnership = true;
        ensureClauses.login = true;
      }
    ];
  };

  # Permit the gitea system user to accept SSH connections (git over SSH).
  services.openssh.settings.AllowUsers = [ config.services.gitea.user ];
}

View File

@@ -0,0 +1,698 @@
# Grafana "System Overview" dashboard declared as a Nix attrset and serialized
# to JSON under /etc/grafana-dashboards, where the file-based dashboard
# provider (configured in grafana.nix) picks it up.
{
  ...
}:
let
  # Shorthand for the provisioned Prometheus datasource; the uid matches the
  # datasource provisioned in grafana.nix.
  promDs = {
    type = "prometheus";
    uid = "prometheus";
  };
  dashboard = {
    editable = true;
    graphTooltip = 1;
    schemaVersion = 39;
    tags = [
      "system"
      "monitoring"
    ];
    time = {
      from = "now-6h";
      to = "now";
    };
    timezone = "browser";
    title = "System Overview";
    uid = "system-overview";
    # Annotation layers: tag-based layers are fed by the external annotation
    # services (jellyfin / zfs-scrub); the LLM layer is query-driven.
    annotations.list = [
      {
        name = "Jellyfin Streams";
        datasource = {
          type = "grafana";
          uid = "-- Grafana --";
        };
        enable = true;
        iconColor = "green";
        showIn = 0;
        type = "tags";
        tags = [ "jellyfin" ];
      }
      {
        name = "ZFS Scrubs";
        datasource = {
          type = "grafana";
          uid = "-- Grafana --";
        };
        enable = true;
        iconColor = "orange";
        showIn = 0;
        type = "tags";
        tags = [ "zfs-scrub" ];
      }
      {
        name = "LLM Requests";
        datasource = promDs;
        enable = true;
        iconColor = "purple";
        target = {
          datasource = promDs;
          expr = "llamacpp:requests_processing > 0";
          instant = false;
          range = true;
          refId = "A";
        };
        titleFormat = "LLM inference";
      }
    ];
    panels = [
      # -- Row 1: UPS --
      {
        id = 1;
        type = "timeseries";
        title = "UPS Power Draw";
        gridPos = {
          h = 8;
          w = 8;
          x = 0;
          y = 0;
        };
        datasource = promDs;
        # NOTE(review): the flat "+ 4.5" below (and in the 24h panel) looks
        # like a fixed wattage offset added on top of the UPS reading —
        # confirm what load it accounts for.
        targets = [
          {
            datasource = promDs;
            expr = "apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts";
            legendFormat = "Power (W)";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "avg_over_time((apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts + 4.5)[5m:])";
            legendFormat = "5m average (W)";
            refId = "B";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "watt";
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 20;
              spanNulls = true;
            };
          };
          # Raw series drawn faint/dotted; the 5m average drawn bold.
          overrides = [
            {
              matcher = {
                id = "byFrameRefID";
                options = "A";
              };
              properties = [
                {
                  id = "custom.lineStyle";
                  value = {
                    fill = "dot";
                  };
                }
                {
                  id = "custom.fillOpacity";
                  value = 10;
                }
                {
                  id = "custom.lineWidth";
                  value = 1;
                }
                {
                  id = "custom.pointSize";
                  value = 1;
                }
              ];
            }
            {
              matcher = {
                id = "byFrameRefID";
                options = "B";
              };
              properties = [
                {
                  id = "custom.lineWidth";
                  value = 4;
                }
                {
                  id = "custom.fillOpacity";
                  value = 0;
                }
              ];
            }
          ];
        };
      }
      {
        id = 7;
        type = "stat";
        title = "Energy Usage (24h)";
        gridPos = {
          h = 8;
          w = 4;
          x = 8;
          y = 0;
        };
        datasource = promDs;
        targets = [
          {
            # Mean watts over 24h * 24h / 1000 = kWh for the day.
            datasource = promDs;
            expr = "avg_over_time((apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts + 4.5)[24h:]) * 24 / 1000";
            legendFormat = "";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "kwatth";
            decimals = 2;
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "green";
                  value = null;
                }
                {
                  color = "yellow";
                  value = 5;
                }
                {
                  color = "red";
                  value = 10;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options = {
          reduceOptions = {
            calcs = [ "lastNotNull" ];
            fields = "";
            values = false;
          };
          colorMode = "value";
          graphMode = "none";
        };
      }
      {
        id = 2;
        type = "gauge";
        title = "UPS Load";
        gridPos = {
          h = 8;
          w = 6;
          x = 12;
          y = 0;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "apcupsd_ups_load_percent";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "green";
                  value = null;
                }
                {
                  color = "yellow";
                  value = 70;
                }
                {
                  color = "red";
                  value = 90;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options.reduceOptions = {
          calcs = [ "lastNotNull" ];
          fields = "";
          values = false;
        };
      }
      {
        id = 3;
        type = "gauge";
        title = "UPS Battery";
        gridPos = {
          h = 8;
          w = 6;
          x = 18;
          y = 0;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "apcupsd_battery_charge_percent";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            # Inverted thresholds: low battery charge is the red condition.
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "red";
                  value = null;
                }
                {
                  color = "yellow";
                  value = 20;
                }
                {
                  color = "green";
                  value = 50;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options.reduceOptions = {
          calcs = [ "lastNotNull" ];
          fields = "";
          values = false;
        };
      }
      # -- Row 2: System --
      {
        id = 4;
        type = "timeseries";
        title = "CPU Temperature";
        gridPos = {
          h = 8;
          w = 12;
          x = 0;
          y = 8;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = ''node_hwmon_temp_celsius{chip=~"pci.*"}'';
            legendFormat = "CPU {{sensor}}";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "celsius";
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 10;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
      {
        id = 5;
        type = "stat";
        title = "System Uptime";
        gridPos = {
          h = 8;
          w = 6;
          x = 12;
          y = 8;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "time() - node_boot_time_seconds";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "s";
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "green";
                  value = null;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options = {
          reduceOptions = {
            calcs = [ "lastNotNull" ];
            fields = "";
            values = false;
          };
          colorMode = "value";
          graphMode = "none";
        };
      }
      {
        id = 6;
        type = "stat";
        title = "Jellyfin Active Streams";
        gridPos = {
          h = 8;
          w = 6;
          x = 18;
          y = 8;
        };
        datasource = promDs;
        targets = [
          {
            # "or vector(0)" keeps the stat at 0 when no sessions exist.
            datasource = promDs;
            expr = "count(jellyfin_now_playing_state) or vector(0)";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "green";
                  value = null;
                }
                {
                  color = "yellow";
                  value = 3;
                }
                {
                  color = "red";
                  value = 6;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options = {
          reduceOptions = {
            calcs = [ "lastNotNull" ];
            fields = "";
            values = false;
          };
          colorMode = "value";
          graphMode = "area";
        };
      }
      # -- Row 3: qBittorrent --
      {
        id = 11;
        type = "timeseries";
        title = "qBittorrent Speed";
        gridPos = {
          h = 8;
          w = 24;
          x = 0;
          y = 16;
        };
        datasource = promDs;
        # Raw down/up plus 10-minute smoothed averages of each.
        targets = [
          {
            datasource = promDs;
            expr = "sum(qbit_dlspeed) or vector(0)";
            legendFormat = "Download";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "sum(qbit_upspeed) or vector(0)";
            legendFormat = "Upload";
            refId = "B";
          }
          {
            datasource = promDs;
            expr = "avg_over_time((sum(qbit_dlspeed) or vector(0))[10m:])";
            legendFormat = "Download (10m avg)";
            refId = "C";
          }
          {
            datasource = promDs;
            expr = "avg_over_time((sum(qbit_upspeed) or vector(0))[10m:])";
            legendFormat = "Upload (10m avg)";
            refId = "D";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "binBps";
            min = 0;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 1;
              fillOpacity = 10;
              spanNulls = true;
            };
          };
          # Raw series thin/filled; averages bold/unfilled; green = download,
          # blue = upload.
          overrides = [
            {
              matcher = {
                id = "byFrameRefID";
                options = "A";
              };
              properties = [
                {
                  id = "color";
                  value = {
                    fixedColor = "green";
                    mode = "fixed";
                  };
                }
                {
                  id = "custom.fillOpacity";
                  value = 5;
                }
              ];
            }
            {
              matcher = {
                id = "byFrameRefID";
                options = "B";
              };
              properties = [
                {
                  id = "color";
                  value = {
                    fixedColor = "blue";
                    mode = "fixed";
                  };
                }
                {
                  id = "custom.fillOpacity";
                  value = 5;
                }
              ];
            }
            {
              matcher = {
                id = "byFrameRefID";
                options = "C";
              };
              properties = [
                {
                  id = "color";
                  value = {
                    fixedColor = "green";
                    mode = "fixed";
                  };
                }
                {
                  id = "custom.lineWidth";
                  value = 3;
                }
                {
                  id = "custom.fillOpacity";
                  value = 0;
                }
              ];
            }
            {
              matcher = {
                id = "byFrameRefID";
                options = "D";
              };
              properties = [
                {
                  id = "color";
                  value = {
                    fixedColor = "blue";
                    mode = "fixed";
                  };
                }
                {
                  id = "custom.lineWidth";
                  value = 3;
                }
                {
                  id = "custom.fillOpacity";
                  value = 0;
                }
              ];
            }
          ];
        };
      }
      # -- Row 4: Intel GPU --
      {
        id = 8;
        type = "timeseries";
        title = "Intel GPU Utilization";
        gridPos = {
          h = 8;
          w = 24;
          x = 0;
          y = 24;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "igpu_engines_busy_percent";
            legendFormat = "{{engine}}";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 10;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
      # -- Row 5: Storage --
      {
        id = 12;
        type = "timeseries";
        title = "ZFS Pool Utilization";
        gridPos = {
          h = 8;
          w = 12;
          x = 0;
          y = 32;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "zfs_pool_allocated_bytes{pool=\"tank\"} / zfs_pool_size_bytes{pool=\"tank\"} * 100";
            legendFormat = "tank";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "zfs_pool_allocated_bytes{pool=\"hdds\"} / zfs_pool_size_bytes{pool=\"hdds\"} * 100";
            legendFormat = "hdds";
            refId = "B";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 20;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
      {
        id = 13;
        type = "timeseries";
        title = "Boot Drive Partitions";
        gridPos = {
          h = 8;
          w = 12;
          x = 12;
          y = 32;
        };
        datasource = promDs;
        # used% = (size - avail) / size per mountpoint.
        targets = [
          {
            datasource = promDs;
            expr = "(node_filesystem_size_bytes{mountpoint=\"/boot\"} - node_filesystem_avail_bytes{mountpoint=\"/boot\"}) / node_filesystem_size_bytes{mountpoint=\"/boot\"} * 100";
            legendFormat = "/boot";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "(node_filesystem_size_bytes{mountpoint=\"/persistent\"} - node_filesystem_avail_bytes{mountpoint=\"/persistent\"}) / node_filesystem_size_bytes{mountpoint=\"/persistent\"} * 100";
            legendFormat = "/persistent";
            refId = "B";
          }
          {
            datasource = promDs;
            expr = "(node_filesystem_size_bytes{mountpoint=\"/nix\"} - node_filesystem_avail_bytes{mountpoint=\"/nix\"}) / node_filesystem_size_bytes{mountpoint=\"/nix\"} * 100";
            legendFormat = "/nix";
            refId = "C";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 20;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
    ];
  };
in
{
  # World-readable JSON for the Grafana dashboard file provider.
  environment.etc."grafana-dashboards/system-overview.json" = {
    text = builtins.toJSON dashboard;
    mode = "0444";
  };
}

View File

@@ -0,0 +1,10 @@
# Monitoring stack entry point: Grafana + Prometheus, the provisioned
# dashboard, exporters, and the Grafana annotation services.
{
  imports = [
    ./grafana.nix
    ./prometheus.nix
    ./dashboard.nix
    ./exporters.nix
    ./jellyfin-annotations.nix
    ./zfs-scrub-annotations.nix
  ];
}

View File

@@ -0,0 +1,112 @@
# Prometheus exporters for Jellyfin, qBittorrent, and the Intel iGPU.
# Each replaces an older node-exporter textfile collector.
{
  config,
  pkgs,
  inputs,
  service_configs,
  lib,
  ...
}:
let
  jellyfinExporterPort = service_configs.ports.private.jellyfin_exporter.port;
  qbitExporterPort = service_configs.ports.private.qbittorrent_exporter.port;
  igpuExporterPort = service_configs.ports.private.igpu_exporter.port;
in
{
  # -- Jellyfin Prometheus Exporter --
  # Replaces custom jellyfin-collector.nix textfile timer.
  # Exposes per-session metrics (jellyfin_now_playing_state) and library stats.
  systemd.services.jellyfin-exporter =
    lib.mkIf (config.services.grafana.enable && config.services.jellyfin.enable)
      {
        description = "Prometheus exporter for Jellyfin";
        after = [
          "network.target"
          "jellyfin.service"
        ];
        wantedBy = [ "multi-user.target" ];
        serviceConfig = {
          # Wrapper reads the API key from the systemd credentials dir so the
          # secret never appears in the unit file or process environment.
          ExecStart = lib.getExe (
            pkgs.writeShellApplication {
              name = "jellyfin-exporter-wrapper";
              runtimeInputs = [ pkgs.jellyfin-exporter ];
              text = ''
                exec jellyfin_exporter \
                  --jellyfin.address=http://127.0.0.1:${toString service_configs.ports.private.jellyfin.port} \
                  --jellyfin.token="$(cat "$CREDENTIALS_DIRECTORY/jellyfin-api-key")" \
                  --web.listen-address=127.0.0.1:${toString jellyfinExporterPort}
              '';
            }
          );
          Restart = "on-failure";
          RestartSec = "10s";
          # Sandboxing: throwaway user, read-only system, no home access.
          DynamicUser = true;
          NoNewPrivileges = true;
          ProtectSystem = "strict";
          ProtectHome = true;
          PrivateTmp = true;
          MemoryDenyWriteExecute = true;
          LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
        };
      };

  # -- qBittorrent Prometheus Exporter --
  # Replaces custom qbittorrent-collector.nix textfile timer.
  # Exposes per-torrent metrics (qbit_dlspeed, qbit_upspeed) and aggregate stats.
  # qBittorrent runs in a VPN namespace; the exporter reaches it via namespace address.
  systemd.services.qbittorrent-exporter =
    lib.mkIf (config.services.grafana.enable && config.services.qbittorrent.enable)
      {
        description = "Prometheus exporter for qBittorrent";
        after = [
          "network.target"
          "qbittorrent.service"
        ];
        wantedBy = [ "multi-user.target" ];
        serviceConfig = {
          ExecStart =
            lib.getExe' inputs.qbittorrent-metrics-exporter.packages.${pkgs.system}.default
              "qbittorrent-metrics-exporter";
          Restart = "on-failure";
          RestartSec = "10s";
          DynamicUser = true;
          NoNewPrivileges = true;
          ProtectSystem = "strict";
          ProtectHome = true;
          PrivateTmp = true;
        };
        environment = {
          HOST = "127.0.0.1";
          PORT = toString qbitExporterPort;
          SCRAPE_INTERVAL = "15";
          BACKEND = "in-memory";
          # qBittorrent has AuthSubnetWhitelist=0.0.0.0/0, so no real password needed.
          # The exporter still expects the env var to be set.
          QBITTORRENT_PASSWORD = "unused";
          QBITTORRENT_USERNAME = "admin";
          # NOTE(review): the URL appears twice separated by "|" — presumably
          # the exporter's "name=url|url" host format; confirm against its docs.
          TORRENT_HOSTS = "qbit:main=http://${config.vpnNamespaces.wg.namespaceAddress}:${toString config.services.qbittorrent.webuiPort}|http://${config.vpnNamespaces.wg.namespaceAddress}:${toString config.services.qbittorrent.webuiPort}";
          RUST_LOG = "warn";
        };
      };

  # -- Intel GPU Prometheus Exporter --
  # Replaces custom intel-gpu-collector.nix + intel-gpu-collector.py textfile timer.
  # Exposes engine busy%, frequency, and RC6 metrics via /metrics.
  # Requires privileged access to GPU debug interfaces (intel_gpu_top).
  systemd.services.igpu-exporter = lib.mkIf config.services.grafana.enable {
    description = "Prometheus exporter for Intel integrated GPU";
    wantedBy = [ "multi-user.target" ];
    path = [ pkgs.intel-gpu-tools ];
    serviceConfig = {
      ExecStart = lib.getExe pkgs.igpu-exporter;
      Restart = "on-failure";
      RestartSec = "10s";
      # intel_gpu_top requires root-level access to GPU debug interfaces
      ProtectHome = true;
      PrivateTmp = true;
    };
    environment = {
      PORT = toString igpuExporterPort;
      REFRESH_PERIOD_MS = "30000";
    };
  };
}

View File

@@ -0,0 +1,103 @@
# Grafana behind Caddy (auth handled upstream, anonymous admin inside),
# PostgreSQL-backed, with datasource and dashboard provisioning.
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Helpers from lib: data dir on the SSD zpool with 0700 ownership.
    (lib.serviceMountWithZpool "grafana" service_configs.zpool_ssds [
      service_configs.grafana.dir
    ])
    (lib.serviceFilePerms "grafana" [
      "Z ${service_configs.grafana.dir} 0700 grafana grafana"
    ])
    (lib.mkCaddyReverseProxy {
      domain = service_configs.grafana.domain;
      port = service_configs.ports.private.grafana.port;
      auth = true;
    })
  ];

  services.grafana = {
    enable = true;
    dataDir = service_configs.grafana.dir;
    settings = {
      server = {
        # Loopback only; Caddy is the public entry point.
        http_addr = "127.0.0.1";
        http_port = service_configs.ports.private.grafana.port;
        domain = service_configs.grafana.domain;
        root_url = "https://${service_configs.grafana.domain}";
      };
      database = {
        type = "postgres";
        # Local UNIX socket connection (peer auth).
        host = service_configs.postgres.socket;
        user = "grafana";
      };
      # No Grafana login: Caddy's basic auth gates access, and anyone who
      # passes it gets the Admin role anonymously.
      "auth.anonymous" = {
        enabled = true;
        org_role = "Admin";
      };
      "auth.basic".enabled = false;
      "auth".disable_login_form = true;
      analytics.reporting_enabled = false;
      feature_toggles.enable = "dataConnectionsConsole=false";
      users.default_theme = "dark";
      # Disable unused built-in integrations
      alerting.enabled = false;
      "unified_alerting".enabled = false;
      explore.enabled = false;
      news.news_feed_enabled = false;
      plugins = {
        enable_alpha = false;
        plugin_admin_enabled = false;
      };
    };
    provision = {
      # Single Prometheus datasource; uid "prometheus" is referenced by the
      # provisioned dashboard (dashboard.nix).
      datasources.settings = {
        apiVersion = 1;
        datasources = [
          {
            name = "Prometheus";
            type = "prometheus";
            url = "http://127.0.0.1:${toString service_configs.ports.private.prometheus.port}";
            access = "proxy";
            isDefault = true;
            editable = false;
            uid = "prometheus";
          }
        ];
      };
      # Load dashboard JSON files from /etc (written by dashboard.nix).
      dashboards.settings.providers = [
        {
          name = "system";
          type = "file";
          options.path = "/etc/grafana-dashboards";
          disableDeletion = true;
          updateIntervalSeconds = 60;
        }
      ];
    };
  };

  services.postgresql = {
    ensureDatabases = [ "grafana" ];
    ensureUsers = [
      {
        name = "grafana";
        ensureDBOwnership = true;
        ensureClauses.login = true;
      }
    ];
  };
}

View File

@@ -0,0 +1,18 @@
# Wires the jellyfin-annotations.py poller into a systemd service via the
# shared lib.mkGrafanaAnnotationService helper.
{
  config,
  service_configs,
  lib,
  ...
}:
# Only meaningful when both Grafana and Jellyfin run on this host.
lib.mkIf (config.services.grafana.enable && config.services.jellyfin.enable) (
  lib.mkGrafanaAnnotationService {
    name = "jellyfin";
    description = "Jellyfin stream annotation service for Grafana";
    script = ./jellyfin-annotations.py;
    environment = {
      JELLYFIN_URL = "http://127.0.0.1:${toString service_configs.ports.private.jellyfin.port}";
      POLL_INTERVAL = "30";
    };
    # Exposed to the script through the systemd credentials directory.
    loadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
  }
)

View File

@@ -0,0 +1,233 @@
#!/usr/bin/env python3
# Long-running poller that mirrors Jellyfin playback sessions into Grafana
# annotations: posts a tagged annotation when a stream starts and patches its
# timeEnd when the stream stops. State survives restarts via STATE_FILE.
import json
import os
import sys
import time
import urllib.request
from pathlib import Path

# Configuration comes from the environment (set by the NixOS module);
# defaults suit a local single-host deployment.
JELLYFIN_URL = os.environ.get("JELLYFIN_URL", "http://127.0.0.1:8096")
GRAFANA_URL = os.environ.get("GRAFANA_URL", "http://127.0.0.1:3000")
STATE_FILE = os.environ.get("STATE_FILE", "/var/lib/jellyfin-annotations/state.json")
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "30"))
def get_api_key():
    """Return the Jellyfin API key as a stripped string.

    Prefers the systemd credentials directory (LoadCredential); otherwise
    falls back to the agenix secret path. Exits the process when no key
    source is available.
    """
    cred_dir = os.environ.get("CREDENTIALS_DIRECTORY")
    if cred_dir:
        # When the env var is set we read unconditionally (LoadCredential
        # guarantees the file exists in normal operation).
        return (Path(cred_dir) / "jellyfin-api-key").read_text().strip()
    fallback_paths = ["/run/agenix/jellyfin-api-key"]
    for candidate in map(Path, fallback_paths):
        if candidate.exists():
            return candidate.read_text().strip()
    sys.exit("ERROR: Cannot find jellyfin-api-key")
def http_json(method, url, body=None):
    """Issue an HTTP request with an optional JSON body; decode the JSON response.

    Errors (network, HTTP status, bad JSON) propagate to the caller.
    """
    payload = None if body is None else json.dumps(body).encode()
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    request = urllib.request.Request(url, data=payload, headers=headers, method=method)
    # 5s timeout: everything here talks to localhost services.
    with urllib.request.urlopen(request, timeout=5) as response:
        return json.loads(response.read())
def get_active_sessions(api_key):
    """Return the Jellyfin sessions that are currently playing something.

    Returns None (and logs to stderr) on any fetch/parse error so the caller
    can skip this poll cycle rather than crash.
    """
    url = f"{JELLYFIN_URL}/Sessions?api_key={api_key}"
    try:
        request = urllib.request.Request(url, headers={"Accept": "application/json"})
        with urllib.request.urlopen(request, timeout=5) as response:
            payload = json.loads(response.read())
        return [session for session in payload if session.get("NowPlayingItem")]
    except Exception as e:
        print(f"Error fetching sessions: {e}", file=sys.stderr)
        return None
def _codec(name):
if not name:
return ""
aliases = {"h264": "H.264", "h265": "H.265", "hevc": "H.265", "av1": "AV1",
"vp9": "VP9", "vp8": "VP8", "mpeg4": "MPEG-4", "mpeg2video": "MPEG-2",
"aac": "AAC", "ac3": "AC3", "eac3": "EAC3", "dts": "DTS",
"truehd": "TrueHD", "mp3": "MP3", "opus": "Opus", "flac": "FLAC",
"vorbis": "Vorbis"}
return aliases.get(name.lower(), name.upper())
def _res(width, height):
if not height:
return ""
common = {2160: "4K", 1440: "1440p", 1080: "1080p", 720: "720p",
480: "480p", 360: "360p"}
return common.get(height, f"{height}p")
def _channels(n):
labels = {1: "Mono", 2: "Stereo", 6: "5.1", 7: "6.1", 8: "7.1"}
return labels.get(n, f"{n}ch") if n else ""
def format_label(session):
    """Build a multi-line annotation label for one Jellyfin session.

    Line 1: "user: title", line 2: play method + stream details, line 3
    (optional): client/device. `session` is one element of the Jellyfin
    /Sessions response.
    """
    user = session.get("UserName", "Unknown")
    # `or {}` guards against keys that are present but null in the JSON.
    item = session.get("NowPlayingItem", {}) or {}
    transcode = session.get("TranscodingInfo") or {}
    play_state = session.get("PlayState") or {}
    client = session.get("Client", "")
    device = session.get("DeviceName", "")
    name = item.get("Name", "Unknown")
    series = item.get("SeriesName", "")
    season = item.get("ParentIndexNumber")
    episode = item.get("IndexNumber")
    media_type = item.get("Type", "")
    # Title: "Series S01E02 – Episode", "Series – Episode", "Name (movie)", or bare name.
    if series and season and episode:
        title = f"{series} S{season:02d}E{episode:02d} \u2013 {name}"
    elif series:
        title = f"{series} \u2013 {name}"
    elif media_type == "Movie":
        title = f"{name} (movie)"
    else:
        title = name
    # Play method; presence of TranscodingInfo also implies a transcode.
    play_method = play_state.get("PlayMethod", "")
    if play_method == "DirectPlay":
        method = "Direct Play"
    elif play_method == "DirectStream":
        method = "Direct Stream"
    elif play_method == "Transcode" or transcode:
        method = "Transcode"
    else:
        method = "Direct Play"
    # Pick the source video stream and the default (or first) audio stream.
    media_streams = item.get("MediaStreams") or []
    video_streams = [s for s in media_streams if s.get("Type") == "Video"]
    audio_streams = [s for s in media_streams if s.get("Type") == "Audio"]
    default_audio = next((s for s in audio_streams if s.get("IsDefault")), None)
    audio_stream = default_audio or (audio_streams[0] if audio_streams else {})
    video_stream = video_streams[0] if video_streams else {}
    src_vcodec = _codec(video_stream.get("Codec", ""))
    src_res = _res(video_stream.get("Width") or item.get("Width"),
                   video_stream.get("Height") or item.get("Height"))
    src_acodec = _codec(audio_stream.get("Codec", ""))
    src_channels = _channels(audio_stream.get("Channels"))
    is_video_direct = transcode.get("IsVideoDirect", True)
    is_audio_direct = transcode.get("IsAudioDirect", True)
    # Video description: "SRC→DST res" when transcoding changes the codec,
    # otherwise just the effective codec and resolution.
    if transcode and not is_video_direct:
        dst_vcodec = _codec(transcode.get("VideoCodec", ""))
        dst_res = _res(transcode.get("Width"), transcode.get("Height")) or src_res
        if src_vcodec and dst_vcodec and src_vcodec != dst_vcodec:
            video_part = f"{src_vcodec}\u2192{dst_vcodec} {dst_res}".strip()
        else:
            video_part = f"{dst_vcodec or src_vcodec} {dst_res}".strip()
    else:
        video_part = f"{src_vcodec} {src_res}".strip()
    # Audio description, same shape as the video one.
    if transcode and not is_audio_direct:
        dst_acodec = _codec(transcode.get("AudioCodec", ""))
        dst_channels = _channels(transcode.get("AudioChannels")) or src_channels
        if src_acodec and dst_acodec and src_acodec != dst_acodec:
            audio_part = f"{src_acodec}\u2192{dst_acodec} {dst_channels}".strip()
        else:
            audio_part = f"{dst_acodec or src_acodec} {dst_channels}".strip()
    else:
        audio_part = f"{src_acodec} {src_channels}".strip()
    # Prefer the transcode bitrate (actual output) over the item's bitrate.
    bitrate = transcode.get("Bitrate") or item.get("Bitrate")
    bitrate_part = f"{bitrate / 1_000_000:.1f} Mbps" if bitrate else ""
    reasons = transcode.get("TranscodeReasons") or []
    reason_part = f"[{', '.join(reasons)}]" if reasons else ""
    stream_parts = [p for p in [method, video_part, audio_part, bitrate_part, reason_part] if p]
    client_str = " \u00b7 ".join(filter(None, [client, device]))
    lines = [f"{user}: {title}", " | ".join(stream_parts)]
    if client_str:
        lines.append(client_str)
    return "\n".join(lines)
def load_state():
    """Load the persisted session→annotation map; missing or corrupt files yield {}."""
    try:
        with open(STATE_FILE) as handle:
            raw = handle.read()
    except FileNotFoundError:
        return {}
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return {}
def save_state(state):
    """Persist `state` as JSON atomically: write a temp file, then rename over it."""
    os.makedirs(os.path.dirname(STATE_FILE), exist_ok=True)
    tmp_path = STATE_FILE + ".tmp"
    with open(tmp_path, "w") as handle:
        handle.write(json.dumps(state))
    os.replace(tmp_path, STATE_FILE)
def grafana_post(label, start_ms):
    """Create a Grafana annotation for a session; return its id, or None on failure."""
    payload = {"time": start_ms, "text": label, "tags": ["jellyfin"]}
    try:
        created = http_json("POST", f"{GRAFANA_URL}/api/annotations", payload)
        return created.get("id")
    except Exception as exc:
        print(f"Error posting annotation: {exc}", file=sys.stderr)
        return None
def grafana_close(grafana_id, end_ms):
    """Set the end time on an existing Grafana annotation (best effort)."""
    payload = {"timeEnd": end_ms}
    try:
        http_json("PATCH", f"{GRAFANA_URL}/api/annotations/{grafana_id}", payload)
    except Exception as exc:
        print(f"Error closing annotation {grafana_id}: {exc}", file=sys.stderr)
def main():
    # Poll Jellyfin for active sessions and mirror each one as a Grafana
    # annotation: open on first sight, close (set timeEnd) once it disappears.
    api_key = get_api_key()
    state = load_state()
    while True:
        now_ms = int(time.time() * 1000)
        sessions = get_active_sessions(api_key)
        # None means Jellyfin was unreachable — keep existing annotations open
        # rather than closing them on a transient outage.
        if sessions is not None:
            current_ids = {s["Id"] for s in sessions}
            # Open an annotation for every newly-seen session.
            for s in sessions:
                sid = s["Id"]
                if sid not in state:
                    label = format_label(s)
                    grafana_id = grafana_post(label, now_ms)
                    if grafana_id is not None:
                        state[sid] = {
                            "grafana_id": grafana_id,
                            "label": label,
                            "start_ms": now_ms,
                        }
                        save_state(state)
            # Close annotations for sessions that have ended.
            # NOTE(review): the entry is popped before grafana_close runs, so a
            # failed PATCH leaves the annotation open forever — confirm this
            # best-effort behavior is intended.
            for sid in [k for k in state if k not in current_ids]:
                info = state.pop(sid)
                grafana_close(info["grafana_id"], now_ms)
                save_state(state)
        time.sleep(POLL_INTERVAL)
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,110 @@
{
  service_configs,
  lib,
  ...
}:
let
  ports = service_configs.ports.private;
  # Directory scanned by node_exporter's textfile collector for ad-hoc metrics.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";
  # Every scrape target lives on loopback; one job per (name, port) pair.
  localJob = name: port: {
    job_name = name;
    static_configs = [
      { targets = [ "127.0.0.1:${toString port}" ]; }
    ];
  };
in
{
  imports = [
    (lib.serviceMountWithZpool "prometheus" service_configs.zpool_ssds [
      "/var/lib/prometheus"
    ])
    (lib.serviceFilePerms "prometheus" [
      "Z /var/lib/prometheus 0700 prometheus prometheus"
    ])
  ];
  services.prometheus = {
    enable = true;
    port = ports.prometheus.port;
    listenAddress = "127.0.0.1";
    stateDir = "prometheus";
    retentionTime = "0d"; # 0 disables time-based retention (keep forever)
    exporters = {
      node = {
        enable = true;
        port = ports.prometheus_node.port;
        listenAddress = "127.0.0.1";
        enabledCollectors = [
          "hwmon"
          "systemd"
          "textfile"
        ];
        extraFlags = [
          "--collector.textfile.directory=${textfileDir}"
        ];
      };
      apcupsd = {
        enable = true;
        port = ports.prometheus_apcupsd.port;
        listenAddress = "127.0.0.1";
        apcupsdAddress = "127.0.0.1:3551";
      };
      zfs = {
        enable = true;
        port = ports.prometheus_zfs.port;
        listenAddress = "127.0.0.1";
      };
    };
    scrapeConfigs = [
      (localJob "prometheus" ports.prometheus.port)
      (localJob "node" ports.prometheus_node.port)
      (localJob "apcupsd" ports.prometheus_apcupsd.port)
      (localJob "llama-cpp" ports.llama_cpp.port)
      (localJob "jellyfin" ports.jellyfin_exporter.port)
      (localJob "qbittorrent" ports.qbittorrent_exporter.port)
      (localJob "igpu" ports.igpu_exporter.port)
      (localJob "zfs" ports.prometheus_zfs.port)
    ];
  };
  systemd.tmpfiles.rules = [
    "d ${textfileDir} 0755 root root -"
  ];
}

View File

@@ -0,0 +1,36 @@
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
let
  # Grafana listens on loopback only; annotations are posted locally.
  grafanaUrl = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
  script = pkgs.writeShellApplication {
    name = "zfs-scrub-annotations";
    runtimeInputs = with pkgs; [
      curl
      jq
      coreutils
      gnugrep
      gnused
      # Matches the running kernel's zfs userland so `zpool` output parses.
      config.boot.zfs.package
    ];
    text = builtins.readFile ./zfs-scrub-annotations.sh;
  };
in
# Hooks Grafana annotations onto the autoScrub-managed zfs-scrub unit; only
# meaningful when both Grafana and autoScrub are enabled on this host.
lib.mkIf (config.services.grafana.enable && config.services.zfs.autoScrub.enable) {
  systemd.services.zfs-scrub = {
    environment = {
      GRAFANA_URL = grafanaUrl;
      STATE_DIR = "/run/zfs-scrub-annotations";
    };
    serviceConfig = {
      # Runtime dir carries the annotation id between start and stop hooks.
      RuntimeDirectory = "zfs-scrub-annotations";
      # Leading "-" makes the start hook best-effort: a Grafana outage must
      # not block the scrub itself.
      ExecStartPre = [ "-${lib.getExe script} start" ];
      ExecStopPost = [ "${lib.getExe script} stop" ];
    };
  };
}

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# ZFS scrub annotation script for Grafana
# Usage: zfs-scrub-annotations.sh {start|stop}
# Required env: GRAFANA_URL, STATE_DIR
# Required on PATH: zpool, curl, jq, paste, date, grep, sed
#
# start: open a region annotation tagged "zfs-scrub" and remember its id.
# stop:  close the region with per-pool scan results, then drop the id file.
# All Grafana calls are best-effort — a monitoring outage never fails the scrub.
set -euo pipefail
ACTION="${1:-}"
GRAFANA_URL="${GRAFANA_URL:?GRAFANA_URL required}"
STATE_DIR="${STATE_DIR:?STATE_DIR required}"
case "$ACTION" in
  start)
    POOLS=$(zpool list -H -o name | paste -sd ', ')
    NOW_MS=$(date +%s%3N) # epoch milliseconds (GNU date)
    RESPONSE=$(curl -sf --max-time 5 \
      -X POST "$GRAFANA_URL/api/annotations" \
      -H "Content-Type: application/json" \
      -d "$(jq -n --arg text "ZFS scrub: $POOLS" --argjson time "$NOW_MS" \
        '{time: $time, text: $text, tags: ["zfs-scrub"]}')" \
    ) || exit 0
    # `.id // empty` instead of `.id`: a response without an id would otherwise
    # persist the literal string "null", and stop would PATCH /api/annotations/null.
    ANN_ID=$(printf '%s' "$RESPONSE" | jq -r '.id // empty')
    [ -n "$ANN_ID" ] || exit 0
    printf '%s\n' "$ANN_ID" > "$STATE_DIR/annotation-id"
    ;;
  stop)
    ANN_ID=$(cat "$STATE_DIR/annotation-id" 2>/dev/null) || exit 0
    # Tolerate stale/bad id files from older runs.
    if [ -z "$ANN_ID" ] || [ "$ANN_ID" = "null" ]; then
      rm -f "$STATE_DIR/annotation-id"
      exit 0
    fi
    NOW_MS=$(date +%s%3N)
    # One "pool: scan: ..." line per pool, taken from `zpool status`.
    RESULTS=""
    while IFS= read -r pool; do
      scan_line=$(zpool status "$pool" | grep "scan:" | sed 's/^[[:space:]]*//')
      RESULTS="${RESULTS}${pool}: ${scan_line}"$'\n'
    done < <(zpool list -H -o name)
    TEXT=$(printf "ZFS scrub completed\n%s" "$RESULTS")
    curl -sf --max-time 5 \
      -X PATCH "$GRAFANA_URL/api/annotations/$ANN_ID" \
      -H "Content-Type: application/json" \
      -d "$(jq -n --arg text "$TEXT" --argjson timeEnd "$NOW_MS" \
        '{timeEnd: $timeEnd, text: $text}')" || true
    rm -f "$STATE_DIR/annotation-id"
    ;;
  *)
    echo "Usage: $0 {start|stop}" >&2
    exit 1
    ;;
esac

View File

@@ -0,0 +1,16 @@
{
  service_configs,
  inputs,
  pkgs,
  ...
}:
{
  # Serve the prebuilt static web bundle of the graphing calculator via Caddy.
  services.caddy.virtualHosts."graphing.${service_configs.https.domain}".extraConfig =
    let
      site = inputs.ytbn-graphing-software.packages.${pkgs.stdenv.targetPlatform.system}.web;
    in
    ''
      root * ${site}
      file_server browse
    '';
}

38
services/harmonia.nix Normal file
View File

@@ -0,0 +1,38 @@
{
  config,
  lib,
  service_configs,
  ...
}:
# Harmonia Nix binary cache, fronted by Caddy with basic auth, plus an
# unauthenticated path listing of the latest CI deploy store paths.
{
  imports = [
    (lib.serviceFilePerms "harmonia" [
      # Signing key must be readable only by the harmonia user.
      "Z /run/agenix/harmonia-sign-key 0400 harmonia harmonia"
    ])
  ];
  services.harmonia = {
    enable = true;
    signKeyPaths = [ config.age.secrets.harmonia-sign-key.path ];
    # Loopback only; Caddy terminates TLS and handles auth.
    settings.bind = "127.0.0.1:${toString service_configs.ports.private.harmonia.port}";
  };
  # serve latest deploy store paths (unauthenticated — just a path string)
  # CI writes to /var/lib/dotfiles-deploy/<hostname> after building
  services.caddy.virtualHosts."nix-cache.${service_configs.https.domain}".extraConfig = ''
    handle_path /deploy/* {
      root * /var/lib/dotfiles-deploy
      file_server
    }
    handle {
      import ${config.age.secrets.nix-cache-auth.path}
      reverse_proxy :${toString service_configs.ports.private.harmonia.port}
    }
  '';
  # directory for CI to record latest deploy store paths
  systemd.tmpfiles.rules = [
    "d /var/lib/dotfiles-deploy 0755 gitea-runner gitea-runner"
  ];
}

50
services/immich.nix Normal file
View File

@@ -0,0 +1,50 @@
{
  service_configs,
  pkgs,
  config,
  lib,
  ...
}:
# Immich photo server: media on the SSD zpool, Caddy reverse proxy, and a
# fail2ban jail on failed logins.
{
  imports = [
    # Both the server and the ML worker need the media location mounted.
    (lib.serviceMountWithZpool "immich-server" service_configs.zpool_ssds [
      config.services.immich.mediaLocation
    ])
    (lib.serviceMountWithZpool "immich-machine-learning" service_configs.zpool_ssds [
      config.services.immich.mediaLocation
    ])
    (lib.serviceFilePerms "immich-server" [
      "Z ${config.services.immich.mediaLocation} 0770 ${config.services.immich.user} ${config.services.immich.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "immich";
      port = service_configs.ports.private.immich.port;
    })
    (lib.mkFail2banJail {
      name = "immich";
      unitName = "immich-server.service";
      failregex = "^.*Failed login attempt for user .* from ip address <HOST>.*$";
    })
  ];
  services.immich = {
    enable = true;
    mediaLocation = service_configs.immich.dir;
    port = service_configs.ports.private.immich.port;
    # openFirewall = true;
    host = "0.0.0.0";
    database = {
      # NOTE(review): createDB = false implies the database is provisioned
      # elsewhere — confirm the external postgres setup covers it.
      createDB = false;
    };
  };
  environment.systemPackages = with pkgs; [
    immich-go
  ];
  # video/render group membership — presumably for GPU access (transcoding /
  # ML acceleration); confirm against the host's device permissions.
  users.users.${config.services.immich.user}.extraGroups = [
    "video"
    "render"
  ];
}

View File

@@ -0,0 +1,6 @@
# Aggregates the Jellyfin service and its qBittorrent bandwidth monitor.
{
  imports = [
    ./jellyfin.nix
    ./jellyfin-qbittorrent-monitor.nix
  ];
}

View File

@@ -0,0 +1,127 @@
{
  pkgs,
  service_configs,
  config,
  lib,
  ...
}:
let
  webhookPlugin = import ./jellyfin-webhook-plugin.nix { inherit pkgs lib; };
  jellyfinPort = service_configs.ports.private.jellyfin.port;
  webhookPort = service_configs.ports.private.jellyfin_qbittorrent_monitor_webhook.port;
in
lib.mkIf config.services.jellyfin.enable {
  # Materialise the Jellyfin Webhook plugin into Jellyfin's plugins dir before
  # Jellyfin starts. Jellyfin rewrites meta.json at runtime, so a read-only
  # nix-store symlink would EACCES -- we copy instead.
  #
  # `wantedBy = [ "jellyfin.service" ]` alone is insufficient on initial rollout:
  # if jellyfin is already running at activation time, systemd won't start the
  # oneshot until the next jellyfin restart. `restartTriggers` on jellyfin pinned
  # to the plugin package + install script forces that restart whenever either
  # changes, which invokes this unit via the `before`/`wantedBy` chain.
  systemd.services.jellyfin-webhook-install = {
    before = [ "jellyfin.service" ];
    wantedBy = [ "jellyfin.service" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      # Run as the jellyfin user so the copied plugin files are writable by it.
      User = config.services.jellyfin.user;
      Group = config.services.jellyfin.group;
      ExecStart = webhookPlugin.mkInstallScript {
        pluginsDir = "${config.services.jellyfin.dataDir}/plugins";
      };
    };
  };
  systemd.services.jellyfin.restartTriggers = [
    webhookPlugin.package
    (webhookPlugin.mkInstallScript {
      pluginsDir = "${config.services.jellyfin.dataDir}/plugins";
    })
  ];
  # After Jellyfin starts, POST the plugin configuration so the webhook
  # targets the monitor's receiver. Idempotent; runs on every boot.
  systemd.services.jellyfin-webhook-configure = {
    after = [ "jellyfin.service" ];
    wants = [ "jellyfin.service" ];
    before = [ "jellyfin-qbittorrent-monitor.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      DynamicUser = true;
      LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
      ExecStart = webhookPlugin.mkConfigureScript {
        jellyfinUrl = "http://127.0.0.1:${toString jellyfinPort}";
        webhooks = [
          {
            name = "qBittorrent Monitor";
            uri = "http://127.0.0.1:${toString webhookPort}/";
            # Playback events are what drive the monitor's throttle decisions.
            notificationTypes = [
              "PlaybackStart"
              "PlaybackProgress"
              "PlaybackStop"
            ];
          }
        ];
      };
    };
  };
  systemd.services."jellyfin-qbittorrent-monitor" = {
    description = "Monitor Jellyfin streaming and control qBittorrent rate limits";
    after = [
      "network.target"
      "jellyfin.service"
      "qbittorrent.service"
      "jellyfin-webhook-configure.service"
    ];
    wants = [ "jellyfin-webhook-configure.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "simple";
      ExecStart = pkgs.writeShellScript "jellyfin-monitor-start" ''
        export JELLYFIN_API_KEY=$(cat $CREDENTIALS_DIRECTORY/jellyfin-api-key)
        exec ${
          pkgs.python3.withPackages (ps: with ps; [ requests ])
        }/bin/python ${./jellyfin-qbittorrent-monitor.py}
      '';
      Restart = "always";
      RestartSec = "10s";
      # Security hardening
      DynamicUser = true;
      NoNewPrivileges = true;
      ProtectSystem = "strict";
      ProtectHome = true;
      ProtectKernelTunables = true;
      ProtectKernelModules = true;
      ProtectControlGroups = true;
      MemoryDenyWriteExecute = true;
      RestrictRealtime = true;
      RestrictSUIDSGID = true;
      RemoveIPC = true;
      # Load credentials from agenix secrets
      LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
    };
    environment = {
      JELLYFIN_URL = "http://localhost:${builtins.toString jellyfinPort}";
      # qBittorrent lives inside the wg VPN namespace; reach it by namespace address.
      QBITTORRENT_URL = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.torrent.port}";
      CHECK_INTERVAL = "30";
      # Bandwidth budget configuration
      TOTAL_BANDWIDTH_BUDGET = "30000000"; # 30 Mbps in bits per second
      SERVICE_BUFFER = "5000000"; # 5 Mbps reserved for other services (bps)
      DEFAULT_STREAM_BITRATE = "10000000"; # 10 Mbps fallback when bitrate unknown (bps)
      MIN_TORRENT_SPEED = "100"; # KB/s - below this, pause torrents instead
      STREAM_BITRATE_HEADROOM = "1.1"; # multiplier per stream for bitrate fluctuations
      # Webhook receiver: Jellyfin Webhook plugin POSTs events here to throttle immediately.
      WEBHOOK_BIND = "127.0.0.1";
      WEBHOOK_PORT = toString webhookPort;
    };
  };
}

View File

@@ -0,0 +1,504 @@
#!/usr/bin/env python3
import requests
import time
import logging
import sys
import signal
import json
import ipaddress
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
# Timestamped logs to stdout/stderr; captured by journald under systemd.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
class ServiceUnavailable(Exception):
    """Raised when a monitored service is temporarily unavailable."""
class JellyfinQBittorrentMonitor:
    """Watches Jellyfin playback sessions and adjusts qBittorrent limits.

    State machine over three modes:
      "unlimited" — no remote streams: alt speed limits off, torrents running.
      "throttled" — remote streams with bandwidth to spare: alt limits set to
                    the leftover budget and enabled.
      "paused"    — remote streams leave less than min_torrent_speed: torrents
                    stopped entirely.
    A small HTTP server receives Jellyfin webhook POSTs to wake the poll loop
    early; otherwise it re-evaluates every check_interval seconds.
    """

    def __init__(
        self,
        jellyfin_url="http://localhost:8096",
        qbittorrent_url="http://localhost:8080",
        check_interval=30,
        jellyfin_api_key=None,
        streaming_start_delay=10,
        streaming_stop_delay=60,
        total_bandwidth_budget=30000000,
        service_buffer=5000000,
        default_stream_bitrate=10000000,
        min_torrent_speed=100,
        stream_bitrate_headroom=1.1,
        webhook_port=0,
        webhook_bind="127.0.0.1",
    ):
        # Bandwidth values are in bits/second; min_torrent_speed is KB/s.
        self.jellyfin_url = jellyfin_url
        self.qbittorrent_url = qbittorrent_url
        self.check_interval = check_interval
        self.jellyfin_api_key = jellyfin_api_key
        self.total_bandwidth_budget = total_bandwidth_budget
        self.service_buffer = service_buffer
        self.default_stream_bitrate = default_stream_bitrate
        self.min_torrent_speed = min_torrent_speed
        self.stream_bitrate_headroom = stream_bitrate_headroom
        # None until the first hysteresis-approved transition.
        self.last_streaming_state = None
        self.current_state = "unlimited"
        self.torrents_paused = False
        # Last (dl, ul) KB/s written to qBittorrent's alternate limits.
        self.last_alt_limits = None
        self.running = True
        self.session = requests.Session()  # Use session for cookies
        self.last_active_streams = []
        # Hysteresis settings to prevent rapid switching
        self.streaming_start_delay = streaming_start_delay
        self.streaming_stop_delay = streaming_stop_delay
        self.last_state_change = 0
        # Webhook receiver: allows Jellyfin to push events instead of waiting for the poll
        self.webhook_port = webhook_port
        self.webhook_bind = webhook_bind
        self.wake_event = threading.Event()
        self.webhook_server = None
        # Local network ranges (RFC 1918 private networks + localhost)
        self.local_networks = [
            ipaddress.ip_network("10.0.0.0/8"),
            ipaddress.ip_network("172.16.0.0/12"),
            ipaddress.ip_network("192.168.0.0/16"),
            ipaddress.ip_network("127.0.0.0/8"),
            ipaddress.ip_network("::1/128"),  # IPv6 localhost
            ipaddress.ip_network("fe80::/10"),  # IPv6 link-local
        ]

    def is_local_ip(self, ip_address: str) -> bool:
        """Check if an IP address is from a local network"""
        try:
            ip = ipaddress.ip_address(ip_address)
            return any(ip in network for network in self.local_networks)
        except ValueError:
            logger.warning(f"Invalid IP address format: {ip_address}")
            return True  # Treat invalid IPs as local for safety

    def signal_handler(self, signum, frame):
        # SIGINT/SIGTERM: stop the loop, shut down the webhook server, and
        # leave qBittorrent in its normal (unlimited) configuration.
        logger.info("Received shutdown signal, cleaning up...")
        self.running = False
        if self.webhook_server is not None:
            # shutdown() blocks until serve_forever returns; run from a thread so we don't deadlock
            threading.Thread(target=self.webhook_server.shutdown, daemon=True).start()
        self.restore_normal_limits()
        sys.exit(0)

    def wake(self) -> None:
        """Signal the main loop to re-evaluate state immediately."""
        self.wake_event.set()

    def sleep_or_wake(self, seconds: float) -> None:
        """Wait up to `seconds`, returning early if a webhook wakes the loop."""
        self.wake_event.wait(seconds)
        self.wake_event.clear()

    def start_webhook_server(self) -> None:
        """Start a background HTTP server that wakes the monitor on any POST."""
        if not self.webhook_port:
            return
        monitor = self
        class WebhookHandler(BaseHTTPRequestHandler):
            def do_POST(self):  # noqa: N802
                # Body is only parsed for the log line; any POST triggers a wake.
                length = int(self.headers.get("Content-Length", "0") or "0")
                body = self.rfile.read(min(length, 65536)) if length else b""
                event = "unknown"
                try:
                    if body:
                        event = json.loads(body).get("NotificationType", "unknown")
                except (json.JSONDecodeError, ValueError):
                    pass
                logger.info(f"Webhook received: {event}")
                self.send_response(204)
                self.end_headers()
                monitor.wake()
            def log_message(self, format, *args):
                return  # suppress default access log
        self.webhook_server = HTTPServer(
            (self.webhook_bind, self.webhook_port), WebhookHandler
        )
        threading.Thread(
            target=self.webhook_server.serve_forever, daemon=True, name="webhook-server"
        ).start()
        logger.info(
            f"Webhook receiver listening on http://{self.webhook_bind}:{self.webhook_port}"
        )

    def check_jellyfin_sessions(self) -> list[dict]:
        """Return active remote video streams as [{"name", "bitrate_bps"}, ...].

        Only unpaused movie/episode/video playback from non-local addresses
        counts. Raises ServiceUnavailable when Jellyfin can't be queried.
        """
        headers = (
            {"X-Emby-Token": self.jellyfin_api_key} if self.jellyfin_api_key else {}
        )
        try:
            response = requests.get(
                f"{self.jellyfin_url}/Sessions", headers=headers, timeout=10
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to check Jellyfin sessions: {e}")
            raise ServiceUnavailable(f"Jellyfin unavailable: {e}") from e
        try:
            sessions = response.json()
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse Jellyfin response: {e}")
            raise ServiceUnavailable(f"Jellyfin returned invalid JSON: {e}") from e
        active_streams = []
        for session in sessions:
            # IsPaused defaults to True: a session without PlayState is treated
            # as paused (i.e. not consuming bandwidth).
            if (
                "NowPlayingItem" in session
                and not session.get("PlayState", {}).get("IsPaused", True)
                and not self.is_local_ip(session.get("RemoteEndPoint", ""))
            ):
                item = session["NowPlayingItem"]
                item_type = item.get("Type", "").lower()
                if item_type in ["movie", "episode", "video"]:
                    user = session.get("UserName", "Unknown")
                    stream_name = f"{user}: {item.get('Name', 'Unknown')}"
                    # Bitrate source priority: transcode > item > first media
                    # source > configured default.
                    if session.get("TranscodingInfo") and session[
                        "TranscodingInfo"
                    ].get("Bitrate"):
                        bitrate = session["TranscodingInfo"]["Bitrate"]
                    elif item.get("Bitrate"):
                        bitrate = item["Bitrate"]
                    # NOTE(review): if "MediaSources" exists but is an empty
                    # list, [0] raises IndexError (the default [{}] only
                    # applies when the key is absent) — confirm Jellyfin never
                    # sends an empty MediaSources array here.
                    elif item.get("MediaSources", [{}])[0].get("Bitrate"):
                        bitrate = item["MediaSources"][0]["Bitrate"]
                    else:
                        bitrate = self.default_stream_bitrate
                    # Cap implausible values at 100 Mbps before applying headroom.
                    bitrate = min(int(bitrate), 100_000_000)
                    # Add headroom to account for bitrate fluctuations
                    bitrate = int(bitrate * self.stream_bitrate_headroom)
                    active_streams.append({"name": stream_name, "bitrate_bps": bitrate})
        return active_streams

    def check_qbittorrent_alternate_limits(self) -> bool:
        """Return True if qBittorrent's alternate speed limits are active.

        Raises ServiceUnavailable on any transport or HTTP failure.
        """
        try:
            response = self.session.get(
                f"{self.qbittorrent_url}/api/v2/transfer/speedLimitsMode", timeout=10
            )
            if response.status_code == 200:
                return response.text.strip() == "1"
            else:
                logger.warning(
                    f"SpeedLimitsMode endpoint returned HTTP {response.status_code}"
                )
                raise ServiceUnavailable(
                    f"qBittorrent returned HTTP {response.status_code}"
                )
        except requests.exceptions.RequestException as e:
            logger.error(f"SpeedLimitsMode endpoint failed: {e}")
            raise ServiceUnavailable(f"qBittorrent unavailable: {e}") from e

    def use_alt_limits(self, enable: bool) -> None:
        """Toggle qBittorrent's alternate speed limit mode to `enable`.

        The API only offers a toggle, so the current mode is read first to
        make this idempotent, and read again afterwards to verify.
        """
        action = "enabled" if enable else "disabled"
        try:
            current_throttle = self.check_qbittorrent_alternate_limits()
            if current_throttle == enable:
                logger.debug(
                    f"Alternate speed limits already {action}, no action needed"
                )
                return
            response = self.session.post(
                f"{self.qbittorrent_url}/api/v2/transfer/toggleSpeedLimitsMode",
                timeout=10,
            )
            response.raise_for_status()
            new_state = self.check_qbittorrent_alternate_limits()
            if new_state == enable:
                logger.info(f"Alternate speed limits {action}")
            else:
                logger.warning(
                    f"Toggle may have failed: expected {enable}, got {new_state}"
                )
        except ServiceUnavailable:
            logger.warning(
                f"qBittorrent unavailable, cannot {action} alternate speed limits"
            )
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to {action} alternate speed limits: {e}")

    def pause_all_torrents(self) -> None:
        """Stop every torrent (qBittorrent v5 'stop' endpoint); best effort."""
        try:
            response = self.session.post(
                f"{self.qbittorrent_url}/api/v2/torrents/stop",
                data={"hashes": "all"},
                timeout=10,
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to pause torrents: {e}")

    def resume_all_torrents(self) -> None:
        """Start every torrent (qBittorrent v5 'start' endpoint); best effort."""
        try:
            response = self.session.post(
                f"{self.qbittorrent_url}/api/v2/torrents/start",
                data={"hashes": "all"},
                timeout=10,
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to resume torrents: {e}")

    def set_alt_speed_limits(self, dl_kbs: float, ul_kbs: float) -> None:
        """Write alternate dl/ul limits (KB/s) into qBittorrent preferences.

        The *1024 converts KB/s to the bytes/s the preferences API expects.
        """
        try:
            payload = {
                "alt_dl_limit": int(dl_kbs * 1024),
                "alt_up_limit": int(ul_kbs * 1024),
            }
            response = self.session.post(
                f"{self.qbittorrent_url}/api/v2/app/setPreferences",
                data={"json": json.dumps(payload)},
                timeout=10,
            )
            response.raise_for_status()
            self.last_alt_limits = (dl_kbs, ul_kbs)
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to set alternate speed limits: {e}")

    def restore_normal_limits(self) -> None:
        """Undo any throttling/pausing before shutdown."""
        if self.torrents_paused:
            logger.info("Resuming all torrents before shutdown...")
            self.resume_all_torrents()
            self.torrents_paused = False
        if self.current_state != "unlimited":
            logger.info("Restoring normal speed limits before shutdown...")
            self.use_alt_limits(False)
            self.current_state = "unlimited"

    def sync_qbittorrent_state(self) -> None:
        """Re-assert the desired state in case qBittorrent was changed externally
        (e.g. restarted, or toggled via its own UI). Silently skips when
        qBittorrent is unreachable."""
        try:
            if self.current_state == "unlimited":
                actual_state = self.check_qbittorrent_alternate_limits()
                if actual_state:
                    logger.warning(
                        "qBittorrent state mismatch detected: expected alt speed OFF, got ON. Re-syncing..."
                    )
                    self.use_alt_limits(False)
            elif self.current_state == "throttled":
                if self.last_alt_limits:
                    self.set_alt_speed_limits(*self.last_alt_limits)
                actual_state = self.check_qbittorrent_alternate_limits()
                if not actual_state:
                    logger.warning(
                        "qBittorrent state mismatch detected: expected alt speed ON, got OFF. Re-syncing..."
                    )
                    self.use_alt_limits(True)
            elif self.current_state == "paused":
                self.pause_all_torrents()
                self.torrents_paused = True
        except ServiceUnavailable:
            pass

    def should_change_state(self, new_streaming_state: bool) -> bool:
        """Apply hysteresis to prevent rapid state changes"""
        # NOTE(review): the delay is measured against the time of the LAST
        # accepted change, not against when the new condition first appeared —
        # so a transition occurring long after the previous one is applied
        # immediately. Confirm this is the intended hysteresis semantics.
        now = time.time()
        if new_streaming_state == self.last_streaming_state:
            return False
        time_since_change = now - self.last_state_change
        if new_streaming_state and not self.last_streaming_state:
            if time_since_change >= self.streaming_start_delay:
                self.last_state_change = now
                return True
            else:
                remaining = self.streaming_start_delay - time_since_change
                logger.info(
                    f"Streaming started - waiting {remaining:.1f}s before enforcing limits"
                )
        elif not new_streaming_state and self.last_streaming_state:
            if time_since_change >= self.streaming_stop_delay:
                self.last_state_change = now
                return True
            else:
                remaining = self.streaming_stop_delay - time_since_change
                logger.info(
                    f"Streaming stopped - waiting {remaining:.1f}s before restoring unlimited mode"
                )
        return False

    def run(self):
        """Main loop: poll Jellyfin, compute the bandwidth budget, and drive
        qBittorrent through unlimited/throttled/paused transitions."""
        logger.info("Starting Jellyfin-qBittorrent monitor")
        logger.info(f"Jellyfin URL: {self.jellyfin_url}")
        logger.info(f"qBittorrent URL: {self.qbittorrent_url}")
        logger.info(f"Check interval: {self.check_interval}s")
        logger.info(f"Streaming start delay: {self.streaming_start_delay}s")
        logger.info(f"Streaming stop delay: {self.streaming_stop_delay}s")
        logger.info(f"Total bandwidth budget: {self.total_bandwidth_budget} bps")
        logger.info(f"Service buffer: {self.service_buffer} bps")
        logger.info(f"Default stream bitrate: {self.default_stream_bitrate} bps")
        logger.info(f"Minimum torrent speed: {self.min_torrent_speed} KB/s")
        logger.info(f"Stream bitrate headroom: {self.stream_bitrate_headroom}x")
        if self.webhook_port:
            logger.info(f"Webhook receiver: {self.webhook_bind}:{self.webhook_port}")
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
        self.start_webhook_server()
        while self.running:
            try:
                self.sync_qbittorrent_state()
                try:
                    active_streams = self.check_jellyfin_sessions()
                except ServiceUnavailable:
                    logger.warning("Jellyfin unavailable, maintaining current state")
                    self.sleep_or_wake(self.check_interval)
                    continue
                streaming_active = len(active_streams) > 0
                if active_streams:
                    for stream in active_streams:
                        logger.debug(
                            f"Active stream: {stream['name']} ({stream['bitrate_bps']} bps)"
                        )
                # Log only when the set of streams changed since last cycle.
                if active_streams != self.last_active_streams:
                    if streaming_active:
                        stream_names = ", ".join(
                            stream["name"] for stream in active_streams
                        )
                        logger.info(
                            f"Active streams ({len(active_streams)}): {stream_names}"
                        )
                    elif len(active_streams) == 0 and self.last_streaming_state:
                        logger.info("No active streaming sessions")
                # Hysteresis gate: last_streaming_state only flips when approved.
                if self.should_change_state(streaming_active):
                    self.last_streaming_state = streaming_active
                streaming_state = bool(self.last_streaming_state)
                # Budget left for torrents after the service buffer and all
                # active streams (with headroom) are accounted for.
                total_streaming_bps = sum(
                    stream["bitrate_bps"] for stream in active_streams
                )
                remaining_bps = (
                    self.total_bandwidth_budget
                    - self.service_buffer
                    - total_streaming_bps
                )
                remaining_kbs = max(0, remaining_bps) / 8 / 1024
                if not streaming_state:
                    desired_state = "unlimited"
                elif streaming_active:
                    if remaining_kbs >= self.min_torrent_speed:
                        desired_state = "throttled"
                    else:
                        desired_state = "paused"
                else:
                    # Hysteresis says "streaming" but no streams right now:
                    # hold the current state until the stop delay elapses.
                    desired_state = self.current_state
                if desired_state != self.current_state:
                    if desired_state == "unlimited":
                        action = "resume torrents, disable alt speed"
                    elif desired_state == "throttled":
                        action = (
                            "set alt limits "
                            f"dl={int(remaining_kbs)}KB/s ul={int(remaining_kbs)}KB/s, enable alt speed"
                        )
                    else:
                        action = "pause torrents"
                    logger.info(
                        "State change %s -> %s | streams=%d total_bps=%d remaining_bps=%d action=%s",
                        self.current_state,
                        desired_state,
                        len(active_streams),
                        total_streaming_bps,
                        remaining_bps,
                        action,
                    )
                    if desired_state == "unlimited":
                        if self.torrents_paused:
                            self.resume_all_torrents()
                            self.torrents_paused = False
                        self.use_alt_limits(False)
                    elif desired_state == "throttled":
                        if self.torrents_paused:
                            self.resume_all_torrents()
                            self.torrents_paused = False
                        self.set_alt_speed_limits(remaining_kbs, remaining_kbs)
                        self.use_alt_limits(True)
                    else:
                        if not self.torrents_paused:
                            self.pause_all_torrents()
                            self.torrents_paused = True
                    self.current_state = desired_state
                self.last_active_streams = active_streams
                self.sleep_or_wake(self.check_interval)
            except KeyboardInterrupt:
                break
            except Exception as e:
                logger.error(f"Unexpected error in monitoring loop: {e}")
                self.sleep_or_wake(self.check_interval)
        self.restore_normal_limits()
        logger.info("Monitor stopped")
if __name__ == "__main__":
    import os

    # All tuning knobs come from the environment (set by the systemd unit);
    # each falls back to the same default the class constructor uses.
    env = os.getenv
    monitor = JellyfinQBittorrentMonitor(
        jellyfin_url=env("JELLYFIN_URL", "http://localhost:8096"),
        qbittorrent_url=env("QBITTORRENT_URL", "http://localhost:8080"),
        check_interval=int(env("CHECK_INTERVAL", "30")),
        jellyfin_api_key=env("JELLYFIN_API_KEY"),
        streaming_start_delay=int(env("STREAMING_START_DELAY", "10")),
        streaming_stop_delay=int(env("STREAMING_STOP_DELAY", "60")),
        total_bandwidth_budget=int(env("TOTAL_BANDWIDTH_BUDGET", "30000000")),
        service_buffer=int(env("SERVICE_BUFFER", "5000000")),
        default_stream_bitrate=int(env("DEFAULT_STREAM_BITRATE", "10000000")),
        min_torrent_speed=int(env("MIN_TORRENT_SPEED", "100")),
        stream_bitrate_headroom=float(env("STREAM_BITRATE_HEADROOM", "1.1")),
        webhook_port=int(env("WEBHOOK_PORT", "0")),
        webhook_bind=env("WEBHOOK_BIND", "127.0.0.1"),
    )
    monitor.run()

View File

@@ -0,0 +1,105 @@
# Helper library (not a NixOS module): packages the Jellyfin Webhook plugin
# and produces install/configure scripts for it.
{ pkgs, lib }:
let
  pluginVersion = "18.0.0.0";
  # GUID from the plugin's meta.json; addresses it on /Plugins/<guid>/Configuration.
  pluginGuid = "71552a5a-5c5c-4350-a2ae-ebe451a30173";
  package = pkgs.stdenvNoCC.mkDerivation {
    pname = "jellyfin-plugin-webhook";
    version = pluginVersion;
    src = pkgs.fetchurl {
      url = "https://repo.jellyfin.org/files/plugin/webhook/webhook_${pluginVersion}.zip";
      hash = "sha256-LFFojiPnBGl9KJ0xVyPBnCmatcaeVbllRwRkz5Z3dqI=";
    };
    nativeBuildInputs = [ pkgs.unzip ];
    unpackPhase = ''unzip "$src"'';
    installPhase = ''
      mkdir -p "$out"
      cp *.dll meta.json "$out/"
    '';
    dontFixup = true; # managed .NET assemblies must not be patched
  };
  # Minimal Handlebars template, base64 encoded. The monitor only needs the POST;
  # NotificationType is parsed for the debug log line.
  # Decoded: {"NotificationType":"{{NotificationType}}"}
  templateB64 = "eyJOb3RpZmljYXRpb25UeXBlIjoie3tOb3RpZmljYXRpb25UeXBlfX0ifQ==";
  # Build a PluginConfiguration payload accepted by Jellyfin's JSON deserializer.
  # Each webhook is `{ name, uri, notificationTypes }`.
  mkConfigJson =
    webhooks:
    builtins.toJSON {
      ServerUrl = "";
      GenericOptions = map (w: {
        NotificationTypes = w.notificationTypes;
        WebhookName = w.name;
        WebhookUri = w.uri;
        EnableMovies = true;
        EnableEpisodes = true;
        EnableVideos = true;
        EnableWebhook = true;
        Template = templateB64;
        Headers = [
          {
            Key = "Content-Type";
            Value = "application/json";
          }
        ];
      }) webhooks;
    };
  # Oneshot that POSTs the plugin configuration. Retries past the window
  # between Jellyfin API health and plugin registration.
  mkConfigureScript =
    { jellyfinUrl, webhooks }:
    pkgs.writeShellScript "jellyfin-webhook-configure" ''
      set -euo pipefail
      export PATH=${
        lib.makeBinPath [
          pkgs.coreutils
          pkgs.curl
        ]
      }
      URL=${lib.escapeShellArg jellyfinUrl}
      AUTH="Authorization: MediaBrowser Token=\"$(cat "$CREDENTIALS_DIRECTORY/jellyfin-api-key")\""
      CONFIG=${lib.escapeShellArg (mkConfigJson webhooks)}
      for _ in $(seq 1 120); do curl -sf -o /dev/null "$URL/health" && break; sleep 1; done
      curl -sf -o /dev/null "$URL/health"
      for _ in $(seq 1 60); do
        if printf '%s' "$CONFIG" | curl -sf -X POST \
          -H "$AUTH" -H "Content-Type: application/json" --data-binary @- \
          "$URL/Plugins/${pluginGuid}/Configuration"; then
          echo "Jellyfin webhook plugin configured"; exit 0
        fi
        sleep 1
      done
      echo "Failed to configure webhook plugin" >&2; exit 1
    '';
  # Materialise a writable copy of the plugin. Jellyfin rewrites meta.json at
  # runtime, so a read-only nix-store symlink would EACCES.
  mkInstallScript =
    { pluginsDir }:
    pkgs.writeShellScript "jellyfin-webhook-install" ''
      set -euo pipefail
      export PATH=${lib.makeBinPath [ pkgs.coreutils ]}
      dst=${lib.escapeShellArg "${pluginsDir}/Webhook_${pluginVersion}"}
      mkdir -p ${lib.escapeShellArg pluginsDir}
      rm -rf "$dst" && mkdir -p "$dst"
      cp ${package}/*.dll ${package}/meta.json "$dst/"
      chmod u+rw "$dst"/*
    '';
in
{
  inherit
    package
    pluginVersion
    pluginGuid
    mkConfigureScript
    mkInstallScript
    ;
}

View File

@@ -0,0 +1,66 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
# Jellyfin media server: data/cache on the SSD zpool, streaming-tuned Caddy
# reverse proxy, and a fail2ban jail on failed authentications.
{
  imports = [
    (lib.serviceMountWithZpool "jellyfin" service_configs.zpool_ssds [
      config.services.jellyfin.dataDir
      config.services.jellyfin.cacheDir
    ])
    (lib.serviceFilePerms "jellyfin" [
      "Z ${config.services.jellyfin.dataDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
      "Z ${config.services.jellyfin.cacheDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
    ])
  ];
  services.jellyfin = {
    enable = true;
    # Swap in an optimized build of jellyfin-ffmpeg for transcoding.
    package = pkgs.jellyfin.override { jellyfin-ffmpeg = (lib.optimizePackage pkgs.jellyfin-ffmpeg); };
    inherit (service_configs.jellyfin) dataDir cacheDir;
  };
  services.caddy.virtualHosts."jellyfin.${service_configs.https.domain}".extraConfig = ''
    reverse_proxy :${builtins.toString service_configs.ports.private.jellyfin.port} {
      # Disable response buffering for streaming. Caddy's default partial
      # buffering delays fMP4-HLS segments and direct-play responses where
      # Content-Length is known (so auto-flush doesn't trigger).
      flush_interval -1
      transport http {
        # Localhost: compression wastes CPU re-encoding already-compressed media.
        compression off
      }
      header_up X-Real-IP {remote_host}
      header_up X-Forwarded-For {remote_host}
      header_up X-Forwarded-Proto {scheme}
    }
    request_body {
      # Large cap to allow big client uploads (e.g. sync/backup requests).
      max_size 4096MB
    }
  '';
  # video/render — presumably for GPU transcode device access; media_group
  # grants read access to the media library. Confirm against host udev perms.
  users.users.${config.services.jellyfin.user}.extraGroups = [
    "video"
    "render"
    service_configs.media_group
  ];
  # Protect Jellyfin login from brute force attacks
  services.fail2ban.jails.jellyfin = {
    enabled = true;
    settings = {
      backend = "auto";
      port = "http,https";
      # Jellyfin writes dated log files; glob catches all of them.
      logpath = "${config.services.jellyfin.dataDir}/log/log_*.log";
      # defaults: maxretry=5, findtime=10m, bantime=10m
    };
    filter.Definition = {
      failregex = ''^.*Authentication request for .* has been denied \(IP: "<ADDR>"\)\..*$'';
      ignoreregex = "";
    };
  };
}

103
services/llama-cpp.nix Normal file
View File

@@ -0,0 +1,103 @@
{
  pkgs,
  service_configs,
  config,
  inputs,
  lib,
  utils,
  ...
}:
let
  cfg = config.services.llama-cpp;
  # Quantized GGUF weights; the API-visible model alias is derived from the
  # file name with the .gguf suffix stripped.
  modelUrl = "https://huggingface.co/bartowski/google_gemma-4-E2B-it-GGUF/resolve/main/google_gemma-4-E2B-it-IQ2_M.gguf";
  modelAlias = lib.removeSuffix ".gguf" (baseNameOf modelUrl);
in
{
  imports = [
    # Expose the server as https://llm.<domain> through Caddy.
    (lib.mkCaddyReverseProxy {
      subdomain = "llm";
      port = service_configs.ports.private.llama_cpp.port;
    })
  ];
  services.llama-cpp = {
    enable = true;
    # fetchurl pins the model by hash; toString yields the store path
    # passed to llama-server via -m.
    model = toString (
      pkgs.fetchurl {
        url = modelUrl;
        sha256 = "17e869ac54d0e59faa884d5319fc55ad84cd866f50f0b3073fbb25accc875a23";
      }
    );
    port = service_configs.ports.private.llama_cpp.port;
    host = "0.0.0.0";
    # Vulkan build from the pinned llamacpp flake input, rebuilt with local
    # optimization flags. The patches override is currently a no-op
    # placeholder for carrying local patches.
    package = lib.optimizePackage (
      inputs.llamacpp.packages.${pkgs.system}.vulkan.overrideAttrs (old: {
        patches = (old.patches or [ ]) ++ [
        ];
      })
    );
    extraFlags = [
      # Offload (effectively) all layers to the GPU.
      "-ngl"
      "999"
      # 64k context window.
      "-c"
      "65536"
      # turbo3 KV-cache quantization for K and V (see GPU timeout note below).
      "-ctk"
      "turbo3"
      "-ctv"
      "turbo3"
      # Flash-attention toggle.
      "-fa"
      "on"
      "--api-key-file"
      config.age.secrets.llama-cpp-api-key.path
      "--metrics"
      "--alias"
      modelAlias
      # Batch / micro-batch sizes; see the VRAM note below for why 4096.
      "-b"
      "4096"
      "-ub"
      "4096"
      "--parallel"
      "2"
    ];
  };
  # have to do this in order to get vulkan to work
  systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;
  # ANV driver's turbo3 shader compilation exceeds the default 8 MB thread stack.
  systemd.services.llama-cpp.serviceConfig.LimitSTACK = lib.mkForce "67108864"; # 64 MB soft+hard
  # llama-server tries to create ~/.cache; ProtectSystem=strict + impermanent
  # root make /root read-only. Give it a writable cache dir and point HOME there.
  systemd.services.llama-cpp.serviceConfig.CacheDirectory = "llama-cpp";
  systemd.services.llama-cpp.environment.HOME = "/var/cache/llama-cpp";
  # turbo3 KV cache quantization runs a 14-barrier WHT butterfly per 128-element
  # workgroup in SET_ROWS. With 4 concurrent slots and batch=4096, the combined
  # GPU dispatch can exceed the default i915 CCS engine preempt timeout (7.5s),
  # causing GPU HANG -> ErrorDeviceLost. Increase compute engine timeouts.
  # Note: batch<4096 is not viable -- GDN chunked mode needs a larger compute
  # buffer at smaller batch sizes, exceeding the A380's 6 GB VRAM.
  # '+' prefix runs as root regardless of service User=.
  systemd.services.llama-cpp.serviceConfig.ExecStartPre = [
    "+${pkgs.writeShellScript "set-gpu-compute-timeout" ''
      for f in /sys/class/drm/card*/engine/ccs*/preempt_timeout_ms; do
        [ -w "$f" ] && echo 30000 > "$f"
      done
      for f in /sys/class/drm/card*/engine/ccs*/heartbeat_interval_ms; do
        [ -w "$f" ] && echo 10000 > "$f"
      done
    ''}"
  ];
  # upstream module hardcodes --log-disable; override ExecStart to keep logs
  # so we can see prompt processing progress via journalctl
  systemd.services.llama-cpp.serviceConfig.ExecStart = lib.mkForce (
    "${cfg.package}/bin/llama-server"
    + " --host ${cfg.host}"
    + " --port ${toString cfg.port}"
    + " -m ${cfg.model}"
    + " ${utils.escapeSystemdExecArgs cfg.extraFlags}"
  );
}

View File

@@ -0,0 +1,59 @@
{
  config,
  lib,
  service_configs,
  ...
}:
let
  turnPort = service_configs.ports.public.coturn.port;
  turnTlsPort = service_configs.ports.public.coturn_tls.port;
  # Both listener ports are opened for TCP and UDP alike.
  listenerPorts = [
    turnPort
    turnTlsPort
  ];
in
{
  services.coturn = {
    enable = true;
    realm = service_configs.https.domain;
    # Ephemeral credentials derived from a shared secret rather than
    # static TURN user accounts.
    use-auth-secret = true;
    static-auth-secret-file = config.age.secrets.coturn-auth-secret.path;
    listening-port = turnPort;
    tls-listening-port = turnTlsPort;
    no-cli = true;
    # recommended security settings from Synapse's coturn docs: refuse to
    # relay traffic into private, loopback, link-local, CGNAT, benchmark,
    # documentation and other non-routable ranges (IPv4 and IPv6).
    extraConfig = ''
      denied-peer-ip=10.0.0.0-10.255.255.255
      denied-peer-ip=192.168.0.0-192.168.255.255
      denied-peer-ip=172.16.0.0-172.31.255.255
      denied-peer-ip=0.0.0.0-0.255.255.255
      denied-peer-ip=100.64.0.0-100.127.255.255
      denied-peer-ip=169.254.0.0-169.254.255.255
      denied-peer-ip=192.0.0.0-192.0.0.255
      denied-peer-ip=198.18.0.0-198.19.255.255
      denied-peer-ip=198.51.100.0-198.51.100.255
      denied-peer-ip=203.0.113.0-203.0.113.255
      denied-peer-ip=240.0.0.0-255.255.255.255
      denied-peer-ip=::1
      denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
      denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
      denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
      denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
    '';
  };
  # coturn needs these ports open
  networking.firewall = {
    allowedTCPPorts = listenerPorts;
    allowedUDPPorts = listenerPorts;
    # relay port range
    allowedUDPPortRanges = [
      {
        from = config.services.coturn.min-port;
        to = config.services.coturn.max-port;
      }
    ];
  };
}

View File

@@ -0,0 +1,7 @@
# Matrix voice/video stack: the homeserver, the TURN relay, and the
# LiveKit SFU + JWT service.
{
  imports = [
    ./matrix.nix
    ./coturn.nix
    ./livekit.nix
  ];
}

View File

@@ -0,0 +1,51 @@
{
  service_configs,
  ...
}:
let
  # API key/secret pair shared between LiveKit and lk-jwt-service.
  keyFile = ../../secrets/livekit_keys;
in
{
  services.livekit = {
    enable = true;
    inherit keyFile;
    # Opens the RTC UDP port range configured below.
    openFirewall = true;
    settings = {
      port = service_configs.ports.public.livekit.port;
      # HTTP/WebSocket API stays on loopback; Caddy fronts it with TLS.
      bind_addresses = [ "127.0.0.1" ];
      rtc = {
        port_range_start = 50100;
        port_range_end = 50200;
        use_external_ip = true;
      };
      # Disable LiveKit's built-in TURN; coturn is already running
      turn = {
        enabled = false;
      };
      logging = {
        level = "info";
      };
    };
  };
  # Token-minting sidecar that issues LiveKit JWTs (consumed by the
  # Matrix RTC setup in matrix.nix).
  services.lk-jwt-service = {
    enable = true;
    inherit keyFile;
    livekitUrl = "wss://${service_configs.livekit.domain}";
    port = service_configs.ports.private.lk_jwt.port;
  };
  # Route token/health requests to lk-jwt-service; everything else goes to
  # LiveKit itself.
  services.caddy.virtualHosts."${service_configs.livekit.domain}".extraConfig = ''
    @jwt path /sfu/get /healthz
    handle @jwt {
      reverse_proxy :${builtins.toString service_configs.ports.private.lk_jwt.port}
    }
    handle {
      reverse_proxy :${builtins.toString service_configs.ports.public.livekit.port}
    }
  '';
}

View File

@@ -0,0 +1,73 @@
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Continuwuity runs with DynamicUser; its state lives under
    # /var/lib/private and is pinned to the SSD pool.
    (lib.serviceMountWithZpool "continuwuity" service_configs.zpool_ssds [
      "/var/lib/private/continuwuity"
    ])
    (lib.serviceFilePerms "continuwuity" [
      "Z /var/lib/private/continuwuity 0770 ${config.services.matrix-continuwuity.user} ${config.services.matrix-continuwuity.group}"
    ])
    (lib.mkCaddyReverseProxy {
      domain = service_configs.matrix.domain;
      port = service_configs.ports.private.matrix.port;
    })
  ];
  services.matrix-continuwuity = {
    enable = true;
    settings.global = {
      port = [ service_configs.ports.private.matrix.port ];
      # server_name is the apex domain; the actual homeserver host is
      # delegated via the /.well-known/matrix/server document below.
      server_name = service_configs.https.domain;
      # Registration is open but gated behind a token from agenix.
      allow_registration = true;
      registration_token_file = config.age.secrets.matrix-reg-token.path;
      new_user_displayname_suffix = "";
      trusted_servers = [
        "matrix.org"
        "constellatory.net"
        "tchncs.de"
        "envs.net"
      ];
      address = [
        "0.0.0.0"
      ];
      # TURN server config (coturn)
      turn_secret_file = config.age.secrets.matrix-turn-secret.path;
      turn_uris = [
        "turn:${service_configs.https.domain}?transport=udp"
        "turn:${service_configs.https.domain}?transport=tcp"
      ];
      turn_ttl = 86400;
    };
  };
  # Serve the well-known delegation documents on the apex domain.
  # mkBefore so these directives precede any other vhost config.
  services.caddy.virtualHosts.${service_configs.https.domain}.extraConfig = lib.mkBefore ''
    header /.well-known/matrix/* Content-Type application/json
    header /.well-known/matrix/* Access-Control-Allow-Origin *
    respond /.well-known/matrix/server `{"m.server": "${service_configs.matrix.domain}:${builtins.toString service_configs.ports.public.https.port}"}`
    respond /.well-known/matrix/client `{"m.server":{"base_url":"https://${service_configs.matrix.domain}"},"m.homeserver":{"base_url":"https://${service_configs.matrix.domain}"},"org.matrix.msc3575.proxy":{"base_url":"https://${config.services.matrix-continuwuity.settings.global.server_name}"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://${service_configs.livekit.domain}"}]}`
  '';
  # Exact duplicate for federation port
  services.caddy.virtualHosts."${service_configs.matrix.domain}:${builtins.toString service_configs.ports.public.matrix_federation.port}".extraConfig =
    config.services.caddy.virtualHosts."${service_configs.matrix.domain}".extraConfig;
  # for federation
  networking.firewall.allowedTCPPorts = [
    service_configs.ports.public.matrix_federation.port
  ];
  # for federation
  networking.firewall.allowedUDPPorts = [
    service_configs.ports.public.matrix_federation.port
  ];
}

192
services/minecraft.nix Normal file
View File

@@ -0,0 +1,192 @@
{
pkgs,
service_configs,
lib,
config,
inputs,
...
}:
{
imports = [
  # Server directory lives on the SSD pool.
  (lib.serviceMountWithZpool "minecraft-server-${service_configs.minecraft.server_name}"
    service_configs.zpool_ssds
    [
      "${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}"
    ]
  )
  inputs.nix-minecraft.nixosModules.minecraft-servers
  (lib.serviceFilePerms "minecraft-server-${service_configs.minecraft.server_name}" [
    # Recursive (Z) ownership for the server dir, plus group-readable
    # squaremap web root so Caddy can serve it.
    "Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 700 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
    "Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web 750 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
    # Allow caddy (in minecraft group) to traverse to squaremap/web for map.gardling.com
    "z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 710 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
    "z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap 710 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
  ])
];
boot.kernel.sysctl = {
  # Disable autogroup for better scheduling of game server threads
  "kernel.sched_autogroup_enabled" = 0;
};
services.minecraft-servers = {
  enable = true;
  eula = true;
  dataDir = service_configs.minecraft.parent_dir;
  openFirewall = true;
  servers.${service_configs.minecraft.server_name} = {
    enable = true;
    # Fabric loader for MC 26.1.2 on the headless JDK 25.
    package = pkgs.fabricServers.fabric-26_1_2.override { jre_headless = pkgs.openjdk25_headless; };
    jvmOpts = lib.concatStringsSep " " [
      # Memory
      "-Xmx${builtins.toString service_configs.minecraft.memory.heap_size_m}M"
      "-Xms${builtins.toString service_configs.minecraft.memory.heap_size_m}M"
      # GC
      "-XX:+UseZGC"
      "-XX:+ZGenerational"
      # added in new minecraft version
      "-XX:+UseCompactObjectHeaders"
      "-XX:+UseStringDeduplication"
      # Base JVM optimizations (brucethemoose/Minecraft-Performance-Flags-Benchmarks)
      "-XX:+UnlockExperimentalVMOptions"
      "-XX:+UnlockDiagnosticVMOptions"
      "-XX:+AlwaysActAsServerClassMachine"
      "-XX:+AlwaysPreTouch"
      "-XX:+DisableExplicitGC"
      "-XX:+UseNUMA"
      "-XX:+PerfDisableSharedMem"
      "-XX:+UseFastUnorderedTimeStamps"
      "-XX:+UseCriticalJavaThreadPriority"
      "-XX:ThreadPriorityPolicy=1"
      "-XX:AllocatePrefetchStyle=3"
      "-XX:-DontCompileHugeMethods"
      "-XX:MaxNodeLimit=240000"
      "-XX:NodeLimitFudgeFactor=8000"
      "-XX:ReservedCodeCacheSize=400M"
      "-XX:NonNMethodCodeHeapSize=12M"
      "-XX:ProfiledCodeHeapSize=194M"
      "-XX:NonProfiledCodeHeapSize=194M"
      "-XX:NmethodSweepActivity=1"
      "-XX:+UseVectorCmov"
      # Large pages (requires vm.nr_hugepages sysctl)
      "-XX:+UseLargePages"
      "-XX:LargePageSizeInBytes=${builtins.toString service_configs.minecraft.memory.large_page_size_m}M"
    ];
    serverProperties = {
      server-port = service_configs.ports.public.minecraft.port;
      enforce-whitelist = true;
      gamemode = "survival";
      white-list = true;
      difficulty = "easy";
      motd = "A Minecraft Server";
      view-distance = 10;
      simulation-distance = 6;
      sync-chunk-writes = false;
      spawn-protection = 0;
    };
    # Whitelist entries are kept out of the public repo.
    whitelist = import ../secrets/minecraft-whitelist.nix;
    symlinks = {
      # Server-side mods, pinned by hash from Modrinth CDN URLs.
      "mods" = pkgs.linkFarmFromDrvs "mods" (
        with pkgs;
        builtins.attrValues {
          FabricApi = fetchurl {
            url = "https://cdn.modrinth.com/data/P7dR8mSH/versions/fm7UYECV/fabric-api-0.145.4%2B26.1.2.jar";
            sha512 = "ffd5ef62a745f76cd2e5481252cb7bc67006c809b4f436827d05ea22c01d19279e94a3b24df3d57e127af1cd08440b5de6a92a4ea8f39b2dcbbe1681275564c3";
          };
          # No 26.1.2 version available
          # FerriteCore = fetchurl {
          #   url = "https://cdn.modrinth.com/data/uXXizFIs/versions/d5ddUdiB/ferritecore-9.0.0-fabric.jar";
          #   sha512 = "d81fa97e11784c19d42f89c2f433831d007603dd7193cee45fa177e4a6a9c52b384b198586e04a0f7f63cd996fed713322578bde9a8db57e1188854ae5cbe584";
          # };
          Lithium = fetchurl {
            url = "https://cdn.modrinth.com/data/gvQqBUqZ/versions/v2xoRvRP/lithium-fabric-0.24.1%2Bmc26.1.2.jar";
            sha512 = "8711bc8c6f39be4c8511becb7a68e573ced56777bd691639f2fc62299b35bb4ccd2efe4a39bd9c308084b523be86a5f5c4bf921ab85f7a22bf075d8ea2359621";
          };
          NoChatReports = fetchurl {
            url = "https://cdn.modrinth.com/data/qQyHxfxd/versions/2yrLNE3S/NoChatReports-FABRIC-26.1-v2.19.0.jar";
            sha512 = "94d58a1a4cde4e3b1750bdf724e65c5f4ff3436c2532f36a465d497d26bf59f5ac996cddbff8ecdfed770c319aa2f2dcc9c7b2d19a35651c2a7735c5b2124dad";
          };
          squaremap = fetchurl {
            url = "https://cdn.modrinth.com/data/PFb7ZqK6/versions/UBN6MFvH/squaremap-fabric-mc26.1.2-1.3.13.jar";
            sha512 = "97bc130184b5d0ddc4ff98a15acef6203459d982e0e2afbd49a2976d546c55a86ef22b841378b51dd782be9b2cfbe4cfa197717f2b7f6800fd8b4ff4df6e564f";
          };
          scalablelux = fetchurl {
            url = "https://cdn.modrinth.com/data/Ps1zyz6x/versions/gYbHVCz8/ScalableLux-0.2.0%2Bfabric.2b63825-all.jar";
            sha512 = "48565a4d8a1cbd623f0044086d971f2c0cf1c40e1d0b6636a61d41512f4c1c1ddff35879d9dba24b088a670ee254e2d5842d13a30b6d76df23706fa94ea4a58b";
          };
          c2me = fetchurl {
            url = "https://cdn.modrinth.com/data/VSNURh3q/versions/yrNQQ1AQ/c2me-fabric-mc26.1.2-0.3.7%2Balpha.0.65.jar";
            sha512 = "6666ebaa3bfa403e386776590fc845b7c306107d37ebc7b1be3b057893fbf9f933abb2314c171d7fe19c177cf8823cb47fdc32040d34a9704f5ab656dd5d93f8";
          };
          # No 26.1 version available
          # krypton = fetchurl {
          #   url = "https://cdn.modrinth.com/data/fQEb0iXm/versions/O9LmWYR7/krypton-0.2.10.jar";
          #   sha512 = "4dcd7228d1890ddfc78c99ff284b45f9cf40aae77ef6359308e26d06fa0d938365255696af4cc12d524c46c4886cdcd19268c165a2bf0a2835202fe857da5cab";
          # };
          # No 26.1.2 version available
          # disconnect-packet-fix = fetchurl {
          #   url = "https://cdn.modrinth.com/data/rd9rKuJT/versions/x9gVeaTU/disconnect-packet-fix-fabric-2.1.0.jar";
          #   sha512 = "bf84d02bdcd737706df123e452dd31ef535580fa4ced6af1e4ceea022fef94e4764775253e970b8caa1292e2fa00eb470557f70b290fafdb444479fa801b07a1";
          # };
          packet-fixer = fetchurl {
            url = "https://cdn.modrinth.com/data/c7m1mi73/versions/M8PqPQr4/packetfixer-fabric-3.3.4-26.1.2.jar";
            sha512 = "698020edba2a1fd80bb282bfd4832a00d6447b08eaafbc2e16a8f3bf89e187fc9a622c92dfe94ae140dd485fc0220a86890f12158ec08054e473fef8337829bc";
          };
          # mVUS fork: upstream ModernFix no longer ships Fabric builds
          modernfix = fetchurl {
            url = "https://cdn.modrinth.com/data/TjSm1wrD/versions/dqQ7mabN/modernfix-5.26.2-build.1.jar";
            sha512 = "fbef93c2dabf7bcd0ccd670226dfc4958f7ebe5d8c2b1158e88a65e6954a40f595efd58401d2a3dbb224660dca5952199cf64df29100e7bd39b1b1941290b57b";
          };
        }
      );
    };
  };
};
# Runtime tuning for the server's systemd unit. nix-minecraft names units
# "minecraft-server-<name>", so derive the unit name from service_configs
# instead of hardcoding "minecraft-server-main": with any other
# server_name these settings would silently attach to a nonexistent unit.
systemd.services."minecraft-server-${service_configs.minecraft.server_name}" = {
  serviceConfig = {
    # Slightly elevated CPU priority for the game loop.
    Nice = -5;
    IOSchedulingPriority = 0;
    LimitMEMLOCK = "infinity"; # Required for large pages
  };
};
# Static file server for the squaremap web UI (only when Caddy is enabled).
services.caddy.virtualHosts = lib.mkIf (config.services.caddy.enable) {
  "map.${service_configs.https.domain}".extraConfig = ''
    root * ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web
    file_server browse
  '';
};
# Caddy joins the minecraft group so it can read the squaremap web root.
users.users = lib.mkIf (config.services.caddy.enable) {
  ${config.services.caddy.user}.extraGroups = [
    # for `map.gardling.com`
    config.services.minecraft-servers.group
  ];
};
}

36
services/mollysocket.nix Normal file
View File

@@ -0,0 +1,36 @@
{
  config,
  service_configs,
  lib,
  ...
}:
let
  msPort = service_configs.ports.private.mollysocket.port;
in
{
  imports = [
    # DynamicUser state dir on the SSD pool, root-only.
    (lib.serviceMountWithZpool "mollysocket" service_configs.zpool_ssds [
      "/var/lib/private/mollysocket"
    ])
    (lib.serviceFilePerms "mollysocket" [
      "Z /var/lib/private/mollysocket 0700 root root"
    ])
  ];

  services.mollysocket = {
    enable = true;
    environmentFile = config.age.secrets.mollysocket-env.path;
    settings = {
      host = "127.0.0.1";
      port = msPort;
      # Explicitly allow our self-hosted ntfy instance.
      # Local-network endpoints are denied by default for security.
      allowed_endpoints = [ "https://${service_configs.ntfy.domain}" ];
      # allowed_uuids set via MOLLY_ALLOWED_UUIDS in environmentFile
    };
  };

  # h2c = cleartext HTTP/2 to the loopback backend.
  services.caddy.virtualHosts."${service_configs.mollysocket.domain}".extraConfig = ''
    reverse_proxy h2c://127.0.0.1:${builtins.toString msPort}
  '';
}

View File

@@ -0,0 +1,8 @@
# Monero stack: node, p2pool sidechain, xmrig miner, and the auto-pause shim.
{
  imports = [
    ./monero.nix
    ./p2pool.nix
    ./xmrig.nix
    ./xmrig-auto-pause.nix
  ];
}

View File

@@ -0,0 +1,37 @@
{
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Blockchain data on the SSD pool, private to the monero user.
    (lib.serviceMountWithZpool "monero" service_configs.zpool_ssds [
      service_configs.monero.dataDir
    ])
    (lib.serviceFilePerms "monero" [
      "Z ${service_configs.monero.dataDir} 0700 monero monero"
    ])
  ];
  services.monero = {
    enable = true;
    dataDir = service_configs.monero.dataDir;
    # Public node with restricted RPC: restricted = true limits the RPC
    # surface exposed on 0.0.0.0 to the safe wallet-facing subset.
    rpc = {
      address = "0.0.0.0";
      port = service_configs.ports.public.monero_rpc.port;
      restricted = true;
    };
    # ZMQ publisher is loopback-only; p2pool consumes it locally.
    extraConfig = ''
      p2p-bind-port=${builtins.toString service_configs.ports.public.monero.port}
      zmq-pub=tcp://127.0.0.1:${builtins.toString service_configs.ports.private.monero_zmq.port}
      db-sync-mode=fast:async:1000000000bytes
      public-node=1
      confirm-external-bind=1
    '';
  };
  # P2P and the restricted RPC port are intentionally public.
  networking.firewall.allowedTCPPorts = [
    service_configs.ports.public.monero.port
    service_configs.ports.public.monero_rpc.port
  ];
}

View File

@@ -0,0 +1,39 @@
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # p2pool state on the SSD pool, private to its user.
    (lib.serviceMountWithZpool "p2pool" service_configs.zpool_ssds [
      service_configs.p2pool.dataDir
    ])
    (lib.serviceFilePerms "p2pool" [
      "Z ${service_configs.p2pool.dataDir} 0700 p2pool p2pool"
    ])
  ];
  services.p2pool = {
    enable = true;
    dataDir = service_configs.p2pool.dataDir;
    walletAddress = service_configs.p2pool.walletAddress;
    sidechain = "nano";
    # Local monerod endpoints (RPC + ZMQ pub configured in monero.nix).
    host = "127.0.0.1";
    rpcPort = service_configs.ports.public.monero_rpc.port;
    zmqPort = service_configs.ports.private.monero_zmq.port;
    # Stratum endpoint for local miners (xmrig points here).
    # Flag and value are separate list elements: the previous single
    # " --stratum host:port" string (with its leading space) would be
    # passed as one malformed argv entry if the module escapes extraArgs
    # per element; split form is also correct under plain space-joining.
    extraArgs = [
      "--stratum"
      "0.0.0.0:${builtins.toString service_configs.ports.private.p2pool_stratum.port}"
    ];
  };
  # Ensure p2pool starts after monero is ready
  systemd.services.p2pool = {
    after = [ "monero.service" ];
    wants = [ "monero.service" ];
  };
  networking.firewall.allowedTCPPorts = [
    service_configs.ports.public.p2pool_p2p.port
  ];
}

View File

@@ -0,0 +1,39 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# The watchdog is only meaningful when the miner itself is enabled.
lib.mkIf config.services.xmrig.enable {
  systemd.services.xmrig-auto-pause = {
    description = "Auto-pause xmrig when other services need CPU";
    after = [ "xmrig.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      # Runs the monitoring loop from xmrig-auto-pause.py (same directory).
      ExecStart = "${pkgs.python3}/bin/python3 ${./xmrig-auto-pause.py}";
      Restart = "always";
      RestartSec = "10s";
      # Hardening: the script only reads /proc/stat, talks to systemd,
      # and writes its state file under StateDirectory.
      NoNewPrivileges = true;
      ProtectHome = true;
      ProtectSystem = "strict";
      PrivateTmp = true;
      RestrictAddressFamilies = [
        "AF_UNIX" # systemctl talks to systemd over D-Bus unix socket
      ];
      MemoryDenyWriteExecute = true;
      # Writable dir matching STATE_DIR below; persists the paused flag
      # across watchdog restarts.
      StateDirectory = "xmrig-auto-pause";
    };
    environment = {
      POLL_INTERVAL = "3";
      GRACE_PERIOD = "15";
      # Background services (qbittorrent, bitmagnet, postgresql, etc.) produce
      # 15-25% non-nice CPU during normal operation. The stop threshold must
      # sit above transient spikes; the resume threshold must be below the
      # steady-state floor to avoid restarting xmrig while services are active.
      CPU_STOP_THRESHOLD = "40";
      CPU_RESUME_THRESHOLD = "10";
      STARTUP_COOLDOWN = "10";
      STATE_DIR = "/var/lib/xmrig-auto-pause";
    };
  };
}

View File

@@ -0,0 +1,210 @@
#!/usr/bin/env python3
"""
Auto-pause xmrig when other services need CPU.
Monitors non-nice CPU usage from /proc/stat. Since xmrig runs at Nice=19,
its CPU time lands in the 'nice' column and is excluded from the metric.
When real workload (user + system + irq + softirq) exceeds the stop
threshold, stops xmrig. When it drops below the resume threshold for
GRACE_PERIOD seconds, restarts xmrig.
This replaces per-service pause scripts with a single general-purpose
monitor that handles any CPU-intensive workload (gitea workers, llama-cpp
inference, etc.) without needing to know about specific processes.
Why scheduler priority alone isn't enough:
Nice=19 / SCHED_IDLE only affects which thread gets the next time slice.
RandomX's 2MB-per-thread scratchpad (24MB across 12 threads) pollutes
the shared 32MB L3 cache, and its memory access pattern saturates DRAM
bandwidth. Other services run slower even though they aren't denied CPU
time. The only fix is to stop xmrig entirely when real work is happening.
Hysteresis:
The stop threshold is set higher than the resume threshold to prevent
oscillation. When xmrig runs, its L3 cache pressure makes other processes
appear ~3-8% busier. A single threshold trips on this indirect effect,
causing stop/start thrashing. Separate thresholds break the cycle: the
resume threshold confirms the system is truly idle, while the stop
threshold requires genuine workload above xmrig's indirect pressure.
"""
import os
import subprocess
import sys
import time

# All tunables come from the environment (set by the NixOS unit).
# Seconds between /proc/stat samples.
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "3"))
# Seconds the system must stay below the resume threshold before restart.
GRACE_PERIOD = float(os.environ.get("GRACE_PERIOD", "15"))
# Percentage of total CPU ticks that non-nice processes must use to trigger
# a pause. On a 12-thread system, one fully loaded core ≈ 8.3% of total.
# Default 15% requires roughly two busy cores, which avoids false positives
# from xmrig's L3 cache pressure inflating other processes' apparent CPU.
CPU_STOP_THRESHOLD = float(os.environ.get("CPU_STOP_THRESHOLD", "15"))
# Percentage below which the system is considered idle enough to resume
# mining. Lower than the stop threshold to provide hysteresis.
CPU_RESUME_THRESHOLD = float(os.environ.get("CPU_RESUME_THRESHOLD", "5"))
# After starting xmrig, ignore CPU spikes for this many seconds to let
# RandomX dataset initialization complete (~4s on the target hardware)
# without retriggering a stop.
STARTUP_COOLDOWN = float(os.environ.get("STARTUP_COOLDOWN", "10"))
# Directory for persisting pause state across script restarts. Without
# this, a restart while xmrig is paused loses the paused_by_us flag and
# xmrig stays stopped permanently.
STATE_DIR = os.environ.get("STATE_DIR", "")
# Empty STATE_DIR disables persistence entirely (marker path is "").
_PAUSE_FILE = os.path.join(STATE_DIR, "paused") if STATE_DIR else ""
def log(msg):
    """Emit a tagged line to stderr, flushed immediately for journald."""
    sys.stderr.write(f"[xmrig-auto-pause] {msg}\n")
    sys.stderr.flush()
def read_cpu_ticks():
    """Sample the aggregate 'cpu' line of /proc/stat.

    Returns (total_ticks, real_work_ticks), where "real work" is
    user + system + irq + softirq — everything except idle/iowait and
    the 'nice' column (which is where Nice=19 xmrig time accumulates).
    """
    with open("/proc/stat") as stat:
        # Field order after the "cpu" label:
        # user nice system idle iowait irq softirq steal
        ticks = [int(tok) for tok in stat.readline().split()[1:9]]
    busy = ticks[0] + ticks[2] + ticks[5] + ticks[6]
    return sum(ticks), busy
def is_active(unit):
    """Return True when systemd reports `unit` as currently active."""
    rc = subprocess.run(
        ["systemctl", "is-active", "--quiet", unit],
        capture_output=True,
    ).returncode
    return rc == 0
def systemctl(action, unit):
    """Run `systemctl <action> <unit>`; log on failure, return success flag."""
    proc = subprocess.run(
        ["systemctl", action, unit],
        capture_output=True,
        text=True,
    )
    ok = proc.returncode == 0
    if not ok:
        log(f"systemctl {action} {unit} failed (rc={proc.returncode}): {proc.stderr.strip()}")
    return ok
def _save_paused(paused):
    """Persist or clear the on-disk pause marker; filesystem errors are
    non-fatal (persistence is best-effort, and disabled when _PAUSE_FILE
    is empty)."""
    if not _PAUSE_FILE:
        return
    try:
        if paused:
            # touch the marker file
            with open(_PAUSE_FILE, "w"):
                pass
        else:
            os.remove(_PAUSE_FILE)
    except OSError:
        pass
def _load_paused():
    """True when a previous instance left the pause marker behind
    (False whenever persistence is disabled via empty _PAUSE_FILE)."""
    return bool(_PAUSE_FILE) and os.path.isfile(_PAUSE_FILE)
def main():
    """Monitoring loop: sample /proc/stat each POLL_INTERVAL, stop xmrig
    when non-nice CPU exceeds CPU_STOP_THRESHOLD, and restart it once the
    system stays at/below CPU_RESUME_THRESHOLD for GRACE_PERIOD seconds.
    Runs forever; pause ownership is persisted via _save_paused().
    """
    paused_by_us = _load_paused()
    idle_since = None
    started_at = None  # monotonic time when we last started xmrig
    prev_total = None
    prev_work = None
    if paused_by_us:
        log("Recovered pause state from previous instance")
    log(
        f"Starting: poll={POLL_INTERVAL}s grace={GRACE_PERIOD}s "
        f"stop={CPU_STOP_THRESHOLD}% resume={CPU_RESUME_THRESHOLD}% "
        f"cooldown={STARTUP_COOLDOWN}s"
    )
    while True:
        total, work = read_cpu_ticks()
        if prev_total is None:
            # First sample: no delta yet, just prime the counters.
            prev_total = total
            prev_work = work
            time.sleep(POLL_INTERVAL)
            continue
        dt = total - prev_total
        if dt <= 0:
            # Zero/negative tick delta — resample rather than divide by it.
            prev_total = total
            prev_work = work
            time.sleep(POLL_INTERVAL)
            continue
        # Share of total ticks spent on real (non-nice, non-idle) work.
        real_work_pct = ((work - prev_work) / dt) * 100
        prev_total = total
        prev_work = work
        # Don't act during startup cooldown — RandomX dataset init causes
        # a transient CPU spike that would immediately retrigger a stop.
        if started_at is not None:
            if time.monotonic() - started_at < STARTUP_COOLDOWN:
                time.sleep(POLL_INTERVAL)
                continue
            # Cooldown expired — verify xmrig survived startup. If it
            # crashed during init (hugepage failure, pool unreachable, etc.),
            # re-enter the pause/retry cycle rather than silently leaving
            # xmrig dead.
            if not is_active("xmrig.service"):
                log("xmrig died during startup cooldown — will retry")
                paused_by_us = True
                _save_paused(True)
            started_at = None
        above_stop = real_work_pct > CPU_STOP_THRESHOLD
        below_resume = real_work_pct <= CPU_RESUME_THRESHOLD
        if above_stop:
            idle_since = None
            if paused_by_us and is_active("xmrig.service"):
                # Something else restarted xmrig (deploy, manual start, etc.)
                # while we thought it was stopped. Reset ownership so we can
                # manage it again.
                log("xmrig was restarted externally while paused — reclaiming")
                paused_by_us = False
                _save_paused(False)
            if not paused_by_us:
                # Only claim ownership if xmrig is actually running.
                # If something else stopped it (e.g. UPS battery hook),
                # don't interfere — we'd wrongly restart it later.
                if is_active("xmrig.service"):
                    log(f"Real workload detected ({real_work_pct:.1f}% CPU) — stopping xmrig")
                    if systemctl("stop", "xmrig.service"):
                        paused_by_us = True
                        _save_paused(True)
        elif paused_by_us:
            if below_resume:
                if idle_since is None:
                    idle_since = time.monotonic()
                elif time.monotonic() - idle_since >= GRACE_PERIOD:
                    log(f"Workload ended ({real_work_pct:.1f}% CPU) past grace period — starting xmrig")
                    if systemctl("start", "xmrig.service"):
                        paused_by_us = False
                        _save_paused(False)
                        started_at = time.monotonic()
                    idle_since = None
            else:
                # Between thresholds — not idle enough to resume.
                idle_since = None
        time.sleep(POLL_INTERVAL)


if __name__ == "__main__":
    main()

59
services/monero/xmrig.nix Normal file
View File

@@ -0,0 +1,59 @@
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
let
  # Number of RandomX mining threads.
  threadCount = 12;
in
{
  services.xmrig = {
    enable = true;
    package = lib.optimizePackage pkgs.xmrig;
    settings = {
      autosave = true;
      cpu = {
        enabled = true;
        huge-pages = true;
        hw-aes = true;
        # One RandomX thread entry per CPU 0..threadCount-1.
        rx = lib.range 0 (threadCount - 1);
      };
      randomx = {
        # Back the RandomX dataset with the 1 GB pages reserved below.
        "1gb-pages" = true;
      };
      opencl = false;
      cuda = false;
      # Mine against the local p2pool stratum endpoint (no TLS on loopback).
      pools = [
        {
          url = "127.0.0.1:${builtins.toString service_configs.ports.private.p2pool_stratum.port}";
          tls = false;
        }
      ];
    };
  };
  # Lowest scheduling priority so mining only consumes otherwise-idle CPU
  # (see xmrig-auto-pause for why this alone is insufficient).
  systemd.services.xmrig.serviceConfig = {
    Nice = 19;
    CPUSchedulingPolicy = "idle";
    IOSchedulingClass = "idle";
  };
  # Stop mining on UPS battery to conserve power
  services.apcupsd.hooks = lib.mkIf config.services.apcupsd.enable {
    onbattery = "systemctl stop xmrig";
    offbattery = "systemctl start xmrig";
  };
  # Reserve 1GB huge pages for RandomX (dataset is ~2GB)
  boot.kernelParams = [
    "hugepagesz=1G"
    "hugepages=3"
  ];
}

View File

@@ -0,0 +1,6 @@
# ntfy push-notification server plus the alert hook that publishes to it.
{
  imports = [
    ./ntfy.nix
    ./ntfy-alerts.nix
  ];
}

View File

@@ -0,0 +1,15 @@
{
config,
lib,
service_configs,
...
}:
lib.mkIf config.services.ntfy-sh.enable {
services.ntfyAlerts = {
enable = true;
serverUrl = "https://${service_configs.ntfy.domain}";
topicFile = config.age.secrets.ntfy-alerts-topic.path;
tokenFile = config.age.secrets.ntfy-alerts-token.path;
};
}

34
services/ntfy/ntfy.nix Normal file
View File

@@ -0,0 +1,34 @@
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # DynamicUser state dir on the SSD pool, private to the service user.
    (lib.serviceMountWithZpool "ntfy-sh" service_configs.zpool_ssds [
      "/var/lib/private/ntfy-sh"
    ])
    (lib.serviceFilePerms "ntfy-sh" [
      "Z /var/lib/private/ntfy-sh 0700 ${config.services.ntfy-sh.user} ${config.services.ntfy-sh.group}"
    ])
    (lib.mkCaddyReverseProxy {
      domain = service_configs.ntfy.domain;
      port = service_configs.ports.private.ntfy.port;
    })
  ];
  services.ntfy-sh = {
    enable = true;
    settings = {
      base-url = "https://${service_configs.ntfy.domain}";
      # Loopback only; Caddy terminates TLS and proxies in.
      listen-http = "127.0.0.1:${builtins.toString service_configs.ports.private.ntfy.port}";
      behind-proxy = true;
      # Private instance: all topics require auth; accounts are created
      # manually (no self-signup).
      auth-default-access = "deny-all";
      enable-login = true;
      enable-signup = false;
    };
  };
}

62
services/postgresql.nix Normal file
View File

@@ -0,0 +1,62 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  # sanoid ExecStartPre hook: flush dirty buffers before snapshotting.
  # Needed because full_page_writes is disabled below — without a fresh
  # CHECKPOINT the snapshot could contain torn pages from Postgres's view.
  pgCheckpoint = pkgs.writeShellScript "pg-checkpoint" ''
    # Flush PostgreSQL dirty buffers to disk before ZFS snapshot so the
    # on-disk state is consistent and the snapshot is recoverable.
    # On failure: log a warning but exit 0 so sanoid still takes the
    # snapshot (an inconsistent snapshot beats no snapshot).
    if ! ${pkgs.systemd}/bin/systemctl is-active --quiet postgresql.service; then
      echo "postgresql is not running, skipping checkpoint" >&2
      exit 0
    fi
    if ${pkgs.coreutils}/bin/timeout 120 \
      ${pkgs.util-linux}/bin/runuser -u postgres -- \
      ${lib.getExe' config.services.postgresql.package "psql"} \
      -v ON_ERROR_STOP=1 -c "CHECKPOINT" 2>&1; then
      echo "postgresql checkpoint completed"
    else
      echo "WARNING: postgresql checkpoint failed, snapshot may be inconsistent" >&2
    fi
    # Always exit 0 sanoid must run regardless
    exit 0
  '';
in
{
  imports = [
    # Data directory on the SSD pool, private to postgres.
    (lib.serviceMountWithZpool "postgresql" service_configs.zpool_ssds [
      config.services.postgresql.dataDir
    ])
    (lib.serviceFilePerms "postgresql" [
      "Z ${config.services.postgresql.dataDir} 0700 postgres postgres"
    ])
  ];
  services.postgresql = {
    enable = true;
    package = pkgs.postgresql_16;
    dataDir = service_configs.postgres.dataDir;
    settings = {
      # ZFS provides checksumming and atomic writes, making PostgreSQL's
      # full_page_writes redundant. Disabling reduces write amplification
      # and SSD wear on the zpool.
      # Did this in conjunction with setting recordsize=8k
      # on the zvolume this is on
      full_page_writes = false;
    };
  };
  # Run a PostgreSQL CHECKPOINT before sanoid snapshots so the on-disk
  # state is consistent (required since full_page_writes = false).
  # '+' prefix runs the hook as root regardless of the unit's User=.
  systemd.services.sanoid.serviceConfig = {
    ExecStartPre = lib.mkAfter [ "+${pgCheckpoint}" ];
    TimeoutStartSec = lib.mkForce 300; # checkpoint can be slow with large txg_timeout
  };
}

199
services/qbittorrent.nix Normal file
View File

@@ -0,0 +1,199 @@
{
  pkgs,
  config,
  service_configs,
  lib,
  inputs,
  ...
}:
let
  # categories.json for qBittorrent: one entry per category mapping to its
  # save path, generated from service_configs.torrent.categories.
  categoriesFile = pkgs.writeText "categories.json" (
    builtins.toJSON (lib.mapAttrs (_: path: { save_path = path; }) service_configs.torrent.categories)
  );
in
{
  imports = [
    # Torrent payload lives on the HDD pool; the qBittorrent profile
    # (resume data, SQLite DB) lives on the SSD pool.
    # NOTE(review): serviceMountWithZpool / serviceFilePerms /
    # vpnNamespaceOpenPort / mkCaddyReverseProxy are project-local helpers.
    (lib.serviceMountWithZpool "qbittorrent" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    (lib.serviceMountWithZpool "qbittorrent" service_configs.zpool_ssds [
      "${config.services.qbittorrent.profileDir}/qBittorrent"
    ])
    (lib.vpnNamespaceOpenPort config.services.qbittorrent.webuiPort "qbittorrent")
    (lib.serviceFilePerms "qbittorrent" [
      # 0770: group (media) needs write to delete files during upgrades —
      # Radarr/Sonarr must unlink the old file before placing the new one.
      # Non-recursive (z not Z): UMask=0007 ensures new files get correct perms.
      # A recursive Z rule would walk millions of files on the HDD pool at every boot.
      "z ${config.services.qbittorrent.serverConfig.Preferences.Downloads.SavePath} 0770 ${config.services.qbittorrent.user} ${service_configs.media_group}"
      "z ${config.services.qbittorrent.serverConfig.Preferences.Downloads.TempPath} 0700 ${config.services.qbittorrent.user} ${config.services.qbittorrent.group}"
      # Profile dir is small enough that a recursive Z is affordable.
      "Z ${config.services.qbittorrent.profileDir} 0700 ${config.services.qbittorrent.user} ${config.services.qbittorrent.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "torrent";
      port = service_configs.ports.private.torrent.port;
      auth = true;
      vpn = true;
    })
  ];
  services.qbittorrent = {
    enable = true;
    webuiPort = service_configs.ports.private.torrent.port;
    profileDir = "/var/lib/qBittorrent";
    # Set the service group to 'media' so the systemd unit runs with media as
    # the primary GID. Linux assigns new file ownership from the process's GID
    # (set by systemd's Group= directive), not from /etc/passwd. Without this,
    # downloads land as qbittorrent:qbittorrent (0700), blocking Radarr/Sonarr.
    group = service_configs.media_group;
    serverConfig.LegalNotice.Accepted = true;
    serverConfig.Preferences = {
      WebUI = {
        AlternativeUIEnabled = true;
        RootFolder = "${pkgs.vuetorrent}/share/vuetorrent";
        # disable auth because we use caddy for auth
        AuthSubnetWhitelist = "0.0.0.0/0";
        AuthSubnetWhitelistEnabled = true;
      };
      Downloads = {
        inherit (service_configs.torrent) SavePath TempPath;
      };
    };
    serverConfig.BitTorrent = {
      Session = {
        # Per-torrent and global peer/slot limits (-1 = unlimited).
        MaxConnectionsPerTorrent = 100;
        MaxUploadsPerTorrent = 50;
        MaxConnections = -1;
        MaxUploads = -1;
        MaxActiveCheckingTorrents = 2;
        # queueing
        QueueingSystemEnabled = true;
        MaxActiveDownloads = 15;
        MaxActiveUploads = -1;
        MaxActiveTorrents = -1;
        IgnoreSlowTorrentsForQueueing = true;
        # 0 = unlimited global rate limits.
        GlobalUPSpeedLimit = 0;
        GlobalDLSpeedLimit = 0;
        # Alternate speed limits for when Jellyfin is streaming
        AlternativeGlobalUPSpeedLimit = 500; # 500 KB/s when throttled
        AlternativeGlobalDLSpeedLimit = 800; # 800 KB/s when throttled
        IncludeOverheadInLimits = true;
        GlobalMaxRatio = 7.0;
        # Append a public tracker list (from the trackerlist flake input) to
        # every torrent; "\\n" yields a literal backslash-n, the separator
        # qBittorrent uses for this setting in its config file.
        AddTrackersEnabled = true;
        AdditionalTrackers = lib.concatStringsSep "\\n" (
          lib.lists.filter (x: x != "") (
            lib.strings.splitString "\n" (builtins.readFile "${inputs.trackerlist}/trackers_all.txt")
          )
        );
        AnnounceToAllTrackers = true;
        # idk why it also has to be specified here too?
        inherit (config.services.qbittorrent.serverConfig.Preferences.Downloads) TempPath;
        TempPathEnabled = true;
        ConnectionSpeed = 200; # half-open connections/s; faster peer discovery
        SaveResumeDataInterval = 300; # save resume data every 5 min (default 60s)
        ResumeDataStorageType = "SQLite"; # SQLite is more efficient than legacy per-file .fastresume storage
        # Automatic Torrent Management: use category save paths for new torrents
        DisableAutoTMMByDefault = false;
        DisableAutoTMMTriggers.CategorySavePathChanged = false;
        DisableAutoTMMTriggers.DefaultSavePathChanged = false;
        ChokingAlgorithm = "RateBased";
        SeedChokingAlgorithm = "FastestUpload"; # unchoke peers we upload to fastest
        PieceExtentAffinity = true;
        SuggestMode = true;
        # POSIX-compliant disk I/O: uses pread/pwrite instead of mmap.
        # On ZFS, mmap forces data into BOTH ARC and Linux page cache (double-caching),
        # wasting RAM. pread/pwrite goes only through ARC, maximizing its effectiveness.
        DiskIOType = "Posix";
        FilePoolSize = 500; # keep more files open to reduce open/close overhead
        AioThreads = 24; # 6 cores * 4; better disk I/O parallelism
        SendBufferLowWatermark = 512; # 512 KiB -- trigger reads sooner to prevent upload stalls
        SendBufferWatermark = 3072; # 3 MiB -- matches high_performance_seed
        SendBufferWatermarkFactor = 150; # percent -- matches high_performance_seed
      };
      Network = {
        # traffic is routed through a vpn, we don't need
        # port forwarding
        PortForwardingEnabled = false;
      };
      Session.UseUPnP = false;
    };
  };
  systemd.services.qbittorrent.serviceConfig = {
    # Don't wait the full default stop timeout on shutdown.
    TimeoutStopSec = lib.mkForce 10;
    # Default UMask=0022 creates files as 0644 (group read-only). With 0007,
    # new files get 0660/0770 so the media group has read+write immediately
    # instead of relying on the tmpfiles Z rule to fix permissions at restart.
    UMask = lib.mkForce "0007";
  };
  # Pre-define qBittorrent categories with explicit save paths so every
  # torrent routes to its category directory instead of the SavePath root.
  # "L+": (re)create the symlink to the generated file on every run.
  # NOTE(review): tmpfiles appears to ignore mode for symlink entries, so
  # the "1400" here is likely inert — confirm against tmpfiles.d(5).
  systemd.tmpfiles.settings.qbittorrent-categories = {
    "${config.services.qbittorrent.profileDir}/qBittorrent/config/categories.json"."L+" = {
      argument = "${categoriesFile}";
      user = config.services.qbittorrent.user;
      group = config.services.qbittorrent.group;
      mode = "1400";
    };
  };
  # Ensure category directories exist with correct ownership before first use.
  systemd.tmpfiles.rules = lib.mapAttrsToList (
    _: path: "d ${path} 0770 ${config.services.qbittorrent.user} ${service_configs.media_group} -"
  ) service_configs.torrent.categories;
  # Periodically checkpoint qBittorrent's SQLite WAL (Write-Ahead Log).
  # qBittorrent holds a read transaction open for its entire lifetime,
  # preventing SQLite's auto-checkpoint from running. The WAL grows
  # unbounded (observed: 405 MB) and must be replayed on next startup,
  # causing 10+ minute "internal preparations" hangs.
  # A second sqlite3 connection can checkpoint concurrently and safely.
  # See: https://github.com/qbittorrent/qBittorrent/issues/20433
  systemd.services.qbittorrent-wal-checkpoint = {
    description = "Checkpoint qBittorrent SQLite WAL";
    after = [ "qbittorrent.service" ];
    # NOTE(review): Requires= means a timer firing while qbittorrent is
    # stopped will start it — if that's undesirable, Requisite= (fail
    # instead of start) may be the intended dependency; confirm.
    requires = [ "qbittorrent.service" ];
    serviceConfig = {
      Type = "oneshot";
      ExecStart = "${pkgs.sqlite}/bin/sqlite3 ${config.services.qbittorrent.profileDir}/qBittorrent/data/torrents.db 'PRAGMA wal_checkpoint(TRUNCATE);'";
      User = config.services.qbittorrent.user;
      Group = config.services.qbittorrent.group;
    };
  };
  systemd.timers.qbittorrent-wal-checkpoint = {
    description = "Periodically checkpoint qBittorrent SQLite WAL";
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnUnitActiveSec = "4h";
      OnBootSec = "30min";
      RandomizedDelaySec = "10min";
    };
  };
  # Supplementary membership so the daemon can traverse media-group paths.
  users.users.${config.services.qbittorrent.user}.extraGroups = [
    service_configs.media_group
  ];
}

68
services/soulseek.nix Normal file
View File

@@ -0,0 +1,68 @@
{
  pkgs,
  config,
  lib,
  service_configs,
  username,
  ...
}:
let
  slskdUser = config.services.slskd.user;
  webPort = service_configs.ports.private.soulseek_web.port;
  listenPort = service_configs.ports.public.soulseek_listen.port;
in
{
  imports = [
    (lib.serviceMountWithZpool "slskd" "" [
      service_configs.slskd.base
      service_configs.slskd.downloads
      service_configs.slskd.incomplete
    ])
    (lib.serviceFilePerms "slskd" [
      "Z ${service_configs.music_dir} 0750 ${username} music"
      "Z ${service_configs.slskd.base} 0750 ${slskdUser} ${config.services.slskd.group}"
      "Z ${service_configs.slskd.downloads} 0750 ${slskdUser} music"
      "Z ${service_configs.slskd.incomplete} 0750 ${slskdUser} music"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "soulseek";
      port = webPort;
    })
  ];

  # Shared "music" group so slskd, jellyfin and the admin user can all
  # read the library.
  users.groups.music = { };
  users.users.${slskdUser}.extraGroups = [ "music" ];
  users.users.${config.services.jellyfin.user}.extraGroups = [ "music" ];
  users.users.${username}.extraGroups = [ "music" ];

  services.slskd = {
    enable = true;
    # null so the module's built-in nginx reverse proxy stays disabled;
    # Caddy (above) fronts the web UI instead.
    domain = null;
    environmentFile = config.age.secrets.slskd_env.path;
    settings = {
      web.port = webPort;
      soulseek.listen_port = listenPort;
      shares.directories = [ service_configs.music_dir ];
      # Unlimited downloads; cap uploads to keep headroom on the link.
      global.download = {
        slots = -1;
        speed_limit = -1;
      };
      global.upload = {
        slots = 4;
        speed_limit = 2000;
      };
    };
  };

  # The peer-to-peer listen port must be reachable from the internet.
  networking.firewall.allowedTCPPorts = [ listenPort ];
}

38
services/ssh.nix Normal file
View File

@@ -0,0 +1,38 @@
{
  config,
  lib,
  pkgs,
  username,
  ...
}:
{
  # OpenSSH daemon: key-only authentication for the admin user and root
  # (root access is used by the config-deployment tooling).
  services.openssh = {
    enable = true;
    settings = {
      AllowUsers = [
        username
        "root"
      ];
      PasswordAuthentication = false;
      # NOTE(review): with PasswordAuthentication = false this is already
      # effectively key-only; "prohibit-password" would state that intent
      # explicitly — confirm nothing relies on "yes" before tightening.
      PermitRootLogin = "yes"; # for deploying configs
    };
  };
  systemd.tmpfiles.rules = [
    # Non-recursive 'z' on the directory itself. A recursive
    # 'Z /etc/ssh 755' rule would chmod every file beneath it — including
    # the private host keys — to 0755 before the narrower rule below
    # tightens them back to 0600, transiently exposing key material on
    # every tmpfiles run.
    "z /etc/ssh 755 root root"
    # Host key files (and their .pub siblings) accessible by root only.
    "z /etc/ssh/ssh_host_* 600 root root"
  ];
  users.users.${username}.openssh.authorizedKeys.keys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO4jL6gYOunUlUtPvGdML0cpbKSsPNqQ1jit4E7U1RyH" # laptop
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBJjT5QZ3zRDb+V6Em20EYpSEgPW5e/U+06uQGJdraxi" # desktop
  ];
  # used for deploying configs to server: root accepts the same keys as the
  # admin user, plus the CI deploy key.
  users.users.root.openssh.authorizedKeys.keys =
    config.users.users.${username}.openssh.authorizedKeys.keys
    ++ [
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC5ZYN6idL/w/mUIfPOH1i+Q/SQXuzAMQUEuWpipx1Pc ci-deploy@muffin"
    ];
}

57
services/syncthing.nix Normal file
View File

@@ -0,0 +1,57 @@
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
let
  st = service_configs.syncthing;
  guiPort = service_configs.ports.private.syncthing_gui.port;
  stUser = config.services.syncthing.user;
  stGroup = config.services.syncthing.group;
in
{
  imports = [
    (lib.serviceMountWithZpool "syncthing" service_configs.zpool_ssds [
      st.dataDir
      st.signalBackupDir
      st.grayjayBackupDir
    ])
    (lib.serviceFilePerms "syncthing" [
      "Z ${st.dataDir} 0750 ${stUser} ${stGroup}"
      "Z ${st.signalBackupDir} 0750 ${stUser} ${stGroup}"
      "Z ${st.grayjayBackupDir} 0750 ${stUser} ${stGroup}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "syncthing";
      port = guiPort;
      auth = true;
    })
  ];

  services.syncthing = {
    enable = true;
    dataDir = st.dataDir;
    # GUI bound to loopback only; exposed via the Caddy proxy above.
    guiAddress = "127.0.0.1:${toString guiPort}";
    # Devices and folders are managed from the GUI, not declaratively.
    overrideDevices = false;
    overrideFolders = false;
    settings.gui.insecureSkipHostcheck = true; # Allow access via reverse proxy
    settings.options = {
      urAccepted = 1; # enable usage reporting
      relaysEnabled = true;
    };
  };

  # Sync protocol (TCP+UDP) and local-discovery (UDP) ports.
  networking.firewall = {
    allowedTCPPorts = [ service_configs.ports.public.syncthing_protocol.port ];
    allowedUDPPorts = [
      service_configs.ports.public.syncthing_discovery.port
      service_configs.ports.public.syncthing_protocol.port
    ];
  };
}

27
services/trilium.nix Normal file
View File

@@ -0,0 +1,27 @@
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
let
  triliumPort = service_configs.ports.private.trilium.port;
in
{
  imports = [
    # State directory lives on the SSD zpool.
    (lib.serviceMountWithZpool "trilium-server" service_configs.zpool_ssds [
      (service_configs.services_dir + "/trilium")
    ])
    # Exposed at notes.<domain> behind forward-auth.
    (lib.mkCaddyReverseProxy {
      subdomain = "notes";
      auth = true;
      port = triliumPort;
    })
  ];

  services.trilium-server = {
    enable = true;
    host = "127.0.0.1"; # only reachable through the reverse proxy
    port = triliumPort;
    inherit (service_configs.trilium) dataDir;
  };
}

22
services/ups.nix Normal file
View File

@@ -0,0 +1,22 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{
  # APC UPS daemon: watches the UPS over USB and powers the machine off
  # before the battery is exhausted.
  services.apcupsd = {
    enable = true;
    # command to run when shutdown condition is met
    hooks.doshutdown = "systemctl poweroff";
    configText = ''
      UPSTYPE usb
      NISIP 127.0.0.1
      BATTERYLEVEL 5 # shutdown after reaching 5% battery
      MINUTES 5 # shutdown if estimated runtime on battery reaches 5 minutes
    '';
  };
}

52
services/wg.nix Normal file
View File

@@ -0,0 +1,52 @@
{
  pkgs,
  config,
  inputs,
  ...
}:
{
  imports = [ inputs.vpn-confinement.nixosModules.default ];

  # Confined network namespace whose traffic is tunnelled through Mullvad.
  vpnNamespaces.wg = {
    enable = true;
    wireguardConfigFile = config.age.secrets.wg0-conf.path;
    accessibleFrom = [
      # "192.168.0.0/24"
    ];
  };

  # BBR copes with a variable-latency VPN link far better than CUBIC: it
  # probes available bandwidth continuously instead of reacting to loss.
  boot.kernelModules = [ "tcp_bbr" ];
  boot.kernel.sysctl = {
    # BBR + fq qdisc for smooth throughput through the WireGuard tunnel.
    "net.ipv4.tcp_congestion_control" = "bbr";
    "net.core.default_qdisc" = "fq";
    # Keep the congestion window across idle periods — resetting it on
    # every burst was the main cause of the 0 -> 40 MB/s sawtooth.
    "net.ipv4.tcp_slow_start_after_idle" = 0;
    # Socket buffers sized for the VPN's bandwidth-delay product
    # (~22ms RTT * target throughput); the stock 2.5MB ceiling is too small.
    "net.core.rmem_max" = 16777216;
    "net.core.wmem_max" = 16777216;
    "net.ipv4.tcp_rmem" = "4096 87380 16777216";
    "net.ipv4.tcp_wmem" = "4096 65536 16777216";
    # Deeper device backlog for the many concurrent torrent connections.
    "net.core.netdev_max_backlog" = 5000;
    # Reap dead connections quickly under heavy torrent peer churn.
    "net.ipv4.tcp_fin_timeout" = 15; # default 60
    "net.ipv4.tcp_tw_reuse" = 1;
  };

  # Trust the bridge into the VPN namespace.
  networking.firewall.trustedInterfaces = [ "wg-br" ];
}