phase 2: promote services/, tests/, patches/, lib/, scripts/

This commit is contained in:
primary
2026-04-18 00:47:39 -04:00
parent 99e98e39b7
commit 999ed05d9f
86 changed files with 0 additions and 0 deletions

115
services/arr/arr-search.nix Normal file
View File

@@ -0,0 +1,115 @@
# NixOS module: periodic automated search for missing and cutoff-unmet media
# in Radarr and Sonarr via their v3 HTTP APIs, driven by a systemd timer.
{
  pkgs,
  lib,
  service_configs,
  ...
}:
let
  # config.xml files holding each *arr instance's API key (readable by root).
  radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
  sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
  # Both apps are reached on localhost in the host network namespace.
  radarrUrl = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
  sonarrUrl = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
  curl = "${pkgs.curl}/bin/curl";
  jq = "${pkgs.jq}/bin/jq";
  # Max items to search per cycle per category (missing + cutoff) per app
  maxPerCycle = 5;
  # Queries the "wanted" endpoints, collects record ids, and posts a search
  # command back to each app. Radarr searches per movie id; Sonarr per series.
  searchScript = pkgs.writeShellScript "arr-search" ''
    set -euo pipefail
    RADARR_KEY=$(${lib.extractArrApiKey radarrConfig})
    SONARR_KEY=$(${lib.extractArrApiKey sonarrConfig})
    search_radarr() {
      local endpoint="$1"
      local label="$2"
      local ids
      ids=$(${curl} -sf --max-time 30 \
        -H "X-Api-Key: $RADARR_KEY" \
        "${radarrUrl}/api/v3/wanted/$endpoint?page=1&pageSize=${builtins.toString maxPerCycle}&monitored=true&sortKey=title&sortDirection=ascending" \
        | ${jq} -r '.records[].id // empty')
      if [ -z "$ids" ]; then
        echo "radarr: no $label items"
        return
      fi
      local id_array
      id_array=$(echo "$ids" | ${jq} -Rs '[split("\n") | .[] | select(. != "") | tonumber]')
      echo "radarr: searching $label: $id_array"
      ${curl} -sf --max-time 60 \
        -H "X-Api-Key: $RADARR_KEY" \
        -H "Content-Type: application/json" \
        -X POST "${radarrUrl}/api/v3/command" \
        -d "{\"name\": \"MoviesSearch\", \"movieIds\": $id_array}" > /dev/null
    }
    search_sonarr() {
      local endpoint="$1"
      local label="$2"
      local series_ids
      series_ids=$(${curl} -sf --max-time 30 \
        -H "X-Api-Key: $SONARR_KEY" \
        "${sonarrUrl}/api/v3/wanted/$endpoint?page=1&pageSize=${builtins.toString maxPerCycle}&monitored=true&sortKey=title&sortDirection=ascending&includeSeries=true" \
        | ${jq} -r '[.records[].seriesId] | unique | .[] // empty')
      if [ -z "$series_ids" ]; then
        echo "sonarr: no $label items"
        return
      fi
      # search per series (sonarr searches by series, not episode)
      for sid in $series_ids; do
        echo "sonarr: searching $label series $sid"
        ${curl} -sf --max-time 60 \
          -H "X-Api-Key: $SONARR_KEY" \
          -H "Content-Type: application/json" \
          -X POST "${sonarrUrl}/api/v3/command" \
          -d "{\"name\": \"SeriesSearch\", \"seriesId\": $sid}" > /dev/null
      done
    }
    echo "=== arr-search $(date -Iseconds) ==="
    search_radarr "missing" "missing"
    search_radarr "cutoff" "cutoff-unmet"
    search_sonarr "missing" "missing"
    search_sonarr "cutoff" "cutoff-unmet"
    echo "=== done ==="
  '';
in
{
  systemd.services.arr-search = {
    description = "Search for missing and cutoff-unmet media in Radarr/Sonarr";
    # Ordering only; the timer (not the *arr units) activates this service.
    after = [
      "network-online.target"
      "radarr.service"
      "sonarr.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      Type = "oneshot";
      ExecStart = "+${searchScript}"; # + prefix: runs as root to read API keys from config.xml
      TimeoutSec = 300;
    };
  };
  systemd.timers.arr-search = {
    description = "Periodically search for missing and cutoff-unmet media";
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "*-*-* 03:00:00"; # daily at 3 AM
      Persistent = true; # run on boot if missed
      RandomizedDelaySec = "30m"; # spread load on indexers
    };
  };
}

34
services/arr/bazarr.nix Normal file
View File

@@ -0,0 +1,34 @@
# NixOS module: Bazarr subtitle manager, proxied by Caddy with auth.
# Fix: dropped the unused `pkgs` module argument (the trailing `...`
# keeps the module callable with the full argument set).
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Bazarr state lives on the SSD pool.
    (lib.serviceMountWithZpool "bazarr" service_configs.zpool_ssds [
      service_configs.bazarr.dataDir
    ])
    # Access to the torrent/download area on the HDD pool.
    (lib.serviceMountWithZpool "bazarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    # Recursively enforce ownership of the state dir on activation.
    (lib.serviceFilePerms "bazarr" [
      "Z ${service_configs.bazarr.dataDir} 0700 ${config.services.bazarr.user} ${config.services.bazarr.group}"
    ])
    # Expose as https://bazarr.<domain> behind forward auth.
    (lib.mkCaddyReverseProxy {
      subdomain = "bazarr";
      port = service_configs.ports.private.bazarr.port;
      auth = true;
    })
  ];
  services.bazarr = {
    enable = true;
    listenPort = service_configs.ports.private.bazarr.port;
  };
  # Membership in the shared media group so Bazarr can read/write subtitles
  # next to the media files.
  users.users.${config.services.bazarr.user}.extraGroups = [
    service_configs.media_group
  ];
}

153
services/arr/init.nix Normal file
View File

@@ -0,0 +1,153 @@
# NixOS module: declarative initialisation of the *arr stack via the
# custom arrInit / bazarrInit / jellyseerrInit options (defined elsewhere
# in this repo): Prowlarr<->Sonarr/Radarr app sync, download clients,
# naming schemes, root folders, and Bazarr/Jellyseerr connections.
{ config, service_configs, ... }:
{
  services.arrInit = {
    prowlarr = {
      enable = true;
      serviceName = "prowlarr";
      port = service_configs.ports.private.prowlarr.port;
      dataDir = service_configs.prowlarr.dataDir;
      # Prowlarr's HTTP API is v1 (Sonarr/Radarr use v3).
      apiVersion = "v1";
      networkNamespacePath = "/run/netns/wg";
      networkNamespaceService = "wg";
      # Guarantee critical config.xml elements before startup. Prowlarr has a
      # history of losing <Port> from config.xml, causing the service to run
      # without binding any socket. See arr-init's configXml for details.
      configXml = {
        Port = service_configs.ports.private.prowlarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      # Prowlarr runs in the wg netns; Sonarr/Radarr in the host netns.
      # From host netns, Prowlarr is reachable at the wg namespace address,
      # not at localhost (which resolves to the host's own netns).
      # Health checks can now run — the reverse-connect is reachable.
      healthChecks = true;
      # Applications Prowlarr pushes indexers to. Each entry carries both
      # directions: how Prowlarr reaches the app (baseUrl, via the wg bridge)
      # and how the app reaches Prowlarr back (prowlarrUrl, namespace addr).
      syncedApps = [
        {
          name = "Sonarr";
          implementation = "Sonarr";
          configContract = "SonarrSettings";
          prowlarrUrl = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.prowlarr.port}";
          baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.private.sonarr.port}";
          apiKeyFrom = "${service_configs.sonarr.dataDir}/config.xml";
          serviceName = "sonarr";
        }
        {
          name = "Radarr";
          implementation = "Radarr";
          configContract = "RadarrSettings";
          prowlarrUrl = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.prowlarr.port}";
          baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.private.radarr.port}";
          apiKeyFrom = "${service_configs.radarr.dataDir}/config.xml";
          serviceName = "radarr";
        }
      ];
    };
    sonarr = {
      enable = true;
      serviceName = "sonarr";
      port = service_configs.ports.private.sonarr.port;
      dataDir = service_configs.sonarr.dataDir;
      healthChecks = true;
      configXml = {
        Port = service_configs.ports.private.sonarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      rootFolders = [ service_configs.media.tvDir ];
      # File/folder naming templates applied to imported episodes.
      naming = {
        renameEpisodes = true;
        replaceIllegalCharacters = true;
        standardEpisodeFormat = "{Series Title} - S{season:00}E{episode:00} - {Episode Title} {Quality Full}";
        dailyEpisodeFormat = "{Series Title} - {Air-Date} - {Episode Title} {Quality Full}";
        animeEpisodeFormat = "{Series Title} - S{season:00}E{episode:00} - {Episode Title} {Quality Full}";
        seasonFolderFormat = "Season {season}";
        seriesFolderFormat = "{Series Title}";
      };
      # qBittorrent runs inside the wg VPN namespace, hence the namespace
      # address rather than localhost.
      downloadClients = [
        {
          name = "qBittorrent";
          implementation = "QBittorrent";
          configContract = "QBittorrentSettings";
          serviceName = "qbittorrent";
          fields = {
            host = config.vpnNamespaces.wg.namespaceAddress;
            port = service_configs.ports.private.torrent.port;
            useSsl = false;
            tvCategory = "tvshows";
          };
        }
      ];
    };
    radarr = {
      enable = true;
      serviceName = "radarr";
      port = service_configs.ports.private.radarr.port;
      dataDir = service_configs.radarr.dataDir;
      healthChecks = true;
      configXml = {
        Port = service_configs.ports.private.radarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      rootFolders = [ service_configs.media.moviesDir ];
      naming = {
        renameMovies = true;
        replaceIllegalCharacters = true;
        standardMovieFormat = "{Movie Title} ({Release Year}) {Quality Full}";
        movieFolderFormat = "{Movie Title} ({Release Year})";
      };
      downloadClients = [
        {
          name = "qBittorrent";
          implementation = "QBittorrent";
          configContract = "QBittorrentSettings";
          serviceName = "qbittorrent";
          fields = {
            host = config.vpnNamespaces.wg.namespaceAddress;
            port = service_configs.ports.private.torrent.port;
            useSsl = false;
            movieCategory = "movies";
          };
        }
      ];
    };
  };
  # Connect Bazarr to both *arr instances for subtitle lookups.
  services.bazarrInit = {
    enable = true;
    dataDir = "/var/lib/bazarr";
    port = service_configs.ports.private.bazarr.port;
    sonarr = {
      enable = true;
      dataDir = service_configs.sonarr.dataDir;
      port = service_configs.ports.private.sonarr.port;
      serviceName = "sonarr";
    };
    radarr = {
      enable = true;
      dataDir = service_configs.radarr.dataDir;
      port = service_configs.ports.private.radarr.port;
      serviceName = "radarr";
    };
  };
  # Connect Jellyseerr to the *arr instances; profile names must match the
  # quality profiles recyclarr creates (see recyclarr.nix).
  services.jellyseerrInit = {
    enable = true;
    configDir = service_configs.jellyseerr.configDir;
    radarr = {
      profileName = "Remux + WEB 2160p";
      dataDir = service_configs.radarr.dataDir;
      port = service_configs.ports.private.radarr.port;
      serviceName = "radarr";
    };
    sonarr = {
      profileName = "WEB-2160p";
      dataDir = service_configs.sonarr.dataDir;
      port = service_configs.ports.private.sonarr.port;
      serviceName = "sonarr";
    };
  };
}

View File

@@ -0,0 +1,43 @@
# NixOS module: Jellyseerr media-request frontend, proxied by Caddy
# (no proxy auth — Jellyseerr has its own login).
# Fix: dropped the unused `pkgs` and `config` module arguments (the
# trailing `...` keeps the module callable with the full argument set).
{
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Persist the config dir on the SSD pool.
    (lib.serviceMountWithZpool "jellyseerr" service_configs.zpool_ssds [
      service_configs.jellyseerr.configDir
    ])
    # Recursively enforce ownership of the config dir on activation.
    (lib.serviceFilePerms "jellyseerr" [
      "Z ${service_configs.jellyseerr.configDir} 0700 jellyseerr jellyseerr"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "jellyseerr";
      port = service_configs.ports.private.jellyseerr.port;
    })
  ];
  services.jellyseerr = {
    enable = true;
    port = service_configs.ports.private.jellyseerr.port;
    configDir = service_configs.jellyseerr.configDir;
  };
  # Replace the upstream DynamicUser with a static user so the persistent
  # config dir keeps stable ownership (see prowlarr.nix for the rationale
  # behind disabling DynamicUser with ZFS-backed storage).
  systemd.services.jellyseerr.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "jellyseerr";
    Group = "jellyseerr";
    ReadWritePaths = [ service_configs.jellyseerr.configDir ];
  };
  users.users.jellyseerr = {
    isSystemUser = true;
    group = "jellyseerr";
    home = service_configs.jellyseerr.configDir;
  };
  users.groups.jellyseerr = { };
}

60
services/arr/prowlarr.nix Normal file
View File

@@ -0,0 +1,60 @@
# NixOS module: Prowlarr indexer manager, running inside the "wg" VPN
# network namespace and proxied by Caddy.
# Fix: dropped the unused `config` module argument (the trailing `...`
# keeps the module callable with the full argument set).
{
  pkgs,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Persist the data dir on the SSD pool.
    (lib.serviceMountWithZpool "prowlarr" service_configs.zpool_ssds [
      service_configs.prowlarr.dataDir
    ])
    # Make Prowlarr's port reachable from outside the wg namespace.
    (lib.vpnNamespaceOpenPort service_configs.ports.private.prowlarr.port "prowlarr")
    (lib.serviceFilePerms "prowlarr" [
      "Z ${service_configs.prowlarr.dataDir} 0700 prowlarr prowlarr"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "prowlarr";
      port = service_configs.ports.private.prowlarr.port;
      auth = true;
      vpn = true;
    })
  ];
  services.prowlarr = {
    enable = true;
    dataDir = service_configs.prowlarr.dataDir;
    settings.server.port = service_configs.ports.private.prowlarr.port;
  };
  # The upstream prowlarr module uses DynamicUser=true which is incompatible
  # with ZFS-backed persistent storage — the dynamic user can't access files
  # on the ZFS mount. Override with a static user to match sonarr/radarr.
  users.users.prowlarr = {
    isSystemUser = true;
    group = "prowlarr";
    home = service_configs.prowlarr.dataDir;
  };
  users.groups.prowlarr = { };
  # The upstream prowlarr module hardcodes root:root in tmpfiles for custom dataDirs
  # (systemd.tmpfiles.settings."10-prowlarr"), which gets applied by
  # systemd-tmpfiles-setup.service on every boot/deploy, resetting the directory
  # ownership and making Prowlarr unable to access its SQLite databases.
  # Override to use the correct user as we disable DynamicUser
  systemd.tmpfiles.settings."10-prowlarr".${service_configs.prowlarr.dataDir}.d = lib.mkForce {
    user = "prowlarr";
    group = "prowlarr";
    mode = "0700";
  };
  systemd.services.prowlarr.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "prowlarr";
    Group = "prowlarr";
    # No StateDirectory: data lives on the dataset mounted above.
    StateDirectory = lib.mkForce "";
    ExecStart = lib.mkForce "${lib.getExe pkgs.prowlarr} -nobrowser -data=${service_configs.prowlarr.dataDir}";
  };
}

36
services/arr/radarr.nix Normal file
View File

@@ -0,0 +1,36 @@
# NixOS module: Radarr movie manager, proxied by Caddy with auth.
# Fix: dropped the unused `pkgs` module argument (the trailing `...`
# keeps the module callable with the full argument set).
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Radarr state on the SSD pool.
    (lib.serviceMountWithZpool "radarr" service_configs.zpool_ssds [
      service_configs.radarr.dataDir
    ])
    # Access to the torrent/download area on the HDD pool.
    (lib.serviceMountWithZpool "radarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    # Recursively enforce ownership of the state dir on activation.
    (lib.serviceFilePerms "radarr" [
      "Z ${service_configs.radarr.dataDir} 0700 ${config.services.radarr.user} ${config.services.radarr.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "radarr";
      port = service_configs.ports.private.radarr.port;
      auth = true;
    })
  ];
  services.radarr = {
    enable = true;
    dataDir = service_configs.radarr.dataDir;
    settings.server.port = service_configs.ports.private.radarr.port;
    # Updates come from Nix, not Radarr's built-in updater.
    settings.update.mechanism = "external";
  };
  # Membership in the shared media group for access to the library tree.
  users.users.${config.services.radarr.user}.extraGroups = [
    service_configs.media_group
  ];
}

224
services/arr/recyclarr.nix Normal file
View File

@@ -0,0 +1,224 @@
# NixOS module: Recyclarr syncs TRaSH-Guides quality profiles and custom
# formats into Radarr and Sonarr on a schedule.
# Fix: injectApiKeys now runs with `set -euo pipefail` (matching arr-search
# in this repo) so a failed API-key extraction or jq error aborts the script
# instead of `mv`-ing a broken/empty config.json into place.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
  sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
  configPath = "/var/lib/recyclarr/config.json";
  # Runs as root (via + prefix) after the NixOS module writes config.json.
  # Extracts API keys from radarr/sonarr config.xml and injects them via jq.
  injectApiKeys = pkgs.writeShellScript "recyclarr-inject-api-keys" ''
    set -euo pipefail
    RADARR_KEY=$(${lib.extractArrApiKey radarrConfig})
    SONARR_KEY=$(${lib.extractArrApiKey sonarrConfig})
    ${pkgs.jq}/bin/jq \
      --arg rk "$RADARR_KEY" \
      --arg sk "$SONARR_KEY" \
      '.radarr.movies.api_key = $rk | .sonarr.series.api_key = $sk' \
      ${configPath} > ${configPath}.tmp
    mv ${configPath}.tmp ${configPath}
    chown recyclarr:recyclarr ${configPath}
  '';
in
{
  imports = [
    (lib.serviceMountWithZpool "recyclarr" service_configs.zpool_ssds [
      service_configs.recyclarr.dataDir
    ])
  ];
  systemd.tmpfiles.rules = [
    "d ${service_configs.recyclarr.dataDir} 0755 recyclarr recyclarr -"
  ];
  services.recyclarr = {
    enable = true;
    command = "sync";
    schedule = "daily";
    user = "recyclarr";
    group = "recyclarr";
    configuration = {
      radarr.movies = {
        base_url = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
        # Recyclarr is the sole authority for custom formats and scores.
        # Overwrite any manually-created CFs and delete stale ones.
        replace_existing_custom_formats = true;
        delete_old_custom_formats = true;
        include = [
          { template = "radarr-quality-definition-movie"; }
          { template = "radarr-quality-profile-remux-web-2160p"; }
          { template = "radarr-custom-formats-remux-web-2160p"; }
        ];
        # Group WEB 2160p with 1080p in the same quality tier so custom
        # format scores -- not quality ranking -- decide the winner.
        # Native 4K with HDR/DV from good release groups scores high and
        # wins; AI upscales get -10000 from the Upscaled CF and are
        # blocked by min_format_score. Untagged upscales from unknown
        # groups (score ~0) lose to well-scored 1080p (Tier 01 = +1750).
        quality_profiles = [
          {
            name = "Remux + WEB 2160p";
            min_format_score = 0;
            reset_unmatched_scores.enabled = true;
            upgrade = {
              allowed = true;
              until_quality = "Remux-2160p";
              until_score = 10000;
            };
            qualities = [
              { name = "Remux-2160p"; }
              {
                name = "WEB/Bluray";
                qualities = [
                  "WEBDL-2160p"
                  "WEBRip-2160p"
                  "Remux-1080p"
                  "Bluray-1080p"
                  "WEBDL-1080p"
                  "WEBRip-1080p"
                ];
              }
              { name = "HDTV-1080p"; }
              { name = "Bluray-720p"; }
              {
                name = "WEB 720p";
                qualities = [
                  "WEBDL-720p"
                  "WEBRip-720p"
                ];
              }
              { name = "HDTV-720p"; }
            ];
          }
        ];
        custom_formats = [
          # DV (w/o HDR fallback) - block releases with DV that lack HDR10 fallback
          {
            trash_ids = [ "923b6abef9b17f937fab56cfcf89e1f1" ];
            assign_scores_to = [
              { name = "Remux + WEB 2160p"; }
            ];
          }
          # Upscaled - block AI upscales and other upscaled-to-2160p releases
          {
            trash_ids = [ "bfd8eb01832d646a0a89c4deb46f8564" ];
            assign_scores_to = [
              {
                name = "Remux + WEB 2160p";
                score = -10000;
              }
            ];
          }
        ];
      };
      sonarr.series = {
        base_url = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
        # Recyclarr is the sole authority for custom formats and scores.
        # Overwrite any manually-created CFs and delete stale ones.
        replace_existing_custom_formats = true;
        delete_old_custom_formats = true;
        include = [
          { template = "sonarr-quality-definition-series"; }
          { template = "sonarr-v4-quality-profile-web-2160p"; }
          { template = "sonarr-v4-custom-formats-web-2160p"; }
        ];
        # Group WEB 2160p with 1080p in the same quality tier so custom
        # format scores -- not quality ranking -- decide the winner.
        # Native 4K with HDR/DV from good release groups scores high and
        # wins; AI upscales get -10000 from the Upscaled CF and are
        # blocked by min_format_score. Untagged upscales from unknown
        # groups (score ~0) lose to well-scored 1080p (Tier 01 = +1750).
        quality_profiles = [
          {
            name = "WEB-2160p";
            min_format_score = 0;
            reset_unmatched_scores.enabled = true;
            upgrade = {
              allowed = true;
              until_quality = "WEB/Bluray";
              until_score = 10000;
            };
            qualities = [
              {
                name = "WEB/Bluray";
                qualities = [
                  "WEBDL-2160p"
                  "WEBRip-2160p"
                  "Bluray-1080p Remux"
                  "Bluray-1080p"
                  "WEBDL-1080p"
                  "WEBRip-1080p"
                ];
              }
              { name = "HDTV-1080p"; }
              { name = "Bluray-720p"; }
              {
                name = "WEB 720p";
                qualities = [
                  "WEBDL-720p"
                  "WEBRip-720p"
                ];
              }
              { name = "HDTV-720p"; }
            ];
          }
        ];
        custom_formats = [
          # DV (w/o HDR fallback) - block releases with DV that lack HDR10 fallback
          {
            trash_ids = [ "9b27ab6498ec0f31a3353992e19434ca" ];
            assign_scores_to = [
              { name = "WEB-2160p"; }
            ];
          }
          # Upscaled - block AI upscales and other upscaled-to-2160p releases
          {
            trash_ids = [ "23297a736ca77c0fc8e70f8edd7ee56c" ];
            assign_scores_to = [
              {
                name = "WEB-2160p";
                score = -10000;
              }
            ];
          }
        ];
      };
    };
  };
  # Trigger immediate sync on deploy when recyclarr config changes.
  # restartTriggers on the oneshot service are unreliable (systemd may
  # no-op a restart of an inactive oneshot). Instead, embed a config
  # hash in the timer unit -- NixOS restarts changed timers reliably,
  # and OnActiveSec fires the sync within seconds.
  systemd.timers.recyclarr = {
    timerConfig.OnActiveSec = "5s";
    unitConfig.X-ConfigHash = builtins.hashString "sha256" (
      builtins.toJSON config.services.recyclarr.configuration
    );
  };
  systemd.services.recyclarr = {
    after = [
      "network-online.target"
      "radarr.service"
      "sonarr.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig.ExecStartPre = [ "+${injectApiKeys}" ];
  };
}

42
services/arr/sonarr.nix Normal file
View File

@@ -0,0 +1,42 @@
# NixOS module: Sonarr TV-series manager, proxied by Caddy with auth.
# Fix: dropped the unused `pkgs` module argument (the trailing `...`
# keeps the module callable with the full argument set).
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Sonarr state on the SSD pool.
    (lib.serviceMountWithZpool "sonarr" service_configs.zpool_ssds [
      service_configs.sonarr.dataDir
    ])
    # Access to the torrent/download area on the HDD pool.
    (lib.serviceMountWithZpool "sonarr" service_configs.zpool_hdds [
      service_configs.torrents_path
    ])
    # Recursively enforce ownership of the state dir on activation.
    (lib.serviceFilePerms "sonarr" [
      "Z ${service_configs.sonarr.dataDir} 0700 ${config.services.sonarr.user} ${config.services.sonarr.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "sonarr";
      port = service_configs.ports.private.sonarr.port;
      auth = true;
    })
  ];
  # Shared media directories: setgid (2775) + group-owned so every service
  # in the media group can cooperate on the same tree.
  systemd.tmpfiles.rules = [
    "d /torrents/media 2775 root ${service_configs.media_group} -"
    "d ${service_configs.media.tvDir} 2775 root ${service_configs.media_group} -"
    "d ${service_configs.media.moviesDir} 2775 root ${service_configs.media_group} -"
  ];
  services.sonarr = {
    enable = true;
    dataDir = service_configs.sonarr.dataDir;
    settings.server.port = service_configs.ports.private.sonarr.port;
    # Updates come from Nix, not Sonarr's built-in updater.
    settings.update.mechanism = "external";
  };
  # Membership in the shared media group for access to the library tree.
  users.users.${config.services.sonarr.user}.extraGroups = [
    service_configs.media_group
  ];
}

View File

@@ -0,0 +1,42 @@
# NixOS module: on-demand audit of qBittorrent against Radarr/Sonarr state.
# The logic lives in ./torrent-audit.py; this unit builds the Python
# environment and passes connection details via environment variables.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
{
  systemd.services.torrent-audit = {
    description = "Audit qBittorrent for unmanaged and abandoned upgrade torrents";
    after = [
      "network-online.target"
      "sonarr.service"
      "radarr.service"
      "qbittorrent.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      Type = "oneshot";
      # + prefix: run as root so the script can read the *arr config.xml
      # files (0700 data dirs) for API keys.
      ExecStart = "+${
        pkgs.python3.withPackages (
          ps: with ps; [
            pyarr
            qbittorrent-api
          ]
        )
      }/bin/python ${./torrent-audit.py}";
      TimeoutSec = 300;
    };
    environment = {
      # qBittorrent lives in the wg VPN namespace, so it is reached via the
      # namespace address rather than localhost.
      QBITTORRENT_URL = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.torrent.port}";
      RADARR_URL = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
      RADARR_CONFIG = "${service_configs.radarr.dataDir}/config.xml";
      SONARR_URL = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
      SONARR_CONFIG = "${service_configs.sonarr.dataDir}/config.xml";
      # Audit every configured torrent category.
      CATEGORIES = lib.concatStringsSep "," (builtins.attrNames service_configs.torrent.categories);
      # Truthy: the script applies audit:* tags in qBittorrent.
      TAG_TORRENTS = "true";
    };
  };
}

View File

@@ -0,0 +1,382 @@
#!/usr/bin/env python3
"""
Audit qBittorrent torrents against Radarr/Sonarr.
Reports two categories:
UNMANAGED -- torrents in qBittorrent that no *arr service has ever touched.
These were added manually or by some other tool.
ABANDONED -- torrents that *arr grabbed but later replaced with a better
version. The old torrent is still seeding while the library
points to the new one.
Abandoned detection uses API cross-referencing (not filesystem hardlinks) and
verifies against the *arr's current file state:
1. HISTORY -- group imports by content unit (movieId / episodeId); the
most recent import is the keeper, older ones are candidates.
2. CURRENT -- verify against the *arr's active file mapping.
"""
import logging
import os
import sys
from collections import defaultdict
from xml.etree import ElementTree
import qbittorrentapi
from pyarr import RadarrAPI, SonarrAPI
# Log to stderr so the human-readable report on stdout stays clean.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    stream=sys.stderr,
)
log = logging.getLogger(__name__)
def get_api_key(config_path: str) -> str:
    """Read the <ApiKey> value from an *arr config.xml.

    Raises:
        ValueError: if the file contains no non-empty <ApiKey> element
            (instead of the opaque AttributeError the bare ``.text``
            access used to raise).
    """
    tree = ElementTree.parse(config_path)
    node = tree.find(".//ApiKey")
    if node is None or not node.text:
        raise ValueError(f"no ApiKey element found in {config_path}")
    return node.text
def paginate(arr_client, endpoint: str, page_size: int = 1000):
    """Yield every record from a paged *arr endpoint ("queue" or "history").

    Resolves the client's ``get_<endpoint>`` method and walks pages until
    ``page * page_size`` covers ``totalRecords``. Fix: an endpoint that
    reports zero records no longer issues a pointless second request —
    iteration stops as soon as a page comes back empty.
    """
    fetch = getattr(arr_client, f"get_{endpoint}")
    page = 1
    while True:
        data = fetch(page=page, page_size=page_size)
        records = data["records"]
        if not records and page > 1:
            # Defensive: totalRecords claimed more than the server returned.
            return
        yield from records
        if page * page_size >= data["totalRecords"]:
            return
        page += 1
def get_qbit_torrents(qbit_client, category: str) -> dict[str, dict]:
    """Fetch one qBittorrent category and index it by upper-cased info-hash."""
    by_hash: dict[str, dict] = {}
    for torrent in qbit_client.torrents_info(category=category):
        by_hash[torrent["hash"].upper()] = torrent
    return by_hash
def gib(size_bytes: int) -> str:
    """Format a byte count as GiB with one decimal place (no unit suffix)."""
    gibibytes = size_bytes / (1 << 30)
    return "%.1f" % gibibytes
# ---------------------------------------------------------------------------
# Collect all known hashes from *arr history + queue
# ---------------------------------------------------------------------------
def collect_all_known_hashes(arr_client, page_size: int = 1000) -> set[str]:
    """Every download hash (upper-cased) present in the *arr's queue or history."""
    seen: set[str] = set()
    for source in ("queue", "history"):
        for record in paginate(arr_client, source, page_size):
            download_id = (record.get("downloadId") or "").upper()
            if download_id:
                seen.add(download_id)
    return seen
# ---------------------------------------------------------------------------
# Unmanaged: torrents with hashes not in any *arr history/queue
# ---------------------------------------------------------------------------
def find_unmanaged(qbit_torrents: dict, known_hashes: set) -> list[dict]:
    """Torrents whose hash no *arr has ever touched, ordered oldest-added first."""
    strays = [
        torrent
        for uhash, torrent in qbit_torrents.items()
        if uhash not in known_hashes
    ]
    strays.sort(key=lambda torrent: torrent["added_on"])
    return strays
# ---------------------------------------------------------------------------
# Abandoned movies: group imports by movieId, older = abandoned
# ---------------------------------------------------------------------------
def find_movie_abandoned(radarr, qbit_movies):
    """Find torrents Radarr imported but later replaced with a better grab.

    For each movieId, the most recent "downloadFolderImported" history event
    is the keeper; any older imports of the same movie are abandoned
    candidates. A candidate is only reported if its torrent still exists in
    qBittorrent. Each result is marked SAFE, or REVIEW when the movie is
    gone/fileless in Radarr or the old torrent is >5% larger than the
    current file (suspicious — might still be the better copy).
    """
    log.info("Analysing Radarr import history ...")
    imports_by_movie = defaultdict(list)
    for rec in paginate(radarr, "history"):
        # Only successful imports matter; grabs/failures are irrelevant here.
        if rec.get("eventType") != "downloadFolderImported":
            continue
        did = (rec.get("downloadId") or "").upper()
        if not did:
            continue
        mid = rec.get("movieId")
        if not mid:
            continue
        imports_by_movie[mid].append(
            {"downloadId": did, "date": rec["date"]}
        )
    # Identify keeper (latest) and abandoned (older) hashes per movie.
    abandoned_hashes: set[str] = set()
    keeper_hashes: set[str] = set()
    hash_to_movie: dict[str, int] = {}
    for mid, events in imports_by_movie.items():
        # Dates are compared as strings — assumes *arr emits sortable
        # ISO-8601 timestamps; TODO confirm against the API payload.
        ordered = sorted(events, key=lambda e: e["date"])
        keeper_hashes.add(ordered[-1]["downloadId"])
        for e in ordered[:-1]:
            abandoned_hashes.add(e["downloadId"])
            hash_to_movie[e["downloadId"]] = mid
    # A hash that is a keeper for *any* movie must not be deleted.
    abandoned_hashes -= keeper_hashes
    log.info("Fetching Radarr current movie state ...")
    radarr_movies = {m["id"]: m for m in radarr.get_movie()}
    results = []
    for ahash in abandoned_hashes:
        # Skip hashes whose torrent is no longer present in qBittorrent.
        torrent = qbit_movies.get(ahash)
        if torrent is None:
            continue
        mid = hash_to_movie.get(ahash)
        movie = radarr_movies.get(mid) if mid else None
        mf = (movie or {}).get("movieFile") or {}
        current_quality = (mf.get("quality") or {}).get("quality", {}).get("name", "?")
        current_size = mf.get("size", 0)
        status = "SAFE"
        notes = []
        if not movie or not movie.get("hasFile"):
            notes.append("movie removed or has no file in Radarr")
            status = "REVIEW"
        elif torrent["size"] > current_size * 1.05:
            # 5% tolerance absorbs torrent-payload overhead vs. file size.
            notes.append(
                f"abandoned is larger than current "
                f"({gib(torrent['size'])} > {gib(current_size)} GiB)"
            )
            status = "REVIEW"
        results.append(
            {
                "name": torrent["name"],
                "size": torrent["size"],
                "state": torrent["state"],
                "hash": torrent["hash"],
                "added_on": torrent["added_on"],
                "status": status,
                "notes": notes,
                "current_quality": current_quality,
            }
        )
    return sorted(results, key=lambda r: r["added_on"])
# ---------------------------------------------------------------------------
# Abandoned TV: group imports by episodeId, a hash is abandoned only when
# it is NOT the latest import for ANY episode it covers.
# ---------------------------------------------------------------------------
def find_tv_abandoned(sonarr, qbit_tvshows):
    """Find torrents Sonarr imported but later fully superseded.

    TV grabs often cover many episodes (season packs), so a hash is only
    abandoned when it is NOT the latest import for ANY episode it covers.
    A candidate is only reported if its torrent still exists in qBittorrent.
    Results are SAFE, or REVIEW when the owning series has since been
    removed from Sonarr.
    """
    log.info("Analysing Sonarr import history ...")
    episode_imports = defaultdict(list)
    all_download_ids: set[str] = set()
    hash_to_series: dict[str, int] = {}
    for rec in paginate(sonarr, "history"):
        # Only successful imports matter; grabs/failures are irrelevant here.
        if rec.get("eventType") != "downloadFolderImported":
            continue
        did = (rec.get("downloadId") or "").upper()
        eid = rec.get("episodeId")
        if not did or not eid:
            continue
        episode_imports[eid].append({"downloadId": did, "date": rec["date"]})
        all_download_ids.add(did)
        sid = rec.get("seriesId")
        if sid:
            hash_to_series[did] = sid
    # A hash is "active" if it is the latest import for *any* episode.
    active_hashes: set[str] = set()
    for events in episode_imports.values():
        # Dates compared as strings — assumes sortable ISO-8601 timestamps;
        # TODO confirm against the API payload.
        latest = max(events, key=lambda e: e["date"])
        active_hashes.add(latest["downloadId"])
    abandoned_hashes = all_download_ids - active_hashes
    log.info("Fetching Sonarr current series state ...")
    current_series = {s["id"] for s in sonarr.get_series()}
    results = []
    for ahash in abandoned_hashes:
        # Skip hashes whose torrent is no longer present in qBittorrent.
        torrent = qbit_tvshows.get(ahash)
        if torrent is None:
            continue
        status = "SAFE"
        notes = []
        sid = hash_to_series.get(ahash)
        if sid and sid not in current_series:
            notes.append("series removed from Sonarr")
            status = "REVIEW"
        results.append(
            {
                "name": torrent["name"],
                "size": torrent["size"],
                "state": torrent["state"],
                "hash": torrent["hash"],
                "added_on": torrent["added_on"],
                "status": status,
                "notes": notes,
            }
        )
    return sorted(results, key=lambda r: r["added_on"])
# ---------------------------------------------------------------------------
# Report
# ---------------------------------------------------------------------------
def print_section(torrents, show_status=False):
    """Pretty-print one audit section to stdout.

    With show_status, each entry carries its [SAFE/REVIEW] marker and the
    footer includes a safe/review tally; otherwise only a total is printed.
    """
    if not torrents:
        print(" (none)\n")
        return
    total_size = sum(entry["size"] for entry in torrents)
    for entry in torrents:
        if show_status:
            prefix = f"[{entry['status']:6s}] "
        else:
            prefix = " "
        pad = " " * len(prefix)
        print(f" {prefix}{entry['name']}")
        print(f" {pad}{gib(entry['size'])} GiB | {entry['state']}")
        for note in entry.get("notes", []):
            print(f" {pad}** {note}")
        print()
    if show_status:
        safe = [entry for entry in torrents if entry["status"] == "SAFE"]
        review = [entry for entry in torrents if entry["status"] == "REVIEW"]
        print(
            f" total={len(torrents)} ({gib(total_size)} GiB) | "
            f"safe={len(safe)} | review={len(review)}"
        )
    else:
        print(f" total={len(torrents)} ({gib(total_size)} GiB)")
    print()
# The full set of tags this audit manages; tags outside this set are never touched.
AUDIT_TAGS = {"audit:unmanaged", "audit:abandoned-safe", "audit:abandoned-review"}


def tag_torrents(qbit_client, qbit_torrents, all_known, all_abandoned):
    """Reconcile audit:* tags in qBittorrent with this run's findings.

    Each torrent should carry at most one audit tag: abandoned-safe or
    abandoned-review when it appears in all_abandoned, unmanaged when its
    hash is unknown to every *arr, and none otherwise. Stale audit tags
    are removed; non-audit tags are left alone.

    Fixes: removed the dead ``current_tags`` local, and the redundant
    re-scan of every category to find a torrent dict that the iteration
    already has in hand.
    """
    log.info("Tagging torrents ...")
    abandoned_by_hash = {t["hash"].upper(): t for t in all_abandoned}
    for torrents in qbit_torrents.values():
        for uhash, torrent_info in torrents.items():
            existing_tags = {
                t.strip() for t in torrent_info.get("tags", "").split(",") if t.strip()
            }
            existing_audit_tags = existing_tags & AUDIT_TAGS
            # Decide the single audit tag this torrent should carry, if any.
            if uhash in abandoned_by_hash:
                status = abandoned_by_hash[uhash]["status"]
                desired = "audit:abandoned-safe" if status == "SAFE" else "audit:abandoned-review"
            elif uhash not in all_known:
                desired = "audit:unmanaged"
            else:
                desired = None
            desired_set = {desired} if desired else set()
            # qBittorrent's API wants the original (lowercase) hash.
            low_hash = torrent_info["hash"]
            for tag in existing_audit_tags - desired_set:
                qbit_client.torrents_remove_tags(tags=tag, torrent_hashes=low_hash)
            for tag in desired_set - existing_audit_tags:
                qbit_client.torrents_add_tags(tags=tag, torrent_hashes=low_hash)
    log.info("Tagging complete")
def main():
    """Entry point: connect to qBittorrent/Radarr/Sonarr, audit, report, tag.

    Configuration comes from environment variables (set by the systemd
    unit): QBITTORRENT_URL, RADARR_URL, RADARR_CONFIG, SONARR_URL,
    SONARR_CONFIG, optional CATEGORIES (comma-separated), and TAG_TORRENTS
    (truthy enables tag reconciliation in qBittorrent).
    """
    qbit_url = os.environ["QBITTORRENT_URL"]
    radarr_url = os.environ["RADARR_URL"]
    radarr_config = os.environ["RADARR_CONFIG"]
    sonarr_url = os.environ["SONARR_URL"]
    sonarr_config = os.environ["SONARR_CONFIG"]
    categories = os.environ.get("CATEGORIES", "tvshows,movies,anime").split(",")
    # API keys are read straight from the *arr config.xml files.
    radarr_key = get_api_key(radarr_config)
    sonarr_key = get_api_key(sonarr_config)
    radarr = RadarrAPI(radarr_url, radarr_key)
    sonarr = SonarrAPI(sonarr_url, sonarr_key)
    # NOTE(review): no credentials passed — assumes qBittorrent allows
    # auth-less access from this address; confirm its WebUI settings.
    qbit = qbittorrentapi.Client(host=qbit_url)
    log.info("Getting qBittorrent state ...")
    qbit_torrents = {cat: get_qbit_torrents(qbit, cat) for cat in categories}
    for cat, torrents in qbit_torrents.items():
        log.info(" %s: %d torrents", cat, len(torrents))
    log.info("Collecting known hashes from Sonarr ...")
    sonarr_hashes = collect_all_known_hashes(sonarr)
    log.info(" %d unique hashes", len(sonarr_hashes))
    log.info("Collecting known hashes from Radarr ...")
    radarr_hashes = collect_all_known_hashes(radarr)
    log.info(" %d unique hashes", len(radarr_hashes))
    all_known = sonarr_hashes | radarr_hashes
    # -- Unmanaged --
    print("\n========== UNMANAGED TORRENTS ==========\n")
    for cat in categories:
        unmanaged = find_unmanaged(qbit_torrents[cat], all_known)
        print(f"--- {cat} ({len(unmanaged)} unmanaged / {len(qbit_torrents[cat])} total) ---\n")
        print_section(unmanaged)
    # -- Abandoned --
    print("========== ABANDONED UPGRADE LEFTOVERS ==========\n")
    movie_abandoned = find_movie_abandoned(
        radarr, qbit_torrents.get("movies", {})
    )
    print(f"--- movies ({len(movie_abandoned)} abandoned) ---\n")
    print_section(movie_abandoned, show_status=True)
    tv_abandoned = find_tv_abandoned(
        sonarr, qbit_torrents.get("tvshows", {})
    )
    print(f"--- tvshows ({len(tv_abandoned)} abandoned) ---\n")
    print_section(tv_abandoned, show_status=True)
    # -- Summary --
    all_abandoned = movie_abandoned + tv_abandoned
    safe = [t for t in all_abandoned if t["status"] == "SAFE"]
    print("=" * 50)
    print(
        f"ABANDONED: {len(all_abandoned)} total ({len(safe)} safe to delete)"
    )
    print(f"SAFE TO RECLAIM: {gib(sum(t['size'] for t in safe))} GiB")
    # -- Tagging --
    if os.environ.get("TAG_TORRENTS", "").lower() in ("1", "true", "yes"):
        tag_torrents(qbit, qbit_torrents, all_known, all_abandoned)


if __name__ == "__main__":
    main()