Add 'legacy/server-config/' from commit '4bc5d57fa69a393877e7019d7673ceb33c3ab4b4'

git-subtree-dir: legacy/server-config
git-subtree-mainline: dc481c24b0
git-subtree-split: 4bc5d57fa6
This commit is contained in:
primary
2026-04-18 00:45:33 -04:00
136 changed files with 12893 additions and 0 deletions

View File

@@ -0,0 +1,4 @@
# Do not edit this file. To specify the files to encrypt, create your own
# .gitattributes file in the directory where your files are.
* !filter !diff
*.gpg binary

3
legacy/server-config/.gitattributes vendored Normal file
View File

@@ -0,0 +1,3 @@
# git-crypt transparently encrypts these paths in the repository:
# runtime secrets plus the agenix USB identity key.
secrets/** filter=git-crypt diff=git-crypt
usb-secrets/usb-secrets-key* filter=git-crypt diff=git-crypt

View File

@@ -0,0 +1,60 @@
# CI pipeline for host "muffin": on every push to main, build the NixOS
# closure, deploy it with deploy-rs, health-check the core services, and
# report the outcome via ntfy. Runs on a self-hosted runner labelled "nix".
name: Build and Deploy
on:
  push:
    branches: [main]
jobs:
  deploy:
    runs-on: nix
    env:
      # Dedicated CI deploy key plus a pinned known_hosts file; strict host
      # key checking stays ON so the deploy connection cannot be MITM'd.
      GIT_SSH_COMMAND: "ssh -i /run/agenix/ci-deploy-key -o StrictHostKeyChecking=yes -o UserKnownHostsFile=/etc/ci-known-hosts"
    steps:
      - uses: https://github.com/actions/checkout@v4
        with:
          # Full clone: git-crypt operates on the real work tree, and the
          # notify steps report ${GITHUB_SHA::8} from this checkout.
          fetch-depth: 0
      - name: Unlock git-crypt
        run: |
          git-crypt unlock /run/agenix/git-crypt-key-server-config
      - name: Build NixOS configuration
        run: |
          nix build .#nixosConfigurations.muffin.config.system.build.toplevel -L
      - name: Deploy via deploy-rs
        run: |
          eval $(ssh-agent -s)
          ssh-add /run/agenix/ci-deploy-key
          nix run github:serokell/deploy-rs -- .#muffin --skip-checks --ssh-opts="-o StrictHostKeyChecking=yes -o UserKnownHostsFile=/etc/ci-known-hosts"
      - name: Health check
        run: |
          # Give units a moment to settle after activation before probing.
          sleep 10
          ssh -i /run/agenix/ci-deploy-key -o StrictHostKeyChecking=yes -o UserKnownHostsFile=/etc/ci-known-hosts root@server-public \
            "systemctl is-active gitea && systemctl is-active caddy && systemctl is-active continuwuity && systemctl is-active coturn"
      - name: Notify success
        if: success()
        run: |
          TOPIC=$(cat /run/agenix/ntfy-alerts-topic | tr -d '[:space:]')
          TOKEN=$(cat /run/agenix/ntfy-alerts-token | tr -d '[:space:]')
          # "|| true": a transient ntfy outage must not fail the job after a
          # successful deploy (it would also trigger the failure notify below).
          curl -sf -o /dev/null -X POST \
            "https://ntfy.sigkill.computer/$TOPIC" \
            -H "Authorization: Bearer $TOKEN" \
            -H "Title: [muffin] Deploy succeeded" \
            -H "Priority: default" \
            -H "Tags: white_check_mark" \
            -d "server-config deployed from commit ${GITHUB_SHA::8}" || true
      - name: Notify failure
        if: failure()
        run: |
          # Best-effort: the job is already failed, so suppress read/curl
          # errors (2>/dev/null, || true) rather than masking the real cause.
          TOPIC=$(cat /run/agenix/ntfy-alerts-topic 2>/dev/null | tr -d '[:space:]')
          TOKEN=$(cat /run/agenix/ntfy-alerts-token 2>/dev/null | tr -d '[:space:]')
          curl -sf -o /dev/null -X POST \
            "https://ntfy.sigkill.computer/$TOPIC" \
            -H "Authorization: Bearer $TOKEN" \
            -H "Title: [muffin] Deploy FAILED" \
            -H "Priority: urgent" \
            -H "Tags: rotating_light" \
            -d "server-config deploy failed at commit ${GITHUB_SHA::8}" || true

1
legacy/server-config/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
# Symlink produced by `nix build` in the repo root.
/result

View File

@@ -0,0 +1,144 @@
# AGENTS.md - server-config (NixOS server "muffin")
## Overview
NixOS flake-based server configuration for host **muffin** (deployed to `root@server-public`).
Uses deploy-rs for remote deployment, disko for disk management, impermanence (tmpfs root),
agenix for secrets, lanzaboote for secure boot, and ZFS for data storage.
## Target Hardware
- **CPU**: AMD Ryzen 5 5600X (6C/12T, Zen 3 / `znver3`)
- **RAM**: 64 GB DDR4, no swap
- **Motherboard**: ASRock B550M Pro4
- **Boot drive**: WD_BLACK SN770 1TB NVMe (f2fs: 20G /persistent, 911G /nix; root is tmpfs)
- **SSD pool `tank`**: 4x 2TB SATA SSDs (raidz2) -- services, backups, music, misc
- **HDD pool `hdds`**: 4x 18TB Seagate Exos X18 (raidz1) -- torrents
- Connected via eSATA to external enclosure
- **USB**: 8GB VFAT drive mounted at /mnt/usb-secrets (agenix identity key)
- **GPU**: Intel (integrated, xe driver) -- used for Jellyfin hardware transcoding
- **NIC**: enp4s0 (static 192.168.1.50/24)
## Build / Deploy / Test Commands
```bash
# Format code (nixfmt-tree)
nix fmt
# Build the system configuration (check for eval errors)
nix build .#nixosConfigurations.muffin.config.system.build.toplevel -L
# Deploy to server
nix run .#deploy -- .#muffin
# Run ALL tests (NixOS VM tests, takes a long time)
nix build .#packages.x86_64-linux.tests -L
# Run a SINGLE test by name (preferred during development)
nix build .#test-zfsTest -L
nix build .#test-testTest -L
nix build .#test-fail2banSshTest -L
nix build .#test-ntfyAlertsTest -L
nix build .#test-filePermsTest -L
# Pattern: nix build .#test-<testName> -L
# Test names are defined in tests/tests.nix (keys of the returned attrset)
# Check flake outputs (list what's available)
nix flake show
# Evaluate without building (fast syntax/eval check)
nix eval .#nixosConfigurations.muffin.config.system.build.toplevel --no-build 2>&1 | head -5
```
## Code Style
### Nix Formatting
- **Formatter**: `nixfmt-tree` (declared in flake.nix). Always run `nix fmt` before committing.
- **Indentation**: 2 spaces (enforced by nixfmt-tree).
### Module Pattern
Every `.nix` file is a function taking an attrset with named args and `...`:
```nix
{
config,
lib,
pkgs,
service_configs,
...
}:
{
# module body
}
```
- Function args on separate lines, one per line, with trailing comma.
- Opening brace on its own line for multi-line arg lists.
- Use `service_configs` (from `service-configs.nix`) for all ports, paths, domains -- never hardcode.
### Service File Convention
Each service file in `services/` follows this structure:
1. `imports` block with `lib.serviceMountWithZpool` and optionally `lib.serviceFilePerms`
2. Service configuration (`services.<name> = { ... }`)
3. Caddy reverse proxy vhost (`services.caddy.virtualHosts."subdomain.${service_configs.https.domain}"`)
4. Firewall rules if needed (`networking.firewall.allowed{TCP,UDP}Ports`)
5. fail2ban jail if the service has authentication (`services.fail2ban.jails.<name>`)
### Custom Lib Functions (modules/lib.nix)
- `lib.serviceMountWithZpool serviceName zpoolName [dirs]` -- ensures ZFS datasets are mounted before service starts, validates pool membership
- `lib.serviceFilePerms serviceName [tmpfilesRules]` -- sets file permissions via systemd-tmpfiles before service starts
- `lib.optimizePackage pkg` -- applies `-O3 -march=znver3 -mtune=znver3` compiler flags
- `lib.vpnNamespaceOpenPort port serviceName` -- confines service to WireGuard VPN namespace
### Naming Conventions
- **Files**: lowercase with hyphens (`jellyfin-qbittorrent-monitor.nix`)
- **Test names**: camelCase with `Test` suffix in `tests/tests.nix` (`fail2banSshTest`, `zfsTest`)
- **Ports**: all declared in `service-configs.nix` under `ports.*`, referenced as `service_configs.ports.<name>`
- **ZFS datasets**: `tank/services/<name>` for SSD-backed, `hdds/services/<name>` for HDD-backed
- **Commit messages**: terse, lowercase; prefix with service/module name when scoped (`caddy: add redirect`, `zfs: remove unneeded options`). Generic changes use `update` or short description.
### Secrets
- **git-crypt**: `secrets/` directory and `usb-secrets/usb-secrets-key*` are encrypted (see `.gitattributes`)
- **agenix**: secrets declared in `modules/age-secrets.nix`, decrypted at runtime to `/run/agenix/`
- **Identity**: USB drive at `/mnt/usb-secrets/usb-secrets-key`
- **Encrypting new secrets**: The agenix identity is an SSH private key at `usb-secrets/usb-secrets-key` (git-crypt encrypted). To encrypt a new secret, use the SSH public key directly with `age -R`:
```bash
age -R <(ssh-keygen -y -f usb-secrets/usb-secrets-key) -o secrets/<name>.age /path/to/plaintext
```
- **DO NOT use `ssh-to-age`**. Using `ssh-to-age` to derive a native age public key and then encrypting with `age -r age1...` produces `X25519` recipient stanzas. The SSH private key identity on the server can only decrypt `ssh-ed25519` stanzas. This mismatch causes `age: error: no identity matched any of the recipients` at deploy time. Always use `age -R` with the SSH public key directly.
- Never read or commit plaintext secrets. Never log secret values.
### Important Patterns
- **Impermanence**: Root `/` is tmpfs. Only `/persistent`, `/nix`, and ZFS mounts survive reboots. Any new persistent state must be declared in `modules/impermanence.nix`.
- **Port uniqueness**: `flake.nix` has an assertion that all ports in `service_configs.ports` are unique. Always add new ports there. Make sure to put them in the specific "Public" and "Private" sections that are separated by comments.
- **Hugepages**: Services needing large pages declare their budget in `service-configs.nix` under `hugepages_2m.services`. The kernel sysctl is set automatically from the total.
- **Domain**: Primary domain is `sigkill.computer`. Old domain `gardling.com` redirects automatically.
- **Hardened kernel**: Uses `_hardened` kernel. Security-sensitive defaults apply.
- **PostgreSQL as central database**: All services that support PostgreSQL MUST use it instead of embedded databases (H2, SQLite, etc.). Connect via Unix socket with peer auth when possible (JDBC services can use junixsocket). The PostgreSQL instance is declared in `services/postgresql.nix` with ZFS-backed storage. Use `ensureDatabases`/`ensureUsers` to auto-create databases and roles.
### Test Pattern
Tests use `pkgs.testers.runNixOSTest` (NixOS VM tests):
```nix
{ config, lib, pkgs, ... }:
pkgs.testers.runNixOSTest {
name = "descriptive-test-name";
nodes.machine = { pkgs, ... }: {
imports = [ /* modules under test */ ];
# VM config
};
testScript = ''
start_all()
machine.wait_for_unit("multi-user.target")
# Python test script using machine.succeed/machine.fail
'';
}
```
- Register new tests in `tests/tests.nix` with `handleTest ./filename.nix`
- Tests needing the overlay should use `pkgs.appendOverlays [ (import ../modules/overlays.nix) ]`
- Test scripts are Python; use `machine.succeed(...)`, `machine.fail(...)`, `assert`, `subtest`
## SSH Access
```bash
ssh root@server-public # deploy user
ssh primary@server-public # normal user (doas instead of sudo)
```

View File

@@ -0,0 +1,328 @@
# Entry-point NixOS module for host "muffin".
# Wires together the hardware/storage/security modules and every self-hosted
# service. Per-service ports, domains and paths come from `service_configs`
# (service-configs.nix); nothing service-specific is hardcoded here.
{
  config,
  lib,
  pkgs,
  hostname,
  username,
  eth_interface,
  service_configs,
  options,
  ...
}:
{
  imports = [
    # Core system modules
    ./modules/hardware.nix
    ./modules/zfs.nix
    ./modules/impermanence.nix
    ./modules/usb-secrets.nix
    ./modules/age-secrets.nix
    ./modules/secureboot.nix
    ./modules/no-rgb.nix
    ./modules/security.nix
    ./modules/ntfy-alerts.nix
    ./modules/power.nix
    # Self-hosted services
    ./services/postgresql.nix
    ./services/jellyfin
    ./services/caddy
    ./services/immich.nix
    ./services/gitea.nix
    ./services/gitea-actions-runner.nix
    ./services/minecraft.nix
    ./services/wg.nix
    ./services/qbittorrent.nix
    ./services/bitmagnet.nix
    ./services/arr/prowlarr.nix
    ./services/arr/sonarr.nix
    ./services/arr/radarr.nix
    ./services/arr/bazarr.nix
    ./services/arr/jellyseerr.nix
    ./services/arr/recyclarr.nix
    ./services/arr/arr-search.nix
    ./services/arr/torrent-audit.nix
    ./services/arr/init.nix
    ./services/soulseek.nix
    # ./services/llama-cpp.nix
    ./services/trilium.nix
    ./services/ups.nix
    ./services/grafana
    ./services/bitwarden.nix
    ./services/firefox-syncserver.nix
    ./services/matrix
    ./services/monero
    ./services/graphing-calculator.nix
    ./services/ssh.nix
    ./services/syncthing.nix
    ./services/ntfy
    ./services/mollysocket.nix
    ./services/harmonia.nix
    ./services/ddns-updater.nix
  ];
  # Hosts entries for CI/CD deploy targets
  networking.hosts."192.168.1.50" = [ "server-public" ];
  networking.hosts."192.168.1.223" = [ "desktop" ];
  # SSH known_hosts for CI runner (pinned host keys)
  environment.etc."ci-known-hosts".text = ''
    server-public ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFMjgaMnE+zS7tL+m5E7gh9Q9U1zurLdmU0qcmEmaucu
    192.168.1.50 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFMjgaMnE+zS7tL+m5E7gh9Q9U1zurLdmU0qcmEmaucu
    git.sigkill.computer ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFMjgaMnE+zS7tL+m5E7gh9Q9U1zurLdmU0qcmEmaucu
    git.gardling.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFMjgaMnE+zS7tL+m5E7gh9Q9U1zurLdmU0qcmEmaucu
  '';
  # kmscon as the virtual-terminal console.
  services.kmscon.enable = true;
  # Disable serial getty on ttyS0 to prevent dmesg warnings
  systemd.services."serial-getty@ttyS0".enable = false;
  # srvos enables vim, i don't want to use vim, disable it here:
  # (guard with optionalAttrs because the option only exists on some channels)
  programs.vim = {
    defaultEditor = false;
  }
  // lib.optionalAttrs (options.programs.vim ? enable) {
    enable = false;
  };
  # https://github.com/NixOS/nixpkgs/issues/101459#issuecomment-758306434
  security.pam.loginLimits = [
    {
      domain = "*";
      type = "soft";
      item = "nofile";
      value = "4096";
    }
  ];
  nix = {
    # optimize the store
    optimise.automatic = true;
    # garbage collection
    gc = {
      automatic = true;
      dates = "weekly";
      options = "--delete-older-than 7d";
    };
  };
  # Intel Arc A380 (DG2, 56a5) uses the i915 driver on kernel 6.12.
  # The xe driver's iHD media driver integration has buffer mapping
  # failures on this GPU/kernel combination. i915 works correctly for
  # VAAPI transcode as long as ASPM deep states are disabled for the
  # GPU (see modules/power.nix).
  hardware.intelgpu.driver = "i915";
  # Per-service 2MB hugepage budget calculated in service-configs.nix.
  boot.kernel.sysctl."vm.nr_hugepages" = service_configs.hugepages_2m.total_pages;
  boot = {
    # 6.12 LTS until 2027-03. Kernel 6.18 causes a reproducible ZFS deadlock
    # in dbuf_evict due to page allocator changes (__free_frozen_pages).
    # https://github.com/openzfs/zfs/issues/18426
    kernelPackages = pkgs.linuxPackages_6_12;
    loader = {
      # Use the systemd-boot EFI boot loader.
      # Disabled: ASRock B550M Pro4 AMI UEFI hangs on POST when NixOS
      # writes EFI variables (NVRAM corruption). Lanzaboote boot entries
      # are discovered via BLS Type #2 on the ESP, so this is not needed.
      efi.canTouchEfiVariables = false;
      # 1s timeout
      timeout = 1;
    };
    initrd = {
      compressor = "zstd";
      # Boot/persistent partitions are f2fs (see disko layout).
      supportedFilesystems = [ "f2fs" ];
    };
  };
  environment.etc = {
    # Blank pre-login banner.
    "issue".text = "";
  };
  # Set your time zone.
  time.timeZone = "America/New_York";
  hardware.graphics = {
    enable = true;
    extraPackages = with pkgs; [
      libva-vdpau-driver
      intel-compute-runtime # OpenCL filter support (hardware tonemapping and subtitle burn-in)
      vpl-gpu-rt # QSV on 11th gen or newer
    ];
  };
  #fwupd for updating firmware
  services.fwupd = {
    enable = true;
    extraRemotes = [ "lvfs-testing" ];
  };
  # Admin/debugging CLI tooling available system-wide.
  environment.systemPackages = with pkgs; [
    helix
    lm_sensors
    bottom
    htop
    doas-sudo-shim
    neofetch
    borgbackup
    smartmontools
    ripgrep
    intel-gpu-tools
    iotop
    iftop
    tmux
    wget
    powertop
    lsof
    reflac
    pfetch-rs
    sbctl
    # add `skdump`
    libatasmart
  ];
  networking = {
    nameservers = [
      "1.1.1.1"
      "9.9.9.9"
    ];
    hostName = hostname;
    # Stable 8-hex-digit host id (required by ZFS).
    hostId = "0f712d56";
    firewall.enable = true;
    # Static addressing below; no DHCP.
    useDHCP = false;
    # Disabled because of Jellyfin (various issues)
    enableIPv6 = false;
    interfaces.${eth_interface} = {
      ipv4.addresses = [
        {
          address = "192.168.1.50";
          # address = "10.1.1.102";
          prefixLength = 24;
        }
      ];
      # NOTE(review): enableIPv6 = false above, yet a link-local v6 address
      # is still declared here -- confirm whether this block is dead config.
      ipv6.addresses = [
        {
          address = "fe80::9e6b:ff:fe4d:abb";
          prefixLength = 64;
        }
      ];
    };
    defaultGateway = {
      #address = "10.1.1.1";
      address = "192.168.1.1";
      interface = eth_interface;
    };
    # TODO! fix this
    # defaultGateway6 = {
    # address = "fe80::/64";
    # interface = eth_interface;
    # };
  };
  # Shared group for media services (name comes from service-configs.nix).
  users.groups.${service_configs.media_group} = { };
  users.users.gitea-runner = {
    isSystemUser = true;
    group = "gitea-runner";
    home = "/var/lib/gitea-runner";
    description = "Gitea Actions CI runner";
  };
  users.groups.gitea-runner = { };
  # Primary interactive user; password hash is an agenix secret.
  users.users.${username} = {
    isNormalUser = true;
    extraGroups = [
      "wheel"
      "video"
      "render"
      service_configs.media_group
    ];
    hashedPasswordFile = config.age.secrets.hashedPass.path;
  };
  # https://nixos.wiki/wiki/Fish#Setting_fish_as_your_shell
  programs.fish.enable = true;
  # Keep bash as the login shell but exec into fish for interactive sessions
  # (skips fish when already inside fish or when running `bash -c ...`).
  programs.bash = {
    interactiveShellInit = ''
      if [[ $(${pkgs.procps}/bin/ps --no-header --pid=$PPID --format=comm) != "fish" && -z ''${BASH_EXECUTION_STRING} ]]
      then
        shopt -q login_shell && LOGIN_OPTION='--login' || LOGIN_OPTION=""
        exec ${pkgs.fish}/bin/fish $LOGIN_OPTION
      fi
    '';
  };
  security = {
    #lets use doas and not sudo!
    doas.enable = true;
    sudo.enable = false;
    # Configure doas
    doas.extraRules = [
      {
        users = [ username ];
        keepEnv = true;
        persist = true;
      }
    ];
  };
  services.murmur = {
    enable = true;
    openFirewall = true;
    welcometext = "meow meow meow meow meow :3 xd";
    # Placeholder substituted at service start from environmentFile (agenix
    # secret); the literal "$MURMURD_PASSWORD" never is the real password.
    password = "$MURMURD_PASSWORD";
    environmentFile = config.age.secrets.murmur-password-env.path;
    port = service_configs.ports.public.murmur.port;
  };
  # services.botamusique = {
  # enable = true;
  # settings = {
  # server = {port = config.services.murmur.port;
  # password = config.services.murmur.password;
  # };
  # };
  # };
  # systemd.tmpfiles.rules = [
  # "Z /tank/music 775 ${username} users"
  # ];
  system.stateVersion = "24.11";
}

View File

@@ -0,0 +1,59 @@
# Disk layout for the NVMe boot drive, managed by disko.
# Root "/" is tmpfs (impermanence): only /boot, /persistent and /nix live on
# disk and survive reboots. The ZFS data pools are configured elsewhere.
{ inputs, ... }:
{
  imports = [
    inputs.disko.nixosModules.disko
  ];
  disko.devices = {
    disk = {
      # NOTE(review): no `device = "/dev/..."` path is set on this disk --
      # presumably supplied by another module or at `disko` invocation time;
      # confirm before running a destructive format.
      main = {
        type = "disk";
        content = {
          type = "gpt";
          partitions = {
            # EFI system partition.
            ESP = {
              type = "EF00";
              size = "500M";
              content = {
                type = "filesystem";
                format = "vfat";
                mountpoint = "/boot";
              };
            };
            # Persistent state kept across reboots (impermanence target).
            persistent = {
              size = "20G";
              content = {
                type = "filesystem";
                format = "f2fs";
                mountpoint = "/persistent";
              };
            };
            # Nix store takes the remainder of the drive.
            nix = {
              size = "100%";
              content = {
                type = "filesystem";
                format = "f2fs";
                mountpoint = "/nix";
              };
            };
          };
        };
      };
    };
    nodev = {
      # tmpfs root: everything outside the mounts above is wiped on reboot.
      "/" = {
        fsType = "tmpfs";
        mountOptions = [
          "defaults"
          "size=2G"
          "mode=755"
        ];
      };
    };
  };
  # Both must be mounted in the initrd: /persistent holds early-boot state
  # and /nix holds the system closure itself.
  fileSystems."/persistent".neededForBoot = true;
  fileSystems."/nix".neededForBoot = true;
}

837
legacy/server-config/flake.lock generated Normal file
View File

@@ -0,0 +1,837 @@
{
"nodes": {
"agenix": {
"inputs": {
"darwin": [],
"home-manager": [
"home-manager"
],
"nixpkgs": [
"nixpkgs"
],
"systems": "systems"
},
"locked": {
"lastModified": 1770165109,
"narHash": "sha256-9VnK6Oqai65puVJ4WYtCTvlJeXxMzAp/69HhQuTdl/I=",
"owner": "ryantm",
"repo": "agenix",
"rev": "b027ee29d959fda4b60b57566d64c98a202e0feb",
"type": "github"
},
"original": {
"owner": "ryantm",
"repo": "agenix",
"type": "github"
}
},
"arr-init": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1776401121,
"narHash": "sha256-BELV1YMBuLL0aQNQ3SLvSLq8YN5h2o1jcrwz1+Zt32Q=",
"ref": "refs/heads/main",
"rev": "6dde2a3e0d087208b8084b61113707c5533c4c2d",
"revCount": 19,
"type": "git",
"url": "ssh://gitea@git.gardling.com/titaniumtown/arr-init"
},
"original": {
"type": "git",
"url": "ssh://gitea@git.gardling.com/titaniumtown/arr-init"
}
},
"crane": {
"locked": {
"lastModified": 1773189535,
"narHash": "sha256-E1G/Or6MWeP+L6mpQ0iTFLpzSzlpGrITfU2220Gq47g=",
"owner": "ipetkov",
"repo": "crane",
"rev": "6fa2fb4cf4a89ba49fc9dd5a3eb6cde99d388269",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"deploy-rs": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"nixpkgs"
],
"utils": "utils"
},
"locked": {
"lastModified": 1770019181,
"narHash": "sha256-hwsYgDnby50JNVpTRYlF3UR/Rrpt01OrxVuryF40CFY=",
"owner": "serokell",
"repo": "deploy-rs",
"rev": "77c906c0ba56aabdbc72041bf9111b565cdd6171",
"type": "github"
},
"original": {
"owner": "serokell",
"repo": "deploy-rs",
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1773889306,
"narHash": "sha256-PAqwnsBSI9SVC2QugvQ3xeYCB0otOwCacB1ueQj2tgw=",
"owner": "nix-community",
"repo": "disko",
"rev": "5ad85c82cc52264f4beddc934ba57f3789f28347",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"qbittorrent-metrics-exporter",
"naersk",
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1752475459,
"narHash": "sha256-z6QEu4ZFuHiqdOPbYss4/Q8B0BFhacR8ts6jO/F/aOU=",
"owner": "nix-community",
"repo": "fenix",
"rev": "bf0d6f70f4c9a9cf8845f992105652173f4b617f",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1767039857,
"narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=",
"owner": "NixOS",
"repo": "flake-compat",
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1730504689,
"narHash": "sha256-hgmguH29K2fvs9szpq2r3pz2/8cJd2LPS+b4tfNFCwE=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "506278e768c2a08bec68eb62932193e341f55c90",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_6"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"lanzaboote",
"pre-commit",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1775425411,
"narHash": "sha256-KY6HsebJHEe5nHOWP7ur09mb0drGxYSzE3rQxy62rJo=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "0d02ec1d0a05f88ef9e74b516842900c41f0f2fe",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "release-25.11",
"repo": "home-manager",
"type": "github"
}
},
"home-manager_2": {
"inputs": {
"nixpkgs": [
"impermanence",
"nixpkgs"
]
},
"locked": {
"lastModified": 1768598210,
"narHash": "sha256-kkgA32s/f4jaa4UG+2f8C225Qvclxnqs76mf8zvTVPg=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "c47b2cc64a629f8e075de52e4742de688f930dc6",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "home-manager",
"type": "github"
}
},
"impermanence": {
"inputs": {
"home-manager": "home-manager_2",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1769548169,
"narHash": "sha256-03+JxvzmfwRu+5JafM0DLbxgHttOQZkUtDWBmeUkN8Y=",
"owner": "nix-community",
"repo": "impermanence",
"rev": "7b1d382faf603b6d264f58627330f9faa5cba149",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "impermanence",
"type": "github"
}
},
"lanzaboote": {
"inputs": {
"crane": "crane",
"nixpkgs": [
"nixpkgs"
],
"pre-commit": "pre-commit",
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1776248416,
"narHash": "sha256-TC6yzbCAex1pDfqUZv9u8fVm8e17ft5fNrcZ0JRDOIQ=",
"owner": "nix-community",
"repo": "lanzaboote",
"rev": "18e9e64bae15b828c092658335599122a6db939b",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "lanzaboote",
"type": "github"
}
},
"llamacpp": {
"inputs": {
"flake-parts": "flake-parts",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1776301820,
"narHash": "sha256-Yr3JRZ05PNmX4sR2Ak7e0jT+oCQgTAAML7FUoyTmitk=",
"owner": "TheTom",
"repo": "llama-cpp-turboquant",
"rev": "1073622985bb68075472474b4b0fdfcdabcfc9d0",
"type": "github"
},
"original": {
"owner": "TheTom",
"ref": "feature/turboquant-kv-cache",
"repo": "llama-cpp-turboquant",
"type": "github"
}
},
"naersk": {
"inputs": {
"fenix": "fenix",
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1763384566,
"narHash": "sha256-r+wgI+WvNaSdxQmqaM58lVNvJYJ16zoq+tKN20cLst4=",
"owner": "nix-community",
"repo": "naersk",
"rev": "d4155d6ebb70fbe2314959842f744aa7cabbbf6a",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "master",
"repo": "naersk",
"type": "github"
}
},
"nix-minecraft": {
"inputs": {
"flake-compat": "flake-compat_3",
"nixpkgs": [
"nixpkgs"
],
"systems": "systems_4"
},
"locked": {
"lastModified": 1776310483,
"narHash": "sha256-xMFl+umxGmo5VEgcZcXT5Dk9sXU5WyTRz1Olpywr/60=",
"owner": "Infinidoge",
"repo": "nix-minecraft",
"rev": "74abd91054e2655d6c392428a27e5d27edd5e6bf",
"type": "github"
},
"original": {
"owner": "Infinidoge",
"repo": "nix-minecraft",
"type": "github"
}
},
"nixos-hardware": {
"locked": {
"lastModified": 1775490113,
"narHash": "sha256-2ZBhDNZZwYkRmefK5XLOusCJHnoeKkoN95hoSGgMxWM=",
"owner": "NixOS",
"repo": "nixos-hardware",
"rev": "c775c2772ba56e906cbeb4e0b2db19079ef11ff7",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "master",
"repo": "nixos-hardware",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1776221942,
"narHash": "sha256-FbQAeVNi7G4v3QCSThrSAAvzQTmrmyDLiHNPvTF2qFM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "1766437c5509f444c1b15331e82b8b6a9b967000",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1730504152,
"narHash": "sha256-lXvH/vOfb4aGYyvFmZK/HlsNsr/0CVWlwYvo2rxJk3s=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/cc2f28000298e1269cea6612cd06ec9979dd5d7f.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/cc2f28000298e1269cea6612cd06ec9979dd5d7f.tar.gz"
}
},
"nixpkgs-p2pool-module": {
"flake": false,
"locked": {
"lastModified": 1773298780,
"narHash": "sha256-7awJKfaH2uTuuW6gyA/lmPPfSruObm7bIkiYADxZBro=",
"owner": "JacoMalan1",
"repo": "nixpkgs",
"rev": "501e6bb1697590473c87c2ff9d2a92043a8d0e06",
"type": "github"
},
"original": {
"owner": "JacoMalan1",
"ref": "create-p2pool-service",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1752077645,
"narHash": "sha256-HM791ZQtXV93xtCY+ZxG1REzhQenSQO020cu6rHtAPk=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "be9e214982e20b8310878ac2baa063a961c1bdf6",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1764517877,
"narHash": "sha256-pp3uT4hHijIC8JUK5MEqeAWmParJrgBVzHLNfJDZxg4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2d293cbfa5a793b4c50d17c05ef9e385b90edf6c",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit": {
"inputs": {
"flake-compat": "flake-compat_2",
"gitignore": "gitignore",
"nixpkgs": [
"lanzaboote",
"nixpkgs"
]
},
"locked": {
"lastModified": 1772893680,
"narHash": "sha256-JDqZMgxUTCq85ObSaFw0HhE+lvdOre1lx9iI6vYyOEs=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "8baab586afc9c9b57645a734c820e4ac0a604af9",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"qbittorrent-metrics-exporter": {
"inputs": {
"naersk": "naersk",
"nixpkgs": [
"nixpkgs"
],
"systems": "systems_5"
},
"locked": {
"lastModified": 1771989937,
"narHash": "sha256-bPUV4gVvSbF4VMkbLKYrfwVwzTeS+Sr41wucDj1///g=",
"ref": "refs/heads/main",
"rev": "cb94f866b7a2738532b1cae31d0b9f89adecbd54",
"revCount": 112,
"type": "git",
"url": "https://codeberg.org/anriha/qbittorrent-metrics-exporter"
},
"original": {
"type": "git",
"url": "https://codeberg.org/anriha/qbittorrent-metrics-exporter"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"arr-init": "arr-init",
"deploy-rs": "deploy-rs",
"disko": "disko",
"home-manager": "home-manager",
"impermanence": "impermanence",
"lanzaboote": "lanzaboote",
"llamacpp": "llamacpp",
"nix-minecraft": "nix-minecraft",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs",
"nixpkgs-p2pool-module": "nixpkgs-p2pool-module",
"qbittorrent-metrics-exporter": "qbittorrent-metrics-exporter",
"senior_project-website": "senior_project-website",
"srvos": "srvos",
"trackerlist": "trackerlist",
"vpn-confinement": "vpn-confinement",
"website": "website",
"ytbn-graphing-software": "ytbn-graphing-software"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1752428706,
"narHash": "sha256-EJcdxw3aXfP8Ex1Nm3s0awyH9egQvB2Gu+QEnJn2Sfg=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "591e3b7624be97e4443ea7b5542c191311aa141d",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"lanzaboote",
"nixpkgs"
]
},
"locked": {
"lastModified": 1773544328,
"narHash": "sha256-Iv+qez54LAz+isij4APBk31VWA//Go81hwFOXr5iWTw=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "4f977d776793c8bfbfdd7eca7835847ccc48874e",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"rust-overlay_2": {
"inputs": {
"nixpkgs": [
"ytbn-graphing-software",
"nixpkgs"
]
},
"locked": {
"lastModified": 1764729618,
"narHash": "sha256-z4RA80HCWv2los1KD346c+PwNPzMl79qgl7bCVgz8X0=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "52764074a85145d5001bf0aa30cb71936e9ad5b8",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"senior_project-website": {
"flake": false,
"locked": {
"lastModified": 1775019649,
"narHash": "sha256-zVQy5ydiWKnIixf79pmd2LJTPkwyiv4V5piKZETDdwI=",
"owner": "Titaniumtown",
"repo": "senior-project-website",
"rev": "bfd504c77c90524b167158652e1d87a260680120",
"type": "github"
},
"original": {
"owner": "Titaniumtown",
"repo": "senior-project-website",
"type": "github"
}
},
"srvos": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1776306894,
"narHash": "sha256-l4N3O1cfXiQCHJGspAkg6WlZyOFBTbLXhi8Anf8jB0g=",
"owner": "nix-community",
"repo": "srvos",
"rev": "01d98209264c78cb323b636d7ab3fe8e7a8b60c7",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "srvos",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_4": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_5": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_6": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"trackerlist": {
"flake": false,
"locked": {
"lastModified": 1776290985,
"narHash": "sha256-eNWDOLBA0vk1TiKqse71siIAgLycjvBFDw35eAtnUPs=",
"owner": "ngosang",
"repo": "trackerslist",
"rev": "9bb380b3c2a641a3289f92dedef97016f2e47f36",
"type": "github"
},
"original": {
"owner": "ngosang",
"repo": "trackerslist",
"type": "github"
}
},
"utils": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"vpn-confinement": {
"locked": {
"lastModified": 1767604552,
"narHash": "sha256-FddhMxnc99KYOZ/S3YNqtDSoxisIhVtJ7L4s8XD2u0A=",
"owner": "Maroka-chan",
"repo": "VPN-Confinement",
"rev": "a6b2da727853886876fd1081d6bb2880752937f3",
"type": "github"
},
"original": {
"owner": "Maroka-chan",
"repo": "VPN-Confinement",
"type": "github"
}
},
"website": {
"flake": false,
"locked": {
"lastModified": 1773169503,
"narHash": "sha256-P+T2H18k3zmEHxu7ZIDYyTrK5G3KUcZYW1AzVMKyCMs=",
"ref": "refs/heads/main",
"rev": "ae7a7d8325f841c52efb6fd81c4956b84631aa06",
"revCount": 24,
"type": "git",
"url": "https://git.sigkill.computer/titaniumtown/website"
},
"original": {
"type": "git",
"url": "https://git.sigkill.computer/titaniumtown/website"
}
},
"ytbn-graphing-software": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_3",
"rust-overlay": "rust-overlay_2"
},
"locked": {
"lastModified": 1765615270,
"narHash": "sha256-12C6LccKRe5ys0iRd+ob+BliswUSmqOKWhMTI8fNpr0=",
"ref": "refs/heads/main",
"rev": "ac6265eae734363f95909df9a3739bf6360fa721",
"revCount": 1130,
"type": "git",
"url": "https://git.sigkill.computer/titaniumtown/YTBN-Graphing-Software"
},
"original": {
"type": "git",
"url": "https://git.sigkill.computer/titaniumtown/YTBN-Graphing-Software"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -0,0 +1,281 @@
{
description = "Flake for server muffin";
  inputs = {
    # Base package set: stable NixOS release channel.
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
    # Secure-boot support (signs the UKI; see modules using lanzaboote).
    lanzaboote = {
      url = "github:nix-community/lanzaboote";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    nixos-hardware.url = "github:NixOS/nixos-hardware/master";
    nix-minecraft = {
      url = "github:Infinidoge/nix-minecraft";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    # WireGuard network-namespace confinement for selected services.
    vpn-confinement.url = "github:Maroka-chan/VPN-Confinement";
    home-manager = {
      url = "github:nix-community/home-manager/release-25.11";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    # Declarative disk partitioning.
    disko = {
      url = "github:nix-community/disko";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    llamacpp = {
      url = "github:TheTom/llama-cpp-turboquant/feature/turboquant-kv-cache";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    # Server-oriented NixOS defaults (watchdog, terminfo mixin, etc.).
    srvos = {
      url = "github:nix-community/srvos";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    deploy-rs = {
      url = "github:serokell/deploy-rs";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    # tmpfs-root persistence management.
    impermanence = {
      url = "github:nix-community/impermanence";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    # age-encrypted secrets; darwin input dropped since this is Linux-only.
    agenix = {
      url = "github:ryantm/agenix";
      inputs.nixpkgs.follows = "nixpkgs";
      inputs.home-manager.follows = "home-manager";
      inputs.darwin.follows = "";
    };
    # Non-flake content inputs (static sites, tracker list data).
    senior_project-website = {
      url = "github:Titaniumtown/senior-project-website";
      flake = false;
    };
    website = {
      url = "git+https://git.sigkill.computer/titaniumtown/website";
      flake = false;
    };
    trackerlist = {
      url = "github:ngosang/trackerslist";
      flake = false;
    };
    ytbn-graphing-software = {
      url = "git+https://git.sigkill.computer/titaniumtown/YTBN-Graphing-Software";
    };
    arr-init = {
      url = "git+ssh://gitea@git.gardling.com/titaniumtown/arr-init";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    # Out-of-tree nixpkgs branch providing the p2pool service module,
    # imported by path in outputs (flake = false).
    nixpkgs-p2pool-module = {
      url = "github:JacoMalan1/nixpkgs/create-p2pool-service";
      flake = false;
    };
    qbittorrent-metrics-exporter = {
      url = "git+https://codeberg.org/anriha/qbittorrent-metrics-exporter";
      inputs.nixpkgs.follows = "nixpkgs";
    };
  };
outputs =
{
self,
nixpkgs,
nix-minecraft,
nixos-hardware,
vpn-confinement,
home-manager,
lanzaboote,
disko,
srvos,
deploy-rs,
impermanence,
arr-init,
nixpkgs-p2pool-module,
...
}@inputs:
let
username = "primary";
hostname = "muffin";
eth_interface = "enp4s0";
system = "x86_64-linux";
service_configs = import ./service-configs.nix;
# Bootstrap pkgs used only to apply patches to nixpkgs source.
bootstrapPkgs = import nixpkgs { inherit system; };
# Patch nixpkgs to add PostgreSQL backend support for firefox-syncserver.
patchedNixpkgsSrc = bootstrapPkgs.applyPatches {
name = "nixpkgs-patched";
src = nixpkgs;
patches = [
./patches/nixpkgs/0001-firefox-syncserver-add-postgresql-backend-support.patch
];
};
pkgs = import patchedNixpkgsSrc {
inherit system;
targetPlatform = system;
buildPlatform = builtins.currentSystem;
};
lib = import ./modules/lib.nix { inherit inputs pkgs service_configs; };
testSuite = import ./tests/tests.nix {
inherit pkgs lib inputs;
config = self.nixosConfigurations.muffin.config;
};
in
{
formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-tree;
nixosConfigurations.${hostname} = lib.nixosSystem {
inherit system;
specialArgs = {
inherit
username
hostname
eth_interface
service_configs
inputs
;
};
modules = [
# SAFETY! port sanity checks
(
{ config, lib, ... }:
let
publicPorts = lib.attrValues service_configs.ports.public;
privatePorts = lib.attrValues service_configs.ports.private;
allPortNumbers = map (p: p.port) (publicPorts ++ privatePorts);
uniquePortNumbers = lib.unique allPortNumbers;
# Which public ports must be in each firewall list
publicTcp = map (p: p.port) (lib.filter (p: p.proto == "tcp" || p.proto == "both") publicPorts);
publicUdp = map (p: p.port) (lib.filter (p: p.proto == "udp" || p.proto == "both") publicPorts);
privatePortNumbers = map (p: p.port) privatePorts;
fwTcp = config.networking.firewall.allowedTCPPorts;
fwUdp = config.networking.firewall.allowedUDPPorts;
missingTcp = lib.filter (p: !(builtins.elem p fwTcp)) publicTcp;
missingUdp = lib.filter (p: !(builtins.elem p fwUdp)) publicUdp;
leakedTcp = lib.filter (p: builtins.elem p fwTcp) privatePortNumbers;
leakedUdp = lib.filter (p: builtins.elem p fwUdp) privatePortNumbers;
in
{
config.assertions = [
{
assertion = (lib.length allPortNumbers) == (lib.length uniquePortNumbers);
message = "Duplicate port numbers detected in ports.public / ports.private";
}
{
assertion = missingTcp == [ ];
message = "Public ports missing from allowedTCPPorts: ${builtins.toString missingTcp}";
}
{
assertion = missingUdp == [ ];
message = "Public ports missing from allowedUDPPorts: ${builtins.toString missingUdp}";
}
{
assertion = leakedTcp == [ ] && leakedUdp == [ ];
message = "Private ports leaked into firewall allow-lists TCP: ${builtins.toString leakedTcp}, UDP: ${builtins.toString leakedUdp}";
}
];
}
)
# sets up things like the watchdog
srvos.nixosModules.server
# diff terminal support
srvos.nixosModules.mixins-terminfo
./disk-config.nix
./configuration.nix
# Replace upstream firefox-syncserver module + package with patched
# versions that add PostgreSQL backend support.
{
disabledModules = [ "services/networking/firefox-syncserver.nix" ];
imports = [
"${patchedNixpkgsSrc}/nixos/modules/services/networking/firefox-syncserver.nix"
];
nixpkgs.overlays = [
nix-minecraft.overlay
(import ./modules/overlays.nix)
(_final: prev: {
syncstorage-rs =
prev.callPackage "${patchedNixpkgsSrc}/pkgs/by-name/sy/syncstorage-rs/package.nix"
{ };
})
];
nixpkgs.config.allowUnfreePredicate =
pkg:
builtins.elem (nixpkgs.lib.getName pkg) [
"minecraft-server"
];
}
lanzaboote.nixosModules.lanzaboote
arr-init.nixosModules.default
(import "${nixpkgs-p2pool-module}/nixos/modules/services/networking/p2pool.nix")
home-manager.nixosModules.home-manager
(
{
home-manager,
...
}:
{
home-manager.users.${username} = import ./modules/home.nix;
}
)
]
++ (with nixos-hardware.nixosModules; [
common-cpu-amd-pstate
common-cpu-amd-zenpower
common-pc-ssd
common-gpu-intel
]);
};
deploy.nodes.muffin = {
hostname = "server-public";
profiles.system = {
sshUser = "root";
user = "root";
path = deploy-rs.lib.${system}.activate.nixos self.nixosConfigurations.muffin;
};
};
checks.${system} = testSuite;
packages.${system} = {
tests = pkgs.linkFarm "all-tests" (
pkgs.lib.mapAttrsToList (name: test: {
name = name;
path = test;
}) testSuite
);
}
// (pkgs.lib.mapAttrs' (name: test: {
name = "test-${name}";
value = test;
}) testSuite);
};
}

View File

@@ -0,0 +1,203 @@
{
  config,
  lib,
  pkgs,
  inputs,
  ...
}:
# Central registry of all agenix-managed secrets. Each entry decrypts an
# .age file to /run/agenix (unless `path` overrides it) with the given
# ownership and mode.
{
  imports = [
    inputs.agenix.nixosModules.default
  ];
  # Configure all agenix secrets
  age.secrets = {
    # ZFS encryption key
    # path is set to /etc/zfs-key to match the ZFS dataset keylocation property
    zfs-key = {
      file = ../secrets/zfs-key.age;
      mode = "0400";
      owner = "root";
      group = "root";
      path = "/etc/zfs-key";
    };
    # Secureboot keys archive
    secureboot-tar = {
      file = ../secrets/secureboot.tar.age;
      mode = "0400";
      owner = "root";
      group = "root";
    };
    # System passwords
    hashedPass = {
      file = ../secrets/hashedPass.age;
      mode = "0400";
      owner = "root";
      group = "root";
    };
    # Service authentication
    caddy_auth = {
      file = ../secrets/caddy_auth.age;
      mode = "0400";
      owner = "caddy";
      group = "caddy";
    };
    # Njalla API token (NJALLA_API_TOKEN=...) for Caddy DNS-01 challenge
    njalla-api-token-env = {
      file = ../secrets/njalla-api-token-env.age;
      mode = "0400";
      owner = "caddy";
      group = "caddy";
    };
    # ddns-updater config.json with Njalla provider credentials
    ddns-updater-config = {
      file = ../secrets/ddns-updater-config.age;
      mode = "0400";
      owner = "ddns-updater";
      group = "ddns-updater";
    };
    jellyfin-api-key = {
      file = ../secrets/jellyfin-api-key.age;
      mode = "0400";
      owner = "root";
      group = "root";
    };
    # NOTE(review): mode 0500 sets the execute bit on a secret env file;
    # 0400 is likely sufficient — confirm slskd does not exec this file.
    slskd_env = {
      file = ../secrets/slskd_env.age;
      mode = "0500";
      owner = config.services.slskd.user;
      group = config.services.slskd.group;
    };
    # Network configuration
    wg0-conf = {
      file = ../secrets/wg0.conf.age;
      mode = "0400";
      owner = "root";
      group = "root";
    };
    # ntfy-alerts secrets (group-readable for CI runner notifications)
    ntfy-alerts-topic = {
      file = ../secrets/ntfy-alerts-topic.age;
      mode = "0440";
      owner = "root";
      group = "gitea-runner";
    };
    ntfy-alerts-token = {
      file = ../secrets/ntfy-alerts-token.age;
      mode = "0440";
      owner = "root";
      group = "gitea-runner";
    };
    # Firefox Sync server secrets (SYNC_MASTER_SECRET)
    # owner/group left at agenix defaults (root:root).
    firefox-syncserver-env = {
      file = ../secrets/firefox-syncserver-env.age;
      mode = "0400";
    };
    # MollySocket env (MOLLY_VAPID_PRIVKEY + MOLLY_ALLOWED_UUIDS)
    # owner/group left at agenix defaults (root:root).
    mollysocket-env = {
      file = ../secrets/mollysocket-env.age;
      mode = "0400";
    };
    # Murmur (Mumble) server password
    murmur-password-env = {
      file = ../secrets/murmur-password-env.age;
      mode = "0400";
      owner = "murmur";
      group = "murmur";
    };
    # Coturn static auth secret
    coturn-auth-secret = {
      file = ../secrets/coturn-auth-secret.age;
      mode = "0400";
      owner = "turnserver";
      group = "turnserver";
    };
    # Matrix (continuwuity) registration token
    matrix-reg-token = {
      file = ../secrets/matrix-reg-token.age;
      mode = "0400";
      owner = "continuwuity";
      group = "continuwuity";
    };
    # Matrix (continuwuity) TURN secret — same secret as coturn-auth-secret,
    # decrypted separately so continuwuity can read it with its own ownership
    matrix-turn-secret = {
      file = ../secrets/coturn-auth-secret.age;
      mode = "0400";
      owner = "continuwuity";
      group = "continuwuity";
    };
    # CI deploy SSH key
    ci-deploy-key = {
      file = ../secrets/ci-deploy-key.age;
      mode = "0400";
      owner = "gitea-runner";
      group = "gitea-runner";
    };
    # Git-crypt symmetric key for dotfiles repo
    git-crypt-key-dotfiles = {
      file = ../secrets/git-crypt-key-dotfiles.age;
      mode = "0400";
      owner = "gitea-runner";
      group = "gitea-runner";
    };
    # Git-crypt symmetric key for server-config repo
    git-crypt-key-server-config = {
      file = ../secrets/git-crypt-key-server-config.age;
      mode = "0400";
      owner = "gitea-runner";
      group = "gitea-runner";
    };
    # Gitea Actions runner registration token
    gitea-runner-token = {
      file = ../secrets/gitea-runner-token.age;
      mode = "0400";
      owner = "gitea-runner";
      group = "gitea-runner";
    };
    # llama-cpp API key for bearer token auth
    llama-cpp-api-key = {
      file = ../secrets/llama-cpp-api-key.age;
      mode = "0400";
      owner = "root";
      group = "root";
    };
    # Harmonia binary cache signing key
    harmonia-sign-key = {
      file = ../secrets/harmonia-sign-key.age;
      mode = "0400";
      owner = "harmonia";
      group = "harmonia";
    };
    # Caddy basic auth for nix binary cache (separate from main caddy_auth)
    nix-cache-auth = {
      file = ../secrets/nix-cache-auth.age;
      mode = "0400";
      owner = "caddy";
      group = "caddy";
    };
  };
}

View File

@@ -0,0 +1,62 @@
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
let
  hddTuneIosched = pkgs.writeShellScript "hdd-tune-iosched" ''
    # Called by udev with the partition kernel name (e.g. sdb1).
    # Derives the parent disk and applies mq-deadline iosched params.
    # ''${1%%[0-9]*} strips from the first trailing digit onward, so
    # "sdb1" -> "sdb". Only valid for sdX naming, which matches the
    # KERNEL=="sd[a-z]*[0-9]*" udev rule below (NVMe is not covered).
    parent=''${1%%[0-9]*}
    dev="/sys/block/$parent"
    # Exit quietly if the disk has no iosched tunables (e.g. scheduler=none).
    [ -d "$dev/queue/iosched" ] || exit 0
    echo 500 > "$dev/queue/iosched/read_expire"
    echo 15000 > "$dev/queue/iosched/write_expire"
    echo 128 > "$dev/queue/iosched/fifo_batch"
    echo 16 > "$dev/queue/iosched/writes_starved"
    # max_sectors_kb may be capped by hardware; ignore failure.
    echo 4096 > "$dev/queue/max_sectors_kb" 2>/dev/null || true
  '';
in
{
  boot.initrd.availableKernelModules = [
    "xhci_pci"
    "ahci"
    "usb_storage"
    "usbhid"
    "sd_mod"
  ];
  boot.initrd.kernelModules = [ "dm-snapshot" ];
  boot.kernelModules = [ "kvm-amd" ];
  boot.extraModulePackages = [ ];
  swapDevices = [ ];
  hardware.cpu.amd.updateMicrocode = true;
  hardware.enableRedistributableFirmware = true;
  # HDD I/O tuning for torrent seeding workload (high-concurrency random reads)
  # sharing the pool with latency-sensitive sequential reads (Jellyfin playback).
  #
  # mq-deadline sorts requests into elevator sweeps, reducing seek distance.
  # read_expire=500ms keeps reads bounded so a Jellyfin segment can't queue for
  # seconds behind a torrent burst; write_expire=15s lets the scheduler batch
  # writes for coalescence (torrent writes are async and tolerate delay).
  # The bulk of read coalescence already happens above the scheduler via ZFS
  # aggregation (zfs_vdev_aggregation_limit=4M, read_gap_limit=128K,
  # async_read_max=32), so the scheduler deadline only needs to be large enough
  # to keep the elevator sweep coherent -- 500ms is plenty on rotational disks.
  # fifo_batch=128 keeps sweeps long; writes_starved=16 heavily favors reads.
  # 4 MiB readahead matches libtorrent piece extent affinity for sequential prefetch.
  #
  # The NixOS ZFS module hardcodes a udev rule that forces scheduler="none" on all
  # ZFS member partitions' parent disks (on both add AND change events). We counter
  # it with lib.mkAfter so our rule appears after theirs in 99-local.rules — our
  # rule matches the same partition events and sets mq-deadline back, then a RUN
  # script applies the iosched params. Only targets rotational, non-removable disks
  # (i.e. HDDs, not SSDs or USB).
  services.udev.extraRules = lib.mkAfter ''
    ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/rotational}=="1", ATTR{../removable}=="0", ATTR{../queue/scheduler}="mq-deadline", ATTR{../queue/read_ahead_kb}="4096", ATTR{../queue/nr_requests}="512", RUN+="${hddTuneIosched} %k"
  '';
}

View File

@@ -0,0 +1,31 @@
{
  pkgs,
  lib,
  ...
}:
let
  # Base eza invocation shared by every listing alias.
  ezaBase = "${lib.getExe pkgs.eza} --color=always --group-directories-first";
  # Per-alias flag suffixes, rendered into full commands below.
  # (from DistroTube's dot files: Changing "ls" to "eza")
  listFlags = {
    ls = "-al";
    la = "-a";
    ll = "-l";
    lt = "-aT";
  };
in
{
  home.stateVersion = "24.11";
  programs.fish = {
    enable = true;
    interactiveShellInit = ''
      # disable greeting
      set fish_greeting
      # pfetch on shell start (disable pkgs because of execution time)
      PF_INFO="ascii title os host kernel uptime memory editor wm" ${lib.getExe pkgs.pfetch-rs}
    '';
    # Each alias expands to "<ezaBase> <flags>", identical to spelling the
    # commands out one by one.
    shellAliases = builtins.mapAttrs (_name: flags: "${ezaBase} ${flags}") listFlags;
  };
}

View File

@@ -0,0 +1,71 @@
{
  config,
  lib,
  pkgs,
  username,
  service_configs,
  inputs,
  ...
}:
# Impermanence: the root filesystem is ephemeral; everything listed here
# is bind-mounted from /persistent so it survives reboots.
{
  imports = [
    inputs.impermanence.nixosModules.impermanence
  ];
  environment.persistence."/persistent" = {
    # Hide the persistence bind mounts from mount listings.
    hideMounts = true;
    directories = [
      "/var/log"
      "/var/lib/systemd/coredump"
      "/var/lib/nixos"
      "/var/lib/systemd/timers"
      # ZFS cache directory - persisting the directory instead of the file
      # avoids "device busy" errors when ZFS atomically updates the cache
      "/etc/zfs"
      "/var/lib/gitea-runner"
    ];
    files = [
      # Machine ID
      "/etc/machine-id"
    ];
    # Shell history for the primary user and root.
    users.${username} = {
      files = [
        ".local/share/fish/fish_history"
      ];
    };
    users.root = {
      files = [
        ".local/share/fish/fish_history"
      ];
    };
  };
  # Store SSH host keys directly in /persistent to survive tmpfs root wipes.
  # This is more reliable than bind mounts for service-generated files.
  services.openssh.hostKeys = [
    {
      path = "/persistent/etc/ssh/ssh_host_ed25519_key";
      type = "ed25519";
    }
    {
      path = "/persistent/etc/ssh/ssh_host_rsa_key";
      type = "rsa";
      bits = 4096;
    }
  ];
  # Enforce root ownership on /persistent/etc. The impermanence activation
  # script copies ownership from /persistent/etc to /etc via
  # `chown --reference`. If /persistent/etc ever gets non-root ownership,
  # sshd StrictModes rejects /etc/ssh/authorized_keys.d/root and root SSH
  # breaks while non-root users still work.
  # Use "z" (set ownership, non-recursive) not "d" (create only, no-op on existing).
  systemd.tmpfiles.rules = [
    "z /persistent/etc 0755 root root"
  ];
}

View File

@@ -0,0 +1,287 @@
{
  inputs,
  pkgs,
  service_configs,
  ...
}:
# Extends nixpkgs.lib with project-specific helpers. The result is used
# both as `lib` in flake.nix (it provides nixosSystem via the base lib)
# and inside modules.
inputs.nixpkgs.lib.extend (
  final: prev:
  let
    lib = prev;
  in
  {
    # Append extra compiler flags to a package's NIX_CFLAGS_COMPILE,
    # tolerating both env-style and legacy attr-style packages.
    optimizeWithFlags =
      pkg: flags:
      pkg.overrideAttrs (old: {
        env = (old.env or { }) // {
          NIX_CFLAGS_COMPILE =
            (old.env.NIX_CFLAGS_COMPILE or old.NIX_CFLAGS_COMPILE or "")
            + " "
            + (lib.concatStringsSep " " flags);
        };
      });
    # Rebuild a package with -O3 and -march/-mtune for the host CPU
    # (cpu_arch comes from service-configs.nix).
    optimizePackage =
      pkg:
      final.optimizeWithFlags pkg [
        "-O3"
        "-march=${service_configs.cpu_arch}"
        "-mtune=${service_configs.cpu_arch}"
      ];
    # Module factory: confine `service` to the "wg" VPN namespace and
    # forward `port` (TCP+UDP) both into the namespace and through the VPN.
    vpnNamespaceOpenPort =
      port: service:
      { ... }:
      {
        vpnNamespaces.wg = {
          portMappings = [
            {
              from = port;
              to = port;
            }
          ];
          openVPNPorts = [
            {
              port = port;
              protocol = "both";
            }
          ];
        };
        systemd.services.${service}.vpnConfinement = {
          enable = true;
          vpnNamespace = "wg";
        };
      };
    # Module factory: gate `serviceName` behind a oneshot unit that verifies
    # every path in `dirs` is a mounted ZFS dataset, and (when `zpool` is
    # non-empty) that each dataset lives on that pool. Prevents services
    # from writing into an empty mountpoint after a failed pool import.
    serviceMountWithZpool =
      serviceName: zpool: dirs:
      { pkgs, config, ... }:
      {
        systemd.services."${serviceName}-mounts" = {
          wants = [
            "zfs.target"
            "zfs-mount.service"
          ]
          ++ lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
          after = [ "zfs-mount.service" ] ++ lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
          before = [ "${serviceName}.service" ];
          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
            ExecStart = [
              (lib.getExe (
                pkgs.writeShellApplication {
                  name = "ensure-zfs-mounts-with-pool-${serviceName}-${zpool}";
                  runtimeInputs = with pkgs; [
                    gawk
                    coreutils
                    config.boot.zfs.package
                  ];
                  text = ''
                    set -euo pipefail
                    echo "Ensuring ZFS mounts for service: ${serviceName} (pool: ${zpool})"
                    echo "Directories: ${lib.strings.concatStringsSep ", " dirs}"
                    # Validate mounts exist (ensureZfsMounts already has proper PATH)
                    ${lib.getExe pkgs.ensureZfsMounts} ${lib.strings.concatStringsSep " " dirs}
                    # Additional runtime check: verify paths are on correct zpool
                    ${lib.optionalString (zpool != "") ''
                      echo "Verifying ZFS mountpoints are on pool '${zpool}'..."
                      if ! zfs_list_output=$(zfs list -H -o name,mountpoint 2>&1); then
                        echo "ERROR: Failed to query ZFS datasets: $zfs_list_output" >&2
                        exit 1
                      fi
                      # shellcheck disable=SC2043
                      for target in ${lib.strings.concatStringsSep " " dirs}; do
                        echo "Checking: $target"
                        # Find dataset that has this mountpoint
                        dataset=$(echo "$zfs_list_output" | awk -v target="$target" '$2 == target {print $1; exit}')
                        if [ -z "$dataset" ]; then
                          echo "ERROR: No ZFS dataset found for mountpoint: $target" >&2
                          exit 1
                        fi
                        # Extract pool name from dataset (first part before /)
                        actual_pool=$(echo "$dataset" | cut -d'/' -f1)
                        if [ "$actual_pool" != "${zpool}" ]; then
                          echo "ERROR: ZFS pool mismatch for $target" >&2
                          echo "  Expected pool: ${zpool}" >&2
                          echo "  Actual pool: $actual_pool" >&2
                          echo "  Dataset: $dataset" >&2
                          exit 1
                        fi
                        echo "$target is on $dataset (pool: $actual_pool)"
                      done
                      echo "All paths verified successfully on pool '${zpool}'"
                    ''}
                    echo "Mount validation completed for ${serviceName} (pool: ${zpool})"
                  '';
                }
              ))
            ];
          };
        };
        systemd.services.${serviceName} = {
          wants = [
            "${serviceName}-mounts.service"
          ];
          after = [
            "${serviceName}-mounts.service"
          ];
          requires = [
            "${serviceName}-mounts.service"
          ];
        };
        # assert that the pool is even enabled
        #assertions = lib.optionals (zpool != "") [
        #  {
        #    assertion = builtins.elem zpool config.boot.zfs.extraPools;
        #    message = "${zpool} is not enabled in `boot.zfs.extraPools`";
        #  }
        #];
      };
    # Module factory: run the given systemd-tmpfiles rules (ownership/mode
    # fixups) after the service's mount gate and before the service starts.
    serviceFilePerms =
      serviceName: tmpfilesRules:
      { pkgs, ... }:
      let
        confFile = pkgs.writeText "${serviceName}-file-perms.conf" (
          lib.concatStringsSep "\n" tmpfilesRules
        );
      in
      {
        systemd.services."${serviceName}-file-perms" = {
          after = [ "${serviceName}-mounts.service" ];
          before = [ "${serviceName}.service" ];
          serviceConfig = {
            Type = "oneshot";
            RemainAfterExit = true;
            ExecStart = "${pkgs.systemd}/bin/systemd-tmpfiles --create ${confFile}";
          };
        };
        systemd.services.${serviceName} = {
          wants = [ "${serviceName}-file-perms.service" ];
          after = [ "${serviceName}-file-perms.service" ];
        };
      };
    # Creates a Caddy virtualHost with reverse_proxy to a local or VPN-namespaced port.
    # Use `subdomain` for "<name>.${domain}" or `domain` for a full custom domain.
    # Exactly one of `subdomain` or `domain` must be provided.
    mkCaddyReverseProxy =
      {
        subdomain ? null,
        domain ? null,
        port,
        auth ? false,
        vpn ? false,
      }:
      assert (subdomain != null) != (domain != null);
      { config, ... }:
      let
        vhostDomain = if domain != null then domain else "${subdomain}.${service_configs.https.domain}";
        # VPN-confined upstreams are reached via the namespace bridge address;
        # local upstreams bind on the host loopback (":port").
        upstream =
          if vpn then
            "${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString port}"
          else
            ":${builtins.toString port}";
      in
      {
        services.caddy.virtualHosts."${vhostDomain}".extraConfig = lib.concatStringsSep "\n" (
          lib.optional auth "import ${config.age.secrets.caddy_auth.path}" ++ [ "reverse_proxy ${upstream}" ]
        );
      };
    # Creates a fail2ban jail with systemd journal backend.
    # Covers the common pattern: journal-based detection, http/https ports, default thresholds.
    mkFail2banJail =
      {
        name,
        unitName ? "${name}.service",
        failregex,
      }:
      { ... }:
      {
        services.fail2ban.jails.${name} = {
          enabled = true;
          settings = {
            backend = "systemd";
            port = "http,https";
            # defaults: maxretry=5, findtime=10m, bantime=10m
          };
          filter.Definition = {
            inherit failregex;
            ignoreregex = "";
            journalmatch = "_SYSTEMD_UNIT=${unitName}";
          };
        };
      };
    # Creates a hardened Grafana annotation daemon service.
    # Provides DynamicUser, sandboxing, state directory, and GRAFANA_URL/STATE_FILE automatically.
    mkGrafanaAnnotationService =
      {
        name,
        description,
        script,
        after ? [ ],
        environment ? { },
        loadCredential ? null,
      }:
      {
        systemd.services."${name}-annotations" = {
          inherit description;
          after = [
            "network.target"
            "grafana.service"
          ]
          ++ after;
          wantedBy = [ "multi-user.target" ];
          serviceConfig = {
            ExecStart = "${pkgs.python3}/bin/python3 ${script}";
            Restart = "always";
            RestartSec = "10s";
            DynamicUser = true;
            StateDirectory = "${name}-annotations";
            NoNewPrivileges = true;
            ProtectSystem = "strict";
            ProtectHome = true;
            PrivateTmp = true;
            RestrictAddressFamilies = [
              "AF_INET"
              "AF_INET6"
            ];
            MemoryDenyWriteExecute = true;
          }
          // lib.optionalAttrs (loadCredential != null) {
            LoadCredential = loadCredential;
          };
          environment = {
            GRAFANA_URL = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
            STATE_FILE = "/var/lib/${name}-annotations/state.json";
          }
          // environment;
        };
      };
    # Shell command to extract an API key from an *arr config.xml file.
    # Returns a string suitable for $() command substitution in shell scripts.
    extractArrApiKey =
      configXmlPath: "${lib.getExe pkgs.gnugrep} -oP '(?<=<ApiKey>)[^<]+' ${configXmlPath}";
  }
)

View File

@@ -0,0 +1,66 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# Turns off all motherboard/peripheral RGB lighting at boot via OpenRGB.
{
  systemd.services.no-rgb =
    let
      no-rgb = (
        pkgs.writeShellApplication {
          name = "no-rgb";
          runtimeInputs = with pkgs; [
            openrgb
            coreutils
            gnugrep
          ];
          text = ''
            # Retry loop to wait for hardware to be ready
            NUM_DEVICES=0
            for attempt in 1 2 3 4 5; do
              DEVICE_LIST=$(openrgb --noautoconnect --list-devices 2>/dev/null) || DEVICE_LIST=""
              # Device lines look like "<index>: <name>"; count them.
              NUM_DEVICES=$(echo "$DEVICE_LIST" | grep -cE '^[0-9]+: ') || NUM_DEVICES=0
              if [ "$NUM_DEVICES" -gt 0 ]; then
                break
              fi
              if [ "$attempt" -lt 5 ]; then
                sleep 2
              fi
            done
            # If no devices found after retries, exit gracefully
            if [ "$NUM_DEVICES" -eq 0 ]; then
              exit 0
            fi
            # Disable RGB on each device
            # (best-effort: a single failing device shouldn't fail the unit)
            for i in $(seq 0 $((NUM_DEVICES - 1))); do
              openrgb --noautoconnect --device "$i" --mode direct --color 000000 || true
            done
          '';
        }
      );
    in
    {
      description = "disable rgb";
      # NOTE(review): systemd-udev-settle is deprecated upstream; it still
      # works but a udev-rule trigger would be the modern replacement.
      after = [ "systemd-udev-settle.service" ];
      serviceConfig = {
        ExecStart = lib.getExe no-rgb;
        Type = "oneshot";
        Restart = "on-failure";
        RestartSec = 5;
      };
      wantedBy = [ "multi-user.target" ];
    };
  services.hardware.openrgb = {
    enable = true;
    package = pkgs.openrgb-with-all-plugins;
    motherboard = "amd";
  };
  services.udev.packages = [ pkgs.openrgb-with-all-plugins ];
  # SMBus access for motherboard RGB controllers.
  hardware.i2c.enable = true;
}

View File

@@ -0,0 +1,132 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# services.ntfyAlerts: push a ntfy notification whenever any systemd
# service enters the failed state, plus ZED (ZFS event daemon) alerts.
let
  cfg = config.services.ntfyAlerts;
  curl = "${pkgs.curl}/bin/curl";
  hostname = config.networking.hostName;
  # Build the curl auth args as a proper bash array fragment
  authCurlArgs =
    if cfg.tokenFile != null then
      ''
        if [ -f "${cfg.tokenFile}" ]; then
          TOKEN=$(cat "${cfg.tokenFile}" 2>/dev/null || echo "")
          if [ -n "$TOKEN" ]; then
            AUTH_ARGS=(-H "Authorization: Bearer $TOKEN")
          fi
        fi
      ''
    else
      "";
  # Systemd failure alert script
  systemdAlertScript = pkgs.writeShellScript "ntfy-systemd-alert" ''
    set -euo pipefail
    UNIT_NAME="$1"
    SERVER_URL="${cfg.serverUrl}"
    # FIX: `|| true` keeps the pipeline's exit status zero when the topic
    # file is missing. Under `set -euo pipefail` the old form aborted the
    # script on a failed `cat`, making the error message below unreachable.
    TOPIC=$(cat "${cfg.topicFile}" 2>/dev/null | tr -d '[:space:]' || true)
    if [ -z "$TOPIC" ]; then
      echo "ERROR: Could not read topic from ${cfg.topicFile}"
      exit 1
    fi
    # Get journal output for context
    JOURNAL_OUTPUT=$(${pkgs.systemd}/bin/journalctl -u "$UNIT_NAME" -n 15 --no-pager 2>/dev/null || echo "No journal output available")
    # Build auth args
    AUTH_ARGS=()
    ${authCurlArgs}
    # Send notification (best-effort: never fail the alert unit itself)
    ${curl} -sf --max-time 15 -X POST \
      "$SERVER_URL/$TOPIC" \
      -H "Title: [${hostname}] Service failed: $UNIT_NAME" \
      -H "Priority: high" \
      -H "Tags: warning" \
      "''${AUTH_ARGS[@]}" \
      -d "$JOURNAL_OUTPUT" || true
  '';
in
{
  options.services.ntfyAlerts = {
    enable = lib.mkEnableOption "ntfy push notifications for system alerts";
    serverUrl = lib.mkOption {
      type = lib.types.str;
      description = "The ntfy server URL (e.g. https://ntfy.example.com)";
      example = "https://ntfy.example.com";
    };
    topicFile = lib.mkOption {
      type = lib.types.path;
      description = "Path to a file containing the ntfy topic name to publish alerts to.";
      example = "/run/agenix/ntfy-alerts-topic";
    };
    tokenFile = lib.mkOption {
      type = lib.types.nullOr lib.types.path;
      default = null;
      description = ''
        Path to a file containing the ntfy auth token.
        If set, uses Authorization: Bearer header for authentication.
      '';
      example = "/run/secrets/ntfy-token";
    };
  };
  config = lib.mkIf cfg.enable {
    # Per-service OnFailure for monitored services
    systemd.services = {
      "ntfy-alert@" = {
        description = "Send ntfy notification for failed service %i";
        # Clear OnFailure on the alert template itself so the global
        # drop-in below cannot make a failing alert re-trigger itself.
        unitConfig.OnFailure = lib.mkForce "";
        serviceConfig = {
          Type = "oneshot";
          ExecStart = "${systemdAlertScript} %i";
          TimeoutSec = 30;
        };
      };
      # TODO: sanoid's ExecStartPre runs `zfs allow` which blocks on TXG sync;
      # on the hdds pool (slow spinning disks + large async frees) this causes
      # 30+ minute hangs and guaranteed timeouts. Suppress until we fix sanoid
      # to run as root without `zfs allow`. See: nixpkgs#72060, openzfs/zfs#14180
      "sanoid".unitConfig.OnFailure = lib.mkForce "";
    };
    # Global OnFailure drop-in for all services
    systemd.packages = [
      (pkgs.writeTextDir "etc/systemd/system/service.d/onfailure.conf" ''
        [Unit]
        OnFailure=ntfy-alert@%p.service
      '')
      # Sanoid-specific drop-in to override the global OnFailure (see TODO above)
      (pkgs.writeTextDir "etc/systemd/system/sanoid.service.d/onfailure.conf" ''
        [Unit]
        OnFailure=
      '')
    ];
    # ZED (ZFS Event Daemon) ntfy notification settings
    services.zfs.zed = {
      enableMail = false;
      settings = {
        ZED_NTFY_URL = cfg.serverUrl;
        # NOTE(review): zed.rc is sourced by the ZED shell scripts, so this
        # command substitution is evaluated at event time — confirm.
        ZED_NTFY_TOPIC = "$(cat ${cfg.topicFile} | tr -d '[:space:]')";
        ZED_NTFY_ACCESS_TOKEN = lib.mkIf (cfg.tokenFile != null) "$(cat ${cfg.tokenFile})";
        ZED_NOTIFY_VERBOSE = true;
      };
    };
  };
}

View File

@@ -0,0 +1,78 @@
final: prev: {
  # Fails (exit 1) unless every argument is currently a mounted ZFS
  # mountpoint. Used as a pre-start gate for services that must not run
  # against an empty mountpoint.
  ensureZfsMounts = prev.writeShellApplication {
    name = "zfsEnsureMounted";
    runtimeInputs = with prev; [
      zfs
      gawk
      coreutils
      # FIX: the script uses `grep -Fxq` below; include gnugrep explicitly
      # instead of relying on grep being in the ambient PATH (it is not,
      # inside PATH-restricted systemd units).
      gnugrep
    ];
    text = ''
      if [[ "$#" -eq "0" ]]; then
        echo "no arguments passed"
        exit 1
      fi
      # List "mountpoint<TAB>mounted" pairs, keep rows whose last column is
      # "yes", then drop that column so only mountpoints remain.
      # NOTE(review): mountpoints containing whitespace would confuse the
      # awk column handling — none are expected here.
      MOUNTED=$(zfs list -o mountpoint,mounted -H | awk '$NF == "yes" {NF--; print}')
      MISSING=""
      for target in "$@"; do
        if ! grep -Fxq "$target" <<< "$MOUNTED"; then
          MISSING="$MISSING $target"
        fi
      done
      if [[ -n "$MISSING" ]]; then
        echo "FAILURE, missing:$MISSING" 1>&2
        exit 1
      fi
    '';
  };
  # Re-encode FLAC files in place (upstream single-file script, pinned by
  # content hash). SC2086 is excluded because upstream relies on word
  # splitting.
  reflac = prev.writeShellApplication {
    name = "reflac";
    runtimeInputs = with prev; [ flac ];
    excludeShellChecks = [ "2086" ];
    text = builtins.readFile (
      prev.fetchurl {
        url = "https://raw.githubusercontent.com/chungy/reflac/refs/heads/master/reflac";
        sha256 = "61c6cc8be3d276c6714e68b55e5de0e6491f50bbf195233073dbce14a1e278a7";
      }
    );
  };
  # Prometheus exporter for Jellyfin, pinned to an upstream commit.
  jellyfin-exporter = prev.buildGoModule rec {
    pname = "jellyfin-exporter";
    version = "unstable-2025-03-27";
    src = prev.fetchFromGitHub {
      owner = "rebelcore";
      repo = "jellyfin_exporter";
      rev = "8e3970cb1bdf3cb21fac099c13072bb7c1b20cf9";
      hash = "sha256-wDnhepYj1MyLRZlwKfmwf4xiEEL3mgQY6V+7TnBd0MY=";
    };
    vendorHash = "sha256-e08u10e/wNapNZSsD/fGVN9ybMHe3sW0yDIOqI8ZcYs=";
    # upstream tests require a running Jellyfin instance
    doCheck = false;
    meta.mainProgram = "jellyfin_exporter";
  };
  # Prometheus exporter for Intel iGPU utilization. Upstream's binary is
  # built from ./cmd, so rename it to something meaningful.
  igpu-exporter = prev.buildGoModule rec {
    pname = "igpu-exporter";
    version = "unstable-2025-03-27";
    src = prev.fetchFromGitHub {
      owner = "mike1808";
      repo = "igpu-exporter";
      rev = "db2dace1a895c2b950f6d3ba1a2e46729251d124";
      hash = "sha256-xWTiu26UzTZIK/6jeda+x6VePUgoWTS0AekejFdgFWs=";
    };
    vendorHash = "sha256-oeCSKwDKVwvYQ1fjXXTwQSXNl/upDE3WAAk680vqh3U=";
    subPackages = [ "cmd" ];
    postInstall = ''
      mv $out/bin/cmd $out/bin/igpu-exporter
    '';
    meta.mainProgram = "igpu-exporter";
  };
}

View File

@@ -0,0 +1,41 @@
{
  ...
}:
# Power-saving tuning for an always-on headless server.
{
  powerManagement = {
    enable = true;
    cpuFreqGovernor = "powersave";
  };
  # Always-on server: disable all sleep targets.
  systemd.targets = {
    sleep.enable = false;
    suspend.enable = false;
    hibernate.enable = false;
    hybrid-sleep.enable = false;
  };
  boot.kernelParams = [
    # Disable NMI watchdog at boot. Eliminates periodic perf-counter interrupts
    # across all cores (~1 W). Safe: apcupsd provides hardware hang detection
    # via UPS, and softlockup watchdog remains active.
    "nmi_watchdog=0"
    # Route kernel work items to already-busy CPUs rather than waking idle ones.
    # Reduces C-state exit frequency at the cost of slightly higher latency on
    # work items -- irrelevant for a server whose latency-sensitive paths are
    # all in userspace (caddy, jellyfin).
    "workqueue.power_efficient=1"
  ];
  boot.kernel.sysctl = {
    # Belt-and-suspenders: also set via boot param, but sysctl ensures it
    # stays off if anything re-enables it at runtime.
    "kernel.nmi_watchdog" = 0;
  };
  # Server has no audio consumers. Power-gate the HDA codec at module load.
  boot.extraModprobeConfig = ''
    options snd_hda_intel power_save=1 power_save_controller=Y
  '';
}

View File

@@ -0,0 +1,42 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# Secure Boot via lanzaboote; the signing PKI bundle is shipped as an
# agenix-encrypted tar and unpacked at activation time.
{
  boot = {
    # lanzaboote replaces systemd-boot as the boot loader.
    loader.systemd-boot.enable = lib.mkForce false;
    lanzaboote = {
      enable = true;
      # needed to be in `/etc/secureboot` for sbctl to work
      pkiBundle = "/etc/secureboot";
    };
  };
  system.activationScripts = {
    # extract secureboot keys from agenix-decrypted tar
    "secureboot-keys" = {
      # Must run after agenix so the decrypted tar exists.
      deps = [ "agenix" ];
      text = ''
        #!/bin/sh
        (
          # Keys must never be world-readable.
          umask 077
          # Check if keys already exist (e.g., from disko-install)
          if [[ -d ${config.boot.lanzaboote.pkiBundle} && -f ${config.boot.lanzaboote.pkiBundle}/db.key ]]; then
            echo "Secureboot keys already present, skipping extraction"
          else
            echo "Extracting secureboot keys from agenix"
            rm -fr ${config.boot.lanzaboote.pkiBundle} || true
            install -d -o root -g wheel -m 0500 ${config.boot.lanzaboote.pkiBundle}
            ${pkgs.gnutar}/bin/tar xf ${config.age.secrets.secureboot-tar.path} -C ${config.boot.lanzaboote.pkiBundle}
          fi
          # Re-assert ownership/mode on every activation, even when extraction
          # was skipped. NOTE(review): chmod -R 500 also strips the write bit
          # from regular key files (they become r-x) — confirm intended.
          chown -R root:wheel ${config.boot.lanzaboote.pkiBundle}
          chmod -R 500 ${config.boot.lanzaboote.pkiBundle}
        )
      '';
    };
  };
}

View File

@@ -0,0 +1,120 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# System-wide kernel/network hardening for a public-facing server.
{
  # memory allocator
  # BREAKS REDIS-IMMICH
  # environment.memoryAllocator.provider = "graphene-hardened";
  # disable coredumps
  systemd.coredump.enable = false;
  # Needed for Nix sandbox UID/GID mapping inside derivation builds.
  # See https://github.com/NixOS/nixpkgs/issues/287194
  security.unprivilegedUsernsClone = true;
  # Disable kexec to prevent replacing the running kernel at runtime.
  security.protectKernelImage = true;
  # Kernel hardening boot parameters. These recover most of the runtime-
  # configurable protections that the linux-hardened patchset provided.
  boot.kernelParams = [
    # Zero all page allocator pages on free / alloc. Prevents info leaks
    # and use-after-free from seeing stale data. Modest CPU overhead.
    "init_on_alloc=1"
    "init_on_free=1"
    # Prevent SLUB allocator from merging caches with similar size/flags.
    # Keeps different kernel object types in separate slabs, making heap
    # exploitation (type confusion, spray, use-after-free) significantly harder.
    "slab_nomerge"
    # Randomize order of pages returned by the buddy allocator.
    "page_alloc.shuffle=1"
    # Disable debugfs entirely (exposes kernel internals).
    "debugfs=off"
    # Disable legacy vsyscall emulation (unused by any modern glibc).
    "vsyscall=none"
    # Strict IOMMU TLB invalidation (no batching). Prevents DMA-capable
    # devices from accessing stale mappings after unmap.
    "iommu.strict=1"
  ];
  boot.kernel.sysctl = {
    # Immediately reboot on kernel oops (don't leave a compromised
    # kernel running). Negative value = reboot without delay.
    "kernel.panic" = -1;
    # Hide kernel pointers from all processes, including CAP_SYSLOG.
    # Prevents info leaks used to defeat KASLR.
    "kernel.kptr_restrict" = 2;
    # Disable bpf() JIT compiler (eliminates JIT spray attack vector).
    # NOTE(review): on kernels built with CONFIG_BPF_JIT_ALWAYS_ON this
    # sysctl is read-only and the write will fail — confirm it applies here.
    "net.core.bpf_jit_enable" = false;
    # Disable ftrace (kernel function tracer) at runtime.
    "kernel.ftrace_enabled" = false;
    # Strict reverse-path filtering: drop packets arriving on an interface
    # where the source address isn't routable back via that interface.
    "net.ipv4.conf.all.rp_filter" = 1;
    "net.ipv4.conf.default.rp_filter" = 1;
    "net.ipv4.conf.all.log_martians" = true;
    "net.ipv4.conf.default.log_martians" = true;
    # Ignore ICMP redirects (prevents route table poisoning).
    "net.ipv4.conf.all.accept_redirects" = false;
    "net.ipv4.conf.all.secure_redirects" = false;
    "net.ipv4.conf.default.accept_redirects" = false;
    "net.ipv4.conf.default.secure_redirects" = false;
    "net.ipv6.conf.all.accept_redirects" = false;
    "net.ipv6.conf.default.accept_redirects" = false;
    # Don't send ICMP redirects (we are not a router).
    "net.ipv4.conf.all.send_redirects" = false;
    "net.ipv4.conf.default.send_redirects" = false;
    # Ignore broadcast ICMP (SMURF amplification mitigation).
    "net.ipv4.icmp_echo_ignore_broadcasts" = true;
    # Filesystem hardening: prevent hardlink/symlink-based attacks.
    # protected_hardlinks/symlinks: block unprivileged creation of hard/symlinks
    # to files the user doesn't own (prevents TOCTOU privilege escalation).
    # protected_fifos/regular (level 2): restrict opening FIFOs and regular files
    # in world-writable sticky directories to owner/group match only.
    # Also required for systemd-tmpfiles to chmod hardlinked files.
    "fs.protected_hardlinks" = true;
    "fs.protected_symlinks" = true;
    "fs.protected_fifos" = 2;
    "fs.protected_regular" = 2;
  };
  services = {
    # dbus-broker: faster, more robust D-Bus implementation.
    dbus.implementation = "broker";
    /*
      logrotate.enable = true;
      journald = {
        storage = "volatile"; # Store logs in memory
        upload.enable = false; # Disable remote log upload (the default)
        extraConfig = ''
          SystemMaxUse=500M
          SystemMaxFileSize=50M
        '';
      };
    */
  };
  services.fail2ban = {
    enable = true;
    # Use iptables actions for compatibility
    banaction = "iptables-multiport";
    banaction-allports = "iptables-allports";
  };
}

View File

@@ -0,0 +1,22 @@
{
  config,
  lib,
  pkgs,
  ...
}:
{
  # Read-only mount of the USB key labelled "SECRETS".
  # neededForBoot makes it available in the initrd so agenix can find the
  # age identity below before services that need decrypted secrets start.
  fileSystems."/mnt/usb-secrets" = {
    neededForBoot = true;
    fsType = "vfat";
    device = "/dev/disk/by-label/SECRETS";
    # root-owned, mode 0400 (umask=377 masks out everything but owner-read).
    options = [ "ro" "uid=root" "gid=root" "umask=377" ];
  };

  # agenix decrypts secrets with the key stored on the USB drive.
  age.identityPaths = [ "/mnt/usb-secrets/usb-secrets-key" ];
}

View File

@@ -0,0 +1,127 @@
{
  config,
  lib,
  service_configs,
  pkgs,
  ...
}:
let
  # Total RAM in bytes (from /proc/meminfo: 65775836 KiB).
  totalRamBytes = 65775836 * 1024;
  # Hugepage reservations that the kernel carves out before ZFS can use them.
  hugepages2mBytes = service_configs.hugepages_2m.total_pages * 2 * 1024 * 1024;
  hugepages1gBytes = 3 * 1024 * 1024 * 1024; # 3x 1G pages for RandomX (xmrig.nix)
  totalHugepageBytes = hugepages2mBytes + hugepages1gBytes;
  # ARC max: 60% of RAM remaining after hugepages. Leaves headroom for
  # application RSS (PostgreSQL, qBittorrent, Jellyfin, Grafana, etc.),
  # kernel slabs, and page cache.
  arcMaxBytes = (totalRamBytes - totalHugepageBytes) * 60 / 100;
  # Shared sanoid policy for datasets that should keep NO snapshots
  # (caches, blockchain data, bulk HDD storage): autosnap stays enabled but
  # every retention bucket is zero, so sanoid prunes everything it creates.
  # Factored out to avoid repeating the same attrset per dataset.
  noSnapshotPolicy = {
    recursive = true;
    autoprune = true;
    autosnap = true;
    hourly = 0;
    daily = 0;
    monthly = 0;
    yearly = 0;
  };
in
{
  boot.zfs.package = pkgs.zfs_2_4;
  boot.initrd.kernelModules = [ "zfs" ];
  boot.kernelParams = [
    # 120s TXG timeout: batch more dirty data per transaction group so the
    # HDD pool (hdds) writes larger, sequential I/Os instead of many small syncs.
    # This is a global setting (no per-pool control); the SSD pool (tank) syncs
    # infrequently but handles it fine since SSDs don't suffer from seek overhead.
    "zfs.zfs_txg_timeout=120"
    # Cap ARC to prevent it from claiming memory reserved for hugepages.
    # Without this, ZFS auto-sizes c_max to ~62 GiB on a 64 GiB system,
    # ignoring the 11.5 GiB of hugepage reservations.
    "zfs.zfs_arc_max=${toString arcMaxBytes}"
    # vdev I/O scheduler: feed more concurrent reads to the block scheduler so
    # mq-deadline has a larger pool of requests to sort and merge into elevator sweeps.
    # Default async_read_max is 3 — far too few for effective coalescence.
    # 32 was empirically optimal (64 overwhelmed the drives, 3 gave near-zero merges).
    "zfs.zfs_vdev_async_read_max_active=32"
    "zfs.zfs_vdev_async_read_min_active=4"
    # Merge reads within 128 KiB of each other (default 32 KiB). On HDDs, reading a
    # 128 KiB gap is far cheaper than a mechanical seek (~8 ms).
    "zfs.zfs_vdev_read_gap_limit=131072"
    # Allow ZFS to aggregate I/Os up to 4 MiB (default 1 MiB), matching the
    # libtorrent piece extent size for larger sequential disk operations.
    "zfs.zfs_vdev_aggregation_limit=4194304"
  ];
  boot.supportedFilesystems = [ "zfs" ];
  boot.zfs.extraPools = [
    service_configs.zpool_ssds
    service_configs.zpool_hdds
  ];
  services.sanoid = {
    enable = true;
    # Default policy for the SSD pool: a rolling window of recent snapshots.
    datasets."${service_configs.zpool_ssds}" = {
      recursive = true;
      autoprune = true;
      autosnap = true;
      hourly = 5;
      daily = 7;
      monthly = 3;
      yearly = 0;
    };
    # Databases get more frequent short-term snapshots but a shorter tail.
    datasets."${service_configs.zpool_ssds}/services/sql" = {
      recursive = true;
      autoprune = true;
      autosnap = true;
      hourly = 12;
      daily = 2;
      monthly = 0;
      yearly = 0;
    };
    # Regenerable or bulk data: keep no snapshots at all.
    datasets."${service_configs.zpool_ssds}/services/jellyfin/cache" = noSnapshotPolicy;
    datasets."${service_configs.zpool_ssds}/services/monero" = noSnapshotPolicy;
    datasets."${service_configs.zpool_ssds}/services/p2pool" = noSnapshotPolicy;
    datasets."${service_configs.zpool_hdds}" = noSnapshotPolicy;
  };
  services.zfs = {
    # Periodic scrubs catch silent corruption; TRIM keeps the SSD pool healthy.
    autoScrub.enable = true;
    trim.enable = true;
  };
}

View File

@@ -0,0 +1,379 @@
From ab57092a60123e361cf0de1c1a314a9888c45219 Mon Sep 17 00:00:00 2001
From: Simon Gardling <titaniumtown@proton.me>
Date: Sat, 21 Mar 2026 09:24:39 -0400
Subject: [PATCH] temp
---
.../services/networking/firefox-syncserver.md | 23 +++
.../networking/firefox-syncserver.nix | 140 ++++++++++++++----
pkgs/by-name/sy/syncstorage-rs/package.nix | 49 ++++--
3 files changed, 174 insertions(+), 38 deletions(-)
diff --git a/nixos/modules/services/networking/firefox-syncserver.md b/nixos/modules/services/networking/firefox-syncserver.md
index 991e97f799d6..3bc45cfa5640 100644
--- a/nixos/modules/services/networking/firefox-syncserver.md
+++ b/nixos/modules/services/networking/firefox-syncserver.md
@@ -32,6 +32,29 @@ This configuration should never be used in production. It is not encrypted and
stores its secrets in a world-readable location.
:::
+## Database backends {#module-services-firefox-syncserver-database}
+
+The sync server supports MySQL/MariaDB (the default) and PostgreSQL as database
+backends. Set `database.type` to choose the backend:
+
+```nix
+{
+ services.firefox-syncserver = {
+ enable = true;
+ database.type = "postgresql";
+ secrets = "/run/secrets/firefox-syncserver";
+ singleNode = {
+ enable = true;
+ hostname = "localhost";
+ url = "http://localhost:5000";
+ };
+ };
+}
+```
+
+When `database.createLocally` is `true` (the default), the module will
+automatically enable and configure the corresponding database service.
+
## More detailed setup {#module-services-firefox-syncserver-configuration}
The `firefox-syncserver` service provides a number of options to make setting up
diff --git a/nixos/modules/services/networking/firefox-syncserver.nix b/nixos/modules/services/networking/firefox-syncserver.nix
index 6a50e49fc096..70a56314e323 100644
--- a/nixos/modules/services/networking/firefox-syncserver.nix
+++ b/nixos/modules/services/networking/firefox-syncserver.nix
@@ -13,7 +13,21 @@ let
defaultUser = "firefox-syncserver";
dbIsLocal = cfg.database.host == "localhost";
- dbURL = "mysql://${cfg.database.user}@${cfg.database.host}/${cfg.database.name}${lib.optionalString dbIsLocal "?socket=/run/mysqld/mysqld.sock"}";
+ dbIsMySQL = cfg.database.type == "mysql";
+ dbIsPostgreSQL = cfg.database.type == "postgresql";
+
+ dbURL =
+ if dbIsMySQL then
+ "mysql://${cfg.database.user}@${cfg.database.host}/${cfg.database.name}${lib.optionalString dbIsLocal "?socket=/run/mysqld/mysqld.sock"}"
+ else
+ "postgres://${cfg.database.user}@${cfg.database.host}/${cfg.database.name}${lib.optionalString dbIsLocal "?host=/run/postgresql"}";
+
+ # postgresql.target waits for postgresql-setup.service (which runs
+ # ensureDatabases / ensureUsers) to complete, avoiding race conditions
+ # where the syncserver starts before its database and role exist.
+ dbService = if dbIsMySQL then "mysql.service" else "postgresql.target";
+
+ syncserver = cfg.package.override { dbBackend = cfg.database.type; };
format = pkgs.formats.toml { };
settings = {
@@ -22,7 +36,7 @@ let
database_url = dbURL;
};
tokenserver = {
- node_type = "mysql";
+ node_type = if dbIsMySQL then "mysql" else "postgres";
database_url = dbURL;
fxa_email_domain = "api.accounts.firefox.com";
fxa_oauth_server_url = "https://oauth.accounts.firefox.com/v1";
@@ -41,7 +55,8 @@ let
};
};
configFile = format.generate "syncstorage.toml" (lib.recursiveUpdate settings cfg.settings);
- setupScript = pkgs.writeShellScript "firefox-syncserver-setup" ''
+
+ mysqlSetupScript = pkgs.writeShellScript "firefox-syncserver-setup" ''
set -euo pipefail
shopt -s inherit_errexit
@@ -79,6 +94,47 @@ let
echo "Single-node setup failed"
exit 1
'';
+
+ postgresqlSetupScript = pkgs.writeShellScript "firefox-syncserver-setup" ''
+ set -euo pipefail
+ shopt -s inherit_errexit
+
+ schema_configured() {
+ psql -d ${cfg.database.name} -tAc "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'services')" | grep -q t
+ }
+
+ update_config() {
+ psql -d ${cfg.database.name} <<'EOF'
+ BEGIN;
+
+ INSERT INTO services (id, service, pattern)
+ VALUES (1, 'sync-1.5', '{node}/1.5/{uid}')
+ ON CONFLICT (id) DO UPDATE SET service = 'sync-1.5', pattern = '{node}/1.5/{uid}';
+ INSERT INTO nodes (id, service, node, available, current_load,
+ capacity, downed, backoff)
+ VALUES (1, 1, '${cfg.singleNode.url}', ${toString cfg.singleNode.capacity},
+ 0, ${toString cfg.singleNode.capacity}, 0, 0)
+ ON CONFLICT (id) DO UPDATE SET node = '${cfg.singleNode.url}', capacity = ${toString cfg.singleNode.capacity};
+
+ COMMIT;
+ EOF
+ }
+
+
+ for (( try = 0; try < 60; try++ )); do
+ if ! schema_configured; then
+ sleep 2
+ else
+ update_config
+ exit 0
+ fi
+ done
+
+ echo "Single-node setup failed"
+ exit 1
+ '';
+
+ setupScript = if dbIsMySQL then mysqlSetupScript else postgresqlSetupScript;
in
{
@@ -88,25 +144,26 @@ in
the Firefox Sync storage service.
Out of the box this will not be very useful unless you also configure at least
- one service and one nodes by inserting them into the mysql database manually, e.g.
- by running
-
- ```
- INSERT INTO `services` (`id`, `service`, `pattern`) VALUES ('1', 'sync-1.5', '{node}/1.5/{uid}');
- INSERT INTO `nodes` (`id`, `service`, `node`, `available`, `current_load`,
- `capacity`, `downed`, `backoff`)
- VALUES ('1', '1', 'https://mydomain.tld', '1', '0', '10', '0', '0');
- ```
+ one service and one node by inserting them into the database manually, e.g.
+ by running the equivalent SQL for your database backend.
{option}`${opt.singleNode.enable}` does this automatically when enabled
'';
package = lib.mkPackageOption pkgs "syncstorage-rs" { };
+ database.type = lib.mkOption {
+ type = lib.types.enum [
+ "mysql"
+ "postgresql"
+ ];
+ default = "mysql";
+ description = ''
+ Which database backend to use for storage.
+ '';
+ };
+
database.name = lib.mkOption {
- # the mysql module does not allow `-quoting without resorting to shell
- # escaping, so we restrict db names for forward compaitiblity should this
- # behavior ever change.
type = lib.types.strMatching "[a-z_][a-z0-9_]*";
default = defaultDatabase;
description = ''
@@ -117,9 +174,15 @@ in
database.user = lib.mkOption {
type = lib.types.str;
- default = defaultUser;
+ default = if dbIsPostgreSQL then defaultDatabase else defaultUser;
+ defaultText = lib.literalExpression ''
+ if database.type == "postgresql" then "${defaultDatabase}" else "${defaultUser}"
+ '';
description = ''
- Username for database connections.
+ Username for database connections. When using PostgreSQL with
+ `createLocally`, this defaults to the database name so that
+ `ensureDBOwnership` works (it requires user and database names
+ to match).
'';
};
@@ -137,7 +200,8 @@ in
default = true;
description = ''
Whether to create database and user on the local machine if they do not exist.
- This includes enabling unix domain socket authentication for the configured user.
+ This includes enabling the configured database service and setting up
+ authentication for the configured user.
'';
};
@@ -237,7 +301,7 @@ in
};
config = lib.mkIf cfg.enable {
- services.mysql = lib.mkIf cfg.database.createLocally {
+ services.mysql = lib.mkIf (cfg.database.createLocally && dbIsMySQL) {
enable = true;
ensureDatabases = [ cfg.database.name ];
ensureUsers = [
@@ -250,16 +314,27 @@ in
];
};
+ services.postgresql = lib.mkIf (cfg.database.createLocally && dbIsPostgreSQL) {
+ enable = true;
+ ensureDatabases = [ cfg.database.name ];
+ ensureUsers = [
+ {
+ name = cfg.database.user;
+ ensureDBOwnership = true;
+ }
+ ];
+ };
+
systemd.services.firefox-syncserver = {
wantedBy = [ "multi-user.target" ];
- requires = lib.mkIf dbIsLocal [ "mysql.service" ];
- after = lib.mkIf dbIsLocal [ "mysql.service" ];
+ requires = lib.mkIf dbIsLocal [ dbService ];
+ after = lib.mkIf dbIsLocal [ dbService ];
restartTriggers = lib.optional cfg.singleNode.enable setupScript;
environment.RUST_LOG = cfg.logLevel;
serviceConfig = {
- User = defaultUser;
- Group = defaultUser;
- ExecStart = "${cfg.package}/bin/syncserver --config ${configFile}";
+ User = cfg.database.user;
+ Group = cfg.database.user;
+ ExecStart = "${syncserver}/bin/syncserver --config ${configFile}";
EnvironmentFile = lib.mkIf (cfg.secrets != null) "${cfg.secrets}";
# hardening
@@ -303,10 +378,19 @@ in
systemd.services.firefox-syncserver-setup = lib.mkIf cfg.singleNode.enable {
wantedBy = [ "firefox-syncserver.service" ];
- requires = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal "mysql.service";
- after = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal "mysql.service";
- path = [ config.services.mysql.package ];
- serviceConfig.ExecStart = [ "${setupScript}" ];
+ requires = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal dbService;
+ after = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal dbService;
+ path =
+ if dbIsMySQL then [ config.services.mysql.package ] else [ config.services.postgresql.package ];
+ serviceConfig = {
+ ExecStart = [ "${setupScript}" ];
+ }
+ // lib.optionalAttrs dbIsPostgreSQL {
+ # PostgreSQL peer authentication requires the system user to match the
+ # database user. Run as the superuser so we can access all databases.
+ User = "postgres";
+ Group = "postgres";
+ };
};
services.nginx.virtualHosts = lib.mkIf cfg.singleNode.enableNginx {
diff --git a/pkgs/by-name/sy/syncstorage-rs/package.nix b/pkgs/by-name/sy/syncstorage-rs/package.nix
index 39b2b53ab03c..944ed72525af 100644
--- a/pkgs/by-name/sy/syncstorage-rs/package.nix
+++ b/pkgs/by-name/sy/syncstorage-rs/package.nix
@@ -1,14 +1,18 @@
{
fetchFromGitHub,
+ fetchurl,
rustPlatform,
pkg-config,
python3,
cmake,
libmysqlclient,
+ libpq,
+ openssl,
makeBinaryWrapper,
lib,
nix-update-script,
nixosTests,
+ dbBackend ? "mysql",
}:
let
@@ -19,17 +23,23 @@ let
p.tokenlib
p.cryptography
]);
+ # utoipa-swagger-ui downloads Swagger UI assets at build time.
+ # Prefetch the archive for sandboxed builds.
+ swaggerUi = fetchurl {
+ url = "https://github.com/swagger-api/swagger-ui/archive/refs/tags/v5.17.14.zip";
+ hash = "sha256-SBJE0IEgl7Efuu73n3HZQrFxYX+cn5UU5jrL4T5xzNw=";
+ };
in
-rustPlatform.buildRustPackage rec {
+rustPlatform.buildRustPackage (finalAttrs: {
pname = "syncstorage-rs";
- version = "0.21.1-unstable-2026-01-26";
+ version = "0.21.1-unstable-2026-02-24";
src = fetchFromGitHub {
owner = "mozilla-services";
repo = "syncstorage-rs";
- rev = "11659d98f9c69948a0aab353437ce2036c388711";
- hash = "sha256-G37QvxTNh/C3gmKG0UYHI6QBr0F+KLGRNI/Sx33uOsc=";
+ rev = "50a739b58dc9ec81995f86e71d992aa14ccc450e";
+ hash = "sha256-idq0RGdwoV6GVuq36IVVVCFbyMTe8i/EpVWE59D/dhM=";
};
nativeBuildInputs = [
@@ -39,16 +49,35 @@ rustPlatform.buildRustPackage rec {
python3
];
- buildInputs = [
- libmysqlclient
- ];
+ buildInputs =
+ lib.optional (dbBackend == "mysql") libmysqlclient
+ ++ lib.optionals (dbBackend == "postgresql") [
+ libpq
+ openssl
+ ];
+
+ buildNoDefaultFeatures = true;
+ # The syncserver "postgres" feature only enables syncstorage-db/postgres.
+ # tokenserver-db/postgres must be enabled separately so the tokenserver
+ # can also connect to PostgreSQL (it dispatches on the URL scheme at runtime).
+ buildFeatures =
+ let
+ cargoFeature = if dbBackend == "postgresql" then "postgres" else dbBackend;
+ in
+ [
+ cargoFeature
+ "tokenserver-db/${cargoFeature}"
+ "py_verifier"
+ ];
+
+ SWAGGER_UI_DOWNLOAD_URL = "file://${swaggerUi}";
preFixup = ''
wrapProgram $out/bin/syncserver \
--prefix PATH : ${lib.makeBinPath [ pyFxADeps ]}
'';
- cargoHash = "sha256-9Dcf5mDyK/XjsKTlCPXTHoBkIq+FFPDg1zfK24Y9nHQ=";
+ cargoHash = "sha256-80EztkSX+SnmqsRWIXbChUB8AeV1Tp9WXoWNbDY8rUE=";
# almost all tests need a DB to test against
doCheck = false;
@@ -60,10 +89,10 @@ rustPlatform.buildRustPackage rec {
meta = {
description = "Mozilla Sync Storage built with Rust";
homepage = "https://github.com/mozilla-services/syncstorage-rs";
- changelog = "https://github.com/mozilla-services/syncstorage-rs/releases/tag/${version}";
+ changelog = "https://github.com/mozilla-services/syncstorage-rs/releases/tag/${finalAttrs.version}";
license = lib.licenses.mpl20;
maintainers = [ ];
platforms = lib.platforms.linux;
mainProgram = "syncserver";
};
-}
+})
--
2.53.0

View File

@@ -0,0 +1,443 @@
From f0582558f0a8b0ef543b3251c4a07afab89fde63 Mon Sep 17 00:00:00 2001
From: Simon Gardling <titaniumtown@proton.me>
Date: Fri, 17 Apr 2026 19:37:11 -0400
Subject: [PATCH] nixos/jellyfin: add declarative network.xml options
Adds services.jellyfin.network.* (baseUrl, ports, IPv4/6, LAN subnets,
known proxies, remote IP filter, etc.) and services.jellyfin.forceNetworkConfig,
mirroring the existing hardwareAcceleration / forceEncodingConfig pattern.
Motivation: running Jellyfin behind a reverse proxy requires configuring
KnownProxies (so the real client IP is extracted from X-Forwarded-For)
and LocalNetworkSubnets (so LAN clients are correctly classified and not
subject to RemoteClientBitrateLimit). These settings previously had no
declarative option -- they could only be set via the web dashboard or
by hand-editing network.xml, with no guarantee they would survive a
reinstall or be consistent across deployments.
Implementation:
- Adds a networkXmlText template alongside the existing encodingXmlText.
- Factors the force-vs-soft install logic out of preStart into a
small 'manage_config_xml' shell helper; encoding.xml and network.xml
now share the same install/backup semantics.
- Extends the VM test with a machineWithNetworkConfig node and a
subtest that verifies the declared values land in network.xml,
Jellyfin parses them at startup, and the backup-on-overwrite path
works (same shape as the existing 'Force encoding config' subtest).
---
nixos/modules/services/misc/jellyfin.nix | 303 ++++++++++++++++++++---
nixos/tests/jellyfin.nix | 50 ++++
2 files changed, 317 insertions(+), 36 deletions(-)
diff --git a/nixos/modules/services/misc/jellyfin.nix b/nixos/modules/services/misc/jellyfin.nix
index 5c08fc478e45..387da907c652 100644
--- a/nixos/modules/services/misc/jellyfin.nix
+++ b/nixos/modules/services/misc/jellyfin.nix
@@ -26,8 +26,10 @@ let
bool
enum
ints
+ listOf
nullOr
path
+ port
str
submodule
;
@@ -68,6 +70,41 @@ let
</EncodingOptions>
'';
encodingXmlFile = pkgs.writeText "encoding.xml" encodingXmlText;
+ stringListToXml =
+ tag: items:
+ if items == [ ] then
+ "<${tag} />"
+ else
+ "<${tag}>\n ${
+ concatMapStringsSep "\n " (item: "<string>${escapeXML item}</string>") items
+ }\n </${tag}>";
+ networkXmlText = ''
+ <?xml version="1.0" encoding="utf-8"?>
+ <NetworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+ <BaseUrl>${escapeXML cfg.network.baseUrl}</BaseUrl>
+ <EnableHttps>${boolToString cfg.network.enableHttps}</EnableHttps>
+ <RequireHttps>${boolToString cfg.network.requireHttps}</RequireHttps>
+ <InternalHttpPort>${toString cfg.network.internalHttpPort}</InternalHttpPort>
+ <InternalHttpsPort>${toString cfg.network.internalHttpsPort}</InternalHttpsPort>
+ <PublicHttpPort>${toString cfg.network.publicHttpPort}</PublicHttpPort>
+ <PublicHttpsPort>${toString cfg.network.publicHttpsPort}</PublicHttpsPort>
+ <AutoDiscovery>${boolToString cfg.network.autoDiscovery}</AutoDiscovery>
+ <EnableUPnP>${boolToString cfg.network.enableUPnP}</EnableUPnP>
+ <EnableIPv4>${boolToString cfg.network.enableIPv4}</EnableIPv4>
+ <EnableIPv6>${boolToString cfg.network.enableIPv6}</EnableIPv6>
+ <EnableRemoteAccess>${boolToString cfg.network.enableRemoteAccess}</EnableRemoteAccess>
+ ${stringListToXml "LocalNetworkSubnets" cfg.network.localNetworkSubnets}
+ ${stringListToXml "LocalNetworkAddresses" cfg.network.localNetworkAddresses}
+ ${stringListToXml "KnownProxies" cfg.network.knownProxies}
+ <IgnoreVirtualInterfaces>${boolToString cfg.network.ignoreVirtualInterfaces}</IgnoreVirtualInterfaces>
+ ${stringListToXml "VirtualInterfaceNames" cfg.network.virtualInterfaceNames}
+ <EnablePublishedServerUriByRequest>${boolToString cfg.network.enablePublishedServerUriByRequest}</EnablePublishedServerUriByRequest>
+ ${stringListToXml "PublishedServerUriBySubnet" cfg.network.publishedServerUriBySubnet}
+ ${stringListToXml "RemoteIPFilter" cfg.network.remoteIPFilter}
+ <IsRemoteIPFilterBlacklist>${boolToString cfg.network.isRemoteIPFilterBlacklist}</IsRemoteIPFilterBlacklist>
+ </NetworkConfiguration>
+ '';
+ networkXmlFile = pkgs.writeText "network.xml" networkXmlText;
codecListToType =
desc: list:
submodule {
@@ -205,6 +242,196 @@ in
'';
};
+ network = {
+ baseUrl = mkOption {
+ type = str;
+ default = "";
+ example = "/jellyfin";
+ description = ''
+ Prefix added to Jellyfin's internal URLs when it sits behind a reverse proxy at a sub-path.
+ Leave empty when Jellyfin is served at the root of its host.
+ '';
+ };
+
+ enableHttps = mkOption {
+ type = bool;
+ default = false;
+ description = ''
+ Serve HTTPS directly from Jellyfin. Usually unnecessary when terminating TLS in a reverse proxy.
+ '';
+ };
+
+ requireHttps = mkOption {
+ type = bool;
+ default = false;
+ description = ''
+ Redirect plaintext HTTP requests to HTTPS. Only meaningful when {option}`enableHttps` is true.
+ '';
+ };
+
+ internalHttpPort = mkOption {
+ type = port;
+ default = 8096;
+ description = "TCP port Jellyfin binds for HTTP.";
+ };
+
+ internalHttpsPort = mkOption {
+ type = port;
+ default = 8920;
+ description = "TCP port Jellyfin binds for HTTPS. Only used when {option}`enableHttps` is true.";
+ };
+
+ publicHttpPort = mkOption {
+ type = port;
+ default = 8096;
+ description = "HTTP port Jellyfin advertises in server discovery responses and published URIs.";
+ };
+
+ publicHttpsPort = mkOption {
+ type = port;
+ default = 8920;
+ description = "HTTPS port Jellyfin advertises in server discovery responses and published URIs.";
+ };
+
+ autoDiscovery = mkOption {
+ type = bool;
+ default = true;
+ description = "Respond to LAN client auto-discovery broadcasts (UDP 7359).";
+ };
+
+ enableUPnP = mkOption {
+ type = bool;
+ default = false;
+ description = "Attempt to open the public ports on the router via UPnP.";
+ };
+
+ enableIPv4 = mkOption {
+ type = bool;
+ default = true;
+ description = "Listen on IPv4.";
+ };
+
+ enableIPv6 = mkOption {
+ type = bool;
+ default = true;
+ description = "Listen on IPv6.";
+ };
+
+ enableRemoteAccess = mkOption {
+ type = bool;
+ default = true;
+ description = ''
+ Allow connections from clients outside the subnets listed in {option}`localNetworkSubnets`.
+ When false, Jellyfin rejects non-local requests regardless of reverse proxy configuration.
+ '';
+ };
+
+ localNetworkSubnets = mkOption {
+ type = listOf str;
+ default = [ ];
+ example = [
+ "192.168.1.0/24"
+ "10.0.0.0/8"
+ ];
+ description = ''
+ CIDR ranges (or bare IPs) that Jellyfin classifies as the local network.
+ Clients originating from these ranges -- as seen after {option}`knownProxies` X-Forwarded-For
+ unwrapping -- are not subject to {option}`services.jellyfin` remote-client bitrate limits.
+ '';
+ };
+
+ localNetworkAddresses = mkOption {
+ type = listOf str;
+ default = [ ];
+ example = [ "192.168.1.50" ];
+ description = ''
+ Specific interface addresses Jellyfin binds to. Leave empty to bind all interfaces.
+ '';
+ };
+
+ knownProxies = mkOption {
+ type = listOf str;
+ default = [ ];
+ example = [ "127.0.0.1" ];
+ description = ''
+ Addresses of reverse proxies trusted to forward the real client IP via `X-Forwarded-For`.
+ Without this, Jellyfin sees the proxy's address for every request and cannot apply
+ {option}`localNetworkSubnets` classification to the true client.
+ '';
+ };
+
+ ignoreVirtualInterfaces = mkOption {
+ type = bool;
+ default = true;
+ description = "Skip virtual network interfaces (matching {option}`virtualInterfaceNames`) during auto-bind.";
+ };
+
+ virtualInterfaceNames = mkOption {
+ type = listOf str;
+ default = [ "veth" ];
+ description = "Interface name prefixes treated as virtual when {option}`ignoreVirtualInterfaces` is true.";
+ };
+
+ enablePublishedServerUriByRequest = mkOption {
+ type = bool;
+ default = false;
+ description = ''
+ Derive the server's public URI from the incoming request's Host header instead of any
+ configured {option}`publishedServerUriBySubnet` entry.
+ '';
+ };
+
+ publishedServerUriBySubnet = mkOption {
+ type = listOf str;
+ default = [ ];
+ example = [ "192.168.1.0/24=http://jellyfin.lan:8096" ];
+ description = ''
+ Per-subnet overrides for the URI Jellyfin advertises to clients, in `subnet=uri` form.
+ '';
+ };
+
+ remoteIPFilter = mkOption {
+ type = listOf str;
+ default = [ ];
+ example = [ "203.0.113.0/24" ];
+ description = ''
+ IPs or CIDRs used as the allow- or denylist for remote access.
+ Behaviour is controlled by {option}`isRemoteIPFilterBlacklist`.
+ '';
+ };
+
+ isRemoteIPFilterBlacklist = mkOption {
+ type = bool;
+ default = false;
+ description = ''
+ When true, {option}`remoteIPFilter` is a denylist; when false, it is an allowlist
+ (and an empty list allows all remote addresses).
+ '';
+ };
+ };
+
+ forceNetworkConfig = mkOption {
+ type = bool;
+ default = false;
+ description = ''
+ Whether to overwrite Jellyfin's `network.xml` configuration file on each service start.
+
+ When enabled, the network configuration specified in {option}`services.jellyfin.network`
+ is applied on every service restart. A backup of the existing `network.xml` will be
+ created at `network.xml.backup-$timestamp`.
+
+ ::: {.warning}
+ Enabling this option means that any changes made to networking settings through
+ Jellyfin's web dashboard will be lost on the next service restart. The NixOS configuration
+ becomes the single source of truth for network settings.
+ :::
+
+ When disabled (the default), the network configuration is only written if no `network.xml`
+ exists yet. This allows settings to be changed through Jellyfin's web dashboard and persist
+ across restarts, but means the NixOS configuration options will be ignored after the initial setup.
+ '';
+ };
+
transcoding = {
maxConcurrentStreams = mkOption {
type = nullOr ints.positive;
@@ -384,46 +611,50 @@ in
wants = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
- preStart = mkIf cfg.hardwareAcceleration.enable (
- ''
- configDir=${escapeShellArg cfg.configDir}
- encodingXml="$configDir/encoding.xml"
- ''
- + (
- if cfg.forceEncodingConfig then
- ''
- if [[ -e $encodingXml ]]; then
+ preStart =
+ let
+ # manage_config_xml <source> <destination> <force> <description>
+ #
+ # Installs a NixOS-declared XML config at <destination>, preserving
+ # any existing file as a timestamped backup when <force> is true.
+ # With <force>=false, leaves existing files untouched and warns if
+ # the on-disk content differs from the declared content.
+ helper = ''
+ manage_config_xml() {
+ local src="$1" dest="$2" force="$3" desc="$4"
+ if [[ -e "$dest" ]]; then
# this intentionally removes trailing newlines
- currentText="$(<"$encodingXml")"
- configuredText="$(<${encodingXmlFile})"
- if [[ $currentText == "$configuredText" ]]; then
- # don't need to do anything
- exit 0
- else
- encodingXmlBackup="$configDir/encoding.xml.backup-$(date -u +"%FT%H_%M_%SZ")"
- mv --update=none-fail -T "$encodingXml" "$encodingXmlBackup"
+ local currentText configuredText
+ currentText="$(<"$dest")"
+ configuredText="$(<"$src")"
+ if [[ "$currentText" == "$configuredText" ]]; then
+ return 0
fi
- fi
- cp --update=none-fail -T ${encodingXmlFile} "$encodingXml"
- chmod u+w "$encodingXml"
- ''
- else
- ''
- if [[ -e $encodingXml ]]; then
- # this intentionally removes trailing newlines
- currentText="$(<"$encodingXml")"
- configuredText="$(<${encodingXmlFile})"
- if [[ $currentText != "$configuredText" ]]; then
- echo "WARN: $encodingXml already exists and is different from the configured settings. transcoding options NOT applied." >&2
- echo "WARN: Set config.services.jellyfin.forceEncodingConfig = true to override." >&2
+ if [[ "$force" == true ]]; then
+ local backup
+ backup="$dest.backup-$(date -u +"%FT%H_%M_%SZ")"
+ mv --update=none-fail -T "$dest" "$backup"
+ else
+ echo "WARN: $dest already exists and is different from the configured settings. $desc options NOT applied." >&2
+ echo "WARN: Set the corresponding force*Config option to override." >&2
+ return 0
fi
- else
- cp --update=none-fail -T ${encodingXmlFile} "$encodingXml"
- chmod u+w "$encodingXml"
fi
- ''
- )
- );
+ cp --update=none-fail -T "$src" "$dest"
+ chmod u+w "$dest"
+ }
+ configDir=${escapeShellArg cfg.configDir}
+ '';
+ in
+ (
+ helper
+ + optionalString cfg.hardwareAcceleration.enable ''
+ manage_config_xml ${encodingXmlFile} "$configDir/encoding.xml" ${boolToString cfg.forceEncodingConfig} transcoding
+ ''
+ + ''
+ manage_config_xml ${networkXmlFile} "$configDir/network.xml" ${boolToString cfg.forceNetworkConfig} network
+ ''
+ );
# This is mostly follows: https://github.com/jellyfin/jellyfin/blob/master/fedora/jellyfin.service
# Upstream also disable some hardenings when running in LXC, we do the same with the isContainer option
diff --git a/nixos/tests/jellyfin.nix b/nixos/tests/jellyfin.nix
index 4896c13d4eca..0c9191960f78 100644
--- a/nixos/tests/jellyfin.nix
+++ b/nixos/tests/jellyfin.nix
@@ -63,6 +63,26 @@
environment.systemPackages = with pkgs; [ ffmpeg ];
virtualisation.diskSize = 3 * 1024;
};
+
+ machineWithNetworkConfig = {
+ services.jellyfin = {
+ enable = true;
+ forceNetworkConfig = true;
+ network = {
+ localNetworkSubnets = [
+ "192.168.1.0/24"
+ "10.0.0.0/8"
+ ];
+ knownProxies = [ "127.0.0.1" ];
+ enableUPnP = false;
+ enableIPv6 = false;
+ remoteIPFilter = [ "203.0.113.5" ];
+ isRemoteIPFilterBlacklist = true;
+ };
+ };
+ environment.systemPackages = with pkgs; [ ffmpeg ];
+ virtualisation.diskSize = 3 * 1024;
+ };
};
# Documentation of the Jellyfin API: https://api.jellyfin.org/
@@ -122,6 +142,36 @@
# Verify the new encoding.xml does not have the marker (was overwritten)
machineWithForceConfig.fail("grep -q 'MARKER' /var/lib/jellyfin/config/encoding.xml")
+ # Test forceNetworkConfig and network.xml generation
+ with subtest("Force network config writes declared values and backs up on overwrite"):
+ wait_for_jellyfin(machineWithNetworkConfig)
+
+ # Verify network.xml exists and contains the declared values
+ machineWithNetworkConfig.succeed("test -f /var/lib/jellyfin/config/network.xml")
+ machineWithNetworkConfig.succeed("grep -F '<string>192.168.1.0/24</string>' /var/lib/jellyfin/config/network.xml")
+ machineWithNetworkConfig.succeed("grep -F '<string>10.0.0.0/8</string>' /var/lib/jellyfin/config/network.xml")
+ machineWithNetworkConfig.succeed("grep -F '<string>127.0.0.1</string>' /var/lib/jellyfin/config/network.xml")
+ machineWithNetworkConfig.succeed("grep -F '<string>203.0.113.5</string>' /var/lib/jellyfin/config/network.xml")
+ machineWithNetworkConfig.succeed("grep -F '<IsRemoteIPFilterBlacklist>true</IsRemoteIPFilterBlacklist>' /var/lib/jellyfin/config/network.xml")
+ machineWithNetworkConfig.succeed("grep -F '<EnableIPv6>false</EnableIPv6>' /var/lib/jellyfin/config/network.xml")
+ machineWithNetworkConfig.succeed("grep -F '<EnableUPnP>false</EnableUPnP>' /var/lib/jellyfin/config/network.xml")
+
+ # Stop service before modifying config
+ machineWithNetworkConfig.succeed("systemctl stop jellyfin.service")
+
+ # Plant a marker so we can prove the backup-and-overwrite path runs
+ machineWithNetworkConfig.succeed("echo '<!-- NETMARKER -->' > /var/lib/jellyfin/config/network.xml")
+
+ # Restart the service to trigger the backup
+ machineWithNetworkConfig.succeed("systemctl restart jellyfin.service")
+ wait_for_jellyfin(machineWithNetworkConfig)
+
+ # Verify the marked content was preserved as a timestamped backup
+ machineWithNetworkConfig.succeed("grep -q 'NETMARKER' /var/lib/jellyfin/config/network.xml.backup-*")
+
+ # Verify the new network.xml does not have the marker (was overwritten)
+ machineWithNetworkConfig.fail("grep -q 'NETMARKER' /var/lib/jellyfin/config/network.xml")
+
auth_header = 'MediaBrowser Client="NixOS Integration Tests", DeviceId="1337", Device="Apple II", Version="20.09"'
--
2.53.0

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# Install NixOS onto the given disk via disko-install, seeding the secureboot
# keys, the usb-secrets age key, and the persistent partition contents.
set -euo pipefail

DISK="${1:-}"
FLAKE_DIR="$(dirname "$(realpath "$0")")"

if [[ -z "$DISK" ]]; then
  echo "Usage: $0 <disk_device>"
  echo "Example: $0 /dev/nvme0n1"
  echo "         $0 /dev/sda"
  exit 1
fi

if [[ ! -b "$DISK" ]]; then
  echo "Error: $DISK is not a block device"
  exit 1
fi

echo "Installing NixOS to $DISK using flake at $FLAKE_DIR"

# Stage decrypted secrets in private mktemp directories (mode 0700) rather
# than fixed, world-predictable /tmp paths another local user could pre-create
# or read while the install runs.
SECUREBOOT_DIR="$(mktemp -d)"
PERSISTENT_DIR="$(mktemp -d)"
chmod 700 "$SECUREBOOT_DIR" "$PERSISTENT_DIR"

# Remove decrypted secrets on every exit path (success, error, signal).
cleanup() {
  echo "Cleaning up..."
  rm -rf "$SECUREBOOT_DIR" 2>/dev/null || true
  rm -rf "$PERSISTENT_DIR" 2>/dev/null || true
}
trap cleanup EXIT

# Decrypt secureboot keys using the key in the repo
echo "Decrypting secureboot keys..."
if [[ ! -f "$FLAKE_DIR/usb-secrets/usb-secrets-key" ]]; then
  echo "Error: usb-secrets-key not found at $FLAKE_DIR/usb-secrets/usb-secrets-key"
  exit 1
fi
nix-shell -p age --run "age -d -i '$FLAKE_DIR/usb-secrets/usb-secrets-key' '$FLAKE_DIR/secrets/secureboot.tar.age'" | \
  tar -x -C "$SECUREBOOT_DIR"
echo "Secureboot keys extracted"

# Extract persistent partition secrets
echo "Extracting persistent partition contents..."
if [[ -f "$FLAKE_DIR/secrets/persistent.tar" ]]; then
  # -xf without -z: the archive is a plain .tar, and forcing the gzip filter
  # on an uncompressed archive fails. GNU tar auto-detects compression on
  # extraction, so this also keeps working if the file is ever gzipped.
  tar -xf "$FLAKE_DIR/secrets/persistent.tar" -C "$PERSISTENT_DIR"
  echo "Persistent partition contents extracted"
else
  echo "Warning: persistent.tar not found, skipping persistent secrets"
fi

# Check if disko-install is available; otherwise run it straight from the flake.
if ! command -v disko-install >/dev/null 2>&1; then
  echo "Running disko-install via nix..."
  DISKO_INSTALL="nix run github:nix-community/disko#disko-install --"
else
  DISKO_INSTALL="disko-install"
fi

echo "Running disko-install to partition, format, and install NixOS..."

# Build the extra-files arguments
EXTRA_FILES_ARGS=(
  --extra-files "$SECUREBOOT_DIR" /etc/secureboot
  --extra-files "$FLAKE_DIR/usb-secrets/usb-secrets-key" /mnt/usb-secrets/usb-secrets-key
)

# Add each top-level item from persistent separately to avoid nesting
# cp -ar creates /dst/src when copying directories, so we need to copy each item
#
# Also disko-install actually copies the files from extra-files, so we are good here
if [[ -d "$PERSISTENT_DIR" ]] && [[ -n "$(ls -A "$PERSISTENT_DIR" 2>/dev/null)" ]]; then
  for item in "$PERSISTENT_DIR"/*; do
    if [[ -e "$item" ]]; then
      item_name=$(basename "$item")
      EXTRA_FILES_ARGS+=(--extra-files "$item" "/persistent/$item_name")
    fi
  done
fi

# Run disko-install with secureboot keys available.
# $DISKO_INSTALL is deliberately unquoted: it may expand to a multi-word
# "nix run ... --" command line.
sudo $DISKO_INSTALL \
  --mode format \
  --flake "$FLAKE_DIR#muffin" \
  --disk main "$DISK" \
  "${EXTRA_FILES_ARGS[@]}"

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,364 @@
# Central, flake-wide service configuration: storage pools, paths, ports,
# and per-service settings shared by the NixOS modules in this repository.
rec {
  zpool_ssds = "tank";
  zpool_hdds = "hdds";
  torrents_path = "/torrents";
  services_dir = "/services";
  music_dir = "/${zpool_ssds}/music";
  media_group = "media";
  cpu_arch = "znver3";
  ports = {
    # Ports exposed to the internet. The flake asserts every public port
    # appears in the corresponding firewall allow-list (TCP, UDP, or both).
    public = {
      http = {
        port = 80;
        proto = "tcp";
      };
      https = {
        port = 443;
        proto = "both";
      }; # HTTP/3 QUIC
      minecraft = {
        port = 25565;
        proto = "tcp";
      };
      syncthing_protocol = {
        port = 22000;
        proto = "both";
      }; # QUIC
      syncthing_discovery = {
        port = 21027;
        proto = "udp";
      };
      matrix_federation = {
        port = 8448;
        proto = "both";
      }; # HTTP/3 QUIC
      coturn = {
        port = 3478;
        proto = "both";
      };
      coturn_tls = {
        port = 5349;
        proto = "both";
      };
      livekit = {
        port = 7880;
        proto = "tcp";
      };
      soulseek_listen = {
        port = 50300;
        proto = "tcp";
      };
      monero = {
        port = 18080;
        proto = "tcp";
      };
      monero_rpc = {
        port = 18081;
        proto = "tcp";
      }; # restricted public RPC
      p2pool_p2p = {
        port = 37889;
        proto = "tcp";
      };
      murmur = {
        port = 64738;
        proto = "both";
      };
    };
    # Ports bound to localhost / VPN only. The flake asserts none of
    # these appear in the firewall allow-lists.
    private = {
      jellyfin = {
        port = 8096;
        proto = "tcp";
      };
      torrent = {
        port = 6011;
        proto = "tcp";
      };
      # Webhook receiver for the Jellyfin-qBittorrent monitor — Jellyfin pushes
      # playback events here so throttling reacts without waiting for the poll.
      jellyfin_qbittorrent_monitor_webhook = {
        port = 9898;
        proto = "tcp";
      };
      bitmagnet = {
        port = 3333;
        proto = "tcp";
      };
      gitea = {
        port = 2283;
        proto = "tcp";
      };
      immich = {
        port = 2284;
        proto = "tcp";
      };
      soulseek_web = {
        port = 5030;
        proto = "tcp";
      };
      vaultwarden = {
        port = 8222;
        proto = "tcp";
      };
      syncthing_gui = {
        port = 8384;
        proto = "tcp";
      };
      matrix = {
        port = 6167;
        proto = "tcp";
      };
      ntfy = {
        port = 2586;
        proto = "tcp";
      };
      lk_jwt = {
        port = 8081;
        proto = "tcp";
      };
      prowlarr = {
        port = 9696;
        proto = "tcp";
      };
      sonarr = {
        port = 8989;
        proto = "tcp";
      };
      radarr = {
        port = 7878;
        proto = "tcp";
      };
      bazarr = {
        port = 6767;
        proto = "tcp";
      };
      jellyseerr = {
        port = 5055;
        proto = "tcp";
      };
      monero_zmq = {
        port = 18083;
        proto = "tcp";
      };
      p2pool_stratum = {
        port = 3334;
        proto = "tcp";
      };
      firefox_syncserver = {
        port = 5000;
        proto = "tcp";
      };
      mollysocket = {
        port = 8020;
        proto = "tcp";
      };
      grafana = {
        port = 3000;
        proto = "tcp";
      };
      prometheus = {
        port = 9090;
        proto = "tcp";
      };
      prometheus_node = {
        port = 9100;
        proto = "tcp";
      };
      prometheus_apcupsd = {
        port = 9162;
        proto = "tcp";
      };
      llama_cpp = {
        port = 6688;
        proto = "tcp";
      };
      trilium = {
        port = 8787;
        proto = "tcp";
      };
      jellyfin_exporter = {
        port = 9594;
        proto = "tcp";
      };
      qbittorrent_exporter = {
        port = 9561;
        proto = "tcp";
      };
      igpu_exporter = {
        port = 9563;
        proto = "tcp";
      };
      prometheus_zfs = {
        port = 9134;
        proto = "tcp";
      };
      harmonia = {
        port = 5500;
        proto = "tcp";
      };
    };
  };
  # HTTPS / domain settings shared by the reverse proxy and ACME certs.
  https = {
    certs = services_dir + "/http_certs";
    domain = "sigkill.computer";
    old_domain = "gardling.com"; # Redirect traffic from old domain
  };
  gitea = {
    dir = services_dir + "/gitea";
    domain = "git.${https.domain}";
  };
  postgres = {
    socket = "/run/postgresql";
    dataDir = services_dir + "/sql";
    shared_buffers_m = 128; # PostgreSQL default; update if you change shared_buffers
  };
  immich = {
    dir = services_dir + "/immich";
  };
  minecraft = {
    parent_dir = services_dir + "/minecraft";
    server_name = "main";
    memory = {
      heap_size_m = 4000;
      large_page_size_m = 2;
    };
  };
  # qBittorrent save paths; one subdirectory per category.
  torrent = {
    SavePath = torrents_path;
    TempPath = torrents_path + "/incomplete";
    categories = {
      anime = torrents_path + "/anime";
      archive = torrents_path + "/archive";
      audiobooks = torrents_path + "/audiobooks";
      books = torrents_path + "/books";
      games = torrents_path + "/games";
      movies = torrents_path + "/movies";
      music = torrents_path + "/music";
      musicals = torrents_path + "/musicals";
      tvshows = torrents_path + "/tvshows";
    };
  };
  jellyfin = {
    dataDir = services_dir + "/jellyfin";
    cacheDir = services_dir + "/jellyfin_cache";
  };
  slskd = rec {
    base = "/var/lib/slskd";
    downloads = base + "/downloads";
    incomplete = base + "/incomplete";
  };
  vaultwarden = {
    path = "/var/lib/vaultwarden";
  };
  monero = {
    dataDir = services_dir + "/monero";
  };
  p2pool = {
    dataDir = services_dir + "/p2pool";
    walletAddress = "49b6NT2k7fQHs8JvF7naUvchYwTQmRpoMMXb1KJTg5UcZVmyPJ7n6jgiH8DrvEsMg5GvMjJqPB1c1PTBAYtUTsbeHe5YMBx";
  };
  matrix = {
    dataDir = "/var/lib/continuwuity";
    domain = "matrix.${https.domain}";
  };
  ntfy = {
    domain = "ntfy.${https.domain}";
  };
  mollysocket = {
    domain = "mollysocket.${https.domain}";
  };
  livekit = {
    domain = "livekit.${https.domain}";
  };
  syncthing = {
    dataDir = services_dir + "/syncthing";
    signalBackupDir = "/${zpool_ssds}/bak/signal";
    grayjayBackupDir = "/${zpool_ssds}/bak/grayjay";
  };
  prowlarr = {
    dataDir = services_dir + "/prowlarr";
  };
  sonarr = {
    dataDir = services_dir + "/sonarr";
  };
  radarr = {
    dataDir = services_dir + "/radarr";
  };
  bazarr = {
    dataDir = services_dir + "/bazarr";
  };
  jellyseerr = {
    configDir = services_dir + "/jellyseerr";
  };
  recyclarr = {
    dataDir = services_dir + "/recyclarr";
  };
  firefox_syncserver = {
    domain = "firefox-sync.${https.domain}";
  };
  grafana = {
    dir = services_dir + "/grafana";
    domain = "grafana.${https.domain}";
  };
  trilium = {
    dataDir = services_dir + "/trilium";
  };
  # Final library locations the *arr services import into.
  media = {
    moviesDir = torrents_path + "/media/movies";
    tvDir = torrents_path + "/media/tv";
  };
  # Per-service 2MB hugepage budget.
  # Each value is the service's hugepage consumption in MB, derived from
  # its actual memory configuration. The kernel sysctl vm.nr_hugepages
  # is set to total_pages so every service gets what it needs.
  hugepages_2m = rec {
    page_size_m = 2;
    # RandomX dataset (2048MB) + cache (256MB) = 2304MB per instance.
    # Both monerod and p2pool allocate their own full copy via MAP_HUGETLB.
    randomx_instance_m = 2048 + 256;
    services = {
      minecraft_m = minecraft.memory.heap_size_m; # JVM heap via -XX:+UseLargePages
      monerod_m = randomx_instance_m; # block verification dataset
      p2pool_m = randomx_instance_m; # mining dataset
      postgres_m = postgres.shared_buffers_m; # huge_pages = try (default)
    };
    total_pages = builtins.foldl' (a: b: a + b) 0 (builtins.attrValues services) / page_size_m;
  };
}

View File

@@ -0,0 +1,115 @@
# Daily timer that asks Radarr/Sonarr to search for missing and
# cutoff-unmet (upgradeable) media, a few items at a time.
{
  pkgs,
  lib,
  service_configs,
  ...
}:
let
  # config.xml locations — the *arr API keys are extracted from these at runtime.
  radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
  sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
  radarrUrl = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
  sonarrUrl = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
  curl = "${pkgs.curl}/bin/curl";
  jq = "${pkgs.jq}/bin/jq";
  # Max items to search per cycle per category (missing + cutoff) per app
  maxPerCycle = 5;
  # Shell script run by the systemd service. Radarr is searched per-movie
  # (MoviesSearch with an id array); Sonarr per-series (SeriesSearch).
  searchScript = pkgs.writeShellScript "arr-search" ''
    set -euo pipefail
    RADARR_KEY=$(${lib.extractArrApiKey radarrConfig})
    SONARR_KEY=$(${lib.extractArrApiKey sonarrConfig})
    search_radarr() {
      local endpoint="$1"
      local label="$2"
      local ids
      ids=$(${curl} -sf --max-time 30 \
        -H "X-Api-Key: $RADARR_KEY" \
        "${radarrUrl}/api/v3/wanted/$endpoint?page=1&pageSize=${builtins.toString maxPerCycle}&monitored=true&sortKey=title&sortDirection=ascending" \
        | ${jq} -r '.records[].id // empty')
      if [ -z "$ids" ]; then
        echo "radarr: no $label items"
        return
      fi
      local id_array
      id_array=$(echo "$ids" | ${jq} -Rs '[split("\n") | .[] | select(. != "") | tonumber]')
      echo "radarr: searching $label: $id_array"
      ${curl} -sf --max-time 60 \
        -H "X-Api-Key: $RADARR_KEY" \
        -H "Content-Type: application/json" \
        -X POST "${radarrUrl}/api/v3/command" \
        -d "{\"name\": \"MoviesSearch\", \"movieIds\": $id_array}" > /dev/null
    }
    search_sonarr() {
      local endpoint="$1"
      local label="$2"
      local series_ids
      series_ids=$(${curl} -sf --max-time 30 \
        -H "X-Api-Key: $SONARR_KEY" \
        "${sonarrUrl}/api/v3/wanted/$endpoint?page=1&pageSize=${builtins.toString maxPerCycle}&monitored=true&sortKey=title&sortDirection=ascending&includeSeries=true" \
        | ${jq} -r '[.records[].seriesId] | unique | .[] // empty')
      if [ -z "$series_ids" ]; then
        echo "sonarr: no $label items"
        return
      fi
      # search per series (sonarr searches by series, not episode)
      for sid in $series_ids; do
        echo "sonarr: searching $label series $sid"
        ${curl} -sf --max-time 60 \
          -H "X-Api-Key: $SONARR_KEY" \
          -H "Content-Type: application/json" \
          -X POST "${sonarrUrl}/api/v3/command" \
          -d "{\"name\": \"SeriesSearch\", \"seriesId\": $sid}" > /dev/null
      done
    }
    echo "=== arr-search $(date -Iseconds) ==="
    search_radarr "missing" "missing"
    search_radarr "cutoff" "cutoff-unmet"
    search_sonarr "missing" "missing"
    search_sonarr "cutoff" "cutoff-unmet"
    echo "=== done ==="
  '';
in
{
  systemd.services.arr-search = {
    description = "Search for missing and cutoff-unmet media in Radarr/Sonarr";
    after = [
      "network-online.target"
      "radarr.service"
      "sonarr.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      Type = "oneshot";
      ExecStart = "+${searchScript}"; # + prefix: runs as root to read API keys from config.xml
      TimeoutSec = 300;
    };
  };
  systemd.timers.arr-search = {
    description = "Periodically search for missing and cutoff-unmet media";
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "*-*-* 03:00:00"; # daily at 3 AM
      Persistent = true; # run on boot if missed
      RandomizedDelaySec = "30m";
    };
  };
}

View File

@@ -0,0 +1,34 @@
# Bazarr subtitle manager, reverse-proxied behind Caddy with auth.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  cfg = service_configs;
  bazarrPort = cfg.ports.private.bazarr.port;
  bazarrUser = config.services.bazarr.user;
  bazarrGroup = config.services.bazarr.group;
in
{
  imports = [
    # Data dir lives on the SSD pool; the torrent library on the HDD pool.
    (lib.serviceMountWithZpool "bazarr" cfg.zpool_ssds [ cfg.bazarr.dataDir ])
    (lib.serviceMountWithZpool "bazarr" cfg.zpool_hdds [ cfg.torrents_path ])
    (lib.serviceFilePerms "bazarr" [
      "Z ${cfg.bazarr.dataDir} 0700 ${bazarrUser} ${bazarrGroup}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "bazarr";
      port = bazarrPort;
      auth = true;
    })
  ];

  services.bazarr = {
    enable = true;
    listenPort = bazarrPort;
  };

  # Media-group membership so bazarr can write subtitles next to the media files.
  users.users.${bazarrUser}.extraGroups = [ cfg.media_group ];
}

View File

@@ -0,0 +1,153 @@
# Declarative first-boot wiring for the *arr stack: ensures config.xml
# sanity, registers download clients, root folders, naming schemes, and
# cross-links Prowlarr/Bazarr/Jellyseerr to Sonarr/Radarr.
{ config, service_configs, ... }:
{
  services.arrInit = {
    prowlarr = {
      enable = true;
      serviceName = "prowlarr";
      port = service_configs.ports.private.prowlarr.port;
      dataDir = service_configs.prowlarr.dataDir;
      apiVersion = "v1";
      networkNamespacePath = "/run/netns/wg";
      networkNamespaceService = "wg";
      # Guarantee critical config.xml elements before startup. Prowlarr has a
      # history of losing <Port> from config.xml, causing the service to run
      # without binding any socket. See arr-init's configXml for details.
      configXml = {
        Port = service_configs.ports.private.prowlarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      # Prowlarr runs in the wg netns; Sonarr/Radarr in the host netns.
      # From host netns, Prowlarr is reachable at the wg namespace address,
      # not at localhost (which resolves to the host's own netns).
      # Health checks can now run — the reverse-connect is reachable.
      healthChecks = true;
      syncedApps = [
        {
          name = "Sonarr";
          implementation = "Sonarr";
          configContract = "SonarrSettings";
          prowlarrUrl = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.prowlarr.port}";
          baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.private.sonarr.port}";
          apiKeyFrom = "${service_configs.sonarr.dataDir}/config.xml";
          serviceName = "sonarr";
        }
        {
          name = "Radarr";
          implementation = "Radarr";
          configContract = "RadarrSettings";
          prowlarrUrl = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.prowlarr.port}";
          baseUrl = "http://${config.vpnNamespaces.wg.bridgeAddress}:${builtins.toString service_configs.ports.private.radarr.port}";
          apiKeyFrom = "${service_configs.radarr.dataDir}/config.xml";
          serviceName = "radarr";
        }
      ];
    };
    sonarr = {
      enable = true;
      serviceName = "sonarr";
      port = service_configs.ports.private.sonarr.port;
      dataDir = service_configs.sonarr.dataDir;
      healthChecks = true;
      configXml = {
        Port = service_configs.ports.private.sonarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      rootFolders = [ service_configs.media.tvDir ];
      # Sonarr file/folder naming templates.
      naming = {
        renameEpisodes = true;
        replaceIllegalCharacters = true;
        standardEpisodeFormat = "{Series Title} - S{season:00}E{episode:00} - {Episode Title} {Quality Full}";
        dailyEpisodeFormat = "{Series Title} - {Air-Date} - {Episode Title} {Quality Full}";
        animeEpisodeFormat = "{Series Title} - S{season:00}E{episode:00} - {Episode Title} {Quality Full}";
        seasonFolderFormat = "Season {season}";
        seriesFolderFormat = "{Series Title}";
      };
      # qBittorrent runs inside the wg VPN namespace, hence the namespace address.
      downloadClients = [
        {
          name = "qBittorrent";
          implementation = "QBittorrent";
          configContract = "QBittorrentSettings";
          serviceName = "qbittorrent";
          fields = {
            host = config.vpnNamespaces.wg.namespaceAddress;
            port = service_configs.ports.private.torrent.port;
            useSsl = false;
            tvCategory = "tvshows";
          };
        }
      ];
    };
    radarr = {
      enable = true;
      serviceName = "radarr";
      port = service_configs.ports.private.radarr.port;
      dataDir = service_configs.radarr.dataDir;
      healthChecks = true;
      configXml = {
        Port = service_configs.ports.private.radarr.port;
        BindAddress = "*";
        EnableSsl = false;
      };
      rootFolders = [ service_configs.media.moviesDir ];
      naming = {
        renameMovies = true;
        replaceIllegalCharacters = true;
        standardMovieFormat = "{Movie Title} ({Release Year}) {Quality Full}";
        movieFolderFormat = "{Movie Title} ({Release Year})";
      };
      downloadClients = [
        {
          name = "qBittorrent";
          implementation = "QBittorrent";
          configContract = "QBittorrentSettings";
          serviceName = "qbittorrent";
          fields = {
            host = config.vpnNamespaces.wg.namespaceAddress;
            port = service_configs.ports.private.torrent.port;
            useSsl = false;
            movieCategory = "movies";
          };
        }
      ];
    };
  };
  # Bazarr pulls its Sonarr/Radarr connection details from their config.xml.
  services.bazarrInit = {
    enable = true;
    dataDir = "/var/lib/bazarr";
    port = service_configs.ports.private.bazarr.port;
    sonarr = {
      enable = true;
      dataDir = service_configs.sonarr.dataDir;
      port = service_configs.ports.private.sonarr.port;
      serviceName = "sonarr";
    };
    radarr = {
      enable = true;
      dataDir = service_configs.radarr.dataDir;
      port = service_configs.ports.private.radarr.port;
      serviceName = "radarr";
    };
  };
  # Jellyseerr binds requests to the quality profiles recyclarr maintains.
  services.jellyseerrInit = {
    enable = true;
    configDir = service_configs.jellyseerr.configDir;
    radarr = {
      profileName = "Remux + WEB 2160p";
      dataDir = service_configs.radarr.dataDir;
      port = service_configs.ports.private.radarr.port;
      serviceName = "radarr";
    };
    sonarr = {
      profileName = "WEB-2160p";
      dataDir = service_configs.sonarr.dataDir;
      port = service_configs.ports.private.sonarr.port;
      serviceName = "sonarr";
    };
  };
}

View File

@@ -0,0 +1,43 @@
# Jellyseerr media-request frontend, reverse-proxied behind Caddy.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  configDir = service_configs.jellyseerr.configDir;
  jellyseerrPort = service_configs.ports.private.jellyseerr.port;
in
{
  imports = [
    (lib.serviceMountWithZpool "jellyseerr" service_configs.zpool_ssds [ configDir ])
    (lib.serviceFilePerms "jellyseerr" [
      "Z ${configDir} 0700 jellyseerr jellyseerr"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "jellyseerr";
      port = jellyseerrPort;
    })
  ];

  services.jellyseerr = {
    enable = true;
    port = jellyseerrPort;
    inherit configDir;
  };

  # Replace the upstream DynamicUser with a static account so the
  # ZFS-backed config directory keeps stable ownership.
  systemd.services.jellyseerr.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "jellyseerr";
    Group = "jellyseerr";
    ReadWritePaths = [ configDir ];
  };

  users.users.jellyseerr = {
    isSystemUser = true;
    group = "jellyseerr";
    home = configDir;
  };
  users.groups.jellyseerr = { };
}

View File

@@ -0,0 +1,60 @@
{
pkgs,
service_configs,
config,
lib,
...
}:
{
imports = [
(lib.serviceMountWithZpool "prowlarr" service_configs.zpool_ssds [
service_configs.prowlarr.dataDir
])
(lib.vpnNamespaceOpenPort service_configs.ports.private.prowlarr.port "prowlarr")
(lib.serviceFilePerms "prowlarr" [
"Z ${service_configs.prowlarr.dataDir} 0700 prowlarr prowlarr"
])
(lib.mkCaddyReverseProxy {
subdomain = "prowlarr";
port = service_configs.ports.private.prowlarr.port;
auth = true;
vpn = true;
})
];
services.prowlarr = {
enable = true;
dataDir = service_configs.prowlarr.dataDir;
settings.server.port = service_configs.ports.private.prowlarr.port;
};
# The upstream prowlarr module uses DynamicUser=true which is incompatible
# with ZFS-backed persistent storage — the dynamic user can't access files
# on the ZFS mount. Override with a static user to match sonarr/radarr.
users.users.prowlarr = {
isSystemUser = true;
group = "prowlarr";
home = service_configs.prowlarr.dataDir;
};
users.groups.prowlarr = { };
# The upstream prowlarr module hardcodes root:root in tmpfiles for custom dataDirs
# (systemd.tmpfiles.settings."10-prowlarr"), which gets applied by
# systemd-tmpfiles-setup.service on every boot/deploy, resetting the directory
# ownership and making Prowlarr unable to access its SQLite databases.
# Override to use the correct user as we disable DynamicUser
systemd.tmpfiles.settings."10-prowlarr".${service_configs.prowlarr.dataDir}.d = lib.mkForce {
user = "prowlarr";
group = "prowlarr";
mode = "0700";
};
systemd.services.prowlarr.serviceConfig = {
DynamicUser = lib.mkForce false;
User = "prowlarr";
Group = "prowlarr";
StateDirectory = lib.mkForce "";
ExecStart = lib.mkForce "${lib.getExe pkgs.prowlarr} -nobrowser -data=${service_configs.prowlarr.dataDir}";
};
}

View File

@@ -0,0 +1,36 @@
# Radarr movie manager, reverse-proxied behind Caddy with auth.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  cfg = service_configs;
  radarrPort = cfg.ports.private.radarr.port;
  radarrUser = config.services.radarr.user;
in
{
  imports = [
    # State on the SSD pool; the torrent library on the HDD pool.
    (lib.serviceMountWithZpool "radarr" cfg.zpool_ssds [ cfg.radarr.dataDir ])
    (lib.serviceMountWithZpool "radarr" cfg.zpool_hdds [ cfg.torrents_path ])
    (lib.serviceFilePerms "radarr" [
      "Z ${cfg.radarr.dataDir} 0700 ${radarrUser} ${config.services.radarr.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "radarr";
      port = radarrPort;
      auth = true;
    })
  ];

  services.radarr = {
    enable = true;
    dataDir = cfg.radarr.dataDir;
    settings.server.port = radarrPort;
    # Updates come from nixpkgs, not Radarr's built-in updater.
    settings.update.mechanism = "external";
  };

  users.users.${radarrUser}.extraGroups = [ cfg.media_group ];
}

View File

@@ -0,0 +1,224 @@
# Recyclarr syncs TRaSH-Guides quality definitions, profiles, and custom
# formats into Radarr/Sonarr on a daily schedule.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  radarrConfig = "${service_configs.radarr.dataDir}/config.xml";
  sonarrConfig = "${service_configs.sonarr.dataDir}/config.xml";
  configPath = "/var/lib/recyclarr/config.json";
  # Runs as root (via + prefix) after the NixOS module writes config.json.
  # Extracts API keys from radarr/sonarr config.xml and injects them via jq.
  injectApiKeys = pkgs.writeShellScript "recyclarr-inject-api-keys" ''
    RADARR_KEY=$(${lib.extractArrApiKey radarrConfig})
    SONARR_KEY=$(${lib.extractArrApiKey sonarrConfig})
    ${pkgs.jq}/bin/jq \
      --arg rk "$RADARR_KEY" \
      --arg sk "$SONARR_KEY" \
      '.radarr.movies.api_key = $rk | .sonarr.series.api_key = $sk' \
      ${configPath} > ${configPath}.tmp
    mv ${configPath}.tmp ${configPath}
    chown recyclarr:recyclarr ${configPath}
  '';
in
{
  imports = [
    (lib.serviceMountWithZpool "recyclarr" service_configs.zpool_ssds [
      service_configs.recyclarr.dataDir
    ])
  ];
  systemd.tmpfiles.rules = [
    "d ${service_configs.recyclarr.dataDir} 0755 recyclarr recyclarr -"
  ];
  services.recyclarr = {
    enable = true;
    command = "sync";
    schedule = "daily";
    user = "recyclarr";
    group = "recyclarr";
    configuration = {
      radarr.movies = {
        base_url = "http://localhost:${builtins.toString service_configs.ports.private.radarr.port}";
        # Recyclarr is the sole authority for custom formats and scores.
        # Overwrite any manually-created CFs and delete stale ones.
        replace_existing_custom_formats = true;
        delete_old_custom_formats = true;
        include = [
          { template = "radarr-quality-definition-movie"; }
          { template = "radarr-quality-profile-remux-web-2160p"; }
          { template = "radarr-custom-formats-remux-web-2160p"; }
        ];
        # Group WEB 2160p with 1080p in the same quality tier so custom
        # format scores -- not quality ranking -- decide the winner.
        # Native 4K with HDR/DV from good release groups scores high and
        # wins; AI upscales get -10000 from the Upscaled CF and are
        # blocked by min_format_score. Untagged upscales from unknown
        # groups (score ~0) lose to well-scored 1080p (Tier 01 = +1750).
        quality_profiles = [
          {
            name = "Remux + WEB 2160p";
            min_format_score = 0;
            reset_unmatched_scores.enabled = true;
            upgrade = {
              allowed = true;
              until_quality = "Remux-2160p";
              until_score = 10000;
            };
            qualities = [
              { name = "Remux-2160p"; }
              {
                name = "WEB/Bluray";
                qualities = [
                  "WEBDL-2160p"
                  "WEBRip-2160p"
                  "Remux-1080p"
                  "Bluray-1080p"
                  "WEBDL-1080p"
                  "WEBRip-1080p"
                ];
              }
              { name = "HDTV-1080p"; }
              { name = "Bluray-720p"; }
              {
                name = "WEB 720p";
                qualities = [
                  "WEBDL-720p"
                  "WEBRip-720p"
                ];
              }
              { name = "HDTV-720p"; }
            ];
          }
        ];
        custom_formats = [
          # DV (w/o HDR fallback) - block releases with DV that lack HDR10 fallback
          {
            trash_ids = [ "923b6abef9b17f937fab56cfcf89e1f1" ];
            assign_scores_to = [
              { name = "Remux + WEB 2160p"; }
            ];
          }
          # Upscaled - block AI upscales and other upscaled-to-2160p releases
          {
            trash_ids = [ "bfd8eb01832d646a0a89c4deb46f8564" ];
            assign_scores_to = [
              {
                name = "Remux + WEB 2160p";
                score = -10000;
              }
            ];
          }
        ];
      };
      sonarr.series = {
        base_url = "http://localhost:${builtins.toString service_configs.ports.private.sonarr.port}";
        # Recyclarr is the sole authority for custom formats and scores.
        # Overwrite any manually-created CFs and delete stale ones.
        replace_existing_custom_formats = true;
        delete_old_custom_formats = true;
        include = [
          { template = "sonarr-quality-definition-series"; }
          { template = "sonarr-v4-quality-profile-web-2160p"; }
          { template = "sonarr-v4-custom-formats-web-2160p"; }
        ];
        # Group WEB 2160p with 1080p in the same quality tier so custom
        # format scores -- not quality ranking -- decide the winner.
        # Native 4K with HDR/DV from good release groups scores high and
        # wins; AI upscales get -10000 from the Upscaled CF and are
        # blocked by min_format_score. Untagged upscales from unknown
        # groups (score ~0) lose to well-scored 1080p (Tier 01 = +1750).
        quality_profiles = [
          {
            name = "WEB-2160p";
            min_format_score = 0;
            reset_unmatched_scores.enabled = true;
            upgrade = {
              allowed = true;
              until_quality = "WEB/Bluray";
              until_score = 10000;
            };
            qualities = [
              {
                name = "WEB/Bluray";
                qualities = [
                  "WEBDL-2160p"
                  "WEBRip-2160p"
                  "Bluray-1080p Remux"
                  "Bluray-1080p"
                  "WEBDL-1080p"
                  "WEBRip-1080p"
                ];
              }
              { name = "HDTV-1080p"; }
              { name = "Bluray-720p"; }
              {
                name = "WEB 720p";
                qualities = [
                  "WEBDL-720p"
                  "WEBRip-720p"
                ];
              }
              { name = "HDTV-720p"; }
            ];
          }
        ];
        custom_formats = [
          # DV (w/o HDR fallback) - block releases with DV that lack HDR10 fallback
          {
            trash_ids = [ "9b27ab6498ec0f31a3353992e19434ca" ];
            assign_scores_to = [
              { name = "WEB-2160p"; }
            ];
          }
          # Upscaled - block AI upscales and other upscaled-to-2160p releases
          {
            trash_ids = [ "23297a736ca77c0fc8e70f8edd7ee56c" ];
            assign_scores_to = [
              {
                name = "WEB-2160p";
                score = -10000;
              }
            ];
          }
        ];
      };
    };
  };
  # Trigger immediate sync on deploy when recyclarr config changes.
  # restartTriggers on the oneshot service are unreliable (systemd may
  # no-op a restart of an inactive oneshot). Instead, embed a config
  # hash in the timer unit -- NixOS restarts changed timers reliably,
  # and OnActiveSec fires the sync within seconds.
  systemd.timers.recyclarr = {
    timerConfig.OnActiveSec = "5s";
    unitConfig.X-ConfigHash = builtins.hashString "sha256" (
      builtins.toJSON config.services.recyclarr.configuration
    );
  };
  systemd.services.recyclarr = {
    after = [
      "network-online.target"
      "radarr.service"
      "sonarr.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig.ExecStartPre = [ "+${injectApiKeys}" ];
  };
}

View File

@@ -0,0 +1,42 @@
# Sonarr TV manager, reverse-proxied behind Caddy with auth. Also owns the
# creation of the shared media library directories.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  cfg = service_configs;
  sonarrPort = cfg.ports.private.sonarr.port;
  sonarrUser = config.services.sonarr.user;
in
{
  imports = [
    # State on the SSD pool; the torrent library on the HDD pool.
    (lib.serviceMountWithZpool "sonarr" cfg.zpool_ssds [ cfg.sonarr.dataDir ])
    (lib.serviceMountWithZpool "sonarr" cfg.zpool_hdds [ cfg.torrents_path ])
    (lib.serviceFilePerms "sonarr" [
      "Z ${cfg.sonarr.dataDir} 0700 ${sonarrUser} ${config.services.sonarr.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "sonarr";
      port = sonarrPort;
      auth = true;
    })
  ];

  # Library directories: setgid (2775) so imports stay group-owned by media.
  systemd.tmpfiles.rules = [
    "d /torrents/media 2775 root ${cfg.media_group} -"
    "d ${cfg.media.tvDir} 2775 root ${cfg.media_group} -"
    "d ${cfg.media.moviesDir} 2775 root ${cfg.media_group} -"
  ];

  services.sonarr = {
    enable = true;
    dataDir = cfg.sonarr.dataDir;
    settings.server.port = sonarrPort;
    # Updates come from nixpkgs, not Sonarr's built-in updater.
    settings.update.mechanism = "external";
  };

  users.users.${sonarrUser}.extraGroups = [ cfg.media_group ];
}

View File

@@ -0,0 +1,42 @@
# Oneshot systemd service that audits qBittorrent against Radarr/Sonarr
# (see torrent-audit.py for the actual logic).
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
let
  privatePorts = service_configs.ports.private;
  auditPython = pkgs.python3.withPackages (
    ps: with ps; [
      pyarr
      qbittorrent-api
    ]
  );
in
{
  systemd.services.torrent-audit = {
    description = "Audit qBittorrent for unmanaged and abandoned upgrade torrents";
    after = [
      "network-online.target"
      "sonarr.service"
      "radarr.service"
      "qbittorrent.service"
    ];
    wants = [ "network-online.target" ];
    serviceConfig = {
      Type = "oneshot";
      # "+" prefix: run as root so the script can read the *arr config.xml files.
      ExecStart = "+${auditPython}/bin/python ${./torrent-audit.py}";
      TimeoutSec = 300;
    };
    # qBittorrent lives in the wg VPN namespace; the *arrs are on localhost.
    environment = {
      QBITTORRENT_URL = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString privatePorts.torrent.port}";
      RADARR_URL = "http://localhost:${builtins.toString privatePorts.radarr.port}";
      RADARR_CONFIG = "${service_configs.radarr.dataDir}/config.xml";
      SONARR_URL = "http://localhost:${builtins.toString privatePorts.sonarr.port}";
      SONARR_CONFIG = "${service_configs.sonarr.dataDir}/config.xml";
      CATEGORIES = lib.concatStringsSep "," (builtins.attrNames service_configs.torrent.categories);
      TAG_TORRENTS = "true";
    };
  };
}

View File

@@ -0,0 +1,382 @@
#!/usr/bin/env python3
"""
Audit qBittorrent torrents against Radarr/Sonarr.
Reports two categories:
UNMANAGED -- torrents in qBittorrent that no *arr service has ever touched.
These were added manually or by some other tool.
ABANDONED -- torrents that *arr grabbed but later replaced with a better
version. The old torrent is still seeding while the library
points to the new one.
Abandoned detection uses API cross-referencing (not filesystem hardlinks) and
verifies against the *arr's current file state:
1. HISTORY -- group imports by content unit (movieId / episodeId); the
most recent import is the keeper, older ones are candidates.
2. CURRENT -- verify against the *arr's active file mapping.
"""
import logging
import os
import sys
from collections import defaultdict
from xml.etree import ElementTree
import qbittorrentapi
from pyarr import RadarrAPI, SonarrAPI
# Log to stderr so stdout stays free for the audit report itself.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    stream=sys.stderr,
)
log = logging.getLogger(__name__)
def get_api_key(config_path: str) -> str:
    """Read the API key from an *arr service's config.xml.

    Args:
        config_path: path to the service's config.xml.

    Returns:
        The text of the first ``<ApiKey>`` element, stripped of surrounding
        whitespace (XML pretty-printing must not leak into the HTTP header).

    Raises:
        ValueError: if the file contains no non-empty ``<ApiKey>`` element,
            so the failure names the offending file instead of surfacing as
            an opaque ``AttributeError: 'NoneType' object has no attribute``.
    """
    tree = ElementTree.parse(config_path)
    node = tree.find(".//ApiKey")
    key = (node.text or "").strip() if node is not None else ""
    if not key:
        raise ValueError(f"no <ApiKey> element found in {config_path}")
    return key
def paginate(arr_client, endpoint: str, page_size: int = 1000):
    """Yield every record from a paged *arr API endpoint.

    Resolves ``get_<endpoint>`` on the client and walks pages until the
    running count covers ``totalRecords``.
    """
    fetch = getattr(arr_client, f"get_{endpoint}")
    page_number = 1
    while True:
        payload = fetch(page=page_number, page_size=page_size)
        for record in payload["records"]:
            yield record
        if page_number * page_size >= payload["totalRecords"]:
            return
        page_number += 1
def get_qbit_torrents(qbit_client, category: str) -> dict[str, dict]:
    """Map upper-cased info-hash -> torrent dict for one qBittorrent category."""
    by_hash: dict[str, dict] = {}
    for torrent in qbit_client.torrents_info(category=category):
        by_hash[torrent["hash"].upper()] = torrent
    return by_hash
def gib(size_bytes: int) -> str:
    """Render a byte count as GiB with one decimal place (no unit suffix)."""
    return "%.1f" % (size_bytes / 2**30)
# ---------------------------------------------------------------------------
# Collect all known hashes from *arr history + queue
# ---------------------------------------------------------------------------
def collect_all_known_hashes(arr_client, page_size: int = 1000) -> set[str]:
    """Union of upper-cased downloadIds from the *arr queue and history."""
    known: set[str] = set()
    for endpoint in ("queue", "history"):
        upper_ids = (
            (record.get("downloadId") or "").upper()
            for record in paginate(arr_client, endpoint, page_size)
        )
        known.update(download_id for download_id in upper_ids if download_id)
    return known
# ---------------------------------------------------------------------------
# Unmanaged: torrents with hashes not in any *arr history/queue
# ---------------------------------------------------------------------------
def find_unmanaged(qbit_torrents: dict, known_hashes: set) -> list[dict]:
    """Torrents no *arr history/queue has ever referenced, oldest first."""
    orphans = [
        torrent
        for info_hash, torrent in qbit_torrents.items()
        if info_hash not in known_hashes
    ]
    orphans.sort(key=lambda t: t["added_on"])
    return orphans
# ---------------------------------------------------------------------------
# Abandoned movies: group imports by movieId, older = abandoned
# ---------------------------------------------------------------------------
def find_movie_abandoned(radarr, qbit_movies):
    """Find movie torrents superseded by a later import of the same movie.

    For every movie, the most recent ``downloadFolderImported`` event is the
    "keeper"; all earlier imports are candidates for deletion. Each candidate
    still present in qBittorrent is returned with a SAFE/REVIEW status and
    explanatory notes, sorted by the torrent's add time.

    Args:
        radarr: pyarr RadarrAPI client.
        qbit_movies: upper-cased hash -> torrent dict for the movies category.
    """
    log.info("Analysing Radarr import history ...")
    # movieId -> list of {"downloadId", "date"} import events.
    imports_by_movie = defaultdict(list)
    for rec in paginate(radarr, "history"):
        if rec.get("eventType") != "downloadFolderImported":
            continue
        did = (rec.get("downloadId") or "").upper()
        if not did:
            continue
        mid = rec.get("movieId")
        if not mid:
            continue
        imports_by_movie[mid].append(
            {"downloadId": did, "date": rec["date"]}
        )
    # Identify keeper (latest) and abandoned (older) hashes per movie.
    abandoned_hashes: set[str] = set()
    keeper_hashes: set[str] = set()
    # Maps an abandoned hash back to the movie it was imported for.
    hash_to_movie: dict[str, int] = {}
    for mid, events in imports_by_movie.items():
        # NOTE(review): dates are compared as strings — assumes ISO-8601
        # timestamps from the Radarr API so lexical order == chronological.
        ordered = sorted(events, key=lambda e: e["date"])
        keeper_hashes.add(ordered[-1]["downloadId"])
        for e in ordered[:-1]:
            abandoned_hashes.add(e["downloadId"])
            hash_to_movie[e["downloadId"]] = mid
    # A hash that is a keeper for *any* movie must not be deleted.
    abandoned_hashes -= keeper_hashes
    log.info("Fetching Radarr current movie state ...")
    radarr_movies = {m["id"]: m for m in radarr.get_movie()}
    results = []
    for ahash in abandoned_hashes:
        torrent = qbit_movies.get(ahash)
        # Skip hashes that have already been removed from qBittorrent.
        if torrent is None:
            continue
        mid = hash_to_movie.get(ahash)
        movie = radarr_movies.get(mid) if mid else None
        # movieFile may be absent when the movie has no file on disk.
        mf = (movie or {}).get("movieFile") or {}
        current_quality = (mf.get("quality") or {}).get("quality", {}).get("name", "?")
        current_size = mf.get("size", 0)
        status = "SAFE"
        notes = []
        if not movie or not movie.get("hasFile"):
            notes.append("movie removed or has no file in Radarr")
            status = "REVIEW"
        # 5% slack so container/remux overhead doesn't flag every torrent.
        elif torrent["size"] > current_size * 1.05:
            notes.append(
                f"abandoned is larger than current "
                f"({gib(torrent['size'])} > {gib(current_size)} GiB)"
            )
            status = "REVIEW"
        results.append(
            {
                "name": torrent["name"],
                "size": torrent["size"],
                "state": torrent["state"],
                "hash": torrent["hash"],
                "added_on": torrent["added_on"],
                "status": status,
                "notes": notes,
                "current_quality": current_quality,
            }
        )
    return sorted(results, key=lambda r: r["added_on"])
# ---------------------------------------------------------------------------
# Abandoned TV: group imports by episodeId, a hash is abandoned only when
# it is NOT the latest import for ANY episode it covers.
# ---------------------------------------------------------------------------
def find_tv_abandoned(sonarr, qbit_tvshows):
    """Find TV torrents that are no longer the latest import for any episode.

    A hash stays "active" as long as it is the most recent import of at least
    one episode; everything else still sitting in qBittorrent is reported
    with a SAFE/REVIEW status, sorted by add time.
    """
    log.info("Analysing Sonarr import history ...")
    imports_per_episode = defaultdict(list)
    seen_hashes: set[str] = set()
    series_for_hash: dict[str, int] = {}
    for record in paginate(sonarr, "history"):
        if record.get("eventType") != "downloadFolderImported":
            continue
        download_id = (record.get("downloadId") or "").upper()
        episode_id = record.get("episodeId")
        if not download_id or not episode_id:
            continue
        imports_per_episode[episode_id].append(
            {"downloadId": download_id, "date": record["date"]}
        )
        seen_hashes.add(download_id)
        series_id = record.get("seriesId")
        if series_id:
            series_for_hash[download_id] = series_id
    # A hash is "active" if it is the latest import for *any* episode.
    active_hashes = {
        max(events, key=lambda e: e["date"])["downloadId"]
        for events in imports_per_episode.values()
    }
    log.info("Fetching Sonarr current series state ...")
    live_series_ids = {series["id"] for series in sonarr.get_series()}
    report = []
    for stale_hash in seen_hashes - active_hashes:
        torrent = qbit_tvshows.get(stale_hash)
        if torrent is None:
            continue
        verdict = "SAFE"
        notes = []
        series_id = series_for_hash.get(stale_hash)
        if series_id and series_id not in live_series_ids:
            notes.append("series removed from Sonarr")
            verdict = "REVIEW"
        report.append(
            {
                "name": torrent["name"],
                "size": torrent["size"],
                "state": torrent["state"],
                "hash": torrent["hash"],
                "added_on": torrent["added_on"],
                "status": verdict,
                "notes": notes,
            }
        )
    return sorted(report, key=lambda r: r["added_on"])
# ---------------------------------------------------------------------------
# Report
# ---------------------------------------------------------------------------
def print_section(torrents, show_status=False):
    """Pretty-print one report section of torrents to stdout.

    Each torrent gets its name, size/state line, and any notes; a summary
    footer gives totals (split by SAFE/REVIEW when show_status is set).
    """
    if not torrents:
        print(" (none)\n")
        return
    total_size = sum(t["size"] for t in torrents)
    for t in torrents:
        # Status prefix e.g. "[SAFE  ] "; continuation lines are padded to
        # the same width so the detail lines align under the name.
        prefix = f"[{t['status']:6s}] " if show_status else " "
        print(f" {prefix}{t['name']}")
        extra = f"{gib(t['size'])} GiB | {t['state']}"
        print(f" {' ' * len(prefix)}{extra}")
        for note in t.get("notes", []):
            print(f" {' ' * len(prefix)}** {note}")
        print()
    if show_status:
        safe = [t for t in torrents if t["status"] == "SAFE"]
        review = [t for t in torrents if t["status"] == "REVIEW"]
        print(
            f" total={len(torrents)} ({gib(total_size)} GiB) | "
            f"safe={len(safe)} | review={len(review)}"
        )
    else:
        print(f" total={len(torrents)} ({gib(total_size)} GiB)")
    print()
AUDIT_TAGS = {"audit:unmanaged", "audit:abandoned-safe", "audit:abandoned-review"}
def tag_torrents(qbit_client, qbit_torrents, all_known, all_abandoned):
    """Reconcile audit:* tags in qBittorrent with this run's findings.

    Each torrent ends up with at most one audit tag: abandoned torrents get
    audit:abandoned-safe/-review based on their status, torrents unknown to
    any *arr get audit:unmanaged, and stale audit tags are removed.

    Args:
        qbit_client: qbittorrentapi Client.
        qbit_torrents: category -> {upper-cased hash -> torrent dict}.
        all_known: upper-cased hashes known to any *arr queue/history.
        all_abandoned: report rows from find_movie_abandoned/find_tv_abandoned.
    """
    log.info("Tagging torrents ...")
    abandoned_by_hash = {t["hash"].upper(): t for t in all_abandoned}
    # Flatten the per-category maps once so each torrent is resolved in O(1);
    # the original re-scanned every category for every hash (and kept an
    # unused `current_tags` local).
    torrents_by_hash: dict[str, dict] = {}
    for torrents in qbit_torrents.values():
        torrents_by_hash.update(torrents)
    for h, torrent_info in torrents_by_hash.items():
        existing_tags = {t.strip() for t in torrent_info.get("tags", "").split(",") if t.strip()}
        existing_audit_tags = existing_tags & AUDIT_TAGS
        # Decide which single audit tag (if any) this torrent should carry.
        if h in abandoned_by_hash:
            status = abandoned_by_hash[h]["status"]
            desired = "audit:abandoned-safe" if status == "SAFE" else "audit:abandoned-review"
        elif h not in all_known:
            desired = "audit:unmanaged"
        else:
            desired = None
        desired_set = {desired} if desired else set()
        # qBittorrent's API wants the original (lower-case) hash.
        low_hash = torrent_info["hash"]
        for tag in existing_audit_tags - desired_set:
            qbit_client.torrents_remove_tags(tags=tag, torrent_hashes=low_hash)
        for tag in desired_set - existing_audit_tags:
            qbit_client.torrents_add_tags(tags=tag, torrent_hashes=low_hash)
    log.info("Tagging complete")
def main():
    """Audit qBittorrent against Sonarr/Radarr and print a cleanup report.

    Required environment: QBITTORRENT_URL, RADARR_URL, RADARR_CONFIG,
    SONARR_URL, SONARR_CONFIG. Optional: CATEGORIES (comma-separated,
    default "tvshows,movies,anime") and TAG_TORRENTS (truthy to apply
    audit:* tags in qBittorrent).
    """
    qbit_url = os.environ["QBITTORRENT_URL"]
    radarr_url = os.environ["RADARR_URL"]
    radarr_config = os.environ["RADARR_CONFIG"]
    sonarr_url = os.environ["SONARR_URL"]
    sonarr_config = os.environ["SONARR_CONFIG"]
    categories = os.environ.get("CATEGORIES", "tvshows,movies,anime").split(",")
    # API keys are read straight from each service's config.xml on disk.
    radarr_key = get_api_key(radarr_config)
    sonarr_key = get_api_key(sonarr_config)
    radarr = RadarrAPI(radarr_url, radarr_key)
    sonarr = SonarrAPI(sonarr_url, sonarr_key)
    qbit = qbittorrentapi.Client(host=qbit_url)
    log.info("Getting qBittorrent state ...")
    qbit_torrents = {cat: get_qbit_torrents(qbit, cat) for cat in categories}
    for cat, torrents in qbit_torrents.items():
        log.info("  %s: %d torrents", cat, len(torrents))
    log.info("Collecting known hashes from Sonarr ...")
    sonarr_hashes = collect_all_known_hashes(sonarr)
    log.info("  %d unique hashes", len(sonarr_hashes))
    log.info("Collecting known hashes from Radarr ...")
    radarr_hashes = collect_all_known_hashes(radarr)
    log.info("  %d unique hashes", len(radarr_hashes))
    all_known = sonarr_hashes | radarr_hashes
    # -- Unmanaged --
    print("\n========== UNMANAGED TORRENTS ==========\n")
    for cat in categories:
        unmanaged = find_unmanaged(qbit_torrents[cat], all_known)
        print(f"--- {cat} ({len(unmanaged)} unmanaged / {len(qbit_torrents[cat])} total) ---\n")
        print_section(unmanaged)
    # -- Abandoned --
    print("========== ABANDONED UPGRADE LEFTOVERS ==========\n")
    movie_abandoned = find_movie_abandoned(
        radarr, qbit_torrents.get("movies", {})
    )
    print(f"--- movies ({len(movie_abandoned)} abandoned) ---\n")
    print_section(movie_abandoned, show_status=True)
    tv_abandoned = find_tv_abandoned(
        sonarr, qbit_torrents.get("tvshows", {})
    )
    print(f"--- tvshows ({len(tv_abandoned)} abandoned) ---\n")
    print_section(tv_abandoned, show_status=True)
    # -- Summary --
    all_abandoned = movie_abandoned + tv_abandoned
    safe = [t for t in all_abandoned if t["status"] == "SAFE"]
    print("=" * 50)
    print(
        f"ABANDONED: {len(all_abandoned)} total ({len(safe)} safe to delete)"
    )
    print(f"SAFE TO RECLAIM: {gib(sum(t['size'] for t in safe))} GiB")
    # -- Tagging --
    # Tagging mutates qBittorrent state, so it is opt-in via TAG_TORRENTS.
    if os.environ.get("TAG_TORRENTS", "").lower() in ("1", "true", "yes"):
        tag_torrents(qbit, qbit_torrents, all_known, all_abandoned)
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,113 @@
# NixOS module: Bitmagnet (DHT indexer) inside the "wg" VPN namespace, plus a
# recovery service that clears *arr failure backoff after Bitmagnet restarts.
{
  pkgs,
  service_configs,
  config,
  lib,
  ...
}:
let
  prowlarrPort = toString service_configs.ports.private.prowlarr.port;
  sonarrPort = toString service_configs.ports.private.sonarr.port;
  radarrPort = toString service_configs.ports.private.radarr.port;
  bitmagnetPort = toString service_configs.ports.private.bitmagnet.port;
  # Address of the VPN namespace bridge, used to reach Sonarr/Radarr from
  # inside the namespace.
  bridgeAddr = config.vpnNamespaces.wg.bridgeAddress;
  prowlarrConfigXml = "${service_configs.prowlarr.dataDir}/config.xml";
  sonarrConfigXml = "${service_configs.sonarr.dataDir}/config.xml";
  radarrConfigXml = "${service_configs.radarr.dataDir}/config.xml";
  curl = "${pkgs.curl}/bin/curl";
  jq = "${pkgs.jq}/bin/jq";
  # Clears the escalating failure backoff for the Bitmagnet indexer across
  # Prowlarr, Sonarr, and Radarr so searches resume immediately after
  # Bitmagnet restarts instead of waiting hours for disable timers to expire.
  recoveryScript = pkgs.writeShellScript "prowlarr-bitmagnet-recovery" ''
    set -euo pipefail
    # wait_for URL TRIES: poll URL every 5s up to TRIES times, else abort.
    wait_for() {
      for _ in $(seq 1 "$2"); do
        ${curl} -sf --max-time 5 "$1" > /dev/null && return 0
        sleep 5
      done
      echo "$1 not reachable, aborting" >&2; exit 1
    }
    # Test a Bitmagnet-named indexer to clear its failure status.
    # A successful test triggers RecordSuccess() which resets the backoff.
    # Args: API-version (v1/v3), base URL, path to config.xml (for the key).
    clear_status() {
      local key indexer
      key=$(${lib.extractArrApiKey ''"$3"''}) || return 0
      indexer=$(${curl} -sf --max-time 10 \
        -H "X-Api-Key: $key" "$2/api/$1/indexer" | \
        ${jq} 'first(.[] | select(.name | test("Bitmagnet"; "i")))') || return 0
      [ -n "$indexer" ] && [ "$indexer" != "null" ] || return 0
      ${curl} -sf --max-time 30 \
        -H "X-Api-Key: $key" -H "Content-Type: application/json" \
        -X POST "$2/api/$1/indexer/test" -d "$indexer" > /dev/null
    }
    wait_for "http://localhost:${bitmagnetPort}" 12
    wait_for "http://localhost:${prowlarrPort}/ping" 6
    # Prowlarr first downstream apps route searches through it.
    clear_status v1 "http://localhost:${prowlarrPort}" "${prowlarrConfigXml}" || true
    clear_status v3 "http://${bridgeAddr}:${sonarrPort}" "${sonarrConfigXml}" || true
    clear_status v3 "http://${bridgeAddr}:${radarrPort}" "${radarrConfigXml}" || true
  '';
in
{
  imports = [
    # Open Bitmagnet's port inside the VPN namespace and expose it behind
    # an authenticated Caddy reverse proxy.
    (lib.vpnNamespaceOpenPort service_configs.ports.private.bitmagnet.port "bitmagnet")
    (lib.mkCaddyReverseProxy {
      subdomain = "bitmagnet";
      port = service_configs.ports.private.bitmagnet.port;
      auth = true;
      vpn = true;
    })
  ];
  services.bitmagnet = {
    enable = true;
    settings = {
      postgres = {
        host = service_configs.postgres.socket;
      };
      http_server = {
        # TODO! make issue about this being a string and not a `port` type
        port = ":" + (toString service_configs.ports.private.bitmagnet.port);
      };
    };
  };
  # The upstream default (Restart=on-failure) leaves Bitmagnet dead after
  # clean exits (e.g. systemd stop during deploy). Always restart it.
  systemd.services.bitmagnet.serviceConfig = {
    Restart = lib.mkForce "always";
    RestartSec = 10;
  };
  # After Bitmagnet restarts, clear the escalating failure backoff across
  # Prowlarr, Sonarr, and Radarr so searches resume immediately instead of
  # waiting hours for the disable timers to expire.
  systemd.services.prowlarr-bitmagnet-recovery = {
    description = "Clear Prowlarr/Sonarr/Radarr failure status for Bitmagnet indexer";
    after = [
      "bitmagnet.service"
      "prowlarr.service"
      "sonarr.service"
      "radarr.service"
    ];
    # bindsTo + wantedBy on bitmagnet.service: run (and re-run) whenever
    # Bitmagnet itself starts.
    bindsTo = [ "bitmagnet.service" ];
    wantedBy = [ "bitmagnet.service" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      ExecStart = recoveryScript;
      # Same VPN namespace as Bitmagnet and Prowlarr.
      NetworkNamespacePath = "/run/netns/wg";
    };
  };
}

View File

@@ -0,0 +1,45 @@
# NixOS module: Vaultwarden (Bitwarden-compatible server) on PostgreSQL,
# proxied by Caddy and protected by a fail2ban jail on failed logins.
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
{
  imports = [
    # Bind-mount the data dir onto the SSD zpool and lock down permissions.
    (lib.serviceMountWithZpool "vaultwarden" service_configs.zpool_ssds [
      service_configs.vaultwarden.path
    ])
    (lib.serviceFilePerms "vaultwarden" [
      "Z ${service_configs.vaultwarden.path} 0700 vaultwarden vaultwarden"
    ])
    # Ban IPs that repeatedly fail Vaultwarden password auth.
    (lib.mkFail2banJail {
      name = "vaultwarden";
      failregex = ''^.*Username or password is incorrect\. Try again\. IP: <HOST>\..*$'';
    })
  ];
  services.vaultwarden = {
    enable = true;
    dbBackend = "postgresql";
    configurePostgres = true;
    config = {
      # Refer to https://github.com/dani-garcia/vaultwarden/blob/main/.env.template
      DOMAIN = "https://bitwarden.${service_configs.https.domain}";
      SIGNUPS_ALLOWED = false;
      # Only listen on loopback; Caddy terminates TLS and proxies in.
      ROCKET_ADDRESS = "127.0.0.1";
      ROCKET_PORT = service_configs.ports.private.vaultwarden.port;
      ROCKET_LOG = "critical";
    };
  };
  services.caddy.virtualHosts."bitwarden.${service_configs.https.domain}".extraConfig = ''
    encode zstd gzip
    reverse_proxy :${toString config.services.vaultwarden.config.ROCKET_PORT} {
      header_up X-Real-IP {remote_host}
    }
  '';
}

View File

@@ -0,0 +1,162 @@
# NixOS module: Caddy web server — serves the Hugo site on the new domain,
# redirects the old domain (incl. wildcard subdomains via on-demand TLS),
# and runs a fail2ban jail over Caddy's access logs.
{
  config,
  service_configs,
  pkgs,
  lib,
  inputs,
  ...
}:
let
  # Pinned Hugo theme sources for a reproducible site build.
  theme = pkgs.fetchFromGitHub {
    owner = "kaiiiz";
    repo = "hugo-theme-monochrome";
    rev = "d17e05715e91f41a842f2656e6bdd70cba73de91";
    sha256 = "h9I2ukugVrldIC3SXefS0L3R245oa+TuRChOCJJgF24=";
  };
  hugo-neko = pkgs.fetchFromGitHub {
    owner = "ystepanoff";
    repo = "hugo-neko";
    rev = "5a50034acbb1ae0cec19775af64e7167ca22725e";
    sha256 = "VLwr4zEeFQU/b+vj0XTLSuEiosuNFu2du4uud7m8bnw=";
  };
  # Static site built at nix-build time from the flake input `website`.
  hugoWebsite = pkgs.stdenv.mkDerivation {
    pname = "hugo-site";
    version = "0.1";
    src = inputs.website;
    nativeBuildInputs = with pkgs; [
      hugo
      go
      git
    ];
    installPhase = ''
      rm -fr themes/theme modules/hugo-neko
      cp -r ${theme} themes/theme
      cp -r ${hugo-neko} modules/hugo-neko
      hugo --minify -d $out;
    '';
  };
  newDomain = service_configs.https.domain;
  oldDomain = service_configs.https.old_domain;
in
{
  imports = [
    (lib.serviceMountWithZpool "caddy" service_configs.zpool_ssds [
      config.services.caddy.dataDir
    ])
  ];
  services.caddy = {
    enable = true;
    email = "titaniumtown@proton.me";
    # Build with Njalla DNS provider for DNS-01 ACME challenges (wildcard certs)
    package = pkgs.caddy.withPlugins {
      plugins = [ "github.com/caddy-dns/njalla@v0.0.0-20250823094507-f709141f1fe6" ];
      hash = "sha256-rrOAR6noTDpV/I/hZXxhz0OXVJKu0mFQRq87RUrpmzw=";
    };
    globalConfig = ''
      # Wildcard cert for *.${newDomain} via DNS-01 challenge
      acme_dns njalla {
        api_token {env.NJALLA_API_TOKEN}
      }
      # On-demand TLS for old domain redirects
      on_demand_tls {
        ask http://localhost:9123/check
      }
    '';
    # Internal endpoint to validate on-demand TLS requests
    # Only allows certs for *.${oldDomain}
    extraConfig = ''
      http://localhost:9123 {
        @allowed expression {query.domain}.endsWith(".${oldDomain}") || {query.domain} == "${oldDomain}" || {query.domain} == "www.${oldDomain}"
        respond @allowed 200
        respond 403
      }
    '';
    virtualHosts = {
      ${newDomain} = {
        extraConfig = ''
          root * ${hugoWebsite}
          file_server browse
        '';
        serverAliases = [ "www.${newDomain}" ];
      };
      # Redirect old domain (bare + www) to new domain
      ${oldDomain} = {
        extraConfig = ''
          redir https://${newDomain}{uri} permanent
        '';
        serverAliases = [ "www.${oldDomain}" ];
      };
      # Wildcard redirect for all old domain subdomains
      # Uses on-demand TLS - certs issued automatically on first request
      "*.${oldDomain}" = {
        extraConfig = ''
          tls {
            on_demand
          }
          # {labels.2} extracts subdomain from *.gardling.com
          redir https://{labels.2}.${newDomain}{uri} permanent
        '';
      };
    };
  };
  # Inject Njalla API token for DNS-01 challenge
  systemd.services.caddy.serviceConfig.EnvironmentFile = config.age.secrets.njalla-api-token-env.path;
  systemd.tmpfiles.rules = [
    "d ${config.services.caddy.dataDir} 700 ${config.services.caddy.user} ${config.services.caddy.group}"
  ];
  # NOTE(review): systemd.packages expects packages shipping systemd units;
  # nssTools looks like it was meant for environment.systemPackages — confirm.
  systemd.packages = with pkgs; [ nssTools ];
  networking.firewall.allowedTCPPorts = [
    service_configs.ports.public.https.port
    # http (but really acmeCA challenges)
    service_configs.ports.public.http.port
  ];
  # UDP 443 for HTTP/3 (QUIC).
  networking.firewall.allowedUDPPorts = [
    service_configs.ports.public.https.port
  ];
  # Protect Caddy basic auth endpoints from brute force attacks
  services.fail2ban.jails.caddy-auth = {
    enabled = true;
    settings = {
      backend = "auto";
      port = "http,https";
      logpath = "/var/log/caddy/access-*.log";
      # defaults: maxretry=5, findtime=10m, bantime=10m
      # Ignore local network IPs - NAT hairpinning causes all LAN traffic to
      # appear from the router IP (192.168.1.1). Banning it blocks all internal access.
      ignoreip = "127.0.0.1/8 ::1 192.168.1.0/24";
    };
    filter.Definition = {
      # Only match 401s where an Authorization header was actually sent.
      # Without this, the normal HTTP Basic Auth challenge-response flow
      # (browser probes without credentials, gets 401, then resends with
      # credentials) counts every page visit as a "failure."
      failregex = ''^.*"remote_ip":"<HOST>".*"Authorization":\["REDACTED"\].*"status":401.*$'';
      ignoreregex = "";
      datepattern = ''"ts":{Epoch}\.'';
    };
  };
}

View File

@@ -0,0 +1,39 @@
# NixOS module: serves a second Hugo site (senior project) on its own
# subdomain through the shared Caddy instance.
{
  config,
  lib,
  pkgs,
  service_configs,
  inputs,
  ...
}:
let
  # Pinned Hugo theme for a reproducible build.
  theme = pkgs.fetchFromGitHub {
    owner = "kaiiiz";
    repo = "hugo-theme-monochrome";
    rev = "d17e05715e91f41a842f2656e6bdd70cba73de91";
    sha256 = "h9I2ukugVrldIC3SXefS0L3R245oa+TuRChOCJJgF24=";
  };
  # Static site built at nix-build time from the `senior_project-website`
  # flake input.
  hugoWebsite = pkgs.stdenv.mkDerivation {
    pname = "hugo-site";
    version = "0.1";
    src = inputs.senior_project-website;
    nativeBuildInputs = with pkgs; [
      hugo
    ];
    installPhase = ''
      rm -fr themes/theme
      cp -rv ${theme} themes/theme
      hugo --minify -d $out;
    '';
  };
in
{
  services.caddy.virtualHosts."senior-project.${service_configs.https.domain}".extraConfig = ''
    root * ${hugoWebsite}
    file_server browse
  '';
}

View File

@@ -0,0 +1,7 @@
# Aggregator module for the Caddy-related configuration in this directory.
{
  imports = [
    ./caddy.nix
    # KEEP UNTIL 2028
    ./caddy_senior_project.nix
  ];
}

View File

@@ -0,0 +1,27 @@
# NixOS module: ddns-updater refreshes dynamic-DNS records every 5 minutes.
# Runs as a static system user so the agenix secret can be made readable
# by a known uid instead of a DynamicUser.
{
  config,
  lib,
  ...
}:
{
  services.ddns-updater = {
    enable = true;
    environment = {
      PERIOD = "5m";
      # ddns-updater reads config from this path at runtime
      CONFIG_FILEPATH = config.age.secrets.ddns-updater-config.path;
    };
  };
  users.users.ddns-updater = {
    isSystemUser = true;
    group = "ddns-updater";
  };
  users.groups.ddns-updater = { };
  # Upstream module defaults to DynamicUser; force the static user instead.
  systemd.services.ddns-updater.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "ddns-updater";
    Group = "ddns-updater";
  };
}

View File

@@ -0,0 +1,43 @@
# NixOS module: self-hosted Firefox Sync (syncstorage) in single-node mode,
# backed by a local PostgreSQL database and proxied by Caddy.
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
{
  imports = [
    (lib.mkCaddyReverseProxy {
      domain = service_configs.firefox_syncserver.domain;
      port = service_configs.ports.private.firefox_syncserver.port;
    })
  ];
  services.firefox-syncserver = {
    enable = true;
    database = {
      # NOTE(review): upstream syncstorage-rs primarily documents MySQL;
      # confirm this module/build actually supports a postgresql backend.
      type = "postgresql";
      createLocally = false;
      user = "firefox_syncserver";
    };
    # Environment file with the sync master secret (agenix-managed).
    secrets = config.age.secrets.firefox-syncserver-env.path;
    settings.port = service_configs.ports.private.firefox_syncserver.port;
    singleNode = {
      enable = true;
      hostname = service_configs.firefox_syncserver.domain;
      url = "https://${service_configs.firefox_syncserver.domain}";
      # Single user instance.
      capacity = 1;
    };
  };
  # Database/user are created here because createLocally is disabled above.
  services.postgresql = {
    ensureDatabases = [ "firefox_syncserver" ];
    ensureUsers = [
      {
        name = "firefox_syncserver";
        ensureDBOwnership = true;
      }
    ];
  };
}

View File

@@ -0,0 +1,50 @@
# NixOS module: Gitea Actions runner ("muffin") registered against the
# co-located Gitea instance, executing jobs directly on the host.
{
  config,
  lib,
  pkgs,
  service_configs,
  ...
}:
{
  services.gitea-actions-runner.instances.muffin = {
    enable = true;
    name = "muffin";
    url = config.services.gitea.settings.server.ROOT_URL;
    tokenFile = config.age.secrets.gitea-runner-token.path;
    # Jobs target this runner via the "nix" label; ":host" = no container.
    labels = [ "nix:host" ];
    # Tools available to workflow steps running on the host.
    hostPackages = with pkgs; [
      bash
      coreutils
      curl
      gawk
      git
      git-crypt
      gnugrep
      gnused
      jq
      nix
      nodejs
      openssh
    ];
    settings = {
      runner = {
        # One job at a time; generous timeout for long Nix builds.
        capacity = 1;
        timeout = "6h";
      };
    };
  };
  # Override DynamicUser to use our static gitea-runner user, and ensure
  # the runner doesn't start before the co-located gitea instance is ready
  # (upstream can't assume locality, so this dependency is ours to add).
  systemd.services."gitea-runner-muffin" = {
    requires = [ "gitea.service" ];
    after = [ "gitea.service" ];
    serviceConfig = {
      DynamicUser = lib.mkForce false;
      User = "gitea-runner";
      Group = "gitea-runner";
    };
    # SSH identity + pinned known_hosts for CI pushes/deploys.
    environment.GIT_SSH_COMMAND = "ssh -i /run/agenix/ci-deploy-key -o StrictHostKeyChecking=yes -o UserKnownHostsFile=/etc/ci-known-hosts";
  };
}

View File

@@ -0,0 +1,65 @@
# NixOS module: Gitea on PostgreSQL (unix socket), behind Caddy, with a
# fail2ban jail for failed logins and SSH access for the git user.
{
  pkgs,
  lib,
  config,
  service_configs,
  ...
}:
{
  imports = [
    # State lives on the SSD zpool, owned exclusively by the gitea user.
    (lib.serviceMountWithZpool "gitea" service_configs.zpool_ssds [ config.services.gitea.stateDir ])
    (lib.serviceFilePerms "gitea" [
      "Z ${config.services.gitea.stateDir} 0700 ${config.services.gitea.user} ${config.services.gitea.group}"
    ])
    (lib.mkCaddyReverseProxy {
      domain = service_configs.gitea.domain;
      port = service_configs.ports.private.gitea.port;
    })
    (lib.mkFail2banJail {
      name = "gitea";
      failregex = "^.*Failed authentication attempt for .* from <HOST>:.*$";
    })
  ];
  services.gitea = {
    enable = true;
    appName = "Simon Gardling's Gitea instance";
    stateDir = service_configs.gitea.dir;
    database = {
      type = "postgres";
      socket = service_configs.postgres.socket;
    };
    settings = {
      server = {
        SSH_USER = "gitea";
        DOMAIN = service_configs.gitea.domain;
        ROOT_URL = "https://" + config.services.gitea.settings.server.DOMAIN;
        HTTP_PORT = service_configs.ports.private.gitea.port;
        LANDING_PAGE = "/explore/repos";
        # Git over SSH only; HTTP clone/push is disabled.
        DISABLE_HTTP_GIT = true;
      };
      session = {
        # https cookies or smth
        COOKIE_SECURE = true;
      };
      # only I shall use gitea
      service.DISABLE_REGISTRATION = true;
      actions.ENABLED = true;
    };
  };
  services.postgresql = {
    ensureDatabases = [ config.services.gitea.user ];
    ensureUsers = [
      {
        name = config.services.gitea.database.user;
        ensureDBOwnership = true;
        ensureClauses.login = true;
      }
    ];
  };
  # Permit SSH logins for the gitea user so `git@...` clones work.
  services.openssh.settings.AllowUsers = [ config.services.gitea.user ];
}

View File

@@ -0,0 +1,698 @@
# NixOS module: declaratively builds the Grafana "System Overview" dashboard
# as JSON (written to /etc for Grafana's dashboard provisioning to pick up).
# The attrset below mirrors Grafana's dashboard JSON model verbatim.
{
  ...
}:
let
  # Shared datasource reference used by every panel.
  promDs = {
    type = "prometheus";
    uid = "prometheus";
  };
  dashboard = {
    editable = true;
    graphTooltip = 1;
    schemaVersion = 39;
    tags = [
      "system"
      "monitoring"
    ];
    time = {
      from = "now-6h";
      to = "now";
    };
    timezone = "browser";
    title = "System Overview";
    uid = "system-overview";
    # Event overlays: Jellyfin streams and ZFS scrubs come from Grafana
    # annotation tags; LLM activity is derived from a Prometheus query.
    annotations.list = [
      {
        name = "Jellyfin Streams";
        datasource = {
          type = "grafana";
          uid = "-- Grafana --";
        };
        enable = true;
        iconColor = "green";
        showIn = 0;
        type = "tags";
        tags = [ "jellyfin" ];
      }
      {
        name = "ZFS Scrubs";
        datasource = {
          type = "grafana";
          uid = "-- Grafana --";
        };
        enable = true;
        iconColor = "orange";
        showIn = 0;
        type = "tags";
        tags = [ "zfs-scrub" ];
      }
      {
        name = "LLM Requests";
        datasource = promDs;
        enable = true;
        iconColor = "purple";
        target = {
          datasource = promDs;
          expr = "llamacpp:requests_processing > 0";
          instant = false;
          range = true;
          refId = "A";
        };
        titleFormat = "LLM inference";
      }
    ];
    panels = [
      # -- Row 1: UPS --
      {
        id = 1;
        type = "timeseries";
        title = "UPS Power Draw";
        gridPos = {
          h = 8;
          w = 8;
          x = 0;
          y = 0;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts";
            legendFormat = "Power (W)";
            refId = "A";
          }
          {
            datasource = promDs;
            # NOTE(review): the "+ 4.5" offset looks like a fixed correction
            # (watts) added to the UPS reading — confirm its origin.
            expr = "avg_over_time((apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts + 4.5)[5m:])";
            legendFormat = "5m average (W)";
            refId = "B";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "watt";
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 20;
              spanNulls = true;
            };
          };
          # Raw series drawn faint/dotted; 5m average drawn bold.
          overrides = [
            {
              matcher = {
                id = "byFrameRefID";
                options = "A";
              };
              properties = [
                {
                  id = "custom.lineStyle";
                  value = {
                    fill = "dot";
                  };
                }
                {
                  id = "custom.fillOpacity";
                  value = 10;
                }
                {
                  id = "custom.lineWidth";
                  value = 1;
                }
                {
                  id = "custom.pointSize";
                  value = 1;
                }
              ];
            }
            {
              matcher = {
                id = "byFrameRefID";
                options = "B";
              };
              properties = [
                {
                  id = "custom.lineWidth";
                  value = 4;
                }
                {
                  id = "custom.fillOpacity";
                  value = 0;
                }
              ];
            }
          ];
        };
      }
      {
        id = 7;
        type = "stat";
        title = "Energy Usage (24h)";
        gridPos = {
          h = 8;
          w = 4;
          x = 8;
          y = 0;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            # 24h mean power (W) * 24h / 1000 = kWh per day.
            expr = "avg_over_time((apcupsd_ups_load_percent / 100 * apcupsd_nominal_power_watts + 4.5)[24h:]) * 24 / 1000";
            legendFormat = "";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "kwatth";
            decimals = 2;
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "green";
                  value = null;
                }
                {
                  color = "yellow";
                  value = 5;
                }
                {
                  color = "red";
                  value = 10;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options = {
          reduceOptions = {
            calcs = [ "lastNotNull" ];
            fields = "";
            values = false;
          };
          colorMode = "value";
          graphMode = "none";
        };
      }
      {
        id = 2;
        type = "gauge";
        title = "UPS Load";
        gridPos = {
          h = 8;
          w = 6;
          x = 12;
          y = 0;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "apcupsd_ups_load_percent";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "green";
                  value = null;
                }
                {
                  color = "yellow";
                  value = 70;
                }
                {
                  color = "red";
                  value = 90;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options.reduceOptions = {
          calcs = [ "lastNotNull" ];
          fields = "";
          values = false;
        };
      }
      {
        id = 3;
        type = "gauge";
        title = "UPS Battery";
        gridPos = {
          h = 8;
          w = 6;
          x = 18;
          y = 0;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "apcupsd_battery_charge_percent";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            # Inverted palette: low battery charge is the bad state.
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "red";
                  value = null;
                }
                {
                  color = "yellow";
                  value = 20;
                }
                {
                  color = "green";
                  value = 50;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options.reduceOptions = {
          calcs = [ "lastNotNull" ];
          fields = "";
          values = false;
        };
      }
      # -- Row 2: System --
      {
        id = 4;
        type = "timeseries";
        title = "CPU Temperature";
        gridPos = {
          h = 8;
          w = 12;
          x = 0;
          y = 8;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = ''node_hwmon_temp_celsius{chip=~"pci.*"}'';
            legendFormat = "CPU {{sensor}}";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "celsius";
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 10;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
      {
        id = 5;
        type = "stat";
        title = "System Uptime";
        gridPos = {
          h = 8;
          w = 6;
          x = 12;
          y = 8;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "time() - node_boot_time_seconds";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "s";
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "green";
                  value = null;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options = {
          reduceOptions = {
            calcs = [ "lastNotNull" ];
            fields = "";
            values = false;
          };
          colorMode = "value";
          graphMode = "none";
        };
      }
      {
        id = 6;
        type = "stat";
        title = "Jellyfin Active Streams";
        gridPos = {
          h = 8;
          w = 6;
          x = 18;
          y = 8;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            # vector(0) keeps the stat at 0 when nothing is playing.
            expr = "count(jellyfin_now_playing_state) or vector(0)";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            thresholds = {
              mode = "absolute";
              steps = [
                {
                  color = "green";
                  value = null;
                }
                {
                  color = "yellow";
                  value = 3;
                }
                {
                  color = "red";
                  value = 6;
                }
              ];
            };
          };
          overrides = [ ];
        };
        options = {
          reduceOptions = {
            calcs = [ "lastNotNull" ];
            fields = "";
            values = false;
          };
          colorMode = "value";
          graphMode = "area";
        };
      }
      # -- Row 3: qBittorrent --
      {
        id = 11;
        type = "timeseries";
        title = "qBittorrent Speed";
        gridPos = {
          h = 8;
          w = 24;
          x = 0;
          y = 16;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "sum(qbit_dlspeed) or vector(0)";
            legendFormat = "Download";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "sum(qbit_upspeed) or vector(0)";
            legendFormat = "Upload";
            refId = "B";
          }
          {
            datasource = promDs;
            expr = "avg_over_time((sum(qbit_dlspeed) or vector(0))[10m:])";
            legendFormat = "Download (10m avg)";
            refId = "C";
          }
          {
            datasource = promDs;
            expr = "avg_over_time((sum(qbit_upspeed) or vector(0))[10m:])";
            legendFormat = "Upload (10m avg)";
            refId = "D";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "binBps";
            min = 0;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 1;
              fillOpacity = 10;
              spanNulls = true;
            };
          };
          # Instantaneous series faint; 10m averages bold; green=down, blue=up.
          overrides = [
            {
              matcher = {
                id = "byFrameRefID";
                options = "A";
              };
              properties = [
                {
                  id = "color";
                  value = {
                    fixedColor = "green";
                    mode = "fixed";
                  };
                }
                {
                  id = "custom.fillOpacity";
                  value = 5;
                }
              ];
            }
            {
              matcher = {
                id = "byFrameRefID";
                options = "B";
              };
              properties = [
                {
                  id = "color";
                  value = {
                    fixedColor = "blue";
                    mode = "fixed";
                  };
                }
                {
                  id = "custom.fillOpacity";
                  value = 5;
                }
              ];
            }
            {
              matcher = {
                id = "byFrameRefID";
                options = "C";
              };
              properties = [
                {
                  id = "color";
                  value = {
                    fixedColor = "green";
                    mode = "fixed";
                  };
                }
                {
                  id = "custom.lineWidth";
                  value = 3;
                }
                {
                  id = "custom.fillOpacity";
                  value = 0;
                }
              ];
            }
            {
              matcher = {
                id = "byFrameRefID";
                options = "D";
              };
              properties = [
                {
                  id = "color";
                  value = {
                    fixedColor = "blue";
                    mode = "fixed";
                  };
                }
                {
                  id = "custom.lineWidth";
                  value = 3;
                }
                {
                  id = "custom.fillOpacity";
                  value = 0;
                }
              ];
            }
          ];
        };
      }
      # -- Row 4: Intel GPU --
      {
        id = 8;
        type = "timeseries";
        title = "Intel GPU Utilization";
        gridPos = {
          h = 8;
          w = 24;
          x = 0;
          y = 24;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "igpu_engines_busy_percent";
            legendFormat = "{{engine}}";
            refId = "A";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 10;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
      # -- Row 5: Storage --
      {
        id = 12;
        type = "timeseries";
        title = "ZFS Pool Utilization";
        gridPos = {
          h = 8;
          w = 12;
          x = 0;
          y = 32;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "zfs_pool_allocated_bytes{pool=\"tank\"} / zfs_pool_size_bytes{pool=\"tank\"} * 100";
            legendFormat = "tank";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "zfs_pool_allocated_bytes{pool=\"hdds\"} / zfs_pool_size_bytes{pool=\"hdds\"} * 100";
            legendFormat = "hdds";
            refId = "B";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 20;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
      {
        id = 13;
        type = "timeseries";
        title = "Boot Drive Partitions";
        gridPos = {
          h = 8;
          w = 12;
          x = 12;
          y = 32;
        };
        datasource = promDs;
        targets = [
          {
            datasource = promDs;
            expr = "(node_filesystem_size_bytes{mountpoint=\"/boot\"} - node_filesystem_avail_bytes{mountpoint=\"/boot\"}) / node_filesystem_size_bytes{mountpoint=\"/boot\"} * 100";
            legendFormat = "/boot";
            refId = "A";
          }
          {
            datasource = promDs;
            expr = "(node_filesystem_size_bytes{mountpoint=\"/persistent\"} - node_filesystem_avail_bytes{mountpoint=\"/persistent\"}) / node_filesystem_size_bytes{mountpoint=\"/persistent\"} * 100";
            legendFormat = "/persistent";
            refId = "B";
          }
          {
            datasource = promDs;
            expr = "(node_filesystem_size_bytes{mountpoint=\"/nix\"} - node_filesystem_avail_bytes{mountpoint=\"/nix\"}) / node_filesystem_size_bytes{mountpoint=\"/nix\"} * 100";
            legendFormat = "/nix";
            refId = "C";
          }
        ];
        fieldConfig = {
          defaults = {
            unit = "percent";
            min = 0;
            max = 100;
            color.mode = "palette-classic";
            custom = {
              lineWidth = 2;
              fillOpacity = 20;
              spanNulls = true;
            };
          };
          overrides = [ ];
        };
      }
    ];
  };
in
{
  # World-readable JSON for Grafana's file-based dashboard provisioning.
  environment.etc."grafana-dashboards/system-overview.json" = {
    text = builtins.toJSON dashboard;
    mode = "0444";
  };
}

View File

@@ -0,0 +1,10 @@
# Aggregator module for the monitoring stack (Grafana, Prometheus,
# exporters, and annotation feeders).
{
  imports = [
    ./grafana.nix
    ./prometheus.nix
    ./dashboard.nix
    ./exporters.nix
    ./jellyfin-annotations.nix
    ./zfs-scrub-annotations.nix
  ];
}

View File

@@ -0,0 +1,112 @@
{
  config,
  pkgs,
  inputs,
  service_configs,
  lib,
  ...
}:
let
  # Loopback ports the exporters bind to; Prometheus scrapes them locally
  # (see the matching jobs in prometheus.nix).
  jellyfinExporterPort = service_configs.ports.private.jellyfin_exporter.port;
  qbitExporterPort = service_configs.ports.private.qbittorrent_exporter.port;
  igpuExporterPort = service_configs.ports.private.igpu_exporter.port;
in
{
  # -- Jellyfin Prometheus Exporter --
  # Replaces custom jellyfin-collector.nix textfile timer.
  # Exposes per-session metrics (jellyfin_now_playing_state) and library stats.
  systemd.services.jellyfin-exporter =
    lib.mkIf (config.services.grafana.enable && config.services.jellyfin.enable)
      {
        description = "Prometheus exporter for Jellyfin";
        after = [
          "network.target"
          "jellyfin.service"
        ];
        wantedBy = [ "multi-user.target" ];
        serviceConfig = {
          # Wrapper reads the API key from systemd's credential directory at
          # start, so the secret never appears in the unit's Environment=.
          ExecStart = lib.getExe (
            pkgs.writeShellApplication {
              name = "jellyfin-exporter-wrapper";
              runtimeInputs = [ pkgs.jellyfin-exporter ];
              text = ''
                exec jellyfin_exporter \
                  --jellyfin.address=http://127.0.0.1:${toString service_configs.ports.private.jellyfin.port} \
                  --jellyfin.token="$(cat "$CREDENTIALS_DIRECTORY/jellyfin-api-key")" \
                  --web.listen-address=127.0.0.1:${toString jellyfinExporterPort}
              '';
            }
          );
          Restart = "on-failure";
          RestartSec = "10s";
          DynamicUser = true;
          NoNewPrivileges = true;
          ProtectSystem = "strict";
          ProtectHome = true;
          PrivateTmp = true;
          MemoryDenyWriteExecute = true;
          LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
        };
      };
  # -- qBittorrent Prometheus Exporter --
  # Replaces custom qbittorrent-collector.nix textfile timer.
  # Exposes per-torrent metrics (qbit_dlspeed, qbit_upspeed) and aggregate stats.
  # qBittorrent runs in a VPN namespace; the exporter reaches it via namespace address.
  systemd.services.qbittorrent-exporter =
    lib.mkIf (config.services.grafana.enable && config.services.qbittorrent.enable)
      {
        description = "Prometheus exporter for qBittorrent";
        after = [
          "network.target"
          "qbittorrent.service"
        ];
        wantedBy = [ "multi-user.target" ];
        serviceConfig = {
          ExecStart =
            lib.getExe' inputs.qbittorrent-metrics-exporter.packages.${pkgs.system}.default
              "qbittorrent-metrics-exporter";
          Restart = "on-failure";
          RestartSec = "10s";
          DynamicUser = true;
          NoNewPrivileges = true;
          ProtectSystem = "strict";
          ProtectHome = true;
          PrivateTmp = true;
        };
        environment = {
          HOST = "127.0.0.1";
          PORT = toString qbitExporterPort;
          SCRAPE_INTERVAL = "15";
          BACKEND = "in-memory";
          # qBittorrent has AuthSubnetWhitelist=0.0.0.0/0, so no real password needed.
          # The exporter still expects the env var to be set.
          QBITTORRENT_PASSWORD = "unused";
          QBITTORRENT_USERNAME = "admin";
          # NOTE(review): the same WebUI URL appears on both sides of the "|";
          # presumably the exporter's "name=url|url" host syntax — confirm
          # against the exporter's docs.
          TORRENT_HOSTS = "qbit:main=http://${config.vpnNamespaces.wg.namespaceAddress}:${toString config.services.qbittorrent.webuiPort}|http://${config.vpnNamespaces.wg.namespaceAddress}:${toString config.services.qbittorrent.webuiPort}";
          RUST_LOG = "warn";
        };
      };
  # -- Intel GPU Prometheus Exporter --
  # Replaces custom intel-gpu-collector.nix + intel-gpu-collector.py textfile timer.
  # Exposes engine busy%, frequency, and RC6 metrics via /metrics.
  # Requires privileged access to GPU debug interfaces (intel_gpu_top).
  systemd.services.igpu-exporter = lib.mkIf config.services.grafana.enable {
    description = "Prometheus exporter for Intel integrated GPU";
    wantedBy = [ "multi-user.target" ];
    path = [ pkgs.intel-gpu-tools ];
    serviceConfig = {
      ExecStart = lib.getExe pkgs.igpu-exporter;
      Restart = "on-failure";
      RestartSec = "10s";
      # intel_gpu_top requires root-level access to GPU debug interfaces,
      # hence no DynamicUser/ProtectSystem here, unlike the other exporters.
      ProtectHome = true;
      PrivateTmp = true;
    };
    environment = {
      PORT = toString igpuExporterPort;
      REFRESH_PERIOD_MS = "30000";
    };
  };
}

View File

@@ -0,0 +1,103 @@
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Bind Grafana's state dir onto the SSD zpool and lock down its perms.
    (lib.serviceMountWithZpool "grafana" service_configs.zpool_ssds [
      service_configs.grafana.dir
    ])
    (lib.serviceFilePerms "grafana" [
      "Z ${service_configs.grafana.dir} 0700 grafana grafana"
    ])
    # Caddy terminates TLS and enforces auth in front of Grafana.
    (lib.mkCaddyReverseProxy {
      domain = service_configs.grafana.domain;
      port = service_configs.ports.private.grafana.port;
      auth = true;
    })
  ];
  services.grafana = {
    enable = true;
    dataDir = service_configs.grafana.dir;
    settings = {
      server = {
        http_addr = "127.0.0.1";
        http_port = service_configs.ports.private.grafana.port;
        domain = service_configs.grafana.domain;
        root_url = "https://${service_configs.grafana.domain}";
      };
      # Postgres over the local socket; no password — presumably peer
      # authentication for the "grafana" role (TODO confirm pg_hba config).
      database = {
        type = "postgres";
        host = service_configs.postgres.socket;
        user = "grafana";
      };
      # Anonymous Admin is acceptable only because the Caddy vhost above
      # requires authentication before any request reaches Grafana.
      "auth.anonymous" = {
        enabled = true;
        org_role = "Admin";
      };
      "auth.basic".enabled = false;
      "auth".disable_login_form = true;
      analytics.reporting_enabled = false;
      feature_toggles.enable = "dataConnectionsConsole=false";
      users.default_theme = "dark";
      # Disable unused built-in integrations
      alerting.enabled = false;
      "unified_alerting".enabled = false;
      explore.enabled = false;
      news.news_feed_enabled = false;
      plugins = {
        enable_alpha = false;
        plugin_admin_enabled = false;
      };
    };
    provision = {
      # Single provisioned Prometheus datasource; uid "prometheus" is what the
      # generated dashboard panels reference.
      datasources.settings = {
        apiVersion = 1;
        datasources = [
          {
            name = "Prometheus";
            type = "prometheus";
            url = "http://127.0.0.1:${toString service_configs.ports.private.prometheus.port}";
            access = "proxy";
            isDefault = true;
            editable = false;
            uid = "prometheus";
          }
        ];
      };
      # Dashboards are provisioned from /etc (written by dashboard.nix).
      dashboards.settings.providers = [
        {
          name = "system";
          type = "file";
          options.path = "/etc/grafana-dashboards";
          disableDeletion = true;
          updateIntervalSeconds = 60;
        }
      ];
    };
  };
  services.postgresql = {
    ensureDatabases = [ "grafana" ];
    ensureUsers = [
      {
        name = "grafana";
        ensureDBOwnership = true;
        ensureClauses.login = true;
      }
    ];
  };
}

View File

@@ -0,0 +1,18 @@
# Runs jellyfin-annotations.py as a service (via lib.mkGrafanaAnnotationService)
# that mirrors Jellyfin playback sessions into Grafana annotations.
# Only active when both Grafana and Jellyfin are enabled on this host.
{
  config,
  service_configs,
  lib,
  ...
}:
lib.mkIf (config.services.grafana.enable && config.services.jellyfin.enable) (
  lib.mkGrafanaAnnotationService {
    name = "jellyfin";
    description = "Jellyfin stream annotation service for Grafana";
    script = ./jellyfin-annotations.py;
    environment = {
      JELLYFIN_URL = "http://127.0.0.1:${toString service_configs.ports.private.jellyfin.port}";
      POLL_INTERVAL = "30";
    };
    # API key is surfaced to the script through systemd credentials.
    loadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
  }
)

View File

@@ -0,0 +1,233 @@
#!/usr/bin/env python3
import json
import os
import sys
import time
import urllib.request
from pathlib import Path
JELLYFIN_URL = os.environ.get("JELLYFIN_URL", "http://127.0.0.1:8096")
GRAFANA_URL = os.environ.get("GRAFANA_URL", "http://127.0.0.1:3000")
STATE_FILE = os.environ.get("STATE_FILE", "/var/lib/jellyfin-annotations/state.json")
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "30"))
def get_api_key():
    """Locate the Jellyfin API key, preferring systemd's credential directory."""
    cred_dir = os.environ.get("CREDENTIALS_DIRECTORY")
    if cred_dir:
        return Path(cred_dir, "jellyfin-api-key").read_text().strip()
    # Fallback for running outside systemd (e.g. manual debugging).
    fallback = Path("/run/agenix/jellyfin-api-key")
    if fallback.exists():
        return fallback.read_text().strip()
    sys.exit("ERROR: Cannot find jellyfin-api-key")
def http_json(method, url, body=None):
    """Issue an HTTP request with an optional JSON body; return the decoded JSON reply."""
    payload = None if body is None else json.dumps(body).encode()
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    request = urllib.request.Request(url, data=payload, headers=headers, method=method)
    with urllib.request.urlopen(request, timeout=5) as response:
        return json.loads(response.read())
def get_active_sessions(api_key):
    """Return Jellyfin sessions that are currently playing media, or None on error."""
    try:
        request = urllib.request.Request(
            f"{JELLYFIN_URL}/Sessions?api_key={api_key}",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(request, timeout=5) as response:
            payload = json.loads(response.read())
        # Only sessions with something loaded in the player are interesting.
        return [session for session in payload if session.get("NowPlayingItem")]
    except Exception as exc:
        print(f"Error fetching sessions: {exc}", file=sys.stderr)
        return None
def _codec(name):
if not name:
return ""
aliases = {"h264": "H.264", "h265": "H.265", "hevc": "H.265", "av1": "AV1",
"vp9": "VP9", "vp8": "VP8", "mpeg4": "MPEG-4", "mpeg2video": "MPEG-2",
"aac": "AAC", "ac3": "AC3", "eac3": "EAC3", "dts": "DTS",
"truehd": "TrueHD", "mp3": "MP3", "opus": "Opus", "flac": "FLAC",
"vorbis": "Vorbis"}
return aliases.get(name.lower(), name.upper())
def _res(width, height):
if not height:
return ""
common = {2160: "4K", 1440: "1440p", 1080: "1080p", 720: "720p",
480: "480p", 360: "360p"}
return common.get(height, f"{height}p")
def _channels(n):
labels = {1: "Mono", 2: "Stereo", 6: "5.1", 7: "6.1", 8: "7.1"}
return labels.get(n, f"{n}ch") if n else ""
def format_label(session):
    """Build a multi-line Grafana annotation label for an active Jellyfin session.

    Line 1: "user: title"; line 2: stream details (play method, codecs,
    resolution, bitrate, transcode reasons); line 3 (optional): client/device.
    """
    user = session.get("UserName", "Unknown")
    item = session.get("NowPlayingItem", {}) or {}
    transcode = session.get("TranscodingInfo") or {}
    play_state = session.get("PlayState") or {}
    client = session.get("Client", "")
    device = session.get("DeviceName", "")
    name = item.get("Name", "Unknown")
    series = item.get("SeriesName", "")
    season = item.get("ParentIndexNumber")
    episode = item.get("IndexNumber")
    media_type = item.get("Type", "")
    # Compare against None explicitly: season 0 ("Specials") and episode 0
    # are valid values that a plain truthiness test would silently drop.
    if series and season is not None and episode is not None:
        title = f"{series} S{season:02d}E{episode:02d} \u2013 {name}"
    elif series:
        title = f"{series} \u2013 {name}"
    elif media_type == "Movie":
        title = f"{name} (movie)"
    else:
        title = name
    play_method = play_state.get("PlayMethod", "")
    if play_method == "DirectPlay":
        method = "Direct Play"
    elif play_method == "DirectStream":
        method = "Direct Stream"
    elif play_method == "Transcode" or transcode:
        method = "Transcode"
    else:
        method = "Direct Play"
    media_streams = item.get("MediaStreams") or []
    video_streams = [s for s in media_streams if s.get("Type") == "Video"]
    audio_streams = [s for s in media_streams if s.get("Type") == "Audio"]
    default_audio = next((s for s in audio_streams if s.get("IsDefault")), None)
    audio_stream = default_audio or (audio_streams[0] if audio_streams else {})
    video_stream = video_streams[0] if video_streams else {}
    src_vcodec = _codec(video_stream.get("Codec", ""))
    src_res = _res(video_stream.get("Width") or item.get("Width"),
                   video_stream.get("Height") or item.get("Height"))
    src_acodec = _codec(audio_stream.get("Codec", ""))
    src_channels = _channels(audio_stream.get("Channels"))
    # Default True: absent flags mean the stream is not being converted.
    is_video_direct = transcode.get("IsVideoDirect", True)
    is_audio_direct = transcode.get("IsAudioDirect", True)
    if transcode and not is_video_direct:
        dst_vcodec = _codec(transcode.get("VideoCodec", ""))
        dst_res = _res(transcode.get("Width"), transcode.get("Height")) or src_res
        if src_vcodec and dst_vcodec and src_vcodec != dst_vcodec:
            video_part = f"{src_vcodec}\u2192{dst_vcodec} {dst_res}".strip()
        else:
            video_part = f"{dst_vcodec or src_vcodec} {dst_res}".strip()
    else:
        video_part = f"{src_vcodec} {src_res}".strip()
    if transcode and not is_audio_direct:
        dst_acodec = _codec(transcode.get("AudioCodec", ""))
        dst_channels = _channels(transcode.get("AudioChannels")) or src_channels
        if src_acodec and dst_acodec and src_acodec != dst_acodec:
            audio_part = f"{src_acodec}\u2192{dst_acodec} {dst_channels}".strip()
        else:
            audio_part = f"{dst_acodec or src_acodec} {dst_channels}".strip()
    else:
        audio_part = f"{src_acodec} {src_channels}".strip()
    # Prefer the transcode bitrate (the actual wire rate) over the item's.
    bitrate = transcode.get("Bitrate") or item.get("Bitrate")
    bitrate_part = f"{bitrate / 1_000_000:.1f} Mbps" if bitrate else ""
    reasons = transcode.get("TranscodeReasons") or []
    reason_part = f"[{', '.join(reasons)}]" if reasons else ""
    stream_parts = [p for p in [method, video_part, audio_part, bitrate_part, reason_part] if p]
    client_str = " \u00b7 ".join(filter(None, [client, device]))
    lines = [f"{user}: {title}", " | ".join(stream_parts)]
    if client_str:
        lines.append(client_str)
    return "\n".join(lines)
def load_state():
    """Read the persisted session->annotation map; empty dict if absent or corrupt."""
    try:
        return json.loads(Path(STATE_FILE).read_text())
    except (FileNotFoundError, json.JSONDecodeError):
        return {}
def save_state(state):
    """Atomically persist state: write a sibling temp file, then rename over STATE_FILE."""
    target = Path(STATE_FILE)
    target.parent.mkdir(parents=True, exist_ok=True)
    tmp_path = str(target) + ".tmp"
    Path(tmp_path).write_text(json.dumps(state))
    os.replace(tmp_path, target)
def grafana_post(label, start_ms):
    """Create a Grafana annotation tagged "jellyfin"; return its id, or None on failure."""
    payload = {"time": start_ms, "text": label, "tags": ["jellyfin"]}
    try:
        created = http_json("POST", f"{GRAFANA_URL}/api/annotations", payload)
    except Exception as exc:
        print(f"Error posting annotation: {exc}", file=sys.stderr)
        return None
    return created.get("id")
def grafana_close(grafana_id, end_ms):
    """Set timeEnd on an existing annotation, turning it into a region; errors are logged."""
    try:
        http_json(
            "PATCH",
            f"{GRAFANA_URL}/api/annotations/{grafana_id}",
            {"timeEnd": end_ms},
        )
    except Exception as exc:
        print(f"Error closing annotation {grafana_id}: {exc}", file=sys.stderr)
def main():
    """Poll Jellyfin forever and mirror active sessions as Grafana annotations.

    An annotation is opened (POST) when a session appears and closed
    (PATCH with timeEnd) when it disappears. The sid -> annotation map is
    persisted after every change so a restart does not orphan open regions.
    """
    api_key = get_api_key()
    state = load_state()
    while True:
        now_ms = int(time.time() * 1000)
        sessions = get_active_sessions(api_key)
        # None means the poll failed; keep existing annotations open rather
        # than closing them all on a transient Jellyfin error.
        if sessions is not None:
            current_ids = {s["Id"] for s in sessions}
            for s in sessions:
                sid = s["Id"]
                if sid not in state:
                    label = format_label(s)
                    grafana_id = grafana_post(label, now_ms)
                    if grafana_id is not None:
                        state[sid] = {
                            "grafana_id": grafana_id,
                            "label": label,
                            "start_ms": now_ms,
                        }
                        save_state(state)
            # Sessions that vanished since the last poll: close their regions.
            for sid in [k for k in state if k not in current_ids]:
                info = state.pop(sid)
                grafana_close(info["grafana_id"], now_ms)
                save_state(state)
        time.sleep(POLL_INTERVAL)

View File

@@ -0,0 +1,110 @@
{
  service_configs,
  lib,
  ...
}:
let
  # Drop zone for ad-hoc metrics picked up by node-exporter's textfile collector.
  textfileDir = "/var/lib/prometheus-node-exporter-textfiles";
in
{
  imports = [
    # Keep TSDB state on the SSD zpool with tight permissions.
    (lib.serviceMountWithZpool "prometheus" service_configs.zpool_ssds [
      "/var/lib/prometheus"
    ])
    (lib.serviceFilePerms "prometheus" [
      "Z /var/lib/prometheus 0700 prometheus prometheus"
    ])
  ];
  services.prometheus = {
    enable = true;
    port = service_configs.ports.private.prometheus.port;
    listenAddress = "127.0.0.1";
    stateDir = "prometheus";
    retentionTime = "0d"; # 0 disables time-based retention (keep forever)
    exporters = {
      node = {
        enable = true;
        port = service_configs.ports.private.prometheus_node.port;
        listenAddress = "127.0.0.1";
        enabledCollectors = [
          "hwmon"
          "systemd"
          "textfile"
        ];
        extraFlags = [
          "--collector.textfile.directory=${textfileDir}"
        ];
      };
      apcupsd = {
        enable = true;
        port = service_configs.ports.private.prometheus_apcupsd.port;
        listenAddress = "127.0.0.1";
        # apcupsd's NIS port; the daemon itself is configured elsewhere.
        apcupsdAddress = "127.0.0.1:3551";
      };
      zfs = {
        enable = true;
        port = service_configs.ports.private.prometheus_zfs.port;
        listenAddress = "127.0.0.1";
      };
    };
    # All targets are loopback-only; nothing here is scraped over the network.
    # jellyfin/qbittorrent/igpu exporters are defined in exporters.nix.
    scrapeConfigs = [
      {
        job_name = "prometheus";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus.port}" ]; }
        ];
      }
      {
        job_name = "node";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus_node.port}" ]; }
        ];
      }
      {
        job_name = "apcupsd";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus_apcupsd.port}" ]; }
        ];
      }
      {
        job_name = "llama-cpp";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.llama_cpp.port}" ]; }
        ];
      }
      {
        job_name = "jellyfin";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.jellyfin_exporter.port}" ]; }
        ];
      }
      {
        job_name = "qbittorrent";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.qbittorrent_exporter.port}" ]; }
        ];
      }
      {
        job_name = "igpu";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.igpu_exporter.port}" ]; }
        ];
      }
      {
        job_name = "zfs";
        static_configs = [
          { targets = [ "127.0.0.1:${toString service_configs.ports.private.prometheus_zfs.port}" ]; }
        ];
      }
    ];
  };
  systemd.tmpfiles.rules = [
    "d ${textfileDir} 0755 root root -"
  ];
}

View File

@@ -0,0 +1,36 @@
# Hooks Grafana annotations onto the zfs-scrub unit created by
# services.zfs.autoScrub: an annotation is opened when a scrub starts and
# closed (with per-pool scan results) when it finishes.
{
  config,
  pkgs,
  service_configs,
  lib,
  ...
}:
let
  grafanaUrl = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
  script = pkgs.writeShellApplication {
    name = "zfs-scrub-annotations";
    runtimeInputs = with pkgs; [
      curl
      jq
      coreutils
      gnugrep
      gnused
      config.boot.zfs.package
    ];
    text = builtins.readFile ./zfs-scrub-annotations.sh;
  };
in
lib.mkIf (config.services.grafana.enable && config.services.zfs.autoScrub.enable) {
  systemd.services.zfs-scrub = {
    environment = {
      GRAFANA_URL = grafanaUrl;
      STATE_DIR = "/run/zfs-scrub-annotations";
    };
    serviceConfig = {
      RuntimeDirectory = "zfs-scrub-annotations";
      # "-" prefix: a failed annotation POST (e.g. Grafana down) must not
      # prevent the scrub itself from running.
      ExecStartPre = [ "-${lib.getExe script} start" ];
      ExecStopPost = [ "${lib.getExe script} stop" ];
    };
  };
}

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# ZFS scrub annotation script for Grafana
# Usage: zfs-scrub-annotations.sh {start|stop}
#   start: POST an annotation and stash its id in $STATE_DIR/annotation-id
#   stop:  PATCH that annotation with timeEnd + per-pool scan results
# Required env: GRAFANA_URL, STATE_DIR
# Required on PATH: zpool, curl, jq, paste, date, grep, sed
set -euo pipefail
ACTION="${1:-}"
GRAFANA_URL="${GRAFANA_URL:?GRAFANA_URL required}"
STATE_DIR="${STATE_DIR:?STATE_DIR required}"
case "$ACTION" in
  start)
    # paste cycles through the CHARACTERS of -d, so `-d ', '` would join three
    # pools as "a,b c"; join on commas and add the space with sed instead.
    POOLS=$(zpool list -H -o name | paste -sd ',' | sed 's/,/, /g')
    NOW_MS=$(date +%s%3N)
    RESPONSE=$(curl -sf --max-time 5 \
      -X POST "$GRAFANA_URL/api/annotations" \
      -H "Content-Type: application/json" \
      -d "$(jq -n --arg text "ZFS scrub: $POOLS" --argjson time "$NOW_MS" \
        '{time: $time, text: $text, tags: ["zfs-scrub"]}')" \
    ) || exit 0
    echo "$RESPONSE" | jq -r '.id' > "$STATE_DIR/annotation-id"
    ;;
  stop)
    ANN_ID=$(cat "$STATE_DIR/annotation-id" 2>/dev/null) || exit 0
    # jq emits the literal string "null" when the create response had no id.
    { [ -n "$ANN_ID" ] && [ "$ANN_ID" != "null" ]; } || exit 0
    NOW_MS=$(date +%s%3N)
    RESULTS=""
    while IFS= read -r pool; do
      # A pool that has never been scrubbed prints no "scan:" line; with
      # pipefail the failing grep would abort the whole hook, so tolerate it.
      scan_line=$(zpool status "$pool" | grep "scan:" | sed 's/^[[:space:]]*//' || true)
      RESULTS="${RESULTS}${pool}: ${scan_line}"$'\n'
    done < <(zpool list -H -o name)
    TEXT=$(printf "ZFS scrub completed\n%s" "$RESULTS")
    curl -sf --max-time 5 \
      -X PATCH "$GRAFANA_URL/api/annotations/$ANN_ID" \
      -H "Content-Type: application/json" \
      -d "$(jq -n --arg text "$TEXT" --argjson timeEnd "$NOW_MS" \
        '{timeEnd: $timeEnd, text: $text}')" || true
    rm -f "$STATE_DIR/annotation-id"
    ;;
  *)
    echo "Usage: $0 {start|stop}" >&2
    exit 1
    ;;
esac

View File

@@ -0,0 +1,16 @@
# Serves the prebuilt graphing-calculator web bundle as static files under
# graphing.<domain> via Caddy.
{
  service_configs,
  inputs,
  pkgs,
  ...
}:
let
  graphing-calculator =
    inputs.ytbn-graphing-software.packages.${pkgs.stdenv.targetPlatform.system}.web;
in
{
  services.caddy.virtualHosts."graphing.${service_configs.https.domain}".extraConfig = ''
    root * ${graphing-calculator}
    file_server browse
  '';
}

View File

@@ -0,0 +1,38 @@
# Harmonia binary cache behind Caddy: /deploy/* serves CI-written store-path
# pointers unauthenticated; everything else requires auth and proxies to Harmonia.
{
  config,
  lib,
  service_configs,
  ...
}:
{
  imports = [
    (lib.serviceFilePerms "harmonia" [
      "Z /run/agenix/harmonia-sign-key 0400 harmonia harmonia"
    ])
  ];
  services.harmonia = {
    enable = true;
    signKeyPaths = [ config.age.secrets.harmonia-sign-key.path ];
    settings.bind = "127.0.0.1:${toString service_configs.ports.private.harmonia.port}";
  };
  # serve latest deploy store paths (unauthenticated — just a path string)
  # CI writes to /var/lib/dotfiles-deploy/<hostname> after building
  services.caddy.virtualHosts."nix-cache.${service_configs.https.domain}".extraConfig = ''
    handle_path /deploy/* {
      root * /var/lib/dotfiles-deploy
      file_server
    }
    handle {
      import ${config.age.secrets.nix-cache-auth.path}
      reverse_proxy :${toString service_configs.ports.private.harmonia.port}
    }
  '';
  # directory for CI to record latest deploy store paths
  systemd.tmpfiles.rules = [
    "d /var/lib/dotfiles-deploy 0755 gitea-runner gitea-runner"
  ];
}

View File

@@ -0,0 +1,50 @@
{
  service_configs,
  pkgs,
  config,
  lib,
  ...
}:
{
  imports = [
    # Both the server and the ML worker need the media location mounted.
    (lib.serviceMountWithZpool "immich-server" service_configs.zpool_ssds [
      config.services.immich.mediaLocation
    ])
    (lib.serviceMountWithZpool "immich-machine-learning" service_configs.zpool_ssds [
      config.services.immich.mediaLocation
    ])
    (lib.serviceFilePerms "immich-server" [
      "Z ${config.services.immich.mediaLocation} 0770 ${config.services.immich.user} ${config.services.immich.group}"
    ])
    (lib.mkCaddyReverseProxy {
      subdomain = "immich";
      port = service_configs.ports.private.immich.port;
    })
    # Ban IPs after repeated failed logins, matched from immich-server's journal.
    (lib.mkFail2banJail {
      name = "immich";
      unitName = "immich-server.service";
      failregex = "^.*Failed login attempt for user .* from ip address <HOST>.*$";
    })
  ];
  services.immich = {
    enable = true;
    mediaLocation = service_configs.immich.dir;
    port = service_configs.ports.private.immich.port;
    # openFirewall = true;
    # NOTE(review): binds all interfaces while the firewall stays closed
    # (openFirewall left commented out) and Caddy proxies — confirm intended.
    host = "0.0.0.0";
    database = {
      # DB is provisioned elsewhere — presumably by the shared postgres module.
      createDB = false;
    };
  };
  environment.systemPackages = with pkgs; [
    immich-go
  ];
  # GPU access for hardware transcoding / ML acceleration.
  users.users.${config.services.immich.user}.extraGroups = [
    "video"
    "render"
  ];
}

View File

@@ -0,0 +1,6 @@
# Jellyfin module group: the media server plus the qBittorrent bandwidth
# monitor that throttles torrents while remote streams are active.
{
  imports = [
    ./jellyfin.nix
    ./jellyfin-qbittorrent-monitor.nix
  ];
}

View File

@@ -0,0 +1,127 @@
{
  pkgs,
  service_configs,
  config,
  lib,
  ...
}:
let
  webhookPlugin = import ./jellyfin-webhook-plugin.nix { inherit pkgs lib; };
  jellyfinPort = service_configs.ports.private.jellyfin.port;
  webhookPort = service_configs.ports.private.jellyfin_qbittorrent_monitor_webhook.port;
in
lib.mkIf config.services.jellyfin.enable {
  # Materialise the Jellyfin Webhook plugin into Jellyfin's plugins dir before
  # Jellyfin starts. Jellyfin rewrites meta.json at runtime, so a read-only
  # nix-store symlink would EACCES -- we copy instead.
  #
  # `wantedBy = [ "jellyfin.service" ]` alone is insufficient on initial rollout:
  # if jellyfin is already running at activation time, systemd won't start the
  # oneshot until the next jellyfin restart. `restartTriggers` on jellyfin pinned
  # to the plugin package + install script forces that restart whenever either
  # changes, which invokes this unit via the `before`/`wantedBy` chain.
  systemd.services.jellyfin-webhook-install = {
    before = [ "jellyfin.service" ];
    wantedBy = [ "jellyfin.service" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      # Run as the jellyfin user so the copied files stay writable by Jellyfin.
      User = config.services.jellyfin.user;
      Group = config.services.jellyfin.group;
      ExecStart = webhookPlugin.mkInstallScript {
        pluginsDir = "${config.services.jellyfin.dataDir}/plugins";
      };
    };
  };
  systemd.services.jellyfin.restartTriggers = [
    webhookPlugin.package
    (webhookPlugin.mkInstallScript {
      pluginsDir = "${config.services.jellyfin.dataDir}/plugins";
    })
  ];
  # After Jellyfin starts, POST the plugin configuration so the webhook
  # targets the monitor's receiver. Idempotent; runs on every boot.
  systemd.services.jellyfin-webhook-configure = {
    after = [ "jellyfin.service" ];
    wants = [ "jellyfin.service" ];
    before = [ "jellyfin-qbittorrent-monitor.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      DynamicUser = true;
      LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
      ExecStart = webhookPlugin.mkConfigureScript {
        jellyfinUrl = "http://127.0.0.1:${toString jellyfinPort}";
        webhooks = [
          {
            name = "qBittorrent Monitor";
            uri = "http://127.0.0.1:${toString webhookPort}/";
            # Push events let the monitor react immediately instead of waiting
            # for its next poll cycle.
            notificationTypes = [
              "PlaybackStart"
              "PlaybackProgress"
              "PlaybackStop"
            ];
          }
        ];
      };
    };
  };
  systemd.services."jellyfin-qbittorrent-monitor" = {
    description = "Monitor Jellyfin streaming and control qBittorrent rate limits";
    after = [
      "network.target"
      "jellyfin.service"
      "qbittorrent.service"
      "jellyfin-webhook-configure.service"
    ];
    wants = [ "jellyfin-webhook-configure.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      Type = "simple";
      # Shell wrapper turns the systemd credential into an env var the
      # Python script reads.
      ExecStart = pkgs.writeShellScript "jellyfin-monitor-start" ''
        export JELLYFIN_API_KEY=$(cat $CREDENTIALS_DIRECTORY/jellyfin-api-key)
        exec ${
          pkgs.python3.withPackages (ps: with ps; [ requests ])
        }/bin/python ${./jellyfin-qbittorrent-monitor.py}
      '';
      Restart = "always";
      RestartSec = "10s";
      # Security hardening
      DynamicUser = true;
      NoNewPrivileges = true;
      ProtectSystem = "strict";
      ProtectHome = true;
      ProtectKernelTunables = true;
      ProtectKernelModules = true;
      ProtectControlGroups = true;
      MemoryDenyWriteExecute = true;
      RestrictRealtime = true;
      RestrictSUIDSGID = true;
      RemoveIPC = true;
      # Load credentials from agenix secrets
      LoadCredential = "jellyfin-api-key:${config.age.secrets.jellyfin-api-key.path}";
    };
    environment = {
      JELLYFIN_URL = "http://localhost:${builtins.toString jellyfinPort}";
      QBITTORRENT_URL = "http://${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString service_configs.ports.private.torrent.port}";
      CHECK_INTERVAL = "30";
      # Bandwidth budget configuration
      TOTAL_BANDWIDTH_BUDGET = "30000000"; # 30 Mbps in bits per second
      SERVICE_BUFFER = "5000000"; # 5 Mbps reserved for other services (bps)
      DEFAULT_STREAM_BITRATE = "10000000"; # 10 Mbps fallback when bitrate unknown (bps)
      MIN_TORRENT_SPEED = "100"; # KB/s - below this, pause torrents instead
      STREAM_BITRATE_HEADROOM = "1.1"; # multiplier per stream for bitrate fluctuations
      # Webhook receiver: Jellyfin Webhook plugin POSTs events here to throttle immediately.
      WEBHOOK_BIND = "127.0.0.1";
      WEBHOOK_PORT = toString webhookPort;
    };
  };
}

View File

@@ -0,0 +1,504 @@
#!/usr/bin/env python3
import requests
import time
import logging
import sys
import signal
import json
import ipaddress
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
class ServiceUnavailable(Exception):
    """Raised when a monitored service (Jellyfin/qBittorrent) is temporarily unreachable."""
class JellyfinQBittorrentMonitor:
    def __init__(
        self,
        jellyfin_url="http://localhost:8096",
        qbittorrent_url="http://localhost:8080",
        check_interval=30,
        jellyfin_api_key=None,
        streaming_start_delay=10,
        streaming_stop_delay=60,
        total_bandwidth_budget=30000000,
        service_buffer=5000000,
        default_stream_bitrate=10000000,
        min_torrent_speed=100,
        stream_bitrate_headroom=1.1,
        webhook_port=0,
        webhook_bind="127.0.0.1",
    ):
        """Configure the monitor.

        Units: total_bandwidth_budget, service_buffer and
        default_stream_bitrate are bits per second; min_torrent_speed is
        KB/s; the delay/interval parameters are seconds. webhook_port = 0
        disables the webhook receiver entirely.
        """
        self.jellyfin_url = jellyfin_url
        self.qbittorrent_url = qbittorrent_url
        self.check_interval = check_interval
        self.jellyfin_api_key = jellyfin_api_key
        self.total_bandwidth_budget = total_bandwidth_budget
        self.service_buffer = service_buffer
        self.default_stream_bitrate = default_stream_bitrate
        self.min_torrent_speed = min_torrent_speed
        self.stream_bitrate_headroom = stream_bitrate_headroom
        self.last_streaming_state = None
        # Monitor-side view of qBittorrent: "unlimited" / "throttled" / "paused"
        # (the values checked by sync_qbittorrent_state).
        self.current_state = "unlimited"
        self.torrents_paused = False
        self.last_alt_limits = None
        self.running = True
        self.session = requests.Session()  # Use session for cookies
        self.last_active_streams = []
        # Hysteresis settings to prevent rapid switching
        self.streaming_start_delay = streaming_start_delay
        self.streaming_stop_delay = streaming_stop_delay
        self.last_state_change = 0
        # Webhook receiver: allows Jellyfin to push events instead of waiting for the poll
        self.webhook_port = webhook_port
        self.webhook_bind = webhook_bind
        self.wake_event = threading.Event()
        self.webhook_server = None
        # Local network ranges (RFC 1918 private networks + localhost)
        self.local_networks = [
            ipaddress.ip_network("10.0.0.0/8"),
            ipaddress.ip_network("172.16.0.0/12"),
            ipaddress.ip_network("192.168.0.0/16"),
            ipaddress.ip_network("127.0.0.0/8"),
            ipaddress.ip_network("::1/128"),  # IPv6 localhost
            ipaddress.ip_network("fe80::/10"),  # IPv6 link-local
        ]
def is_local_ip(self, ip_address: str) -> bool:
"""Check if an IP address is from a local network"""
try:
ip = ipaddress.ip_address(ip_address)
return any(ip in network for network in self.local_networks)
except ValueError:
logger.warning(f"Invalid IP address format: {ip_address}")
return True # Treat invalid IPs as local for safety
def signal_handler(self, signum, frame):
logger.info("Received shutdown signal, cleaning up...")
self.running = False
if self.webhook_server is not None:
# shutdown() blocks until serve_forever returns; run from a thread so we don't deadlock
threading.Thread(target=self.webhook_server.shutdown, daemon=True).start()
self.restore_normal_limits()
sys.exit(0)
def wake(self) -> None:
"""Signal the main loop to re-evaluate state immediately."""
self.wake_event.set()
def sleep_or_wake(self, seconds: float) -> None:
"""Wait up to `seconds`, returning early if a webhook wakes the loop."""
self.wake_event.wait(seconds)
self.wake_event.clear()
    def start_webhook_server(self) -> None:
        """Start a background HTTP server that wakes the monitor on any POST.

        No-op when webhook_port is 0/unset. The handler only parses the
        NotificationType field for logging; ANY POST triggers a wake.
        """
        if not self.webhook_port:
            return
        # Captured by the nested handler class so it can reach the monitor.
        monitor = self
        class WebhookHandler(BaseHTTPRequestHandler):
            def do_POST(self):  # noqa: N802
                # Cap the read at 64 KiB so a hostile/buggy client can't make
                # us buffer an arbitrarily large body.
                length = int(self.headers.get("Content-Length", "0") or "0")
                body = self.rfile.read(min(length, 65536)) if length else b""
                event = "unknown"
                try:
                    if body:
                        event = json.loads(body).get("NotificationType", "unknown")
                except (json.JSONDecodeError, ValueError):
                    pass
                logger.info(f"Webhook received: {event}")
                self.send_response(204)
                self.end_headers()
                monitor.wake()
            def log_message(self, format, *args):
                return  # suppress default access log
        self.webhook_server = HTTPServer(
            (self.webhook_bind, self.webhook_port), WebhookHandler
        )
        # Daemon thread: the server must never keep the process alive on exit.
        threading.Thread(
            target=self.webhook_server.serve_forever, daemon=True, name="webhook-server"
        ).start()
        logger.info(
            f"Webhook receiver listening on http://{self.webhook_bind}:{self.webhook_port}"
        )
    def check_jellyfin_sessions(self) -> list[dict]:
        """Return active remote video streams as [{"name", "bitrate_bps"}, ...].

        Paused sessions, local-network clients, and non-video item types are
        excluded. Raises ServiceUnavailable when Jellyfin cannot be reached
        or returns malformed JSON.
        """
        headers = (
            {"X-Emby-Token": self.jellyfin_api_key} if self.jellyfin_api_key else {}
        )
        try:
            response = requests.get(
                f"{self.jellyfin_url}/Sessions", headers=headers, timeout=10
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to check Jellyfin sessions: {e}")
            raise ServiceUnavailable(f"Jellyfin unavailable: {e}") from e
        try:
            sessions = response.json()
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse Jellyfin response: {e}")
            raise ServiceUnavailable(f"Jellyfin returned invalid JSON: {e}") from e
        active_streams = []
        for session in sessions:
            # IsPaused defaults True, so a session with no PlayState is skipped.
            if (
                "NowPlayingItem" in session
                and not session.get("PlayState", {}).get("IsPaused", True)
                and not self.is_local_ip(session.get("RemoteEndPoint", ""))
            ):
                item = session["NowPlayingItem"]
                item_type = item.get("Type", "").lower()
                if item_type in ["movie", "episode", "video"]:
                    user = session.get("UserName", "Unknown")
                    stream_name = f"{user}: {item.get('Name', 'Unknown')}"
                    # Prefer the transcode bitrate (actual wire rate), then the
                    # item's, then the first media source, then the fallback.
                    if session.get("TranscodingInfo") and session[
                        "TranscodingInfo"
                    ].get("Bitrate"):
                        bitrate = session["TranscodingInfo"]["Bitrate"]
                    elif item.get("Bitrate"):
                        bitrate = item["Bitrate"]
                    elif item.get("MediaSources", [{}])[0].get("Bitrate"):
                        bitrate = item["MediaSources"][0]["Bitrate"]
                    else:
                        bitrate = self.default_stream_bitrate
                    # Cap implausible values at 100 Mbps before adding headroom.
                    bitrate = min(int(bitrate), 100_000_000)
                    # Add headroom to account for bitrate fluctuations
                    bitrate = int(bitrate * self.stream_bitrate_headroom)
                    active_streams.append({"name": stream_name, "bitrate_bps": bitrate})
        return active_streams
def check_qbittorrent_alternate_limits(self) -> bool:
try:
response = self.session.get(
f"{self.qbittorrent_url}/api/v2/transfer/speedLimitsMode", timeout=10
)
if response.status_code == 200:
return response.text.strip() == "1"
else:
logger.warning(
f"SpeedLimitsMode endpoint returned HTTP {response.status_code}"
)
raise ServiceUnavailable(
f"qBittorrent returned HTTP {response.status_code}"
)
except requests.exceptions.RequestException as e:
logger.error(f"SpeedLimitsMode endpoint failed: {e}")
raise ServiceUnavailable(f"qBittorrent unavailable: {e}") from e
    def use_alt_limits(self, enable: bool) -> None:
        """Toggle qBittorrent's alternate speed limits to match `enable`.

        No-op when the mode already matches. Failures are logged, never
        raised — throttling is best-effort.
        """
        action = "enabled" if enable else "disabled"
        try:
            current_throttle = self.check_qbittorrent_alternate_limits()
            if current_throttle == enable:
                logger.debug(
                    f"Alternate speed limits already {action}, no action needed"
                )
                return
            # The WebUI API only exposes a toggle, not a setter, so read the
            # mode back afterwards to verify the toggle landed as intended.
            response = self.session.post(
                f"{self.qbittorrent_url}/api/v2/transfer/toggleSpeedLimitsMode",
                timeout=10,
            )
            response.raise_for_status()
            new_state = self.check_qbittorrent_alternate_limits()
            if new_state == enable:
                logger.info(f"Alternate speed limits {action}")
            else:
                logger.warning(
                    f"Toggle may have failed: expected {enable}, got {new_state}"
                )
        except ServiceUnavailable:
            logger.warning(
                f"qBittorrent unavailable, cannot {action} alternate speed limits"
            )
        except requests.exceptions.RequestException as e:
            logger.error(f"Failed to {action} alternate speed limits: {e}")
def pause_all_torrents(self) -> None:
try:
response = self.session.post(
f"{self.qbittorrent_url}/api/v2/torrents/stop",
data={"hashes": "all"},
timeout=10,
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
logger.error(f"Failed to pause torrents: {e}")
def resume_all_torrents(self) -> None:
try:
response = self.session.post(
f"{self.qbittorrent_url}/api/v2/torrents/start",
data={"hashes": "all"},
timeout=10,
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
logger.error(f"Failed to resume torrents: {e}")
def set_alt_speed_limits(self, dl_kbs: float, ul_kbs: float) -> None:
try:
payload = {
"alt_dl_limit": int(dl_kbs * 1024),
"alt_up_limit": int(ul_kbs * 1024),
}
response = self.session.post(
f"{self.qbittorrent_url}/api/v2/app/setPreferences",
data={"json": json.dumps(payload)},
timeout=10,
)
response.raise_for_status()
self.last_alt_limits = (dl_kbs, ul_kbs)
except requests.exceptions.RequestException as e:
logger.error(f"Failed to set alternate speed limits: {e}")
    def restore_normal_limits(self) -> None:
        """Best-effort cleanup on shutdown: resume any paused torrents and
        disable alternate speed limits so qBittorrent is left unrestricted.

        Called once after the main loop exits (see `run`).
        """
        if self.torrents_paused:
            logger.info("Resuming all torrents before shutdown...")
            self.resume_all_torrents()
            self.torrents_paused = False
        if self.current_state != "unlimited":
            logger.info("Restoring normal speed limits before shutdown...")
            self.use_alt_limits(False)
            self.current_state = "unlimited"
    def sync_qbittorrent_state(self) -> None:
        """Re-assert the monitor's desired state onto qBittorrent.

        Called at the top of every poll cycle (see `run`) so that
        out-of-band changes — e.g. someone flipping alt-speed mode in the
        qBittorrent UI, or qBittorrent restarting — get undone. Silently
        skips the cycle when qBittorrent is unreachable; the next cycle
        retries.
        """
        try:
            if self.current_state == "unlimited":
                actual_state = self.check_qbittorrent_alternate_limits()
                if actual_state:
                    logger.warning(
                        "qBittorrent state mismatch detected: expected alt speed OFF, got ON. Re-syncing..."
                    )
                    self.use_alt_limits(False)
            elif self.current_state == "throttled":
                # Re-apply the last computed limit values before verifying
                # that alt-speed mode is actually on.
                if self.last_alt_limits:
                    self.set_alt_speed_limits(*self.last_alt_limits)
                actual_state = self.check_qbittorrent_alternate_limits()
                if not actual_state:
                    logger.warning(
                        "qBittorrent state mismatch detected: expected alt speed ON, got OFF. Re-syncing..."
                    )
                    self.use_alt_limits(True)
            elif self.current_state == "paused":
                # Re-issue the pause every cycle while in the paused state.
                self.pause_all_torrents()
                self.torrents_paused = True
        except ServiceUnavailable:
            # qBittorrent is down; nothing to sync this cycle.
            pass
def should_change_state(self, new_streaming_state: bool) -> bool:
"""Apply hysteresis to prevent rapid state changes"""
now = time.time()
if new_streaming_state == self.last_streaming_state:
return False
time_since_change = now - self.last_state_change
if new_streaming_state and not self.last_streaming_state:
if time_since_change >= self.streaming_start_delay:
self.last_state_change = now
return True
else:
remaining = self.streaming_start_delay - time_since_change
logger.info(
f"Streaming started - waiting {remaining:.1f}s before enforcing limits"
)
elif not new_streaming_state and self.last_streaming_state:
if time_since_change >= self.streaming_stop_delay:
self.last_state_change = now
return True
else:
remaining = self.streaming_stop_delay - time_since_change
logger.info(
f"Streaming stopped - waiting {remaining:.1f}s before restoring unlimited mode"
)
return False
    def run(self):
        """Main monitoring loop: poll Jellyfin for active streams and drive
        qBittorrent through three states:

        - "unlimited": no streams; torrents run with normal limits.
        - "throttled": streams active and the leftover bandwidth budget still
          allows at least `min_torrent_speed` KB/s for torrents.
        - "paused": streams active but not enough bandwidth left for torrents.

        Runs until `self.running` is cleared, then restores normal limits.
        """
        logger.info("Starting Jellyfin-qBittorrent monitor")
        logger.info(f"Jellyfin URL: {self.jellyfin_url}")
        logger.info(f"qBittorrent URL: {self.qbittorrent_url}")
        logger.info(f"Check interval: {self.check_interval}s")
        logger.info(f"Streaming start delay: {self.streaming_start_delay}s")
        logger.info(f"Streaming stop delay: {self.streaming_stop_delay}s")
        logger.info(f"Total bandwidth budget: {self.total_bandwidth_budget} bps")
        logger.info(f"Service buffer: {self.service_buffer} bps")
        logger.info(f"Default stream bitrate: {self.default_stream_bitrate} bps")
        logger.info(f"Minimum torrent speed: {self.min_torrent_speed} KB/s")
        logger.info(f"Stream bitrate headroom: {self.stream_bitrate_headroom}x")
        if self.webhook_port:
            logger.info(f"Webhook receiver: {self.webhook_bind}:{self.webhook_port}")
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
        self.start_webhook_server()
        while self.running:
            try:
                # Re-assert the desired qBittorrent state each cycle (undoes
                # out-of-band changes such as UI toggles or restarts).
                self.sync_qbittorrent_state()
                try:
                    active_streams = self.check_jellyfin_sessions()
                except ServiceUnavailable:
                    # Keep whatever state we are in rather than flapping.
                    logger.warning("Jellyfin unavailable, maintaining current state")
                    self.sleep_or_wake(self.check_interval)
                    continue
                streaming_active = len(active_streams) > 0
                if active_streams:
                    for stream in active_streams:
                        logger.debug(
                            f"Active stream: {stream['name']} ({stream['bitrate_bps']} bps)"
                        )
                # Log at info level only when the set of streams changed.
                if active_streams != self.last_active_streams:
                    if streaming_active:
                        stream_names = ", ".join(
                            stream["name"] for stream in active_streams
                        )
                        logger.info(
                            f"Active streams ({len(active_streams)}): {stream_names}"
                        )
                    elif len(active_streams) == 0 and self.last_streaming_state:
                        logger.info("No active streaming sessions")
                # Commit the debounced streaming flag only once the
                # hysteresis delay has elapsed (see should_change_state).
                if self.should_change_state(streaming_active):
                    self.last_streaming_state = streaming_active
                streaming_state = bool(self.last_streaming_state)
                # Bandwidth left for torrents = budget - fixed service buffer
                # - sum of active stream bitrates; bits/s -> KB/s via /8/1024.
                total_streaming_bps = sum(
                    stream["bitrate_bps"] for stream in active_streams
                )
                remaining_bps = (
                    self.total_bandwidth_budget
                    - self.service_buffer
                    - total_streaming_bps
                )
                remaining_kbs = max(0, remaining_bps) / 8 / 1024
                # Target state comes from the debounced flag; the
                # instantaneous flag only picks throttled vs paused.
                if not streaming_state:
                    desired_state = "unlimited"
                elif streaming_active:
                    if remaining_kbs >= self.min_torrent_speed:
                        desired_state = "throttled"
                    else:
                        desired_state = "paused"
                else:
                    # Debounced flag still on but streams just vanished:
                    # hold the current state until hysteresis catches up.
                    desired_state = self.current_state
                if desired_state != self.current_state:
                    if desired_state == "unlimited":
                        action = "resume torrents, disable alt speed"
                    elif desired_state == "throttled":
                        action = (
                            "set alt limits "
                            f"dl={int(remaining_kbs)}KB/s ul={int(remaining_kbs)}KB/s, enable alt speed"
                        )
                    else:
                        action = "pause torrents"
                    logger.info(
                        "State change %s -> %s | streams=%d total_bps=%d remaining_bps=%d action=%s",
                        self.current_state,
                        desired_state,
                        len(active_streams),
                        total_streaming_bps,
                        remaining_bps,
                        action,
                    )
                    # Apply the transition (resume before re-limiting).
                    if desired_state == "unlimited":
                        if self.torrents_paused:
                            self.resume_all_torrents()
                            self.torrents_paused = False
                        self.use_alt_limits(False)
                    elif desired_state == "throttled":
                        if self.torrents_paused:
                            self.resume_all_torrents()
                            self.torrents_paused = False
                        self.set_alt_speed_limits(remaining_kbs, remaining_kbs)
                        self.use_alt_limits(True)
                    else:
                        if not self.torrents_paused:
                            self.pause_all_torrents()
                            self.torrents_paused = True
                    self.current_state = desired_state
                self.last_active_streams = active_streams
                self.sleep_or_wake(self.check_interval)
            except KeyboardInterrupt:
                break
            except Exception as e:
                logger.error(f"Unexpected error in monitoring loop: {e}")
                self.sleep_or_wake(self.check_interval)
        # Leave qBittorrent unrestricted on exit.
        self.restore_normal_limits()
        logger.info("Monitor stopped")
if __name__ == "__main__":
    import os

    # All tunables come from environment variables with built-in defaults.
    def _int_env(name: str, default: str) -> int:
        return int(os.getenv(name, default))

    monitor = JellyfinQBittorrentMonitor(
        jellyfin_url=os.getenv("JELLYFIN_URL", "http://localhost:8096"),
        qbittorrent_url=os.getenv("QBITTORRENT_URL", "http://localhost:8080"),
        check_interval=_int_env("CHECK_INTERVAL", "30"),
        jellyfin_api_key=os.getenv("JELLYFIN_API_KEY"),
        streaming_start_delay=_int_env("STREAMING_START_DELAY", "10"),
        streaming_stop_delay=_int_env("STREAMING_STOP_DELAY", "60"),
        total_bandwidth_budget=_int_env("TOTAL_BANDWIDTH_BUDGET", "30000000"),
        service_buffer=_int_env("SERVICE_BUFFER", "5000000"),
        default_stream_bitrate=_int_env("DEFAULT_STREAM_BITRATE", "10000000"),
        min_torrent_speed=_int_env("MIN_TORRENT_SPEED", "100"),
        stream_bitrate_headroom=float(os.getenv("STREAM_BITRATE_HEADROOM", "1.1")),
        webhook_port=_int_env("WEBHOOK_PORT", "0"),
        webhook_bind=os.getenv("WEBHOOK_BIND", "127.0.0.1"),
    )
    monitor.run()

View File

@@ -0,0 +1,105 @@
# Packages the Jellyfin Webhook plugin and exposes script builders for
# installing it into a writable plugins directory and pushing its
# configuration through the Jellyfin HTTP API.
{ pkgs, lib }:
let
  pluginVersion = "18.0.0.0";
  # GUID from the plugin's meta.json; addresses it on /Plugins/<guid>/Configuration.
  pluginGuid = "71552a5a-5c5c-4350-a2ae-ebe451a30173";
  # Fixed-output fetch of the upstream release zip, unpacked as-is.
  package = pkgs.stdenvNoCC.mkDerivation {
    pname = "jellyfin-plugin-webhook";
    version = pluginVersion;
    src = pkgs.fetchurl {
      url = "https://repo.jellyfin.org/files/plugin/webhook/webhook_${pluginVersion}.zip";
      hash = "sha256-LFFojiPnBGl9KJ0xVyPBnCmatcaeVbllRwRkz5Z3dqI=";
    };
    nativeBuildInputs = [ pkgs.unzip ];
    unpackPhase = ''unzip "$src"'';
    installPhase = ''
      mkdir -p "$out"
      cp *.dll meta.json "$out/"
    '';
    dontFixup = true; # managed .NET assemblies must not be patched
  };
  # Minimal Handlebars template, base64 encoded. The monitor only needs the POST;
  # NotificationType is parsed for the debug log line.
  # Decoded: {"NotificationType":"{{NotificationType}}"}
  templateB64 = "eyJOb3RpZmljYXRpb25UeXBlIjoie3tOb3RpZmljYXRpb25UeXBlfX0ifQ==";
  # Build a PluginConfiguration payload accepted by Jellyfin's JSON deserializer.
  # Each webhook is `{ name, uri, notificationTypes }`.
  mkConfigJson =
    webhooks:
    builtins.toJSON {
      ServerUrl = "";
      GenericOptions = map (w: {
        NotificationTypes = w.notificationTypes;
        WebhookName = w.name;
        WebhookUri = w.uri;
        EnableMovies = true;
        EnableEpisodes = true;
        EnableVideos = true;
        EnableWebhook = true;
        Template = templateB64;
        Headers = [
          {
            Key = "Content-Type";
            Value = "application/json";
          }
        ];
      }) webhooks;
    };
  # Oneshot that POSTs the plugin configuration. Retries past the window
  # between Jellyfin API health and plugin registration.
  # The API key is read from systemd's credentials dir at runtime.
  mkConfigureScript =
    { jellyfinUrl, webhooks }:
    pkgs.writeShellScript "jellyfin-webhook-configure" ''
      set -euo pipefail
      export PATH=${
        lib.makeBinPath [
          pkgs.coreutils
          pkgs.curl
        ]
      }
      URL=${lib.escapeShellArg jellyfinUrl}
      AUTH="Authorization: MediaBrowser Token=\"$(cat "$CREDENTIALS_DIRECTORY/jellyfin-api-key")\""
      CONFIG=${lib.escapeShellArg (mkConfigJson webhooks)}
      for _ in $(seq 1 120); do curl -sf -o /dev/null "$URL/health" && break; sleep 1; done
      curl -sf -o /dev/null "$URL/health"
      for _ in $(seq 1 60); do
        if printf '%s' "$CONFIG" | curl -sf -X POST \
          -H "$AUTH" -H "Content-Type: application/json" --data-binary @- \
          "$URL/Plugins/${pluginGuid}/Configuration"; then
          echo "Jellyfin webhook plugin configured"; exit 0
        fi
        sleep 1
      done
      echo "Failed to configure webhook plugin" >&2; exit 1
    '';
  # Materialise a writable copy of the plugin. Jellyfin rewrites meta.json at
  # runtime, so a read-only nix-store symlink would EACCES.
  mkInstallScript =
    { pluginsDir }:
    pkgs.writeShellScript "jellyfin-webhook-install" ''
      set -euo pipefail
      export PATH=${lib.makeBinPath [ pkgs.coreutils ]}
      dst=${lib.escapeShellArg "${pluginsDir}/Webhook_${pluginVersion}"}
      mkdir -p ${lib.escapeShellArg pluginsDir}
      rm -rf "$dst" && mkdir -p "$dst"
      cp ${package}/*.dll ${package}/meta.json "$dst/"
      chmod u+rw "$dst"/*
    '';
in
# Consumers pick what they need: the derivation itself, the GUID/version,
# and the two script builders.
{
  inherit
    package
    pluginVersion
    pluginGuid
    mkConfigureScript
    mkInstallScript
    ;
}

View File

@@ -0,0 +1,66 @@
# Jellyfin media server: ZFS-backed state/cache dirs, a Caddy virtual host
# tuned for media streaming, and a fail2ban jail for failed logins.
{
  pkgs,
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    # Project helpers (defined elsewhere in this repo): mount the state dirs
    # on the SSD zpool and enforce ownership/permissions via tmpfiles rules.
    (lib.serviceMountWithZpool "jellyfin" service_configs.zpool_ssds [
      config.services.jellyfin.dataDir
      config.services.jellyfin.cacheDir
    ])
    (lib.serviceFilePerms "jellyfin" [
      "Z ${config.services.jellyfin.dataDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
      "Z ${config.services.jellyfin.cacheDir} 0700 ${config.services.jellyfin.user} ${config.services.jellyfin.group}"
    ])
  ];
  services.jellyfin = {
    enable = true;
    # lib.optimizePackage is a project helper applied to jellyfin-ffmpeg.
    package = pkgs.jellyfin.override { jellyfin-ffmpeg = (lib.optimizePackage pkgs.jellyfin-ffmpeg); };
    inherit (service_configs.jellyfin) dataDir cacheDir;
  };
  services.caddy.virtualHosts."jellyfin.${service_configs.https.domain}".extraConfig = ''
    reverse_proxy :${builtins.toString service_configs.ports.private.jellyfin.port} {
      # Disable response buffering for streaming. Caddy's default partial
      # buffering delays fMP4-HLS segments and direct-play responses where
      # Content-Length is known (so auto-flush doesn't trigger).
      flush_interval -1
      transport http {
        # Localhost: compression wastes CPU re-encoding already-compressed media.
        compression off
      }
      header_up X-Real-IP {remote_host}
      header_up X-Forwarded-For {remote_host}
      header_up X-Forwarded-Proto {scheme}
    }
    request_body {
      max_size 4096MB
    }
  '';
  # NOTE(review): video/render presumably grant GPU device-node access for
  # hardware transcode; media_group covers the shared library files — confirm.
  users.users.${config.services.jellyfin.user}.extraGroups = [
    "video"
    "render"
    service_configs.media_group
  ];
  # Protect Jellyfin login from brute force attacks
  services.fail2ban.jails.jellyfin = {
    enabled = true;
    settings = {
      backend = "auto";
      port = "http,https";
      logpath = "${config.services.jellyfin.dataDir}/log/log_*.log";
      # defaults: maxretry=5, findtime=10m, bantime=10m
    };
    filter.Definition = {
      failregex = ''^.*Authentication request for .* has been denied \(IP: "<ADDR>"\)\..*$'';
      ignoreregex = "";
    };
  };
}

View File

@@ -0,0 +1,103 @@
# llama.cpp inference server (Vulkan backend; Intel Arc A380 per the VRAM
# note below), reverse-proxied at llm.<domain>.
{
  pkgs,
  service_configs,
  config,
  inputs,
  lib,
  utils,
  ...
}:
let
  cfg = config.services.llama-cpp;
  modelUrl = "https://huggingface.co/bartowski/google_gemma-4-E2B-it-GGUF/resolve/main/google_gemma-4-E2B-it-IQ2_M.gguf";
  # Served model alias = the GGUF filename without its extension.
  modelAlias = lib.removeSuffix ".gguf" (baseNameOf modelUrl);
in
{
  imports = [
    (lib.mkCaddyReverseProxy {
      subdomain = "llm";
      port = service_configs.ports.private.llama_cpp.port;
    })
  ];
  services.llama-cpp = {
    enable = true;
    # The model is a fixed-output fetch pinned by hash.
    model = toString (
      pkgs.fetchurl {
        url = modelUrl;
        sha256 = "17e869ac54d0e59faa884d5319fc55ad84cd866f50f0b3073fbb25accc875a23";
      }
    );
    port = service_configs.ports.private.llama_cpp.port;
    host = "0.0.0.0";
    # Vulkan build from the llamacpp flake input, with room for local patches.
    package = lib.optimizePackage (
      inputs.llamacpp.packages.${pkgs.system}.vulkan.overrideAttrs (old: {
        patches = (old.patches or [ ]) ++ [
        ];
      })
    );
    extraFlags = [
      # -ngl 999: offload (effectively) all layers to the GPU
      "-ngl"
      "999"
      # context window size
      "-c"
      "65536"
      # turbo3-quantized KV cache for both K and V (see timeout note below)
      "-ctk"
      "turbo3"
      "-ctv"
      "turbo3"
      "-fa"
      "on"
      "--api-key-file"
      config.age.secrets.llama-cpp-api-key.path
      "--metrics"
      "--alias"
      modelAlias
      # batch sizes; see the note below on why 4096 is the floor here
      "-b"
      "4096"
      "-ub"
      "4096"
      "--parallel"
      "2"
    ];
  };
  # have to do this in order to get vulkan to work
  systemd.services.llama-cpp.serviceConfig.DynamicUser = lib.mkForce false;
  # ANV driver's turbo3 shader compilation exceeds the default 8 MB thread stack.
  systemd.services.llama-cpp.serviceConfig.LimitSTACK = lib.mkForce "67108864"; # 64 MB soft+hard
  # llama-server tries to create ~/.cache; ProtectSystem=strict + impermanent
  # root make /root read-only. Give it a writable cache dir and point HOME there.
  systemd.services.llama-cpp.serviceConfig.CacheDirectory = "llama-cpp";
  systemd.services.llama-cpp.environment.HOME = "/var/cache/llama-cpp";
  # turbo3 KV cache quantization runs a 14-barrier WHT butterfly per 128-element
  # workgroup in SET_ROWS. With 4 concurrent slots and batch=4096, the combined
  # GPU dispatch can exceed the default i915 CCS engine preempt timeout (7.5s),
  # causing GPU HANG -> ErrorDeviceLost. Increase compute engine timeouts.
  # Note: batch<4096 is not viable -- GDN chunked mode needs a larger compute
  # buffer at smaller batch sizes, exceeding the A380's 6 GB VRAM.
  # '+' prefix runs as root regardless of service User=.
  systemd.services.llama-cpp.serviceConfig.ExecStartPre = [
    "+${pkgs.writeShellScript "set-gpu-compute-timeout" ''
      for f in /sys/class/drm/card*/engine/ccs*/preempt_timeout_ms; do
        [ -w "$f" ] && echo 30000 > "$f"
      done
      for f in /sys/class/drm/card*/engine/ccs*/heartbeat_interval_ms; do
        [ -w "$f" ] && echo 10000 > "$f"
      done
    ''}"
  ];
  # upstream module hardcodes --log-disable; override ExecStart to keep logs
  # so we can see prompt processing progress via journalctl
  systemd.services.llama-cpp.serviceConfig.ExecStart = lib.mkForce (
    "${cfg.package}/bin/llama-server"
    + " --host ${cfg.host}"
    + " --port ${toString cfg.port}"
    + " -m ${cfg.model}"
    + " ${utils.escapeSystemdExecArgs cfg.extraFlags}"
  );
}

View File

@@ -0,0 +1,59 @@
# coturn TURN relay for Matrix VoIP: shared-secret auth, hardened peer
# deny-list, and the matching firewall holes.
{
  config,
  lib,
  service_configs,
  ...
}:
let
  # Both the plain and TLS TURN listeners need TCP and UDP opened.
  turnListenPorts = [
    service_configs.ports.public.coturn.port
    service_configs.ports.public.coturn_tls.port
  ];
in
{
  services.coturn = {
    enable = true;
    realm = service_configs.https.domain;
    use-auth-secret = true;
    static-auth-secret-file = config.age.secrets.coturn-auth-secret.path;
    listening-port = service_configs.ports.public.coturn.port;
    tls-listening-port = service_configs.ports.public.coturn_tls.port;
    no-cli = true;
    # recommended security settings from Synapse's coturn docs
    extraConfig = ''
      denied-peer-ip=10.0.0.0-10.255.255.255
      denied-peer-ip=192.168.0.0-192.168.255.255
      denied-peer-ip=172.16.0.0-172.31.255.255
      denied-peer-ip=0.0.0.0-0.255.255.255
      denied-peer-ip=100.64.0.0-100.127.255.255
      denied-peer-ip=169.254.0.0-169.254.255.255
      denied-peer-ip=192.0.0.0-192.0.0.255
      denied-peer-ip=198.18.0.0-198.19.255.255
      denied-peer-ip=198.51.100.0-198.51.100.255
      denied-peer-ip=203.0.113.0-203.0.113.255
      denied-peer-ip=240.0.0.0-255.255.255.255
      denied-peer-ip=::1
      denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
      denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
      denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
      denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
      denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
    '';
  };
  # coturn needs these ports open
  networking.firewall = {
    allowedTCPPorts = turnListenPorts;
    allowedUDPPorts = turnListenPorts;
    # relay port range
    allowedUDPPortRanges = [
      {
        from = config.services.coturn.min-port;
        to = config.services.coturn.max-port;
      }
    ];
  };
}

View File

@@ -0,0 +1,7 @@
# Matrix stack: continuwuity homeserver, coturn TURN relay, and LiveKit SFU
# (advertised via the MSC4143 rtc_foci well-known entry in matrix.nix).
{
  imports = [
    ./matrix.nix
    ./coturn.nix
    ./livekit.nix
  ];
}

View File

@@ -0,0 +1,51 @@
# LiveKit SFU plus the lk-jwt-service token issuer, both sharing one key
# file, fronted by a single Caddy virtual host that splits JWT endpoints
# from SFU traffic.
{
  service_configs,
  ...
}:
let
  # Shared API key/secret file consumed by both livekit and lk-jwt-service.
  keyFile = ../../secrets/livekit_keys;
  sfuPort = service_configs.ports.public.livekit.port;
  jwtPort = service_configs.ports.private.lk_jwt.port;
in
{
  services.livekit = {
    enable = true;
    inherit keyFile;
    openFirewall = true;
    settings = {
      port = sfuPort;
      bind_addresses = [ "127.0.0.1" ];
      rtc = {
        port_range_start = 50100;
        port_range_end = 50200;
        use_external_ip = true;
      };
      # Disable LiveKit's built-in TURN; coturn is already running
      turn.enabled = false;
      logging.level = "info";
    };
  };
  services.lk-jwt-service = {
    enable = true;
    inherit keyFile;
    livekitUrl = "wss://${service_configs.livekit.domain}";
    port = jwtPort;
  };
  # Route token/health requests to the JWT service, everything else to the SFU.
  services.caddy.virtualHosts."${service_configs.livekit.domain}".extraConfig = ''
    @jwt path /sfu/get /healthz
    handle @jwt {
      reverse_proxy :${toString jwtPort}
    }
    handle {
      reverse_proxy :${toString sfuPort}
    }
  '';
}

View File

@@ -0,0 +1,73 @@
# Continuwuity Matrix homeserver with coturn-backed VoIP and well-known
# delegation from the apex domain to matrix.<domain>.
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "continuwuity" service_configs.zpool_ssds [
      "/var/lib/private/continuwuity"
    ])
    (lib.serviceFilePerms "continuwuity" [
      "Z /var/lib/private/continuwuity 0770 ${config.services.matrix-continuwuity.user} ${config.services.matrix-continuwuity.group}"
    ])
    (lib.mkCaddyReverseProxy {
      domain = service_configs.matrix.domain;
      port = service_configs.ports.private.matrix.port;
    })
  ];
  services.matrix-continuwuity = {
    enable = true;
    settings.global = {
      port = [ service_configs.ports.private.matrix.port ];
      # server_name is the apex domain; actual traffic is delegated to
      # matrix.<domain> via the well-known documents below.
      server_name = service_configs.https.domain;
      # Open registration, gated behind a shared token file.
      allow_registration = true;
      registration_token_file = config.age.secrets.matrix-reg-token.path;
      new_user_displayname_suffix = "";
      trusted_servers = [
        "matrix.org"
        "constellatory.net"
        "tchncs.de"
        "envs.net"
      ];
      address = [
        "0.0.0.0"
      ];
      # TURN server config (coturn)
      turn_secret_file = config.age.secrets.matrix-turn-secret.path;
      turn_uris = [
        "turn:${service_configs.https.domain}?transport=udp"
        "turn:${service_configs.https.domain}?transport=tcp"
      ];
      turn_ttl = 86400;
    };
  };
  # Serve the well-known delegation documents on the apex domain.
  # NOTE(review): the /matrix/client payload includes an "m.server" entry;
  # the client well-known normally only carries m.homeserver and extension
  # keys -- confirm the extra key is intentional.
  services.caddy.virtualHosts.${service_configs.https.domain}.extraConfig = lib.mkBefore ''
    header /.well-known/matrix/* Content-Type application/json
    header /.well-known/matrix/* Access-Control-Allow-Origin *
    respond /.well-known/matrix/server `{"m.server": "${service_configs.matrix.domain}:${builtins.toString service_configs.ports.public.https.port}"}`
    respond /.well-known/matrix/client `{"m.server":{"base_url":"https://${service_configs.matrix.domain}"},"m.homeserver":{"base_url":"https://${service_configs.matrix.domain}"},"org.matrix.msc3575.proxy":{"base_url":"https://${config.services.matrix-continuwuity.settings.global.server_name}"},"org.matrix.msc4143.rtc_foci":[{"type":"livekit","livekit_service_url":"https://${service_configs.livekit.domain}"}]}`
  '';
  # Exact duplicate for federation port
  services.caddy.virtualHosts."${service_configs.matrix.domain}:${builtins.toString service_configs.ports.public.matrix_federation.port}".extraConfig =
    config.services.caddy.virtualHosts."${service_configs.matrix.domain}".extraConfig;
  # for federation
  networking.firewall.allowedTCPPorts = [
    service_configs.ports.public.matrix_federation.port
  ];
  # for federation
  networking.firewall.allowedUDPPorts = [
    service_configs.ports.public.matrix_federation.port
  ];
}

View File

@@ -0,0 +1,192 @@
# Fabric Minecraft server with tuned JVM flags, a curated mod set pinned by
# hash, and a squaremap web map served by Caddy at map.<domain>.
{
  pkgs,
  service_configs,
  lib,
  config,
  inputs,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "minecraft-server-${service_configs.minecraft.server_name}"
      service_configs.zpool_ssds
      [
        "${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}"
      ]
    )
    inputs.nix-minecraft.nixosModules.minecraft-servers
    (lib.serviceFilePerms "minecraft-server-${service_configs.minecraft.server_name}" [
      "Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 700 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
      "Z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web 750 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
      # Allow caddy (in minecraft group) to traverse to squaremap/web for map.gardling.com
      "z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name} 710 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
      "z ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap 710 ${config.services.minecraft-servers.user} ${config.services.minecraft-servers.group}"
    ])
  ];
  boot.kernel.sysctl = {
    # Disable autogroup for better scheduling of game server threads
    "kernel.sched_autogroup_enabled" = 0;
  };
  services.minecraft-servers = {
    enable = true;
    eula = true;
    dataDir = service_configs.minecraft.parent_dir;
    openFirewall = true;
    servers.${service_configs.minecraft.server_name} = {
      enable = true;
      package = pkgs.fabricServers.fabric-26_1_2.override { jre_headless = pkgs.openjdk25_headless; };
      jvmOpts = lib.concatStringsSep " " [
        # Memory
        "-Xmx${builtins.toString service_configs.minecraft.memory.heap_size_m}M"
        "-Xms${builtins.toString service_configs.minecraft.memory.heap_size_m}M"
        # GC
        "-XX:+UseZGC"
        "-XX:+ZGenerational"
        # added in new minecraft version
        "-XX:+UseCompactObjectHeaders"
        "-XX:+UseStringDeduplication"
        # Base JVM optimizations (brucethemoose/Minecraft-Performance-Flags-Benchmarks)
        "-XX:+UnlockExperimentalVMOptions"
        "-XX:+UnlockDiagnosticVMOptions"
        "-XX:+AlwaysActAsServerClassMachine"
        "-XX:+AlwaysPreTouch"
        "-XX:+DisableExplicitGC"
        "-XX:+UseNUMA"
        "-XX:+PerfDisableSharedMem"
        "-XX:+UseFastUnorderedTimeStamps"
        "-XX:+UseCriticalJavaThreadPriority"
        "-XX:ThreadPriorityPolicy=1"
        "-XX:AllocatePrefetchStyle=3"
        "-XX:-DontCompileHugeMethods"
        "-XX:MaxNodeLimit=240000"
        "-XX:NodeLimitFudgeFactor=8000"
        "-XX:ReservedCodeCacheSize=400M"
        "-XX:NonNMethodCodeHeapSize=12M"
        "-XX:ProfiledCodeHeapSize=194M"
        "-XX:NonProfiledCodeHeapSize=194M"
        "-XX:NmethodSweepActivity=1"
        "-XX:+UseVectorCmov"
        # Large pages (requires vm.nr_hugepages sysctl)
        "-XX:+UseLargePages"
        "-XX:LargePageSizeInBytes=${builtins.toString service_configs.minecraft.memory.large_page_size_m}M"
      ];
      serverProperties = {
        server-port = service_configs.ports.public.minecraft.port;
        enforce-whitelist = true;
        gamemode = "survival";
        white-list = true;
        difficulty = "easy";
        motd = "A Minecraft Server";
        view-distance = 10;
        simulation-distance = 6;
        sync-chunk-writes = false;
        spawn-protection = 0;
      };
      whitelist = import ../secrets/minecraft-whitelist.nix;
      symlinks = {
        # Mods pinned by URL + sha512; disabled entries document upstream gaps.
        "mods" = pkgs.linkFarmFromDrvs "mods" (
          with pkgs;
          builtins.attrValues {
            FabricApi = fetchurl {
              url = "https://cdn.modrinth.com/data/P7dR8mSH/versions/fm7UYECV/fabric-api-0.145.4%2B26.1.2.jar";
              sha512 = "ffd5ef62a745f76cd2e5481252cb7bc67006c809b4f436827d05ea22c01d19279e94a3b24df3d57e127af1cd08440b5de6a92a4ea8f39b2dcbbe1681275564c3";
            };
            # No 26.1.2 version available
            # FerriteCore = fetchurl {
            #   url = "https://cdn.modrinth.com/data/uXXizFIs/versions/d5ddUdiB/ferritecore-9.0.0-fabric.jar";
            #   sha512 = "d81fa97e11784c19d42f89c2f433831d007603dd7193cee45fa177e4a6a9c52b384b198586e04a0f7f63cd996fed713322578bde9a8db57e1188854ae5cbe584";
            # };
            Lithium = fetchurl {
              url = "https://cdn.modrinth.com/data/gvQqBUqZ/versions/v2xoRvRP/lithium-fabric-0.24.1%2Bmc26.1.2.jar";
              sha512 = "8711bc8c6f39be4c8511becb7a68e573ced56777bd691639f2fc62299b35bb4ccd2efe4a39bd9c308084b523be86a5f5c4bf921ab85f7a22bf075d8ea2359621";
            };
            NoChatReports = fetchurl {
              url = "https://cdn.modrinth.com/data/qQyHxfxd/versions/2yrLNE3S/NoChatReports-FABRIC-26.1-v2.19.0.jar";
              sha512 = "94d58a1a4cde4e3b1750bdf724e65c5f4ff3436c2532f36a465d497d26bf59f5ac996cddbff8ecdfed770c319aa2f2dcc9c7b2d19a35651c2a7735c5b2124dad";
            };
            squaremap = fetchurl {
              url = "https://cdn.modrinth.com/data/PFb7ZqK6/versions/UBN6MFvH/squaremap-fabric-mc26.1.2-1.3.13.jar";
              sha512 = "97bc130184b5d0ddc4ff98a15acef6203459d982e0e2afbd49a2976d546c55a86ef22b841378b51dd782be9b2cfbe4cfa197717f2b7f6800fd8b4ff4df6e564f";
            };
            scalablelux = fetchurl {
              url = "https://cdn.modrinth.com/data/Ps1zyz6x/versions/gYbHVCz8/ScalableLux-0.2.0%2Bfabric.2b63825-all.jar";
              sha512 = "48565a4d8a1cbd623f0044086d971f2c0cf1c40e1d0b6636a61d41512f4c1c1ddff35879d9dba24b088a670ee254e2d5842d13a30b6d76df23706fa94ea4a58b";
            };
            c2me = fetchurl {
              url = "https://cdn.modrinth.com/data/VSNURh3q/versions/yrNQQ1AQ/c2me-fabric-mc26.1.2-0.3.7%2Balpha.0.65.jar";
              sha512 = "6666ebaa3bfa403e386776590fc845b7c306107d37ebc7b1be3b057893fbf9f933abb2314c171d7fe19c177cf8823cb47fdc32040d34a9704f5ab656dd5d93f8";
            };
            # No 26.1 version available
            # krypton = fetchurl {
            #   url = "https://cdn.modrinth.com/data/fQEb0iXm/versions/O9LmWYR7/krypton-0.2.10.jar";
            #   sha512 = "4dcd7228d1890ddfc78c99ff284b45f9cf40aae77ef6359308e26d06fa0d938365255696af4cc12d524c46c4886cdcd19268c165a2bf0a2835202fe857da5cab";
            # };
            # No 26.1.2 version available
            # disconnect-packet-fix = fetchurl {
            #   url = "https://cdn.modrinth.com/data/rd9rKuJT/versions/x9gVeaTU/disconnect-packet-fix-fabric-2.1.0.jar";
            #   sha512 = "bf84d02bdcd737706df123e452dd31ef535580fa4ced6af1e4ceea022fef94e4764775253e970b8caa1292e2fa00eb470557f70b290fafdb444479fa801b07a1";
            # };
            packet-fixer = fetchurl {
              url = "https://cdn.modrinth.com/data/c7m1mi73/versions/M8PqPQr4/packetfixer-fabric-3.3.4-26.1.2.jar";
              sha512 = "698020edba2a1fd80bb282bfd4832a00d6447b08eaafbc2e16a8f3bf89e187fc9a622c92dfe94ae140dd485fc0220a86890f12158ec08054e473fef8337829bc";
            };
            # mVUS fork: upstream ModernFix no longer ships Fabric builds
            modernfix = fetchurl {
              url = "https://cdn.modrinth.com/data/TjSm1wrD/versions/dqQ7mabN/modernfix-5.26.2-build.1.jar";
              sha512 = "fbef93c2dabf7bcd0ccd670226dfc4958f7ebe5d8c2b1158e88a65e6954a40f595efd58401d2a3dbb224660dca5952199cf64df29100e7bd39b1b1941290b57b";
            };
            debugify = fetchurl {
              url = "https://cdn.modrinth.com/data/QwxR6Gcd/versions/mfTTfiKn/debugify-26.1.2%2B1.0.jar";
              sha512 = "63db82f2163b9f7fc27ebea999ffcd7a961054435b3ed7d8bf32d905b5f60ce81715916b7fd4e9509dd23703d5492059f3ce7e5f176402f8ed4f985a415553f4";
            };
          }
        );
      };
    };
  };
  # Fix: the unit name was hardcoded as "minecraft-server-main" while every
  # other reference in this file parameterizes it via
  # service_configs.minecraft.server_name; with any other server_name these
  # overrides would silently target a nonexistent unit.
  systemd.services."minecraft-server-${service_configs.minecraft.server_name}" = {
    serviceConfig = {
      Nice = -5;
      IOSchedulingPriority = 0;
      LimitMEMLOCK = "infinity"; # Required for large pages
    };
  };
  services.caddy.virtualHosts = lib.mkIf (config.services.caddy.enable) {
    "map.${service_configs.https.domain}".extraConfig = ''
      root * ${service_configs.minecraft.parent_dir}/${service_configs.minecraft.server_name}/squaremap/web
      file_server browse
    '';
  };
  users.users = lib.mkIf (config.services.caddy.enable) {
    ${config.services.caddy.user}.extraGroups = [
      # for `map.gardling.com`
      config.services.minecraft-servers.group
    ];
  };
}

View File

@@ -0,0 +1,36 @@
# MollySocket push relay for Molly (Signal), ZFS-backed state, proxied by
# Caddy over h2c.
{
  config,
  service_configs,
  lib,
  ...
}:
let
  msPort = service_configs.ports.private.mollysocket.port;
in
{
  imports = [
    (lib.serviceMountWithZpool "mollysocket" service_configs.zpool_ssds [
      "/var/lib/private/mollysocket"
    ])
    (lib.serviceFilePerms "mollysocket" [
      "Z /var/lib/private/mollysocket 0700 root root"
    ])
  ];
  services.mollysocket = {
    enable = true;
    environmentFile = config.age.secrets.mollysocket-env.path;
    settings = {
      host = "127.0.0.1";
      port = msPort;
      # Explicitly allow our self-hosted ntfy instance.
      # Local-network endpoints are denied by default for security.
      allowed_endpoints = [ "https://${service_configs.ntfy.domain}" ];
      # allowed_uuids set via MOLLY_ALLOWED_UUIDS in environmentFile
    };
  };
  # h2c = HTTP/2 over cleartext to the local mollysocket port.
  services.caddy.virtualHosts."${service_configs.mollysocket.domain}".extraConfig = ''
    reverse_proxy h2c://127.0.0.1:${toString msPort}
  '';
}

View File

@@ -0,0 +1,8 @@
# Monero stack: monerod full node, p2pool sidechain mining, xmrig CPU miner,
# and a watchdog that pauses xmrig under load.
{
  imports = [
    ./monero.nix
    ./p2pool.nix
    ./xmrig.nix
    ./xmrig-auto-pause.nix
  ];
}

View File

@@ -0,0 +1,37 @@
# monerod public full node: ZFS-backed blockchain dir, restricted RPC on all
# interfaces, ZMQ publisher on loopback for p2pool.
{
  service_configs,
  lib,
  ...
}:
let
  moneroCfg = service_configs.monero;
  publicPorts = service_configs.ports.public;
in
{
  imports = [
    (lib.serviceMountWithZpool "monero" service_configs.zpool_ssds [
      moneroCfg.dataDir
    ])
    (lib.serviceFilePerms "monero" [
      "Z ${moneroCfg.dataDir} 0700 monero monero"
    ])
  ];
  services.monero = {
    enable = true;
    dataDir = moneroCfg.dataDir;
    rpc = {
      address = "0.0.0.0";
      port = publicPorts.monero_rpc.port;
      restricted = true;
    };
    extraConfig = ''
      p2p-bind-port=${toString publicPorts.monero.port}
      zmq-pub=tcp://127.0.0.1:${toString service_configs.ports.private.monero_zmq.port}
      db-sync-mode=fast:async:1000000000bytes
      public-node=1
      confirm-external-bind=1
    '';
  };
  # P2P and the restricted RPC endpoint are public-facing.
  networking.firewall.allowedTCPPorts = [
    publicPorts.monero.port
    publicPorts.monero_rpc.port
  ];
}

View File

@@ -0,0 +1,39 @@
# p2pool nano-sidechain miner attached to the local monerod (RPC + ZMQ),
# exposing a stratum endpoint for xmrig.
{
  config,
  service_configs,
  lib,
  ...
}:
{
  imports = [
    (lib.serviceMountWithZpool "p2pool" service_configs.zpool_ssds [
      service_configs.p2pool.dataDir
    ])
    (lib.serviceFilePerms "p2pool" [
      "Z ${service_configs.p2pool.dataDir} 0700 p2pool p2pool"
    ])
  ];
  services.p2pool = {
    enable = true;
    dataDir = service_configs.p2pool.dataDir;
    walletAddress = service_configs.p2pool.walletAddress;
    sidechain = "nano";
    host = "127.0.0.1";
    rpcPort = service_configs.ports.public.monero_rpc.port;
    zmqPort = service_configs.ports.private.monero_zmq.port;
    extraArgs = [
      # NOTE(review): single list element containing a leading space and two
      # tokens -- this only works if the module joins extraArgs by plain
      # string concatenation; if it shell-escapes each element this becomes
      # one malformed argument. Confirm against the services.p2pool module.
      " --stratum 0.0.0.0:${builtins.toString service_configs.ports.private.p2pool_stratum.port}"
    ];
  };
  # Ensure p2pool starts after monero is ready
  systemd.services.p2pool = {
    after = [ "monero.service" ];
    wants = [ "monero.service" ];
  };
  networking.firewall.allowedTCPPorts = [
    service_configs.ports.public.p2pool_p2p.port
  ];
}

View File

@@ -0,0 +1,39 @@
# Watchdog companion for xmrig: runs xmrig-auto-pause.py, which stops the
# miner while other services need CPU (see threshold rationale below).
{
  config,
  lib,
  pkgs,
  ...
}:
# Only meaningful when the miner itself is enabled.
lib.mkIf config.services.xmrig.enable {
  systemd.services.xmrig-auto-pause = {
    description = "Auto-pause xmrig when other services need CPU";
    after = [ "xmrig.service" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.python3}/bin/python3 ${./xmrig-auto-pause.py}";
      Restart = "always";
      RestartSec = "10s";
      # Hardening: the script only needs the D-Bus socket and its state dir.
      NoNewPrivileges = true;
      ProtectHome = true;
      ProtectSystem = "strict";
      PrivateTmp = true;
      RestrictAddressFamilies = [
        "AF_UNIX" # systemctl talks to systemd over D-Bus unix socket
      ];
      MemoryDenyWriteExecute = true;
      # Creates and grants write access to /var/lib/xmrig-auto-pause.
      StateDirectory = "xmrig-auto-pause";
    };
    # Tunables consumed by xmrig-auto-pause.py via the environment.
    environment = {
      POLL_INTERVAL = "3";
      GRACE_PERIOD = "15";
      # Background services (qbittorrent, bitmagnet, postgresql, etc.) produce
      # 15-25% non-nice CPU during normal operation. The stop threshold must
      # sit above transient spikes; the resume threshold must be below the
      # steady-state floor to avoid restarting xmrig while services are active.
      CPU_STOP_THRESHOLD = "40";
      CPU_RESUME_THRESHOLD = "10";
      STARTUP_COOLDOWN = "10";
      STATE_DIR = "/var/lib/xmrig-auto-pause";
    };
  };
}

Some files were not shown because too many files have changed in this diff Show More