284 lines
8.9 KiB
Nix
284 lines
8.9 KiB
Nix
{
|
|
inputs,
|
|
pkgs,
|
|
service_configs,
|
|
...
|
|
}:
|
|
inputs.nixpkgs.lib.extend (
|
|
final: prev:
|
|
let
|
|
lib = prev;
|
|
in
|
|
{
|
|
# Rebuild `pkg` with extra compiler `flags` appended to NIX_CFLAGS_COMPILE.
# Preserves any flags the derivation already sets, whether they live in
# `env.NIX_CFLAGS_COMPILE` or as a top-level NIX_CFLAGS_COMPILE attribute.
optimizeWithFlags =
  pkg: flags:
  pkg.overrideAttrs (old: {
    env = (old.env or { }) // {
      NIX_CFLAGS_COMPILE =
        # `toString` normalizes a list-valued NIX_CFLAGS_COMPILE (some
        # derivations use a list of flags) into a space-joined string, so
        # the `+` concatenation below cannot fail on a non-string value.
        toString (old.env.NIX_CFLAGS_COMPILE or old.NIX_CFLAGS_COMPILE or "")
        + " "
        + (lib.concatStringsSep " " flags);
    };
  });
|
|
|
|
# Apply the host's standard optimization flags (-O3 plus arch tuning
# from service_configs.cpu_arch) to a package.
optimizePackage =
  pkg:
  let
    arch = service_configs.cpu_arch;
  in
  final.optimizeWithFlags pkg [
    "-O3"
    "-march=${arch}"
    "-mtune=${arch}"
  ];
|
|
|
|
# NixOS module that confines `service` to the "wg" VPN namespace and
# forwards `port` both into the namespace and through the VPN endpoint
# (tcp and udp).
vpnNamespaceOpenPort =
  port: service:
  { ... }:
  let
    nsName = "wg";
  in
  {
    vpnNamespaces.${nsName} = {
      # Map the port from the host side into the namespace unchanged.
      portMappings = [
        {
          from = port;
          to = port;
        }
      ];

      # Also open the same port on the VPN side for both protocols.
      openVPNPorts = [
        {
          port = port;
          protocol = "both";
        }
      ];
    };

    systemd.services.${service}.vpnConfinement = {
      enable = true;
      vpnNamespace = nsName;
    };
  };
|
|
|
|
# Module factory: creates a oneshot "<serviceName>-mounts" unit that
# verifies the given ZFS-backed directories are mounted before
# <serviceName>.service may start. When `zpool` is non-empty, it
# additionally verifies at runtime that every directory's dataset
# belongs to that pool, and orders the unit after the pool's import
# service. The target service is wired to want/require/run after the
# mounts unit, so a failed validation blocks it from starting.
serviceMountWithZpool =
  serviceName: zpool: dirs:
  { pkgs, config, ... }:
  {
    systemd.services."${serviceName}-mounts" = {
      # Pull in zfs.target, and the pool's import unit when a pool is named.
      wants = [ "zfs.target" ] ++ lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
      after = lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
      before = [ "${serviceName}.service" ];

      serviceConfig = {
        Type = "oneshot";
        # Keep the unit "active" after success so dependents don't retrigger it.
        RemainAfterExit = true;
        ExecStart = [
          (lib.getExe (
            pkgs.writeShellApplication {
              name = "ensure-zfs-mounts-with-pool-${serviceName}-${zpool}";
              runtimeInputs = with pkgs; [
                gawk
                coreutils
                # Provides the `zfs` CLI matching the booted kernel module.
                config.boot.zfs.package
              ];

              text = ''
                set -euo pipefail

                echo "Ensuring ZFS mounts for service: ${serviceName} (pool: ${zpool})"
                echo "Directories: ${lib.strings.concatStringsSep ", " dirs}"

                # Validate mounts exist (ensureZfsMounts already has proper PATH)
                # NOTE(review): pkgs.ensureZfsMounts is a project-provided
                # package (overlay), not from nixpkgs — presumably it fails
                # non-zero when a directory is not a ZFS mountpoint.
                ${lib.getExe pkgs.ensureZfsMounts} ${lib.strings.concatStringsSep " " dirs}

                # Additional runtime check: verify paths are on correct zpool
                ${lib.optionalString (zpool != "") ''
                  echo "Verifying ZFS mountpoints are on pool '${zpool}'..."

                  # -H: no header, tab-separated — stable for awk parsing below.
                  if ! zfs_list_output=$(zfs list -H -o name,mountpoint 2>&1); then
                    echo "ERROR: Failed to query ZFS datasets: $zfs_list_output" >&2
                    exit 1
                  fi

                  # shellcheck disable=SC2043
                  for target in ${lib.strings.concatStringsSep " " dirs}; do
                    echo "Checking: $target"

                    # Find dataset that has this mountpoint
                    dataset=$(echo "$zfs_list_output" | awk -v target="$target" '$2 == target {print $1; exit}')

                    if [ -z "$dataset" ]; then
                      echo "ERROR: No ZFS dataset found for mountpoint: $target" >&2
                      exit 1
                    fi

                    # Extract pool name from dataset (first part before /)
                    actual_pool=$(echo "$dataset" | cut -d'/' -f1)

                    if [ "$actual_pool" != "${zpool}" ]; then
                      echo "ERROR: ZFS pool mismatch for $target" >&2
                      echo " Expected pool: ${zpool}" >&2
                      echo " Actual pool: $actual_pool" >&2
                      echo " Dataset: $dataset" >&2
                      exit 1
                    fi

                    echo "$target is on $dataset (pool: $actual_pool)"
                  done

                  echo "All paths verified successfully on pool '${zpool}'"
                ''}

                echo "Mount validation completed for ${serviceName} (pool: ${zpool})"
              '';
            }
          ))
        ];
      };
    };

    # Hard dependency: the service only starts after the mounts unit
    # has succeeded (requires + after), and starting it pulls the
    # mounts unit in (wants).
    systemd.services.${serviceName} = {
      wants = [
        "${serviceName}-mounts.service"
      ];
      after = [
        "${serviceName}-mounts.service"
      ];
      requires = [
        "${serviceName}-mounts.service"
      ];
    };

    # assert that the pool is even enabled
    #assertions = lib.optionals (zpool != "") [
    #  {
    #    assertion = builtins.elem zpool config.boot.zfs.extraPools;
    #    message = "${zpool} is not enabled in `boot.zfs.extraPools`";
    #  }
    #];
  };
|
|
|
|
# Applies a list of systemd-tmpfiles rules in a oneshot unit that runs
# after <serviceName>'s mounts unit and before <serviceName> itself, so
# ownership/permissions land on the real (mounted) filesystem.
serviceFilePerms =
  serviceName: tmpfilesRules:
  { pkgs, ... }:
  let
    permsUnit = "${serviceName}-file-perms";
    rulesFile = pkgs.writeText "${permsUnit}.conf" (lib.concatStringsSep "\n" tmpfilesRules);
  in
  {
    systemd.services.${permsUnit} = {
      after = [ "${serviceName}-mounts.service" ];
      before = [ "${serviceName}.service" ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        ExecStart = "${pkgs.systemd}/bin/systemd-tmpfiles --create ${rulesFile}";
      };
    };

    # Starting the service pulls in and waits for the perms unit.
    systemd.services.${serviceName} = {
      wants = [ "${permsUnit}.service" ];
      after = [ "${permsUnit}.service" ];
    };
  };
|
|
# Caddy virtualHost factory: reverse-proxies a domain to a local port,
# or to the "wg" VPN namespace address when `vpn = true`, optionally
# guarded by an auth snippet imported from the agenix caddy_auth secret.
# Pass `subdomain` for "<subdomain>.<base domain>" or `domain` for a
# full custom domain — exactly one of the two must be set.
mkCaddyReverseProxy =
  {
    subdomain ? null,
    domain ? null,
    port,
    auth ? false,
    vpn ? false,
  }:
  assert (subdomain != null) != (domain != null);
  { config, ... }:
  let
    vhostDomain = if domain != null then domain else "${subdomain}.${service_configs.https.domain}";
    # Empty host means "proxy to this machine" (Caddy ":port" form).
    host = if vpn then config.vpnNamespaces.wg.namespaceAddress else "";
    upstream = "${host}:${builtins.toString port}";
    directives =
      lib.optional auth "import ${config.age.secrets.caddy_auth.path}"
      ++ [ "reverse_proxy ${upstream}" ];
  in
  {
    services.caddy.virtualHosts."${vhostDomain}".extraConfig = lib.concatStringsSep "\n" directives;
  };
|
|
|
|
# fail2ban jail factory for the common journal-based pattern: matches
# `failregex` against the systemd journal of `unitName` and bans on the
# http/https ports, leaving thresholds at fail2ban's defaults.
mkFail2banJail =
  {
    name,
    unitName ? "${name}.service",
    failregex,
  }:
  { ... }:
  let
    jail = {
      enabled = true;
      settings = {
        backend = "systemd";
        port = "http,https";
        # maxretry/findtime/bantime intentionally left at fail2ban's
        # defaults (5 retries / 10m window / 10m ban).
      };
      filter.Definition = {
        failregex = failregex;
        ignoreregex = "";
        # Restrict journal scanning to the watched unit's messages.
        journalmatch = "_SYSTEMD_UNIT=${unitName}";
      };
    };
  in
  {
    services.fail2ban.jails.${name} = jail;
  };
|
|
|
|
# Hardened systemd unit running a Python Grafana-annotation daemon.
# Provides DynamicUser sandboxing, a state directory, and the
# GRAFANA_URL / STATE_FILE environment variables; `environment` entries
# are merged last and may override the defaults. `loadCredential`, when
# given, is passed through as systemd's LoadCredential=.
mkGrafanaAnnotationService =
  {
    name,
    description,
    script,
    after ? [ ],
    environment ? { },
    loadCredential ? null,
  }:
  let
    unitName = "${name}-annotations";
    # Sandboxing knobs shared by all annotation daemons.
    hardening = {
      DynamicUser = true;
      NoNewPrivileges = true;
      ProtectSystem = "strict";
      ProtectHome = true;
      PrivateTmp = true;
      RestrictAddressFamilies = [
        "AF_INET"
        "AF_INET6"
      ];
      MemoryDenyWriteExecute = true;
    };
    credentials = lib.optionalAttrs (loadCredential != null) {
      LoadCredential = loadCredential;
    };
  in
  {
    systemd.services.${unitName} = {
      inherit description;
      after = [
        "network.target"
        "grafana.service"
      ]
      ++ after;
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        ExecStart = "${pkgs.python3}/bin/python3 ${script}";
        Restart = "always";
        RestartSec = "10s";
        StateDirectory = unitName;
      }
      // hardening
      // credentials;
      environment = {
        GRAFANA_URL = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
        STATE_FILE = "/var/lib/${unitName}/state.json";
      }
      // environment;
    };
  };
|
|
|
|
# Shell snippet (for use inside $(...) command substitution) that pulls
# the <ApiKey> element's value out of an *arr service's config.xml.
extractArrApiKey =
  configXmlPath:
  let
    grep = lib.getExe pkgs.gnugrep;
  in
  "${grep} -oP '(?<=<ApiKey>)[^<]+' ${configXmlPath}";
|
|
}
|
|
)
|