new site-config.nix holds values previously duplicated across hosts:
domain, old_domain, contact_email, timezone, binary_cache (url + pubkey),
dns_servers, lan (cidr + gateway), hosts.{muffin,yarn} (ip/alias/ssh_host_key),
ssh_keys.{laptop,desktop,ci_deploy}.
threaded through specialArgs on all three hosts + home-manager extraSpecialArgs +
homeConfigurations.primary + serverLib. service-configs.nix now takes
{ site_config } as a function arg and drops its https namespace; per-service
domains (gitea/matrix/ntfy/mollysocket/livekit/firefox-sync/grafana) are
derived from site_config.domain. ~15 service files and 6 vm tests migrated.
breakage fixes rolled in:
- home/progs/zen/dark-reader.nix: 5 stale *.gardling.com entries in
disabledFor rewritten to *.sigkill.computer (caddy 301s the old names so
these never fired and the new sigkill urls were getting dark-reader applied)
- modules/desktop-common.nix: drop unused hugepagesz=1G/hugepages=3
kernelParams (no consumer on mreow or yarn; xmrig on muffin still reserves
its own via services/monero/xmrig.nix)
verification: muffin toplevel is bit-identical to pre-refactor baseline.
mreow/yarn toplevels differ only in boot.json kernelParams + darkreader
storage.js (nix-diff verified). deployGuardTest and fail2banVaultwardenTest
(latter exercises site_config.domain via bitwarden.nix) pass.
290 lines
9.0 KiB
Nix
{
  inputs,
  pkgs,
  service_configs,
  site_config,
  lib ? inputs.nixpkgs-stable.lib,
  ...
}:

lib.extend (
  final: prev:
  let
    # Alias the un-extended lib so the helpers below resolve library
    # functions through `prev` rather than recursing via `final`.
    lib = prev;
  in
  {
    # Rebuild `pkg` with extra compiler flags appended to
    # NIX_CFLAGS_COMPILE. Existing flags are preserved whether they live
    # in the structured `env` attrset or as a top-level derivation
    # attribute.
    optimizeWithFlags =
      pkg: flags:
      pkg.overrideAttrs (
        old:
        let
          # `or` walks the whole attrpath, so a missing `old.env` falls
          # through to the legacy top-level attribute, then to "".
          priorFlags = old.env.NIX_CFLAGS_COMPILE or old.NIX_CFLAGS_COMPILE or "";
          extraFlags = lib.concatStringsSep " " flags;
        in
        {
          env = (old.env or { }) // {
            NIX_CFLAGS_COMPILE = priorFlags + " " + extraFlags;
          };
        }
      );
optimizePackage =
|
|
pkg:
|
|
final.optimizeWithFlags pkg [
|
|
"-O3"
|
|
"-march=${service_configs.cpu_arch}"
|
|
"-mtune=${service_configs.cpu_arch}"
|
|
];
|
|
|
|
vpnNamespaceOpenPort =
|
|
port: service:
|
|
{ ... }:
|
|
{
|
|
vpnNamespaces.wg = {
|
|
portMappings = [
|
|
{
|
|
from = port;
|
|
to = port;
|
|
}
|
|
];
|
|
|
|
openVPNPorts = [
|
|
{
|
|
port = port;
|
|
protocol = "both";
|
|
}
|
|
];
|
|
};
|
|
systemd.services.${service}.vpnConfinement = {
|
|
enable = true;
|
|
vpnNamespace = "wg";
|
|
};
|
|
};
|
|
|
|
    # NixOS module factory: serviceMountWithZpool serviceName zpool dirs
    #
    # Creates a oneshot "<serviceName>-mounts" unit, ordered before
    # <serviceName>.service, that validates every path in `dirs` is a
    # mounted ZFS dataset (via pkgs.ensureZfsMounts) and — when `zpool`
    # is non-empty — that each dataset belongs to that pool. The main
    # service gains wants/after/requires on the mounts unit, so it will
    # not start (and is stopped) if validation fails.
    #
    # Pass zpool = "" to skip the pool-membership check and the
    # zfs-import-<pool> ordering.
    serviceMountWithZpool =
      serviceName: zpool: dirs:
      { pkgs, config, ... }:
      {
        systemd.services."${serviceName}-mounts" = {
          # Pull in ZFS mounting, and the pool's import unit when a
          # specific pool is named.
          wants = [
            "zfs.target"
            "zfs-mount.service"
          ]
          ++ lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
          after = [ "zfs-mount.service" ] ++ lib.optionals (zpool != "") [ "zfs-import-${zpool}.service" ];
          before = [ "${serviceName}.service" ];

          serviceConfig = {
            Type = "oneshot";
            # Keep "active" after exit so Requires= in the main service
            # stays satisfied.
            RemainAfterExit = true;
            ExecStart = [
              (lib.getExe (
                pkgs.writeShellApplication {
                  name = "ensure-zfs-mounts-with-pool-${serviceName}-${zpool}";
                  runtimeInputs = with pkgs; [
                    gawk
                    coreutils
                    config.boot.zfs.package
                  ];

                  text = ''
                    set -euo pipefail

                    echo "Ensuring ZFS mounts for service: ${serviceName} (pool: ${zpool})"
                    echo "Directories: ${lib.strings.concatStringsSep ", " dirs}"

                    # Validate mounts exist (ensureZfsMounts already has proper PATH)
                    ${lib.getExe pkgs.ensureZfsMounts} ${lib.strings.concatStringsSep " " dirs}

                    # Additional runtime check: verify paths are on correct zpool
                    ${lib.optionalString (zpool != "") ''
                      echo "Verifying ZFS mountpoints are on pool '${zpool}'..."

                      if ! zfs_list_output=$(zfs list -H -o name,mountpoint 2>&1); then
                        echo "ERROR: Failed to query ZFS datasets: $zfs_list_output" >&2
                        exit 1
                      fi

                      # shellcheck disable=SC2043
                      for target in ${lib.strings.concatStringsSep " " dirs}; do
                        echo "Checking: $target"

                        # Find dataset that has this mountpoint
                        dataset=$(echo "$zfs_list_output" | awk -v target="$target" '$2 == target {print $1; exit}')

                        if [ -z "$dataset" ]; then
                          echo "ERROR: No ZFS dataset found for mountpoint: $target" >&2
                          exit 1
                        fi

                        # Extract pool name from dataset (first part before /)
                        actual_pool=$(echo "$dataset" | cut -d'/' -f1)

                        if [ "$actual_pool" != "${zpool}" ]; then
                          echo "ERROR: ZFS pool mismatch for $target" >&2
                          echo " Expected pool: ${zpool}" >&2
                          echo " Actual pool: $actual_pool" >&2
                          echo " Dataset: $dataset" >&2
                          exit 1
                        fi

                        echo "$target is on $dataset (pool: $actual_pool)"
                      done

                      echo "All paths verified successfully on pool '${zpool}'"
                    ''}

                    echo "Mount validation completed for ${serviceName} (pool: ${zpool})"
                  '';
                }
              ))
            ];
          };
        };

        # Tie the main service to the validation unit: ordering (after),
        # propagation (wants), and hard dependency (requires).
        systemd.services.${serviceName} = {
          wants = [
            "${serviceName}-mounts.service"
          ];
          after = [
            "${serviceName}-mounts.service"
          ];
          requires = [
            "${serviceName}-mounts.service"
          ];
        };

        # assert that the pool is even enabled
        #assertions = lib.optionals (zpool != "") [
        #  {
        #    assertion = builtins.elem zpool config.boot.zfs.extraPools;
        #    message = "${zpool} is not enabled in `boot.zfs.extraPools`";
        #  }
        #];
      };
serviceFilePerms =
|
|
serviceName: tmpfilesRules:
|
|
{ pkgs, ... }:
|
|
let
|
|
confFile = pkgs.writeText "${serviceName}-file-perms.conf" (
|
|
lib.concatStringsSep "\n" tmpfilesRules
|
|
);
|
|
in
|
|
{
|
|
systemd.services."${serviceName}-file-perms" = {
|
|
after = [ "${serviceName}-mounts.service" ];
|
|
before = [ "${serviceName}.service" ];
|
|
serviceConfig = {
|
|
Type = "oneshot";
|
|
RemainAfterExit = true;
|
|
ExecStart = "${pkgs.systemd}/bin/systemd-tmpfiles --create ${confFile}";
|
|
};
|
|
};
|
|
|
|
systemd.services.${serviceName} = {
|
|
wants = [ "${serviceName}-file-perms.service" ];
|
|
after = [ "${serviceName}-file-perms.service" ];
|
|
};
|
|
};
|
|
# Creates a Caddy virtualHost with reverse_proxy to a local or VPN-namespaced port.
|
|
# Use `subdomain` for "<name>.${domain}" or `domain` for a full custom domain.
|
|
# Exactly one of `subdomain` or `domain` must be provided.
|
|
mkCaddyReverseProxy =
|
|
{
|
|
subdomain ? null,
|
|
domain ? null,
|
|
port,
|
|
auth ? false,
|
|
vpn ? false,
|
|
}:
|
|
assert (subdomain != null) != (domain != null);
|
|
{ config, ... }:
|
|
let
|
|
vhostDomain = if domain != null then domain else "${subdomain}.${site_config.domain}";
|
|
upstream =
|
|
if vpn then
|
|
"${config.vpnNamespaces.wg.namespaceAddress}:${builtins.toString port}"
|
|
else
|
|
":${builtins.toString port}";
|
|
in
|
|
{
|
|
services.caddy.virtualHosts."${vhostDomain}".extraConfig = lib.concatStringsSep "\n" (
|
|
lib.optional auth "import ${config.age.secrets.caddy_auth.path}" ++ [ "reverse_proxy ${upstream}" ]
|
|
);
|
|
};
|
|
|
|
# Creates a fail2ban jail with systemd journal backend.
|
|
# Covers the common pattern: journal-based detection, http/https ports, default thresholds.
|
|
mkFail2banJail =
|
|
{
|
|
name,
|
|
unitName ? "${name}.service",
|
|
failregex,
|
|
}:
|
|
{ ... }:
|
|
{
|
|
services.fail2ban.jails.${name} = {
|
|
enabled = true;
|
|
settings = {
|
|
backend = "systemd";
|
|
port = "http,https";
|
|
# defaults: maxretry=5, findtime=10m, bantime=10m
|
|
};
|
|
filter.Definition = {
|
|
inherit failregex;
|
|
ignoreregex = "";
|
|
journalmatch = "_SYSTEMD_UNIT=${unitName}";
|
|
};
|
|
};
|
|
};
|
|
|
|
# Creates a hardened Grafana annotation daemon service.
|
|
# Provides DynamicUser, sandboxing, state directory, and GRAFANA_URL/STATE_FILE automatically.
|
|
mkGrafanaAnnotationService =
|
|
{
|
|
name,
|
|
description,
|
|
script,
|
|
after ? [ ],
|
|
environment ? { },
|
|
loadCredential ? null,
|
|
}:
|
|
{
|
|
systemd.services."${name}-annotations" = {
|
|
inherit description;
|
|
after = [
|
|
"network.target"
|
|
"grafana.service"
|
|
]
|
|
++ after;
|
|
wantedBy = [ "multi-user.target" ];
|
|
serviceConfig = {
|
|
ExecStart = "${pkgs.python3}/bin/python3 ${script}";
|
|
Restart = "always";
|
|
RestartSec = "10s";
|
|
DynamicUser = true;
|
|
StateDirectory = "${name}-annotations";
|
|
NoNewPrivileges = true;
|
|
ProtectSystem = "strict";
|
|
ProtectHome = true;
|
|
PrivateTmp = true;
|
|
RestrictAddressFamilies = [
|
|
"AF_INET"
|
|
"AF_INET6"
|
|
];
|
|
MemoryDenyWriteExecute = true;
|
|
}
|
|
// lib.optionalAttrs (loadCredential != null) {
|
|
LoadCredential = loadCredential;
|
|
};
|
|
environment = {
|
|
GRAFANA_URL = "http://127.0.0.1:${toString service_configs.ports.private.grafana.port}";
|
|
STATE_FILE = "/var/lib/${name}-annotations/state.json";
|
|
}
|
|
// environment;
|
|
};
|
|
};
|
|
|
|
    # Shell command to extract an API key from an *arr config.xml file.
    # Returns a string suitable for $() command substitution in shell scripts.
    extractArrApiKey =
      # Uses GNU grep's PCRE lookbehind to print the text content of the
      # <ApiKey> element without its surrounding tags.
      configXmlPath: "${lib.getExe pkgs.gnugrep} -oP '(?<=<ApiKey>)[^<]+' ${configXmlPath}";
  }
)