zfs: tune hdds pool

This commit is contained in:
2026-03-28 01:21:48 -07:00
parent 84898a1ee7
commit 2409d1b01b
2 changed files with 26 additions and 0 deletions

View File

@@ -10,6 +10,21 @@
# Kernel command-line parameters tuning the ZFS kernel module for large
# sequential workloads on spinning disks (the "zfs." prefix sets OpenZFS
# module parameters at load time; they apply to all pools on this host).
boot.kernelParams = [
"zfs.zfs_txg_timeout=120" # longer TXG open time = larger sequential writes
# vdev I/O scheduler: feed more concurrent reads to the block scheduler so
# mq-deadline has a larger pool of requests to sort and merge into elevator sweeps.
# Default async_read_max is 3 — far too few for effective coalescing.
# 32 was empirically optimal (64 overwhelmed the drives, 3 gave near-zero merges).
"zfs.zfs_vdev_async_read_max_active=32"
# Matching floor: keep a few async reads in flight per vdev so the queue
# never fully drains between bursts and merging can continue.
"zfs.zfs_vdev_async_read_min_active=4"
# Merge reads within 128 KiB of each other (default 32 KiB). On HDDs, reading a
# 128 KiB gap is far cheaper than a mechanical seek (~8 ms).
"zfs.zfs_vdev_read_gap_limit=131072"
# Allow ZFS to aggregate I/Os up to 4 MiB (default 1 MiB), matching the
# libtorrent piece extent size for larger sequential disk operations.
"zfs.zfs_vdev_aggregation_limit=4194304"
];
# Enable ZFS filesystem support system-wide.
boot.supportedFilesystems = [ "zfs" ];