zfs: tune hdds pool
@@ -21,4 +21,15 @@
hardware.cpu.amd.updateMicrocode = true;
hardware.enableRedistributableFirmware = true;

# HDD I/O tuning for the torrent-seeding workload (high-concurrency random reads).
# mq-deadline sorts requests into elevator sweeps, reducing seek distance.
# Long deadlines (15 s) let the scheduler accumulate more ops before dispatching,
# maximizing coalescence; latency is irrelevant since torrent peers tolerate stalls of 30-60 s.
# fifo_batch=128 keeps sweeps long; writes_starved=16 heavily favors reads.
# 4 MiB readahead matches libtorrent's piece extent affinity for sequential prefetch.
services.udev.extraRules = ''
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="1", ATTR{queue/scheduler}="mq-deadline", ATTR{queue/read_ahead_kb}="4096", ATTR{queue/nr_requests}="512"
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="1", ENV{ID_ATA_ROTATION_RATE_RPM}!="0", RUN+="${pkgs.bash}/bin/bash -c 'echo 15000 > /sys$devpath/queue/iosched/read_expire; echo 15000 > /sys$devpath/queue/iosched/write_expire; echo 128 > /sys$devpath/queue/iosched/fifo_batch; echo 16 > /sys$devpath/queue/iosched/writes_starved; echo 4096 > /sys$devpath/queue/max_sectors_kb 2>/dev/null || true'"
'';
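# A minimal sketch (the unit name and the single device sda are illustrative,
# not part of the rules above) of re-applying the same iosched values from a
# oneshot unit instead of the udev RUN key, e.g. while benchmarking:
systemd.services.hdd-iosched-tune = {
  description = "Re-apply mq-deadline tuning for sda (sketch)";
  wantedBy = [ "multi-user.target" ];
  serviceConfig.Type = "oneshot";
  script = ''
    echo mq-deadline > /sys/block/sda/queue/scheduler
    echo 4096 > /sys/block/sda/queue/read_ahead_kb
    echo 15000 > /sys/block/sda/queue/iosched/read_expire
    echo 15000 > /sys/block/sda/queue/iosched/write_expire
    echo 128 > /sys/block/sda/queue/iosched/fifo_batch
    echo 16 > /sys/block/sda/queue/iosched/writes_starved
  '';
};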
}

@@ -10,6 +10,21 @@
boot.kernelParams = [
"zfs.zfs_txg_timeout=120" # longer TXG open time = larger sequential writes
# vdev I/O scheduler: feed more concurrent reads to the block scheduler so
# mq-deadline has a larger pool of requests to sort and merge into elevator sweeps.
# Default async_read_max is 3 — far too few for effective coalescence.
# 32 was empirically optimal (64 overwhelmed the drives, 3 gave near-zero merges).
"zfs.zfs_vdev_async_read_max_active=32"
"zfs.zfs_vdev_async_read_min_active=4"
# Merge reads within 128 KiB of each other (default 32 KiB). On HDDs, reading a
# 128 KiB gap is far cheaper than a mechanical seek (~8 ms).
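# (Arithmetic, assuming ~150 MB/s sequential throughput: transferring an extra
# 128 KiB costs roughly 0.9 ms versus the ~8 ms seek it avoids.)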
"zfs.zfs_vdev_read_gap_limit=131072"
|
||||
|
||||
# Allow ZFS to aggregate I/Os up to 4 MiB (default 1 MiB), matching the
# libtorrent piece extent size for larger sequential disk operations.
"zfs.zfs_vdev_aggregation_limit=4194304"
];
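# The same module parameters could also be set via modprobe options instead of
# the kernel command line; a sketch of the equivalent form (redundant next to
# the kernelParams above, hence shown commented out):
#   boot.extraModprobeConfig = ''
#     options zfs zfs_txg_timeout=120
#     options zfs zfs_vdev_async_read_max_active=32
#   '';
# Active values can be confirmed at runtime under /sys/module/zfs/parameters/.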

boot.supportedFilesystems = [ "zfs" ];