zfs: tune hdds pool
This commit is contained in:
@@ -10,6 +10,21 @@
# Kernel command-line parameters tuning the ZFS module for an HDD pool.
# The "zfs." prefix routes each value to the zfs kernel module as a
# module parameter at load time.
boot.kernelParams = [
  # Longer TXG open time = larger sequential writes.
  "zfs.zfs_txg_timeout=120"

  # vdev I/O scheduler: feed more concurrent reads to the block scheduler so
  # mq-deadline has a larger pool of requests to sort and merge into elevator
  # sweeps. Default async_read_max is 3 — far too few for effective
  # coalescence. 32 was empirically optimal (64 overwhelmed the drives,
  # 3 gave near-zero merges).
  "zfs.zfs_vdev_async_read_max_active=32"
  "zfs.zfs_vdev_async_read_min_active=4"

  # Merge reads within 128 KiB of each other (default 32 KiB). On HDDs,
  # reading a 128 KiB gap is far cheaper than a mechanical seek (~8 ms).
  "zfs.zfs_vdev_read_gap_limit=131072"

  # Allow ZFS to aggregate I/Os up to 4 MiB (default 1 MiB), matching the
  # libtorrent piece extent size for larger sequential disk operations.
  "zfs.zfs_vdev_aggregation_limit=4194304"
];
# Enable ZFS filesystem support in the built system (pulls in the zfs
# kernel module and userland tools so pools can be imported/mounted).
boot.supportedFilesystems = [ "zfs" ];
Reference in New Issue
Block a user