#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright 2019 Joyent, Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/refreserv/refreserv.cfg

#
# DESCRIPTION:
#	raidz refreservation=auto picks worst raidz vdev
#
# STRATEGY:
#	1. Create a pool with a single raidz vdev
#	2. For each block size [512b, 1k, 128k] or [4k, 8k, 128k]
#	    - create a volume
#	    - remember its refreservation
#	    - destroy the volume
#	3. Destroy the pool
#	4. Recreate the pool with one more disk in the vdev, then repeat steps
#	   2 and 3.
#
# NOTES:
#	1. This test will use up to 14 disks but can cover the key concepts
#	   with 5 disks.
#	2. If the disks are a mixture of 4Kn and 512n/512e, failures are
#	   likely.
#

verify_runnable "global"

typeset -a alldisks=($DISKS)

# The larger the volsize, the better zvol_volsize_to_reservation() is at
# guessing the right number - though it is horrible with tiny blocks. At 10M on
# ashift=12, the estimate may be over 26% too high.
# NOTE(review): the volsize assignment was lost in extraction; ${volsize}m is
# used by every 'zfs create -V' below — confirm the value against upstream.
volsize=100

# NOTE(review): these two teardown/re-setup calls only make sense inside an
# exit handler, not run unconditionally at the top of the test; the function
# wrapper and log_onexit registration are reconstructed — confirm.
function cleanup
{
	default_cleanup_noexit
	default_setup_noexit "${alldisks[0]}"
}

log_onexit cleanup
log_assert "raidz refreservation=auto picks worst raidz vdev"

# Start from a clean slate; each phase below creates $TESTPOOL itself.
poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"

# Testing tiny block sizes on ashift=12 pools causes so much size inflation
# that small test disks may fill before creating small volumes. However,
# testing 512b and 1K blocks on ashift=9 pools is an ok approximation for
# testing the problems that arise from 4K and 8K blocks on ashift=12 pools.
#
# Probe the physical sector size of the first test disk. diskinfo(8) is the
# FreeBSD tool and lsblk(8) the Linux one, so the platform guard is required
# (reconstructed — the two assignments cannot both run unconditionally).
if is_freebsd; then
	bps=$(diskinfo -v ${alldisks[0]} | awk '/sectorsize/ { print $1 }')
elif is_linux; then
	bps=$(lsblk -nrdo min-io /dev/${alldisks[0]})
fi

# Choose the volblocksize shifts to exercise: tiny blocks matched to the
# native sector size, plus 128k (1 << 17) as the common large case.
# NOTE(review): the 512/4096 arms are reconstructed from the visible
# "[512b, 1k, 128k] or [4k, 8k, 128k]" strategy comment — confirm.
case "$bps" in
512)
	allshifts=(9 10 17)
	;;
4096)
	allshifts=(12 13 17)
	;;
*)
	log_fail "bytes/sector: $bps != (512|4096)"
	;;
esac
log_note "Testing in ashift=${allshifts[0]} mode"
#
# Determine the refreservation for a $volsize MiB volume on each raidz type at
# various block sizes.
#

# NOTE(review): declaration reconstructed — the nested subscripts below
# (sizes["$raid"]["$ndisks"]["$vbs"]) require a ksh93 compound/associative
# array rooted here.
typeset -A sizes=

for parity in 1 2 3; do
	raid=raidz$parity
	typeset -A sizes["$raid"]

	# Ensure we hit scenarios with and without skip blocks
	for ndisks in $((parity * 2)) $((parity * 2 + 1)); do
		typeset -a disks=(${alldisks[0..$((ndisks - 1))]})

		# The slice silently shrinks when $DISKS has fewer than
		# $ndisks entries; skip that combination rather than fail.
		if (( ${#disks[@]} < ndisks )); then
			log_note "Too few disks to test $raid-$ndisks"
			continue
		fi

		typeset -A sizes["$raid"]["$ndisks"]

		log_must zpool create "$TESTPOOL" "$raid" "${disks[@]}"

		for bits in "${allshifts[@]}"; do
			# volblocksize must be a power of two.
			vbs=$((1 << bits))
			log_note "Gathering refreservation for $raid-$ndisks" \
			    "volblocksize=$vbs"

			vol=$TESTPOOL/$TESTVOL
			log_must zfs create -V ${volsize}m \
			    -o volblocksize=$vbs "$vol"

			# -Hp gives a bare, parsable byte count.
			refres=$(zfs get -Hpo value refreservation "$vol")
			log_must test -n "$refres"
			sizes["$raid"]["$ndisks"]["$vbs"]=$refres

			log_must_busy zfs destroy "$vol"
		done

		log_must_busy zpool destroy "$TESTPOOL"
	done
done
# A little extra info is always helpful when diagnosing problems. To
# pretty-print what you find in the log, do this in ksh:
#	typeset -A sizes=(...)
#	print -v sizes
log_note "sizes=$(print -C sizes)"
#
# Helper function for checking that refreservation is calculated properly in
# multi-vdev pools. "Properly" is defined as assuming that all vdevs are as
# space inefficient as the worst one.
#
# Arguments:
#	$1 - raidz variant (raidz1, raidz2, raidz3)
#	$2 - disk count of the first vdev
#	$3 - disk count of the second vdev
#
function check_vdevs {
	typeset raid=$1
	typeset nd1=$2
	typeset nd2=$3
	typeset -a disks1 disks2
	typeset vbs vol refres refres1 refres2 expect

	# Carve two disjoint disk sets out of $alldisks.
	disks1=(${alldisks[0..$((nd1 - 1))]})
	disks2=(${alldisks[$nd1..$((nd1 + nd2 - 1))]})
	if (( ${#disks2[@]} < nd2 )); then
		# Fixed: message previously read "$raid=$nd2"; use the same
		# "$raid-<ndisks>" form as the rest of the log output.
		log_note "Too few disks to test $raid-$nd1 + $raid-$nd2"
		return
	fi

	log_must zpool create -f "$TESTPOOL" \
	    "$raid" "${disks1[@]}" "$raid" "${disks2[@]}"

	for bits in "${allshifts[@]}"; do
		vbs=$((1 << bits))
		log_note "Verifying $raid-$nd1 $raid-$nd2 volblocksize=$vbs"

		vol=$TESTPOOL/$TESTVOL
		log_must zfs create -V ${volsize}m -o volblocksize=$vbs "$vol"
		refres=$(zfs get -Hpo value refreservation "$vol")
		log_must test -n "$refres"

		# Single-vdev refreservations recorded by the earlier pass.
		refres1=${sizes["$raid"]["$nd1"]["$vbs"]}
		refres2=${sizes["$raid"]["$nd2"]["$vbs"]}

		# refreservation=auto must assume every vdev is as space
		# inefficient as the worst one, so the multi-vdev pool's value
		# should equal the larger of the two single-vdev values.
		if (( refres1 > refres2 )); then
			log_note "Expecting refres ($refres) to match refres" \
			    "from $raid-$nd1 ($refres1)"
			log_must test "$refres" -eq "$refres1"
		else
			# Fixed: this message credited $raid-$nd1, but the
			# value compared below ($refres2) comes from
			# $raid-$nd2.
			log_note "Expecting refres ($refres) to match refres" \
			    "from $raid-$nd2 ($refres2)"
			log_must test "$refres" -eq "$refres2"
		fi

		log_must zfs destroy "$vol"
	done

	log_must zpool destroy "$TESTPOOL"
}
#
# Verify that multi-vdev pools use the least optimistic size for all the
# permutations within a particular raidz variant.
#
for raid in "${!sizes[@]}"; do
	# ksh likes to create a [0] item for us. Thanks, ksh!
	[[ $raid == "0" ]] && continue

	for nd1 in "${!sizes["$raid"][@]}"; do
		# And with an empty array we get one key, ''. Thanks, ksh!
		[[ $nd1 == "0" || -z "$nd1" ]] && continue

		for nd2 in "${!sizes["$raid"][@]}"; do
			[[ $nd2 == "0" || -z "$nd2" ]] && continue

			check_vdevs "$raid" "$nd1" "$nd2"
		done
	done
done

log_pass "raidz refreservation=auto picks worst raidz vdev"