4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or http://www.opensolaris.org/os/licensing.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
23 # Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
24 # Copyright (c) 2012, 2020, Delphix. All rights reserved.
25 # Copyright (c) 2017, Tim Chase. All rights reserved.
26 # Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
27 # Copyright (c) 2017, Lawrence Livermore National Security LLC.
28 # Copyright (c) 2017, Datto Inc. All rights reserved.
29 # Copyright (c) 2017, Open-E Inc. All rights reserved.
30 # Use is subject to license terms.
33 . ${STF_TOOLS}/include/logapi.shlib
34 . ${STF_SUITE}/include/math.shlib
35 . ${STF_SUITE}/include/blkdev.shlib
37 . ${STF_SUITE}/include/tunables.cfg
40 # Apply constrained path when available. This is required since the
41 # PATH may have been modified by sudo's secure_path behavior.
43 if [ -n "$STF_PATH" ]; then
48 # Generic dot version comparison function
50 # Returns success when version $1 is greater than or equal to $2.
# Generic dotted-version comparison.
#
# $1  version string to test
# $2  version string to compare against
#
# Returns 0 (success) when version $1 >= $2 under `sort -V` ordering.
function compare_version_gte
{
	# The larger version sorts last; if that is $1 then $1 >= $2.
	# Equal versions also satisfy the test.
	[[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]
}
61 # Linux kernel version comparison function
63 # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
65 # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
# Encode a Linux kernel version as one comparable integer:
# version * 10000 + major * 100 + minor.
#
# $1  Linux version string ("4.10", "2.6.32"); when blank, the running
#     kernel's version (from `uname -r`) is used.
#
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
function linux_version
{
	typeset ver="$1"

	# Fall back to the installed kernel; the grep strips any suffix
	# (e.g. "5.4.0-42-generic" -> "5.4.0") so the arithmetic is safe.
	[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")

	# Split on dots with a single builtin read instead of three
	# echo|cut subshell pipelines; "rest" soaks up any extra fields.
	typeset version major minor rest
	IFS='.' read -r version major minor rest <<<"$ver"

	# Missing components (e.g. "4.10") count as zero.
	[[ -z "$version" ]] && version=0
	[[ -z "$major" ]] && major=0
	[[ -z "$minor" ]] && minor=0

	echo $((version * 10000 + major * 100 + minor))
}
84 # Determine if this is a Linux test system
86 # Return 0 if platform Linux, 1 if otherwise
90 if [[ $(uname -o) == "GNU/Linux" ]]; then
97 # Determine if this is an illumos test system
99 # Return 0 if platform illumos, 1 if otherwise
102 if [[ $(uname -o) == "illumos" ]]; then
109 # Determine if this is a FreeBSD test system
111 # Return 0 if platform FreeBSD, 1 if otherwise
115 if [[ $(uname -o) == "FreeBSD" ]]; then
122 # Determine if this is a DilOS test system
124 # Return 0 if platform DilOS, 1 if otherwise
129 [[ -f /etc/os-release ]] && . /etc/os-release
130 if [[ $ID == "dilos" ]]; then
137 # Determine if this is a 32-bit system
139 # Return 0 if platform is 32-bit, 1 if otherwise
143 if [[ $(getconf LONG_BIT) == "32" ]]; then
150 # Determine if kmemleak is enabled
152 # Return 0 if kmemleak is enabled, 1 if otherwise
156 if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
163 # Determine whether a dataset is mounted
166 # $2 filesystem type; optional - defaulted to zfs
168 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
173 [[ -z $fstype ]] && fstype=zfs
174 typeset out dir name ret
178 if [[ "$1" == "/"* ]] ; then
179 for out in $(zfs mount | awk '{print $2}'); do
180 [[ $1 == $out ]] && return 0
183 for out in $(zfs mount | awk '{print $1}'); do
184 [[ $1 == $out ]] && return 0
190 mount -pt $fstype | while read dev dir _t _flags; do
191 [[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
194 out=$(df -F $fstype $1 2>/dev/null)
196 (($ret != 0)) && return $ret
204 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
208 out=$(df -t $fstype $1 2>/dev/null)
212 if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
213 link=$(readlink -f $ZVOL_DEVDIR/$1)
214 [[ -n "$link" ]] && \
215 mount | grep -q "^$link" && \
224 # Return 0 if a dataset is mounted; 1 otherwise
227 # $2 filesystem type; optional - defaulted to zfs
232 (($? == 0)) && return 0
236 # Return 0 if a dataset is unmounted; 1 otherwise
239 # $2 filesystem type; optional - defaulted to zfs
244 (($? == 1)) && return 0
254 echo $1 | sed "s/,/ /g"
257 function default_setup
259 default_setup_noexit "$@"
264 function default_setup_no_mountpoint
266 default_setup_noexit "$1" "$2" "$3" "yes"
272 # Given a list of disks, setup storage pools and datasets.
274 function default_setup_noexit
279 typeset no_mountpoint=$4
280 log_note begin default_setup_noexit
282 if is_global_zone; then
283 if poolexists $TESTPOOL ; then
284 destroy_pool $TESTPOOL
286 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
287 log_must zpool create -f $TESTPOOL $disklist
292 rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
293 mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
295 log_must zfs create $TESTPOOL/$TESTFS
296 if [[ -z $no_mountpoint ]]; then
297 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
300 if [[ -n $container ]]; then
301 rm -rf $TESTDIR1 || \
302 log_unresolved Could not remove $TESTDIR1
303 mkdir -p $TESTDIR1 || \
304 log_unresolved Could not create $TESTDIR1
306 log_must zfs create $TESTPOOL/$TESTCTR
307 log_must zfs set canmount=off $TESTPOOL/$TESTCTR
308 log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
309 if [[ -z $no_mountpoint ]]; then
310 log_must zfs set mountpoint=$TESTDIR1 \
311 $TESTPOOL/$TESTCTR/$TESTFS1
315 if [[ -n $volume ]]; then
316 if is_global_zone ; then
317 log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
320 log_must zfs create $TESTPOOL/$TESTVOL
326 # Given a list of disks, setup a storage pool, file system and
329 function default_container_setup
333 default_setup "$disklist" "true"
337 # Given a list of disks, setup a storage pool,file system
340 function default_volume_setup
344 default_setup "$disklist" "" "true"
348 # Given a list of disks, setup a storage pool,file system,
349 # a container and a volume.
351 function default_container_volume_setup
355 default_setup "$disklist" "true" "true"
359 # Create a snapshot on a filesystem or volume. By default, create a snapshot on
362 # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
363 # $2 snapshot name. Default, $TESTSNAP
365 function create_snapshot
367 typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
368 typeset snap=${2:-$TESTSNAP}
370 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
371 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
373 if snapexists $fs_vol@$snap; then
374 log_fail "$fs_vol@$snap already exists."
376 datasetexists $fs_vol || \
377 log_fail "$fs_vol must exist."
379 log_must zfs snapshot $fs_vol@$snap
383 # Create a clone from a snapshot, default clone name is $TESTCLONE.
385 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
386 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
388 function create_clone # snapshot clone
390 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
391 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
394 log_fail "Snapshot name is undefined."
396 log_fail "Clone name is undefined."
398 log_must zfs clone $snap $clone
402 # Create a bookmark of the given snapshot. By default, create a bookmark on
405 # $1 Existing filesystem or volume name. Default, $TESTFS
406 # $2 Existing snapshot name. Default, $TESTSNAP
407 # $3 bookmark name. Default, $TESTBKMARK
409 function create_bookmark
411 typeset fs_vol=${1:-$TESTFS}
412 typeset snap=${2:-$TESTSNAP}
413 typeset bkmark=${3:-$TESTBKMARK}
415 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
416 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
417 [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
419 if bkmarkexists $fs_vol#$bkmark; then
420 log_fail "$fs_vol#$bkmark already exists."
422 datasetexists $fs_vol || \
423 log_fail "$fs_vol must exist."
424 snapexists $fs_vol@$snap || \
425 log_fail "$fs_vol@$snap must exist."
427 log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
431 # Create a temporary clone result of an interrupted resumable 'zfs receive'
432 # $1 Destination filesystem name. Must not exist, will be created as the result
433 # of this function along with its %recv temporary clone
434 # $2 Source filesystem name. Must not exist, will be created and destroyed
436 function create_recv_clone
439 typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
440 typeset snap="$sendfs@snap1"
441 typeset incr="$sendfs@snap2"
442 typeset mountpoint="$TESTDIR/create_recv_clone"
443 typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
445 [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
447 datasetexists $recvfs && log_fail "Recv filesystem must not exist."
448 datasetexists $sendfs && log_fail "Send filesystem must not exist."
450 log_must zfs create -o mountpoint="$mountpoint" $sendfs
451 log_must zfs snapshot $snap
452 log_must eval "zfs send $snap | zfs recv -u $recvfs"
453 log_must mkfile 1m "$mountpoint/data"
454 log_must zfs snapshot $incr
455 log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
456 iflag=fullblock > $sendfile"
457 log_mustnot eval "zfs recv -su $recvfs < $sendfile"
458 destroy_dataset "$sendfs" "-r"
459 log_must rm -f "$sendfile"
461 if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
462 log_fail "Error creating temporary $recvfs/%recv clone"
466 function default_mirror_setup
468 default_mirror_setup_noexit $1 $2 $3
474 # Given a pair of disks, set up a storage pool and dataset for the mirror
475 # @parameters: $1 the primary side of the mirror
476 # $2 the secondary side of the mirror
477 # @uses: ZPOOL ZFS TESTPOOL TESTFS
478 function default_mirror_setup_noexit
480 readonly func="default_mirror_setup_noexit"
484 [[ -z $primary ]] && \
485 log_fail "$func: No parameters passed"
486 [[ -z $secondary ]] && \
487 log_fail "$func: No secondary partition passed"
488 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
489 log_must zpool create -f $TESTPOOL mirror $@
490 log_must zfs create $TESTPOOL/$TESTFS
491 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
495 # create a number of mirrors.
496 # We create a number($1) of 2 way mirrors using the pairs of disks named
497 # on the command line. These mirrors are *not* mounted
498 # @parameters: $1 the number of mirrors to create
499 # $... the devices to use to create the mirrors on
500 # @uses: ZPOOL ZFS TESTPOOL
501 function setup_mirrors
503 typeset -i nmirrors=$1
506 while ((nmirrors > 0)); do
507 log_must test -n "$1" -a -n "$2"
508 [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
509 log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
511 ((nmirrors = nmirrors - 1))
516 # create a number of raidz pools.
517 # We create a number($1) of 2 raidz pools using the pairs of disks named
518 # on the command line. These pools are *not* mounted
519 # @parameters: $1 the number of pools to create
520 # $... the devices to use to create the pools on
521 # @uses: ZPOOL ZFS TESTPOOL
522 function setup_raidzs
524 typeset -i nraidzs=$1
527 while ((nraidzs > 0)); do
528 log_must test -n "$1" -a -n "$2"
529 [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
530 log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
532 ((nraidzs = nraidzs - 1))
537 # Destroy the configured testpool mirrors.
538 # the mirrors are of the form ${TESTPOOL}{number}
539 # @uses: ZPOOL ZFS TESTPOOL
540 function destroy_mirrors
542 default_cleanup_noexit
548 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
549 # $1 the list of disks
551 function default_raidz_setup
553 typeset disklist="$*"
554 disks=(${disklist[*]})
556 if [[ ${#disks[*]} -lt 2 ]]; then
557 log_fail "A raid-z requires a minimum of two disks."
560 [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
561 log_must zpool create -f $TESTPOOL raidz $disklist
562 log_must zfs create $TESTPOOL/$TESTFS
563 log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
569 # Common function used to cleanup storage pools and datasets.
571 # Invoked at the start of the test suite to ensure the system
572 # is in a known state, and also at the end of each set of
573 # sub-tests to ensure errors from one set of tests doesn't
574 # impact the execution of the next set.
576 function default_cleanup
578 default_cleanup_noexit
584 # Utility function used to list all available pool names.
586 # NOTE: $KEEP is a variable containing pool names, separated by a newline
587 # character, that must be excluded from the returned list.
589 function get_all_pools
591 zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
594 function default_cleanup_noexit
598 # Destroying the pool will also destroy any
599 # filesystems it contains.
601 if is_global_zone; then
602 zfs unmount -a > /dev/null 2>&1
603 ALL_POOLS=$(get_all_pools)
604 # Here, we loop through the pools we're allowed to
605 # destroy, only destroying them if it's safe to do
607 while [ ! -z ${ALL_POOLS} ]
609 for pool in ${ALL_POOLS}
611 if safe_to_destroy_pool $pool ;
616 ALL_POOLS=$(get_all_pools)
622 for fs in $(zfs list -H -o name \
623 | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
624 destroy_dataset "$fs" "-Rf"
627 # Need cleanup here to avoid garbage dir left.
628 for fs in $(zfs list -H -o name); do
629 [[ $fs == /$ZONE_POOL ]] && continue
630 [[ -d $fs ]] && log_must rm -rf $fs/*
634 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
637 for fs in $(zfs list -H -o name); do
638 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
639 log_must zfs set reservation=none $fs
640 log_must zfs set recordsize=128K $fs
641 log_must zfs set mountpoint=/$fs $fs
643 enc=$(get_prop encryption $fs)
644 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
645 [[ "$enc" == "off" ]]; then
646 log_must zfs set checksum=on $fs
648 log_must zfs set compression=off $fs
649 log_must zfs set atime=on $fs
650 log_must zfs set devices=off $fs
651 log_must zfs set exec=on $fs
652 log_must zfs set setuid=on $fs
653 log_must zfs set readonly=off $fs
654 log_must zfs set snapdir=hidden $fs
655 log_must zfs set aclmode=groupmask $fs
656 log_must zfs set aclinherit=secure $fs
661 [[ -d $TESTDIR ]] && \
662 log_must rm -rf $TESTDIR
665 if is_mpath_device $disk1; then
669 rm -f $TEST_BASE_DIR/{err,out}
674 # Common function used to cleanup storage pools, file systems
677 function default_container_cleanup
679 if ! is_global_zone; then
683 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
685 log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
687 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
688 destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
690 [[ -e $TESTDIR1 ]] && \
691 log_must rm -rf $TESTDIR1 > /dev/null 2>&1
697 # Common function used to cleanup snapshot of file system or volume. Default to
698 # delete the file system's snapshot
702 function destroy_snapshot
704 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
706 if ! snapexists $snap; then
707 log_fail "'$snap' does not exist."
711 # The value returned by 'get_prop' may not match the actual mountpoint
712 # while the snapshot is unmounted, so first check that this snapshot is
713 # mounted on the current system before recording its mountpoint.
716 if ismounted $snap; then
717 mtpt=$(get_prop mountpoint $snap)
719 log_fail "get_prop mountpoint $snap failed."
722 destroy_dataset "$snap"
723 [[ $mtpt != "" && -d $mtpt ]] && \
724 log_must rm -rf $mtpt
728 # Common function used to cleanup clone.
# Common function used to cleanup a clone.  Defaults to destroying
# $TESTPOOL/$TESTCLONE.
#
# $1  clone name (optional)
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# Fixed grammar: message previously read "does not existed."
		log_fail "'$clone' does not exist."
	fi

	# As in destroy_snapshot: the mountpoint reported by get_prop is
	# only trustworthy while the dataset is mounted, so check first.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
			log_fail "get_prop mountpoint $clone failed."
	fi

	destroy_dataset "$clone"
	# Remove the now-stale mountpoint directory, if any remains.
	[[ $mtpt != "" && -d $mtpt ]] && \
		log_must rm -rf $mtpt
}
754 # Common function used to cleanup bookmark of file system or volume. Default
755 # to delete the file system's bookmark.
# Common function used to cleanup a bookmark of a file system or volume.
# Defaults to deleting $TESTPOOL/$TESTFS#$TESTBKMARK.
#
# $1  bookmark name (optional)
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# Fixed: previously interpolated the misspelled (undefined)
		# variable $bkmarkp, yielding an empty name in the message,
		# and read "does not existed."
		log_fail "'$bkmark' does not exist."
	fi

	destroy_dataset "$bkmark"
}
770 # Return 0 if a snapshot exists; $? otherwise
776 zfs list -H -t snapshot "$1" > /dev/null 2>&1
781 # Return 0 if a bookmark exists; $? otherwise
785 function bkmarkexists
787 zfs list -H -t bookmark "$1" > /dev/null 2>&1
792 # Return 0 if a hold exists; $? otherwise
799 zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1
804 # Set a property to a certain value on a dataset.
805 # Sets a property of the dataset to the value as passed in.
807 # $1 dataset who's property is being set
809 # $3 value to set property to
811 # 0 if the property could be set.
812 # non-zero otherwise.
815 function dataset_setprop
817 typeset fn=dataset_setprop
820 log_note "$fn: Insufficient parameters (need 3, had $#)"
824 output=$(zfs set $2=$3 $1 2>&1)
827 log_note "Setting property on $1 failed."
828 log_note "property $2=$3"
829 log_note "Return Code: $rv"
830 log_note "Output: $output"
837 # Assign suite defined dataset properties.
838 # This function is used to apply the suite's defined default set of
839 # properties to a dataset.
840 # @parameters: $1 dataset to use
841 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
843 # 0 if the dataset has been altered.
844 # 1 if no pool name was passed in.
845 # 2 if the dataset could not be found.
846 # 3 if the dataset could not have it's properties set.
848 function dataset_set_defaultproperties
852 [[ -z $dataset ]] && return 1
856 for confset in $(zfs list); do
857 if [[ $dataset = $confset ]]; then
862 [[ $found -eq 0 ]] && return 2
863 if [[ -n $COMPRESSION_PROP ]]; then
864 dataset_setprop $dataset compression $COMPRESSION_PROP || \
866 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
868 if [[ -n $CHECKSUM_PROP ]]; then
869 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
871 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
877 # Check a numeric assertion
878 # @parameter: $@ the assertion to check
879 # @output: big loud notice if assertion failed
884 (($@)) || log_fail "$@"
888 # Function to format partition size of a disk
889 # Given a disk cxtxdx reduces all partitions
892 function zero_partitions #<whole_disk_name>
898 gpart destroy -F $diskname
900 DSK=$DEV_DSKDIR/$diskname
901 DSK=$(echo $DSK | sed -e "s|//|/|g")
902 log_must parted $DSK -s -- mklabel gpt
903 blockdev --rereadpt $DSK 2>/dev/null
906 for i in 0 1 3 4 5 6 7
908 log_must set_partition $i "" 0mb $diskname
916 # Given a slice, size and disk, this function
917 # formats the slice to the specified size.
918 # Size should be specified with units as per
919 # the `format` command requirements eg. 100mb 3gb
921 # NOTE: This entire interface is problematic for the Linux parted utility
922 # which requires the end of the partition to be specified. It would be
923 # best to retire this interface and replace it with something more flexible.
924 # At the moment a best effort is made.
926 # arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
927 function set_partition
929 typeset -i slicenum=$1
932 typeset disk=${4#$DEV_DSKDIR/}
933 disk=${disk#$DEV_RDSKDIR/}
937 if [[ -z $size || -z $disk ]]; then
938 log_fail "The size or disk name is unspecified."
940 disk=$DEV_DSKDIR/$disk
941 typeset size_mb=${size%%[mMgG]}
943 size_mb=${size_mb%%[mMgG][bB]}
944 if [[ ${size:1:1} == 'g' ]]; then
945 ((size_mb = size_mb * 1024))
948 # Create GPT partition table when setting slice 0 or
949 # when the device doesn't already contain a GPT label.
950 parted $disk -s -- print 1 >/dev/null
952 if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
953 parted $disk -s -- mklabel gpt
954 if [[ $? -ne 0 ]]; then
955 log_note "Failed to create GPT partition table on $disk"
960 # When no start is given align on the first cylinder.
961 if [[ -z "$start" ]]; then
965 # Determine the cylinder size for the device and using
966 # that calculate the end offset in cylinders.
967 typeset -i cly_size_kb=0
968 cly_size_kb=$(parted -m $disk -s -- \
969 unit cyl print | head -3 | tail -1 | \
970 awk -F '[:k.]' '{print $4}')
971 ((end = (size_mb * 1024 / cly_size_kb) + start))
974 mkpart part$slicenum ${start}cyl ${end}cyl
976 if [[ $ret_val -ne 0 ]]; then
977 log_note "Failed to create partition $slicenum on $disk"
981 blockdev --rereadpt $disk 2>/dev/null
982 block_device_wait $disk
985 if [[ -z $size || -z $disk ]]; then
986 log_fail "The size or disk name is unspecified."
988 disk=$DEV_DSKDIR/$disk
990 if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
991 gpart destroy -F $disk >/dev/null 2>&1
992 gpart create -s GPT $disk
993 if [[ $? -ne 0 ]]; then
994 log_note "Failed to create GPT partition table on $disk"
999 typeset index=$((slicenum + 1))
1001 if [[ -n $start ]]; then
1004 gpart add -t freebsd-zfs $start -s $size -i $index $disk
1005 if [[ $ret_val -ne 0 ]]; then
1006 log_note "Failed to create partition $slicenum on $disk"
1010 block_device_wait $disk
1013 if [[ -z $slicenum || -z $size || -z $disk ]]; then
1014 log_fail "The slice, size or disk name is unspecified."
1017 typeset format_file=/var/tmp/format_in.$$
1019 echo "partition" >$format_file
1020 echo "$slicenum" >> $format_file
1021 echo "" >> $format_file
1022 echo "" >> $format_file
1023 echo "$start" >> $format_file
1024 echo "$size" >> $format_file
1025 echo "label" >> $format_file
1026 echo "" >> $format_file
1027 echo "q" >> $format_file
1028 echo "q" >> $format_file
1030 format -e -s -d $disk -f $format_file
1036 if [[ $ret_val -ne 0 ]]; then
1037 log_note "Unable to format $disk slice $slicenum to $size"
1044 # Delete all partitions on all disks - this is specifically for the use of multipath
1045 # devices which currently can only be used in the test suite as raw/un-partitioned
1046 # devices (ie a zpool cannot be created on a whole mpath device that has partitions)
1048 function delete_partitions
1052 if [[ -z $DISKSARRAY ]]; then
1058 for disk in $DISKSARRAY; do
1059 for (( part = 1; part < MAX_PARTITIONS; part++ )); do
1060 typeset partition=${disk}${SLICE_PREFIX}${part}
1061 parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
1062 if lsblk | grep -qF ${partition}; then
1063 log_fail "Partition ${partition} not deleted"
1065 log_note "Partition ${partition} deleted"
1069 elif is_freebsd; then
1070 for disk in $DISKSARRAY; do
1071 if gpart destroy -F $disk; then
1072 log_note "Partitions for ${disk} deleted"
1074 log_fail "Partitions for ${disk} not deleted"
1081 # Get the end cyl of the given slice
1083 function get_endslice #<disk> <slice>
1087 if [[ -z $disk || -z $slice ]] ; then
1088 log_fail "The disk name or slice number is unspecified."
1093 endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
1094 grep "part${slice}" | \
1095 awk '{print $3}' | \
1097 ((endcyl = (endcyl + 1)))
1100 disk=${disk#/dev/zvol/}
1102 slice=$((slice + 1))
1103 endcyl=$(gpart show $disk | \
1104 awk -v slice=$slice '$3 == slice { print $1 + $2 }')
1107 disk=${disk#/dev/dsk/}
1108 disk=${disk#/dev/rdsk/}
1112 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
1113 grep "sectors\/cylinder" | \
1116 if ((ratio == 0)); then
1120 typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
1121 nawk -v token="$slice" '{if ($1==token) print $6}')
1123 ((endcyl = (endcyl + 1) / ratio))
1132 # Given a size,disk and total slice number, this function formats the
1133 # disk slices from 0 to the total slice number with the same specified
1136 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
1139 typeset slice_size=$1
1140 typeset disk_name=$2
1141 typeset total_slices=$3
1144 zero_partitions $disk_name
1145 while ((i < $total_slices)); do
1152 log_must set_partition $i "$cyl" $slice_size $disk_name
1153 cyl=$(get_endslice $disk_name $i)
1159 # This function continues to write to a filenum number of files into dirnum
1160 # number of directories until either file_write returns an error or the
1161 # maximum number of files per directory have been written.
1164 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
1166 # Return value: 0 on success
1170 # destdir: is the directory where everything is to be created under
1171 # dirnum: the maximum number of subdirectories to use, -1 no limit
1172 # filenum: the maximum number of files per subdirectory
1173 # bytes: number of bytes to write
1174 # num_writes: number of times to write out bytes
1175 # data: the data that will be written
1178 # fill_fs /testdir 20 25 1024 256 0
1180 # Note: bytes * num_writes equals the size of the testfile
1182 function fill_fs # destdir dirnum filenum bytes num_writes data
1184 typeset destdir=${1:-$TESTDIR}
1185 typeset -i dirnum=${2:-50}
1186 typeset -i filenum=${3:-50}
1187 typeset -i bytes=${4:-8192}
1188 typeset -i num_writes=${5:-10240}
1189 typeset data=${6:-0}
1191 mkdir -p $destdir/{1..$dirnum}
1192 for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
1193 file_write -o create -f $f -b $bytes -c $num_writes -d $data \
1200 # Simple function to get the specified property. If unable to
1201 # get the property then exits.
1203 # Note property is in 'parsable' format (-p)
1205 function get_prop # property dataset
1211 prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
1212 if [[ $? -ne 0 ]]; then
1213 log_note "Unable to get $prop property for dataset " \
1223 # Simple function to get the specified property of pool. If unable to
1224 # get the property then exits.
1226 # Note property is in 'parsable' format (-p)
1228 function get_pool_prop # property pool
1234 if poolexists $pool ; then
1235 prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
1237 if [[ $? -ne 0 ]]; then
1238 log_note "Unable to get $prop property for pool " \
1243 log_note "Pool $pool not exists."
1251 # Return 0 if a pool exists; $? otherwise
1259 if [[ -z $pool ]]; then
1260 log_note "No pool name given."
1264 zpool get name "$pool" > /dev/null 2>&1
1268 # Return 0 if all the specified datasets exist; $? otherwise
1271 function datasetexists
1273 if (($# == 0)); then
1274 log_note "No dataset name given."
1278 while (($# > 0)); do
1279 zfs get name $1 > /dev/null 2>&1 || \
1287 # return 0 if none of the specified datasets exists, otherwise return 1.
1290 function datasetnonexists
1292 if (($# == 0)); then
1293 log_note "No dataset name given."
1297 while (($# > 0)); do
1298 zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
# Given a mountpoint, determine if it is currently NFS-shared on FreeBSD.
#
# $1  mountpoint
#
# Returns 0 if shared, non-zero otherwise.
function is_shared_freebsd
{
	typeset fs=$1

	# mountd must be running for anything to be exported; then look
	# for an exact whole-line match of the mountpoint in the export
	# list.  "$fs" is quoted so a mountpoint containing whitespace is
	# passed to grep as a single pattern (previously unquoted).
	pgrep -q mountd && showmount -E | grep -qx "$fs"
}
1313 function is_shared_illumos
1318 for mtpt in `share | awk '{print $2}'` ; do
1319 if [[ $mtpt == $fs ]] ; then
1324 typeset stat=$(svcs -H -o STA nfs/server:default)
1325 if [[ $stat != "ON" ]]; then
1326 log_note "Current nfs/server status: $stat"
1332 function is_shared_linux
1337 for mtpt in `share | awk '{print $1}'` ; do
1338 if [[ $mtpt == $fs ]] ; then
1346 # Given a mountpoint, or a dataset name, determine if it is shared via NFS.
1348 # Returns 0 if shared, 1 otherwise.
1355 if [[ $fs != "/"* ]] ; then
1356 if datasetnonexists "$fs" ; then
1359 mtpt=$(get_prop mountpoint "$fs")
1361 none|legacy|-) return 1
1370 FreeBSD) is_shared_freebsd "$fs" ;;
1371 Linux) is_shared_linux "$fs" ;;
1372 *) is_shared_illumos "$fs" ;;
1376 function is_exported_illumos
1381 for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do
1382 if [[ $mtpt == $fs ]] ; then
1390 function is_exported_freebsd
1395 for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do
1396 if [[ $mtpt == $fs ]] ; then
1404 function is_exported_linux
1409 for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do
1410 if [[ $mtpt == $fs ]] ; then
1419 # Given a mountpoint, or a dataset name, determine if it is exported via
1420 # the os-specific NFS exports file.
1422 # Returns 0 if exported, 1 otherwise.
1424 function is_exported
1429 if [[ $fs != "/"* ]] ; then
1430 if datasetnonexists "$fs" ; then
1433 mtpt=$(get_prop mountpoint "$fs")
1435 none|legacy|-) return 1
1444 FreeBSD) is_exported_freebsd "$fs" ;;
1445 Linux) is_exported_linux "$fs" ;;
1446 *) is_exported_illumos "$fs" ;;
1451 # Given a dataset name determine if it is shared via SMB.
1453 # Returns 0 if shared, 1 otherwise.
1455 function is_shared_smb
1460 if datasetnonexists "$fs" ; then
1463 fs=$(echo $fs | sed 's@/@_@g')
1467 for mtpt in `net usershare list | awk '{print $1}'` ; do
1468 if [[ $mtpt == $fs ]] ; then
1474 log_note "Currently unsupported by the test framework"
1480 # Given a mountpoint, determine if it is not shared via NFS.
1482 # Returns 0 if not shared, 1 otherwise.
1489 if (($? == 0)); then
1497 # Given a dataset determine if it is not shared via SMB.
1499 # Returns 0 if not shared, 1 otherwise.
1501 function not_shared_smb
1506 if (($? == 0)); then
1514 # Helper function to unshare a mountpoint.
1516 function unshare_fs #fs
1520 is_shared $fs || is_shared_smb $fs
1521 if (($? == 0)); then
1522 zfs unshare $fs || log_fail "zfs unshare $fs failed"
1529 # Helper function to share a NFS mountpoint.
1531 function share_nfs #fs
1537 if (($? != 0)); then
1538 log_must share "*:$fs"
1542 if (($? != 0)); then
1543 log_must share -F nfs $fs
1551 # Helper function to unshare a NFS mountpoint.
1553 function unshare_nfs #fs
1559 if (($? == 0)); then
1560 log_must unshare -u "*:$fs"
1564 if (($? == 0)); then
1565 log_must unshare -F nfs $fs
1573 # Helper function to show NFS shares.
1575 function showshares_nfs
1587 # Helper function to show SMB shares.
1589 function showshares_smb
1604 elif is_freebsd; then
1607 log_unsupported "Unknown platform"
1610 if [[ $? -ne 0 ]]; then
1611 log_unsupported "The NFS utilities are not installed"
1616 # Check NFS server status and trigger it online.
1618 function setup_nfs_server
1620 # Cannot share directory in non-global zone.
1622 if ! is_global_zone; then
1623 log_note "Cannot trigger NFS server by sharing in LZ."
1629 # Re-synchronize /var/lib/nfs/etab with /etc/exports and
1630 # /etc/exports.d./* to provide a clean test environment.
1634 log_note "NFS server must be started prior to running ZTS."
1636 elif is_freebsd; then
1637 kill -s HUP $(cat /var/run/mountd.pid)
1639 log_note "NFS server must be started prior to running ZTS."
1643 typeset nfs_fmri="svc:/network/nfs/server:default"
1644 if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
1646 # Only really sharing operation can enable NFS server
1647 # to online permanently.
1649 typeset dummy=/tmp/dummy
1651 if [[ -d $dummy ]]; then
1652 log_must rm -rf $dummy
1655 log_must mkdir $dummy
1656 log_must share $dummy
1659 # Waiting for fmri's status to be the final status.
1660 # Otherwise, in transition, an asterisk (*) is appended for
1661 # instances, unshare will reverse status to 'DIS' again.
1663 # Waiting for 1's at least.
1667 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
1674 log_must unshare $dummy
1675 log_must rm -rf $dummy
1678 log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
1682 # To verify whether calling process is in global zone
1684 # Return 0 if in global zone, 1 in non-global zone
1686 function is_global_zone
1688 if is_linux || is_freebsd; then
1691 typeset cur_zone=$(zonename 2>/dev/null)
1692 if [[ $cur_zone != "global" ]]; then
1700 # Verify whether test is permitted to run from
1701 # global zone, local zone, or both
1703 # $1 zone limit, could be "global", "local", or "both"(no limit)
1705 # Return 0 if permitted, otherwise exit with log_unsupported
1707 function verify_runnable # zone limit
1711 [[ -z $limit ]] && return 0
1713 if is_global_zone ; then
1717 local) log_unsupported "Test is unable to run from "\
1720 *) log_note "Warning: unknown limit $limit - " \
1728 global) log_unsupported "Test is unable to run from "\
1731 *) log_note "Warning: unknown limit $limit - " \
1742 # Return 0 if create successfully or the pool exists; $? otherwise
1743 # Note: In local zones, this function should return 0 silently.
1746 # $2-n - [keyword] devs_list
1748 function create_pool #pool devs_list
1750 typeset pool=${1%%/*}
1754 if [[ -z $pool ]]; then
1755 log_note "Missing pool name."
1759 if poolexists $pool ; then
1763 if is_global_zone ; then
1764 [[ -d /$pool ]] && rm -rf /$pool
1765 log_must zpool create -f $pool $@
1771 # Return 0 if the pool is destroyed successfully or does not exist; $? otherwise
1772 # Note: In local zones, this function should return 0 silently.
1775 # Destroy pool with the given parameters.
1777 function destroy_pool #pool
1779 typeset pool=${1%%/*}
1782 if [[ -z $pool ]]; then
1783 log_note "No pool name given."
1787 if is_global_zone ; then
1788 if poolexists "$pool" ; then
1789 mtpt=$(get_prop mountpoint "$pool")
1791 # At times, syseventd/udev activity can cause attempts
1792 # to destroy a pool to fail with EBUSY. We retry a few
1793 # times allowing failures before requiring the destroy
1795 log_must_busy zpool destroy -f $pool
1798 log_must rm -rf $mtpt
1800 log_note "Pool does not exist. ($pool)"
1808 # Return 0 if created successfully; $? otherwise
1811 # $2-n - dataset options
1813 function create_dataset #dataset dataset_options
1819 if [[ -z $dataset ]]; then
1820 log_note "Missing dataset name."
1824 if datasetexists $dataset ; then
1825 destroy_dataset $dataset
1828 log_must zfs create $@ $dataset
1833 # Return 0 if the dataset is destroyed successfully or does not exist; $? otherwise
1834 # Note: In local zones, this function should return 0 silently.
1837 # $2 - custom arguments for zfs destroy
1838 # Destroy dataset with the given parameters.
1840 function destroy_dataset #dataset #args
1844 typeset args=${2:-""}
1846 if [[ -z $dataset ]]; then
1847 log_note "No dataset name given."
1851 if is_global_zone ; then
1852 if datasetexists "$dataset" ; then
1853 mtpt=$(get_prop mountpoint "$dataset")
1854 log_must_busy zfs destroy $args $dataset
1857 log_must rm -rf $mtpt
1859 log_note "Dataset does not exist. ($dataset)"
1868 # Firstly, create a pool with 5 datasets. Then, create a single zone and
1869 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1870 # and a zvol device to the zone.
1873 # $2 zone root directory prefix
1876 function zfs_zones_setup #zone_name zone_root zone_ip
1878 typeset zone_name=${1:-$(hostname)-z}
1879 typeset zone_root=${2:-"/zone_root"}
1880 typeset zone_ip=${3:-"10.1.1.10"}
1881 typeset prefix_ctr=$ZONE_CTR
1882 typeset pool_name=$ZONE_POOL
1886 # Create pool and 5 container within it
1888 [[ -d /$pool_name ]] && rm -rf /$pool_name
1889 log_must zpool create -f $pool_name $DISKS
1890 while ((i < cntctr)); do
1891 log_must zfs create $pool_name/$prefix_ctr$i
1896 log_must zfs create -V 1g $pool_name/zone_zvol
1900 # If current system support slog, add slog device for pool
1902 if verify_slog_support ; then
1903 typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
1904 log_must mkfile $MINVDEVSIZE $sdevs
1905 log_must zpool add $pool_name log mirror $sdevs
1908 # this isn't supported just yet.
1909 # Create a filesystem. In order to add this to
1910 # the zone, it must have it's mountpoint set to 'legacy'
1911 # log_must zfs create $pool_name/zfs_filesystem
1912 # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
1914 [[ -d $zone_root ]] && \
1915 log_must rm -rf $zone_root/$zone_name
1916 [[ ! -d $zone_root ]] && \
1917 log_must mkdir -p -m 0700 $zone_root/$zone_name
1919 # Create zone configure file and configure the zone
1921 typeset zone_conf=/tmp/zone_conf.$$
1922 echo "create" > $zone_conf
1923 echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
1924 echo "set autoboot=true" >> $zone_conf
1926 while ((i < cntctr)); do
1927 echo "add dataset" >> $zone_conf
1928 echo "set name=$pool_name/$prefix_ctr$i" >> \
1930 echo "end" >> $zone_conf
1934 # add our zvol to the zone
1935 echo "add device" >> $zone_conf
1936 echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1937 echo "end" >> $zone_conf
1939 # add a corresponding zvol rdsk to the zone
1940 echo "add device" >> $zone_conf
1941 echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
1942 echo "end" >> $zone_conf
1944 # once it's supported, we'll add our filesystem to the zone
1945 # echo "add fs" >> $zone_conf
1946 # echo "set type=zfs" >> $zone_conf
1947 # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
1948 # echo "set dir=/export/zfs_filesystem" >> $zone_conf
1949 # echo "end" >> $zone_conf
1951 echo "verify" >> $zone_conf
1952 echo "commit" >> $zone_conf
1953 log_must zonecfg -z $zone_name -f $zone_conf
1954 log_must rm -f $zone_conf
1957 zoneadm -z $zone_name install
1958 if (($? == 0)); then
1959 log_note "SUCCESS: zoneadm -z $zone_name install"
1961 log_fail "FAIL: zoneadm -z $zone_name install"
1964 # Install sysidcfg file
1966 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1967 echo "system_locale=C" > $sysidcfg
1968 echo "terminal=dtterm" >> $sysidcfg
1969 echo "network_interface=primary {" >> $sysidcfg
1970 echo "hostname=$zone_name" >> $sysidcfg
1971 echo "}" >> $sysidcfg
1972 echo "name_service=NONE" >> $sysidcfg
1973 echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
1974 echo "security_policy=NONE" >> $sysidcfg
1975 echo "timezone=US/Eastern" >> $sysidcfg
1978 log_must zoneadm -z $zone_name boot
1982 # Reexport TESTPOOL & TESTPOOL(1-4)
1984 function reexport_pool
1989 while ((i < cntctr)); do
1991 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1992 if ! ismounted $TESTPOOL; then
1993 log_must zfs mount $TESTPOOL
1996 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1997 if eval ! ismounted \$TESTPOOL$i; then
1998 log_must eval zfs mount \$TESTPOOL$i
2006 # Verify a given disk or pool state
2008 # Return 0 if pool/disk matches expected state, 1 otherwise
2010 function check_state # pool disk state{online,offline,degraded}
2013 typeset disk=${2#$DEV_DSKDIR/}
2016 [[ -z $pool ]] || [[ -z $state ]] \
2017 && log_fail "Arguments invalid or missing"
2019 if [[ -z $disk ]]; then
2020 #check pool state only
2021 zpool get -H -o value health $pool \
2022 | grep -i "$state" > /dev/null 2>&1
2024 zpool status -v $pool | grep "$disk" \
2025 | grep -i "$state" > /dev/null 2>&1
2032 # Get the mountpoint of snapshot
2033 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
2036 function snapshot_mountpoint
2038 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
2040 if [[ $dataset != *@* ]]; then
2041 log_fail "Error name of snapshot '$dataset'."
2044 typeset fs=${dataset%@*}
2045 typeset snap=${dataset#*@}
2047 if [[ -z $fs || -z $snap ]]; then
2048 log_fail "Error name of snapshot '$dataset'."
2051 echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
2055 # Given a device and 'ashift' value verify it's correctly set on every label
2057 function verify_ashift # device ashift
2062 zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
2078 # Given a pool and file system, this function will verify the file system
2079 # using the zdb internal tool. Note that the pool is exported and imported
2080 # to ensure it has consistent state.
2082 function verify_filesys # pool filesystem dir
2085 typeset filesys="$2"
2086 typeset zdbout="/tmp/zdbout.$$"
2091 typeset search_path=""
2093 log_note "Calling zdb to verify filesystem '$filesys'"
2094 zfs unmount -a > /dev/null 2>&1
2095 log_must zpool export $pool
2097 if [[ -n $dirs ]] ; then
2098 for dir in $dirs ; do
2099 search_path="$search_path -d $dir"
2103 log_must zpool import $search_path $pool
2105 zdb -cudi $filesys > $zdbout 2>&1
2106 if [[ $? != 0 ]]; then
2107 log_note "Output: zdb -cudi $filesys"
2109 log_fail "zdb detected errors with: '$filesys'"
2112 log_must zfs mount -a
2113 log_must rm -rf $zdbout
2117 # Given a pool issue a scrub and verify that no checksum errors are reported.
2119 function verify_pool
2121 typeset pool=${1:-$TESTPOOL}
2123 log_must zpool scrub $pool
2124 log_must wait_scrubbed $pool
2126 typeset -i cksum=$(zpool status $pool | awk '
2128 isvdev { errors += $NF }
2129 /CKSUM$/ { isvdev = 1 }
2130 END { print errors }
2132 if [[ $cksum != 0 ]]; then
2133 log_must zpool status -v
2134 log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
2139 # Given a pool, list all disks in the pool
2141 function get_disklist # pool
2145 disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
2146 grep -v "\-\-\-\-\-" | \
2147 egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")
2153 # Given a pool, list all disks in the pool with their full
2154 # path (like "/dev/sda" instead of "sda").
2156 function get_disklist_fullpath # pool
2165 # This function kills a given list of processes after a time period. We use
2166 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
2167 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
2168 # would be listed as FAIL, which we don't want : we're happy with stress tests
2169 # running for a certain amount of time, then finishing.
2171 # @param $1 the time in seconds after which we should terminate these processes
2172 # @param $2..$n the processes we wish to terminate.
2174 function stress_timeout
2176 typeset -i TIMEOUT=$1
2180 log_note "Waiting for child processes($cpids). " \
2181 "It could last dozens of minutes, please be patient ..."
2182 log_must sleep $TIMEOUT
2184 log_note "Killing child processes after ${TIMEOUT} stress timeout."
2186 for pid in $cpids; do
2187 ps -p $pid > /dev/null 2>&1
2188 if (($? == 0)); then
2189 log_must kill -USR1 $pid
2195 # Verify a given hotspare disk is inuse or avail
2197 # Return 0 if pool/disk matches expected state, 1 otherwise
2199 function check_hotspare_state # pool disk state{inuse,avail}
2202 typeset disk=${2#$DEV_DSKDIR/}
2205 cur_state=$(get_device_state $pool $disk "spares")
2207 if [[ $state != ${cur_state} ]]; then
2214 # Wait until a hotspare transitions to a given state or times out.
2216 # Return 0 when pool/disk matches expected state, 1 on timeout.
2218 function wait_hotspare_state # pool disk state timeout
2221 typeset disk=${2#*$DEV_DSKDIR/}
2223 typeset timeout=${4:-60}
2226 while [[ $i -lt $timeout ]]; do
2227 if check_hotspare_state $pool $disk $state; then
2239 # Verify a given slog disk is inuse or avail
2241 # Return 0 is pool/disk matches expected state, 1 otherwise
2243 function check_slog_state # pool disk state{online,offline,unavail}
2246 typeset disk=${2#$DEV_DSKDIR/}
2249 cur_state=$(get_device_state $pool $disk "logs")
2251 if [[ $state != ${cur_state} ]]; then
2258 # Verify a given vdev disk is inuse or avail
2260 # Return 0 is pool/disk matches expected state, 1 otherwise
2262 function check_vdev_state # pool disk state{online,offline,unavail}
2265 typeset disk=${2#*$DEV_DSKDIR/}
2268 cur_state=$(get_device_state $pool $disk)
2270 if [[ $state != ${cur_state} ]]; then
2277 # Wait until a vdev transitions to a given state or times out.
2279 # Return 0 when pool/disk matches expected state, 1 on timeout.
2281 function wait_vdev_state # pool disk state timeout
2284 typeset disk=${2#*$DEV_DSKDIR/}
2286 typeset timeout=${4:-60}
2289 while [[ $i -lt $timeout ]]; do
2290 if check_vdev_state $pool $disk $state; then
2302 # Check the output of 'zpool status -v <pool>',
2303 # and to see if the content of <token> contain the <keyword> specified.
2305 # Return 0 if it contains the keyword, 1 otherwise
2307 function check_pool_status # pool token keyword <verbose>
2312 typeset verbose=${4:-false}
2314 scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
2315 ($1==token) {print $0}')
2316 if [[ $verbose == true ]]; then
2319 echo $scan | egrep -i "$keyword" > /dev/null 2>&1
2325 # The following functions are instance of check_pool_status()
2326 # is_pool_resilvering - to check if the pool resilver is in progress
2327 # is_pool_resilvered - to check if the pool resilver is completed
2328 # is_pool_scrubbing - to check if the pool scrub is in progress
2329 # is_pool_scrubbed - to check if the pool scrub is completed
2330 # is_pool_scrub_stopped - to check if the pool scrub is stopped
2331 # is_pool_scrub_paused - to check if the pool scrub has paused
2332 # is_pool_removing - to check if the pool removing is a vdev
2333 # is_pool_removed - to check if the pool remove is completed
2334 # is_pool_discarding - to check if the pool checkpoint is being discarded
2336 function is_pool_resilvering #pool <verbose>
2338 check_pool_status "$1" "scan" \
2339 "resilver[ ()0-9A-Za-z_-]* in progress since" $2
2343 function is_pool_resilvered #pool <verbose>
2345 check_pool_status "$1" "scan" "resilvered " $2
2349 function is_pool_scrubbing #pool <verbose>
2351 check_pool_status "$1" "scan" "scrub in progress since " $2
2355 function is_pool_scrubbed #pool <verbose>
2357 check_pool_status "$1" "scan" "scrub repaired" $2
2361 function is_pool_scrub_stopped #pool <verbose>
2363 check_pool_status "$1" "scan" "scrub canceled" $2
2367 function is_pool_scrub_paused #pool <verbose>
2369 check_pool_status "$1" "scan" "scrub paused since " $2
2373 function is_pool_removing #pool
2375 check_pool_status "$1" "remove" "in progress since "
2379 function is_pool_removed #pool
2381 check_pool_status "$1" "remove" "completed on"
2385 function is_pool_discarding #pool
2387 check_pool_status "$1" "checkpoint" "discarding"
2391 function wait_for_degraded
2394 typeset timeout=${2:-30}
2398 [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
2399 log_note "$pool is not yet degraded."
2401 if ((SECONDS - t0 > $timeout)); then
2402 log_note "$pool not degraded after $timeout seconds."
2411 # Use create_pool()/destroy_pool() to clean up the information
2412 # in the given disks to avoid slice overlapping.
2414 function cleanup_devices #vdevs
2416 typeset pool="foopool$$"
2419 zero_partitions $vdev
2422 poolexists $pool && destroy_pool $pool
2423 create_pool $pool $@
2430 # A function to find and locate free disks on a system or from given
2431 # disks as the parameter. It works by locating disks that are in use
2432 # as swap devices and dump devices, and also disks listed in /etc/vfstab
2434 # $@ given disks to find which are free, default is all disks in
2437 # @return a string containing the list of available disks
2441 # Trust provided list, no attempt is made to locate unused devices.
2442 if is_linux || is_freebsd; then
2448 sfi=/tmp/swaplist.$$
2449 dmpi=/tmp/dumpdev.$$
2450 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
2453 dumpadm > $dmpi 2>/dev/null
2455 # write an awk script that can process the output of format
2456 # to produce a list of disks we know about. Note that we have
2457 # to escape "$2" so that the shell doesn't interpret it while
2458 # we're creating the awk script.
2459 # -------------------
2460 cat > /tmp/find_disks.awk <<EOF
2469 if (searchdisks && \$2 !~ "^$"){
2475 /^AVAILABLE DISK SELECTIONS:/{
2479 #---------------------
2481 chmod 755 /tmp/find_disks.awk
2482 disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
2483 rm /tmp/find_disks.awk
2486 for disk in $disks; do
2488 grep "${disk}[sp]" /etc/mnttab >/dev/null
2489 (($? == 0)) && continue
2491 grep "${disk}[sp]" $sfi >/dev/null
2492 (($? == 0)) && continue
2493 # check for dump device
2494 grep "${disk}[sp]" $dmpi >/dev/null
2495 (($? == 0)) && continue
2496 # check to see if this disk hasn't been explicitly excluded
2497 # by a user-set environment variable
2498 echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
2499 (($? == 0)) && continue
2500 unused_candidates="$unused_candidates $disk"
2505 # now just check to see if those disks do actually exist
2506 # by looking for a device pointing to the first slice in
2507 # each case. limit the number to max_finddisksnum
2509 for disk in $unused_candidates; do
2510 if is_disk_device $DEV_DSKDIR/${disk}s0 && \
2511 [ $count -lt $max_finddisksnum ]; then
2512 unused="$unused $disk"
2513 # do not impose limit if $@ is provided
2514 [[ -z $@ ]] && ((count = count + 1))
2518 # finally, return our disk list
2522 function add_user_freebsd #<group_name> <user_name> <basedir>
2528 # Check to see if the user exists.
2529 if id $user > /dev/null 2>&1; then
2533 # Assign 1000 as the base uid
2537 pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
2541 # The uid is not unique
2545 if [[ $uid == 65000 ]]; then
2546 log_fail "No user id available under 65000 for $user"
2551 touch $basedir/$user/.hushlogin
2557 # Delete the specified user.
2561 function del_user_freebsd #<logname>
2565 if id $user > /dev/null 2>&1; then
2566 log_must pw userdel $user
2573 # Select valid gid and create specified group.
2577 function add_group_freebsd #<group_name>
2581 # See if the group already exists.
2582 if pw groupshow $group >/dev/null 2>&1; then
2586 # Assign 1000 as the base gid
2589 pw groupadd -g $gid -n $group > /dev/null 2>&1
2593 # The gid is not unique
2597 if [[ $gid == 65000 ]]; then
2598 log_fail "No user id available under 65000 for $group"
2604 # Delete the specified group.
2608 function del_group_freebsd #<group_name>
2612 pw groupdel -n $group > /dev/null 2>&1
2615 # Group does not exist, or was deleted successfully.
2617 # Name already exists as a group name
2618 9) log_must pw groupdel $group ;;
2625 function add_user_illumos #<group_name> <user_name> <basedir>
2631 log_must useradd -g $group -d $basedir/$user -m $user
2636 function del_user_illumos #<user_name>
2640 if id $user > /dev/null 2>&1; then
2641 log_must_retry "currently used" 6 userdel $user
2647 function add_group_illumos #<group_name>
2653 groupadd -g $gid $group > /dev/null 2>&1
2657 # The gid is not unique
2664 function del_group_illumos #<group_name>
2668 groupmod -n $grp $grp > /dev/null 2>&1
2671 # Group does not exist.
2673 # Name already exists as a group name
2674 9) log_must groupdel $grp ;;
2679 function add_user_linux #<group_name> <user_name> <basedir>
2685 log_must useradd -g $group -d $basedir/$user -m $user
2687 # Add new users to the same group and the command line utils.
2688 # This allows them to be run out of the original users home
2689 # directory as long as it permissioned to be group readable.
2690 cmd_group=$(stat --format="%G" $(which zfs))
2691 log_must usermod -a -G $cmd_group $user
2696 function del_user_linux #<user_name>
2700 if id $user > /dev/null 2>&1; then
2701 log_must_retry "currently used" 6 userdel $user
2707 function add_group_linux #<group_name>
2711 # Assign 100 as the base gid, a larger value is selected for
2712 # Linux because for many distributions 1000 and under are reserved.
2714 groupadd $group > /dev/null 2>&1
2723 function del_group_linux #<group_name>
2727 getent group $group > /dev/null 2>&1
2730 # Group does not exist.
2732 # Name already exists as a group name
2733 0) log_must groupdel $group ;;
2741 # Add specified user to specified group
2745 # $3 base of the homedir (optional)
2747 function add_user #<group_name> <user_name> <basedir>
2751 typeset basedir=${3:-"/var/tmp"}
2753 if ((${#group} == 0 || ${#user} == 0)); then
2754 log_fail "group name or user name are not defined."
2759 add_user_freebsd "$group" "$user" "$basedir"
2762 add_user_linux "$group" "$user" "$basedir"
2765 add_user_illumos "$group" "$user" "$basedir"
2769 echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.profile
2770 echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.bash_profile
2771 echo "export PATH=\"$STF_PATH\"" >>$basedir/$user/.login
2777 # Delete the specified user.
2780 # $2 base of the homedir (optional)
2782 function del_user #<logname> <basedir>
2785 typeset basedir=${2:-"/var/tmp"}
2787 if ((${#user} == 0)); then
2788 log_fail "login name is necessary."
2793 del_user_freebsd "$user"
2796 del_user_linux "$user"
2799 del_user_illumos "$user"
2803 [[ -d $basedir/$user ]] && rm -fr $basedir/$user
2809 # Select valid gid and create specified group.
2813 function add_group #<group_name>
2817 if ((${#group} == 0)); then
2818 log_fail "group name is necessary."
2823 add_group_freebsd "$group"
2826 add_group_linux "$group"
2829 add_group_illumos "$group"
2837 # Delete the specified group.
2841 function del_group #<group_name>
2845 if ((${#group} == 0)); then
2846 log_fail "group name is necessary."
2851 del_group_freebsd "$group"
2854 del_group_linux "$group"
2857 del_group_illumos "$group"
2865 # This function will return true if it's safe to destroy the pool passed
2866 # as argument 1. It checks for pools based on zvols and files, and also
2867 # files contained in a pool that may have a different mountpoint.
2869 function safe_to_destroy_pool { # $1 the pool name
2872 typeset DONT_DESTROY=""
2874 # We check that by deleting the $1 pool, we're not
2875 # going to pull the rug out from other pools. Do this
2876 # by looking at all other pools, ensuring that they
2877 # aren't built from files or zvols contained in this pool.
2879 for pool in $(zpool list -H -o name)
2883 # this is a list of the top-level directories in each of the
2884 # files that make up the path to the files the pool is based on
2885 FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
2888 # this is a list of the zvols that make up the pool
2889 ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
2892 # also want to determine if it's a file-based pool using an
2893 # alternate mountpoint...
2894 POOL_FILE_DIRS=$(zpool status -v $pool | \
2895 grep / | awk '{print $1}' | \
2896 awk -F/ '{print $2}' | grep -v "dev")
2898 for pooldir in $POOL_FILE_DIRS
2900 OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
2901 grep "${pooldir}$" | awk '{print $1}')
2903 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
2907 if [ ! -z "$ZVOLPOOL" ]
2910 log_note "Pool $pool is built from $ZVOLPOOL on $1"
2913 if [ ! -z "$FILEPOOL" ]
2916 log_note "Pool $pool is built from $FILEPOOL on $1"
2919 if [ ! -z "$ALTMOUNTPOOL" ]
2922 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
2926 if [ -z "${DONT_DESTROY}" ]
2930 log_note "Warning: it is not safe to destroy $1!"
2936 # Verify zfs operation with -p option work as expected
2937 # $1 operation, value could be create, clone or rename
2938 # $2 dataset type, value could be fs or vol
2940 # $4 new dataset name
2942 function verify_opt_p_ops
2947 typeset newdataset=$4
2949 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2950 log_fail "$datatype is not supported."
2953 # check parameters accordingly
2958 if [[ $datatype == "vol" ]]; then
2959 ops="create -V $VOLSIZE"
2963 if [[ -z $newdataset ]]; then
2964 log_fail "newdataset should not be empty" \
2967 log_must datasetexists $dataset
2968 log_must snapexists $dataset
2971 if [[ -z $newdataset ]]; then
2972 log_fail "newdataset should not be empty" \
2975 log_must datasetexists $dataset
2978 log_fail "$ops is not supported."
2982 # make sure the upper level filesystem does not exist
2983 destroy_dataset "${newdataset%/*}" "-rRf"
2985 # without -p option, operation will fail
2986 log_mustnot zfs $ops $dataset $newdataset
2987 log_mustnot datasetexists $newdataset ${newdataset%/*}
2989 # with -p option, operation should succeed
2990 log_must zfs $ops -p $dataset $newdataset
2993 if ! datasetexists $newdataset ; then
2994 log_fail "-p option does not work for $ops"
2997 # when $ops is create or clone, redo the operation still return zero
2998 if [[ $ops != "rename" ]]; then
2999 log_must zfs $ops -p $dataset $newdataset
3006 # Get configuration of pool
3016 if ! poolexists "$pool" ; then
3019 alt_root=$(zpool list -H $pool | awk '{print $NF}')
3020 if [[ $alt_root == "-" ]]; then
3021 value=$(zdb -C $pool | grep "$config:" | awk -F: \
3024 value=$(zdb -e $pool | grep "$config:" | awk -F: \
3027 if [[ -n $value ]] ; then
3037 # Private function. Randomly select one of the items from the arguments.
3042 function _random_get
3049 ((ind = RANDOM % cnt + 1))
3051 typeset ret=$(echo "$str" | cut -f $ind -d ' ')
3056 # Randomly select one of the given items, possibly returning none of them
3058 function random_get_with_non
3063 _random_get "$cnt" "$@"
3067 # Randomly select one of the given items (never returns an empty result)
3071 _random_get "$#" "$@"
3075 # Detect if the current system support slog
3077 function verify_slog_support
3079 typeset dir=$TEST_BASE_DIR/disk.$$
3085 mkfile $MINVDEVSIZE $vdev $sdev
3088 if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
3097 # The function will generate a dataset name with specific length
3098 # $1, the length of the name
3099 # $2, the base string to construct the name
3101 function gen_dataset_name
3104 typeset basestr="$2"
3105 typeset -i baselen=${#basestr}
3109 if ((len % baselen == 0)); then
3110 ((iter = len / baselen))
3112 ((iter = len / baselen + 1))
3114 while ((iter > 0)); do
3115 l_name="${l_name}$basestr"
3124 # Get cksum tuple of dataset
3127 # sample zdb output:
3128 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
3129 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
3130 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
3131 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
3132 function datasetcksum
3136 cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
3137 | awk -F= '{print $7}')
3148 cksum=$(cksum $1 | awk '{print $1}')
3153 # Get the given disk/slice state from the specific field of the pool
3155 function get_device_state #pool disk field("", "spares","logs")
3158 typeset disk=${2#$DEV_DSKDIR/}
3159 typeset field=${3:-$pool}
3161 state=$(zpool status -v "$pool" 2>/dev/null | \
3162 nawk -v device=$disk -v pool=$pool -v field=$field \
3163 'BEGIN {startconfig=0; startfield=0; }
3164 /config:/ {startconfig=1}
3165 (startconfig==1) && ($1==field) {startfield=1; next;}
3166 (startfield==1) && ($1==device) {print $2; exit;}
3168 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
3174 # print the given directory filesystem type
3182 if [[ -z $dir ]]; then
3183 log_fail "Usage: get_fstype <directory>"
3190 df -n $dir | awk '{print $3}'
3194 # Given a disk, label it to VTOC regardless what label was on the disk
3200 if [[ -z $disk ]]; then
3201 log_fail "The disk name is unspecified."
3203 typeset label_file=/var/tmp/labelvtoc.$$
3204 typeset arch=$(uname -p)
3206 if is_linux || is_freebsd; then
3207 log_note "Currently unsupported by the test framework"
3211 if [[ $arch == "i386" ]]; then
3212 echo "label" > $label_file
3213 echo "0" >> $label_file
3214 echo "" >> $label_file
3215 echo "q" >> $label_file
3216 echo "q" >> $label_file
3218 fdisk -B $disk >/dev/null 2>&1
3219 # wait a while for fdisk finishes
3221 elif [[ $arch == "sparc" ]]; then
3222 echo "label" > $label_file
3223 echo "0" >> $label_file
3224 echo "" >> $label_file
3225 echo "" >> $label_file
3226 echo "" >> $label_file
3227 echo "q" >> $label_file
3229 log_fail "unknown arch type"
3232 format -e -s -d $disk -f $label_file
3233 typeset -i ret_val=$?
3236 # wait the format to finish
3239 if ((ret_val != 0)); then
3240 log_fail "unable to label $disk as VTOC."
3247 # check if the system was installed as zfsroot or not
3248 # return: 0 if zfsroot, non-zero if not
3252 df -n / | grep zfs > /dev/null 2>&1
3257 # get the root filesystem name if it's zfsroot system.
3259 # return: root filesystem name
3265 rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3266 elif ! is_linux; then
3267 rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
3270 if [[ -z "$rootfs" ]]; then
3271 log_fail "Can not get rootfs"
3273 zfs list $rootfs > /dev/null 2>&1
3274 if (($? == 0)); then
3277 log_fail "This is not a zfsroot system."
3282 # get the rootfs's pool name
3286 function get_rootpool
3292 rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
3293 elif ! is_linux; then
3294 rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
3297 if [[ -z "$rootfs" ]]; then
3298 log_fail "Can not get rootpool"
3300 zfs list $rootfs > /dev/null 2>&1
3301 if (($? == 0)); then
3304 log_fail "This is not a zfsroot system."
3311 # Get the number of whitespace-separated words in a string
3311 function get_word_count
3317 # To verify if the require numbers of disks is given
3319 function verify_disk_count
3321 typeset -i min=${2:-1}
3323 typeset -i count=$(get_word_count "$1")
3325 if ((count < min)); then
3326 log_untested "A minimum of $min disks is required to run." \
3327 " You specified $count disk(s)"
3331 function ds_is_volume
3333 typeset type=$(get_prop type $1)
3334 [[ $type = "volume" ]] && return 0
3338 function ds_is_filesystem
3340 typeset type=$(get_prop type $1)
3341 [[ $type = "filesystem" ]] && return 0
3345 function ds_is_snapshot
3347 typeset type=$(get_prop type $1)
3348 [[ $type = "snapshot" ]] && return 0
3353 # Check if Trusted Extensions are installed and enabled
3355 function is_te_enabled
3357 svcs -H -o state labeld 2>/dev/null | grep "enabled"
3358 if (($? != 0)); then
3365 # Utility function to determine if a system has multiple cpus.
3370 elif is_freebsd; then
3371 sysctl -n kern.smp.cpus
3373 (($(psrinfo | wc -l) > 1))
3379 function get_cpu_freq
3382 lscpu | awk '/CPU MHz/ { print $3 }'
3383 elif is_freebsd; then
3384 sysctl -n hw.clockrate
3386 psrinfo -v 0 | awk '/processor operates at/ {print $6}'
3390 # Run the given command as the user provided.
3396 log_note "user:$user $@"
3397 eval su - \$user -c \"$@\" > $TEST_BASE_DIR/out 2>$TEST_BASE_DIR/err
3401 # Check if the pool contains the specified vdevs
3406 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
3407 # vdevs is not in the pool, and 2 if pool name is missing.
3409 function vdevs_in_pool
3414 if [[ -z $pool ]]; then
3415 log_note "Missing pool name."
3421 # We could use 'zpool list' to only get the vdevs of the pool but we
3422 # can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
3423 # therefore we use the 'zpool status' output.
3424 typeset tmpfile=$(mktemp)
3425 zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
3427 grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
3428 [[ $? -ne 0 ]] && return 1
3442 max=$((max > i ? max : i))
3454 min=$((min < i ? min : i))
3460 # Write data that can be compressed into a directory
3461 function write_compressible
3465 typeset nfiles=${3:-1}
3466 typeset bs=${4:-1024k}
3467 typeset fname=${5:-file}
3469 [[ -d $dir ]] || log_fail "No directory: $dir"
3471 # Under Linux fio is not currently used since its behavior can
3472 # differ significantly across versions. This includes missing
3473 # command line options and cases where the --buffer_compress_*
3474 # options fail to behave as expected.
3476 typeset file_bytes=$(to_bytes $megs)
3477 typeset bs_bytes=4096
3478 typeset blocks=$(($file_bytes / $bs_bytes))
3480 for (( i = 0; i < $nfiles; i++ )); do
3481 truncate -s $file_bytes $dir/$fname.$i
3483 # Write every third block to get 66% compression.
3484 for (( j = 0; j < $blocks; j += 3 )); do
3485 dd if=/dev/urandom of=$dir/$fname.$i \
3486 seek=$j bs=$bs_bytes count=1 \
3487 conv=notrunc >/dev/null 2>&1
3491 log_must eval "fio \
3496 --buffer_compress_percentage=66 \
3497 --buffer_compress_chunk=4096 \
3504 --filename_format='$fname.\$jobnum' >/dev/null"
3513 [[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
3515 objnum=$(stat -f "%i" $pathname)
3517 objnum=$(stat -c %i $pathname)
3523 # Sync data to the pool
3526 # $2 boolean to force uberblock (and config including zpool cache file) update
3528 function sync_pool #pool <force>
3530 typeset pool=${1:-$TESTPOOL}
3531 typeset force=${2:-false}
3533 if [[ $force == true ]]; then
3534 log_must zpool sync -f $pool
3536 log_must zpool sync $pool
3543 # Wait for zpool 'freeing' property drops to zero.
3547 function wait_freeing #pool
3549 typeset pool=${1:-$TESTPOOL}
3551 [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
3557 # Wait for every device replace operation to complete
3561 function wait_replacing #pool
3563 typeset pool=${1:-$TESTPOOL}
3565 [[ "" == "$(zpool status $pool |
3566 awk '/replacing-[0-9]+/ {print $1}')" ]] && break
#
# Wait for a pool to be scrubbed.
#
# $1 pool name (default $TESTPOOL)
# $2 number of seconds to wait (optional, default 300)
#
# Returns true when pool has been scrubbed, or false if there's a timeout or if
# no scrub was done.
#
function wait_scrubbed
{
	typeset pool=${1:-$TESTPOOL}
	typeset -i timeout=${2:-300}
	typeset -i i=0

	while (( i < timeout )); do
		is_pool_scrubbed $pool && break
		sleep 1
		i=$((i + 1))
	done

	# Final check doubles as the return status.
	is_pool_scrubbed $pool
}
#
# Backup the zed.rc in our test directory so that we can edit it for our test.
#
# Returns: Backup file name.  You will need to pass this to zed_rc_restore().
#
function zed_rc_backup
{
	zedrc_backup="$(mktemp)"
	cp $ZEDLET_DIR/zed.rc $zedrc_backup
	echo $zedrc_backup
}
#
# Restore the zed.rc backed up by zed_rc_backup().
#
# $1 backup file name returned by zed_rc_backup()
#
function zed_rc_restore
{
	mv $1 $ZEDLET_DIR/zed.rc
}
#
# Setup custom environment for the ZED.
#
# $@ Optional list of zedlets to run under zed.
#
function zed_setup
{
	if ! is_linux; then
		log_unsupported "No zed on $(uname)"
	fi

	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	if [[ ! -e $VDEVID_CONF ]]; then
		log_must touch $VDEVID_CONF
	fi

	if [[ -e $VDEVID_CONF_ETC ]]; then
		log_fail "Must not have $VDEVID_CONF_ETC file present on system"
	fi

	EXTRA_ZEDLETS=$@

	# Create a symlink for /etc/zfs/vdev_id.conf file.
	log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC

	# Setup minimal ZED configuration.  Individual test cases should
	# add additional ZEDLETs as needed for their specific test.
	log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
	log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR

	# Scripts must only be user writable.
	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		saved_umask=$(umask)
		log_must umask 0022
		for i in $EXTRA_ZEDLETS ; do
			log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
		done
		log_must umask $saved_umask
	fi

	# Customize the zed.rc file to enable the full debug log.
	log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
	echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
}
#
# Cleanup custom ZED environment.
#
# $@ Optional list of zedlets to remove from our test zed.d directory.
#
function zed_cleanup
{
	if ! is_linux; then
		return
	fi

	EXTRA_ZEDLETS=$@

	log_must rm -f ${ZEDLET_DIR}/zed.rc
	log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
	log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
	log_must rm -f ${ZEDLET_DIR}/all-debug.sh
	log_must rm -f ${ZEDLET_DIR}/state

	if [[ -n "$EXTRA_ZEDLETS" ]] ; then
		for i in $EXTRA_ZEDLETS ; do
			log_must rm -f ${ZEDLET_DIR}/$i
		done
	fi

	log_must rm -f $ZED_LOG
	log_must rm -f $ZED_DEBUG_LOG
	log_must rm -f $VDEVID_CONF_ETC
	log_must rm -f $VDEVID_CONF

	# Best effort: the directory may hold other test artifacts.
	rmdir $ZEDLET_DIR 2>/dev/null
	return 0
}
#
# Check if ZED is currently running; if not start ZED.
#
function zed_start
{
	if ! is_linux; then
		log_unsupported "No zed on $(uname)"
	fi

	# ZEDLET_DIR=/var/tmp/zed
	if [[ ! -d $ZEDLET_DIR ]]; then
		log_must mkdir $ZEDLET_DIR
	fi

	# Verify the ZED is not already running.
	pgrep -x zed > /dev/null
	if (($? == 0)); then
		log_note "ZED already running"
	else
		log_note "Starting ZED"
		# run ZED in the background and redirect foreground logging
		# output to $ZED_LOG.
		log_must truncate -s 0 $ZED_DEBUG_LOG
		log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
		    "-s $ZEDLET_DIR/state 2>$ZED_LOG &"
	fi

	return 0
}
#
# Stop a running ZED using the pid file it wrote at startup.
#
function zed_stop
{
	log_note "Stopping ZED"
	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
		zedpid=$(<${ZEDLET_DIR}/zed.pid)
		log_must kill $zedpid
		# Wait until the daemon has actually exited.
		while ps -p $zedpid > /dev/null; do
			sleep 1
		done
		rm -f ${ZEDLET_DIR}/zed.pid
	fi
	return 0
}
#
# Drain all outstanding zpool events so that a test starts from a clean
# event queue.
#
function zed_events_drain
{
	while [ $(zpool events -H | wc -l) -ne 0 ]; do
		sleep 1
		zpool events -c >/dev/null
	done
}
#
# Set a variable in zed.rc to something, un-commenting it in the process.
#
# $1 variable name
# $2 variable value
#
function zed_rc_set
{
	var=$1
	val=$2

	# Remove any existing (possibly commented-out) line for the variable.
	cmd="'/$var/d'"
	eval sed -i $cmd $ZEDLET_DIR/zed.rc

	# Add it at the end.
	echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
#
# Check if the provided device is actively being used as a swap device.
#
# $1 device path
#
# Returns 0 when the device is in use for swap.
#
function is_swap_inuse
{
	typeset device=$1

	if [[ -z $device ]] ; then
		log_note "No device specified."
		return 1
	fi

	if is_linux; then
		# Resolve symlinks since swapon reports the real device.
		swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
	elif is_freebsd; then
		swapctl -l | grep -w $device
	else
		swap -l | grep -w $device > /dev/null 2>&1
	fi
}
#
# Setup a swap device using the provided device.
#
# $1 device path
#
function swap_setup
{
	typeset swapdev=$1

	if is_linux; then
		log_must eval "mkswap $swapdev > /dev/null 2>&1"
		log_must swapon $swapdev
	elif is_freebsd; then
		log_must swapctl -a $swapdev
	else
		log_must swap -a $swapdev
	fi

	return 0
}
#
# Cleanup a swap device on the provided device.
#
# $1 device path
#
function swap_cleanup
{
	typeset swapdev=$1

	if is_swap_inuse $swapdev; then
		if is_linux; then
			log_must swapoff $swapdev
		elif is_freebsd; then
			log_must swapoff $swapdev
		else
			log_must swap -d $swapdev
		fi
	fi

	return 0
}
#
# Set a global system tunable (64-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable value
#
function set_tunable64
{
	set_tunable_impl "$1" "$2" Z
}
#
# Set a global system tunable (32-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable value
#
function set_tunable32
{
	set_tunable_impl "$1" "$2" W
}
#
# Set a system tunable, dispatching on the running platform.
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable value
# $3 mdb write format character (W = 32-bit, Z = 64-bit; illumos only)
# $4 kernel module owning the tunable (default "zfs")
#
function set_tunable_impl
{
	typeset name="$1"
	typeset value="$2"
	typeset mdb_cmd="$3"
	typeset module="${4:-zfs}"

	# Map the generic NAME to the platform-specific tunable name
	# declared in tunables.cfg.
	eval "typeset tunable=\$$name"
	case "$tunable" in
	UNSUPPORTED)
		log_unsupported "Tunable '$name' is unsupported on $(uname)"
		;;
	"")
		log_fail "Tunable '$name' must be added to tunables.cfg"
		;;
	*)
		;;
	esac

	[[ -z "$value" ]] && return 1
	[[ -z "$mdb_cmd" ]] && return 1

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -w "$zfs_tunables/$tunable" ]] || return 1
		cat >"$zfs_tunables/$tunable" <<<"$value"
		;;
	FreeBSD)
		sysctl vfs.zfs.$tunable=$value
		;;
	SunOS)
		# Fixed: original used the arithmetic operator -eq on
		# strings, which compares both sides as 0 and always
		# succeeds; string equality (==) is what is intended.
		[[ "$module" == "zfs" ]] || return 1
		echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
		;;
	esac
}
#
# Get a global system tunable
#
# $1 tunable name (use a NAME defined in tunables.cfg)
#
function get_tunable
{
	get_tunable_impl "$1"
}
#
# Get a system tunable's value, dispatching on the running platform.
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 kernel module owning the tunable (default "zfs")
#
function get_tunable_impl
{
	typeset name="$1"
	typeset module="${2:-zfs}"

	# Map the generic NAME to the platform-specific tunable name
	# declared in tunables.cfg.
	eval "typeset tunable=\$$name"
	case "$tunable" in
	UNSUPPORTED)
		log_unsupported "Tunable '$name' is unsupported on $(uname)"
		;;
	"")
		log_fail "Tunable '$name' must be added to tunables.cfg"
		;;
	*)
		;;
	esac

	case "$(uname)" in
	Linux)
		typeset zfs_tunables="/sys/module/$module/parameters"
		[[ -f "$zfs_tunables/$tunable" ]] || return 1
		cat $zfs_tunables/$tunable
		;;
	FreeBSD)
		sysctl -n vfs.zfs.$tunable
		;;
	SunOS)
		# Fixed: original used the arithmetic operator -eq on
		# strings, which always succeeds; == is what is intended.
		[[ "$module" == "zfs" ]] || return 1
		;;
	esac

	return 0
}
#
# Prints the current time in seconds since UNIX Epoch.
#
function current_epoch
{
	date +%s
}
#
# Get decimal value of global uint32_t variable using mdb.
#
# $1 kernel variable name
#
function mdb_get_uint32
{
	typeset variable=$1
	typeset value

	value=$(mdb -k -e "$variable/X | ::eval .=U")
	if [[ $? -ne 0 ]]; then
		log_fail "Failed to get value of '$variable' from mdb."
		return 1
	fi

	echo $value
	return 0
}
#
# Set global uint32_t variable to a decimal value using mdb.
#
# $1 kernel variable name
# $2 decimal value to store
#
function mdb_set_uint32
{
	typeset variable=$1
	typeset value=$2

	mdb -kw -e "$variable/W 0t$value" > /dev/null
	if [[ $? -ne 0 ]]; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
#
# Set global scalar integer variable to a hex value using mdb.
# Note: Target should have CTF data loaded.
#
# $1 kernel variable name
# $2 hex value to store
#
function mdb_ctf_set_int
{
	typeset variable=$1
	typeset value=$2

	mdb -kw -e "$variable/z $value" > /dev/null
	if [[ $? -ne 0 ]]; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
#
# Compute MD5 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function md5digest
{
	typeset file=$1

	case "$(uname)" in
	FreeBSD)
		md5 -q $file
		;;
	*)
		md5sum -b $file | awk '{ print $1 }'
		;;
	esac
}
#
# Compute SHA256 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function sha256digest
{
	typeset file=$1

	case "$(uname)" in
	FreeBSD)
		sha256 -q $file
		;;
	*)
		sha256sum -b $file | awk '{ print $1 }'
		;;
	esac
}
#
# Create a new filesystem on the given device, answering any interactive
# confirmation prompt automatically.
#
# $@ arguments passed through to newfs
#
function new_fs #<args>
{
	case "$(uname)" in
	FreeBSD)
		newfs "$@"
		;;
	*)
		# newfs prompts for confirmation on illumos; feed it "y".
		echo y | newfs -v "$@"
		;;
	esac
}
4038 function stat_size #<path>
#
# Run a command as if it was being run in a TTY.
#
# Wraps the command in script(1), whose invocation differs between
# FreeBSD and util-linux.
#
# $@ command and arguments to run
#
function faketty
{
	if is_freebsd; then
		script -q /dev/null env "$@"
	else
		script --return --quiet -c "$*" /dev/null
	fi
}
#
# Produce a random permutation of the integers in a given range (inclusive).
#
# $1 first integer of the range
# $2 last integer of the range
#
function range_shuffle # begin end
{
	typeset begin=$1
	typeset end=$2

	seq ${begin} ${end} | sort -R
}
#
# Cross-platform xattr helpers
#

#
# Print the value of the named user extended attribute of a file.
#
# $1 attribute name
# $2 file path
#
function get_xattr # name path
{
	typeset name=$1
	typeset path=$2

	case "$(uname)" in
	FreeBSD)
		getextattr -qq user "${name}" "${path}"
		;;
	*)
		attr -qg "${name}" "${path}"
		;;
	esac
}
#
# Set a user extended attribute on a file.
#
# $1 attribute name
# $2 attribute value
# $3 file path
#
function set_xattr # name value path
{
	typeset name=$1
	typeset value=$2
	typeset path=$3

	case "$(uname)" in
	FreeBSD)
		setextattr user "${name}" "${value}" "${path}"
		;;
	*)
		attr -qs "${name}" -V "${value}" "${path}"
		;;
	esac
}
#
# Set a user extended attribute on a file, reading the value from stdin.
#
# $1 attribute name
# $2 file path
#
function set_xattr_stdin # name value
{
	typeset name=$1
	typeset path=$2

	case "$(uname)" in
	FreeBSD)
		setextattr -i user "${name}" "${path}"
		;;
	*)
		attr -qs "${name}" "${path}"
		;;
	esac
}
#
# Remove the named user extended attribute from a file.
#
# $1 attribute name
# $2 file path
#
function rm_xattr # name path
{
	typeset name=$1
	typeset path=$2

	case "$(uname)" in
	FreeBSD)
		rmextattr -q user "${name}" "${path}"
		;;
	*)
		attr -qr "${name}" "${path}"
		;;
	esac
}
#
# List the user extended attribute names set on a file.
#
# $1 file path
#
function ls_xattr # path
{
	typeset path=$1

	case "$(uname)" in
	FreeBSD)
		lsextattr -qq user "${path}"
		;;
	*)
		# NOTE(review): the Linux branch was not visible in the
		# fragment; attr -l output is reduced to bare attribute
		# names here — confirm against upstream libtest.shlib.
		attr -l "${path}" | awk '{print $2}' | sed -e 's/["]//g'
		;;
	esac
}
#
# Print the contents of the named ZFS kstat.
#
# $1 kstat name (e.g. "arcstats")
# $2 sysctl flags for the FreeBSD path (default "-n")
#
# Returns 1 when the kstat does not exist.
#
function kstat # stat flags?
{
	typeset stat=$1
	typeset flags=${2-"-n"}

	case "$(uname)" in
	FreeBSD)
		sysctl $flags kstat.zfs.misc.$stat
		;;
	Linux)
		typeset zfs_kstat="/proc/spl/kstat/zfs/$stat"
		[[ -f "$zfs_kstat" ]] || return 1
		cat $zfs_kstat
		;;
	*)
		false
		;;
	esac
}
#
# Print the value of a single ARC statistic.
#
# $1 arcstat name (e.g. "hits")
#
function get_arcstat # stat
{
	typeset stat=$1

	case "$(uname)" in
	FreeBSD)
		kstat arcstats.$stat
		;;
	Linux)
		# The Linux kstat file is "name type value" per line.
		kstat arcstats | awk "/$stat/ { print \$3 }"
		;;
	*)
		false
		;;
	esac
}
4195 # Given an array of pids, wait until all processes
4196 # have completed and check their return status.
4198 function wait_for_children #children
4202 for child in "${children[@]}"
4205 wait ${child} || child_exit=$?
4206 if [ $child_exit -ne 0 ]; then
4207 echo "child ${child} failed with ${child_exit}"