# Common support functions for testing scripts. If a script-config
# file is available it will be sourced so in-tree kernel modules and
# utilities will be used. If no script-config can be found then the
# installed kernel modules and utilities will be used.
# Directory containing this script; used to locate the in-tree
# script-config file relative to the source tree.
basedir="$(dirname $0)"

# Source the generated script-config when running in-tree so freshly
# built kernel modules and utilities are preferred over installed ones.
SCRIPT_CONFIG=zfs-script-config.sh
if [ -f "${basedir}/../${SCRIPT_CONFIG}" ]; then
. "${basedir}/../${SCRIPT_CONFIG}"
# NOTE(review): the matching 'else'/'fi' of this conditional is not
# visible in this excerpt.
# Kernel modules in load order; unloaded in the reverse order.
MODULES=(zlib_deflate spl splat zavl znvpair zunicode zcommon zfs)
29 TESTS_RUN=${TESTS_RUN:-'*'}
30 TESTS_SKIP=${TESTS_SKIP:-}
33 exec_prefix=@exec_prefix@
34 libexecdir=@libexecdir@
35 pkglibexecdir=${libexecdir}/@PACKAGE@
39 ETCDIR=${ETCDIR:-/etc}
40 DEVDIR=${DEVDIR:-/dev/disk/zpool}
41 ZPOOLDIR=${ZPOOLDIR:-${pkglibexecdir}/zpool-config}
42 ZPIOSDIR=${ZPIOSDIR:-${pkglibexecdir}/zpios-test}
43 ZPIOSPROFILEDIR=${ZPIOSPROFILEDIR:-${pkglibexecdir}/zpios-profile}
45 ZDB=${ZDB:-${sbindir}/zdb}
46 ZFS=${ZFS:-${sbindir}/zfs}
47 ZINJECT=${ZINJECT:-${sbindir}/zinject}
48 ZPOOL=${ZPOOL:-${sbindir}/zpool}
49 ZPOOL_ID=${ZPOOL_ID:-${bindir}/zpool_id}
50 ZTEST=${ZTEST:-${sbindir}/ztest}
51 ZPIOS=${ZPIOS:-${sbindir}/zpios}
53 COMMON_SH=${COMMON_SH:-${pkglibexecdir}/common.sh}
54 ZFS_SH=${ZFS_SH:-${pkglibexecdir}/zfs.sh}
55 ZPOOL_CREATE_SH=${ZPOOL_CREATE_SH:-${pkglibexecdir}/zpool-create.sh}
56 ZPIOS_SH=${ZPIOS_SH:-${pkglibexecdir}/zpios.sh}
57 ZPIOS_SURVEY_SH=${ZPIOS_SURVEY_SH:-${pkglibexecdir}/zpios-survey.sh}
59 LDMOD=${LDMOD:-/sbin/modprobe}
60 LSMOD=${LSMOD:-/sbin/lsmod}
61 RMMOD=${RMMOD:-/sbin/rmmod}
62 INFOMOD=${INFOMOD:-/sbin/modinfo}
63 LOSETUP=${LOSETUP:-/sbin/losetup}
64 MDADM=${MDADM:-/sbin/mdadm}
65 PARTED=${PARTED:-/sbin/parted}
66 BLOCKDEV=${BLOCKDEV:-/sbin/blockdev}
67 LSSCSI=${LSSCSI:-/usr/bin/lsscsi}
68 SCSIRESCAN=${SCSIRESCAN:-/usr/bin/scsi-rescan}
69 SYSCTL=${SYSCTL:-/sbin/sysctl}
70 UDEVADM=${UDEVADM:-/sbin/udevadm}
71 AWK=${AWK:-/usr/bin/awk}
73 COLOR_BLACK="\033[0;30m"
74 COLOR_DK_GRAY="\033[1;30m"
75 COLOR_BLUE="\033[0;34m"
76 COLOR_LT_BLUE="\033[1;34m"
77 COLOR_GREEN="\033[0;32m"
78 COLOR_LT_GREEN="\033[1;32m"
79 COLOR_CYAN="\033[0;36m"
80 COLOR_LT_CYAN="\033[1;36m"
81 COLOR_RED="\033[0;31m"
82 COLOR_LT_RED="\033[1;31m"
83 COLOR_PURPLE="\033[0;35m"
84 COLOR_LT_PURPLE="\033[1;35m"
85 COLOR_BROWN="\033[0;33m"
86 COLOR_YELLOW="\033[1;33m"
87 COLOR_LT_GRAY="\033[0;37m"
88 COLOR_WHITE="\033[1;37m"
# Fragments of the message/reporting helpers and check_modules(); the
# enclosing function headers and closing lines are not visible in this
# excerpt.

# Print a program-prefixed diagnostic message to stderr.
echo -e "${PROG}: $1" >&2

# Only emit verbose output when VERBOSE is set (non-empty).
if [ ${VERBOSE} ]; then

# Colorized one-word test result reporters.
echo -e "${COLOR_GREEN}Pass${COLOR_RESET}"
echo -e "${COLOR_RED}Fail${COLOR_RESET} ($1)"
echo -e "${COLOR_BROWN}Skip${COLOR_RESET}"

# Ask the spl kernel module to dump its debug log, then post-process
# the dump named by the most recent dmesg line with the in-tree spl
# utility.
${SYSCTL} -w kernel.spl.debug.dump=1 &>/dev/null
local NAME=`dmesg | tail -n 1 | cut -f5 -d' '`
${SPLBUILD}/cmd/spl ${NAME} >${NAME}.log
echo "Dumped debug log: ${NAME}.log"

# check_modules: verify no stack modules are already loaded and that
# all module binaries can be located before a test run.
local LOADED_MODULES=()
local MISSING_MODULES=()

for MOD in ${MODULES[*]}; do
local NAME=`basename $MOD .ko`

# Module already loaded according to lsmod.
if ${LSMOD} | egrep -q "^${NAME}"; then
LOADED_MODULES=(${NAME} ${LOADED_MODULES[*]})

# NOTE(review): '[ ${INFOMOD} ${MOD} 2>/dev/null ]' tests words rather
# than running modinfo; presumably '! ${INFOMOD} ${MOD} &>/dev/null'
# was intended -- confirm against upstream.
if [ ${INFOMOD} ${MOD} 2>/dev/null ]; then
MISSING_MODULES=("\t${MOD}\n" ${MISSING_MODULES[*]})

# Refuse to run while previously loaded modules remain.
if [ ${#LOADED_MODULES[*]} -gt 0 ]; then
ERROR="Unload these modules with '${PROG} -u':\n"
ERROR="${ERROR}${LOADED_MODULES[*]}"

# Refuse to run when module binaries cannot be located.
if [ ${#MISSING_MODULES[*]} -gt 0 ]; then
ERROR="The following modules can not be found,"
ERROR="${ERROR} ensure your source trees are built:\n"
ERROR="${ERROR}${MISSING_MODULES[*]}"
# Load a single kernel module with the supplied options.
# Arguments: $1 - module name/path, remaining args - module options.
# Returns:   0 on success; 1 on failure with ERROR set.
load_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Loading ${NAME} ($@)"
	fi

	# Fix: the previous 'ERROR="..." return 1' form only placed the
	# assignment in the environment of the 'return' builtin, so ERROR
	# was not reliably set for the caller; group the statements instead.
	${LDMOD} $* || { ERROR="Failed to load $1"; return 1; }

	return 0
}
# Fragment of load_modules(); the function header, the inner options
# loop header, and the closing control-flow lines are not visible in
# this excerpt.
for MOD in ${MODULES[*]}; do
local NAME=`basename ${MOD} .ko`

# Split a "name=value" option and match it against this module.
OPT_NAME=`echo ${OPT} | cut -f1 -d'='`

if [ ${NAME} = "${OPT_NAME}" ]; then
VALUE=`echo ${OPT} | cut -f2- -d'='`

# Load the module, passing along any matching option values.
load_module ${MOD} ${VALUE} || return 1

if [ ${VERBOSE} ]; then
echo "Successfully loaded ZFS module stack"
# Unload a single kernel module.
# Arguments: $1 - module name/path.
# Returns:   0 on success; 1 on failure with ERROR set.
unload_module() {
	local NAME=`basename $1 .ko`

	if [ ${VERBOSE} ]; then
		echo "Unloading ${NAME} ($@)"
	fi

	# Fix: the previous 'ERROR="..." return 1' form only placed the
	# assignment in the environment of the 'return' builtin, so ERROR
	# was not reliably set for the caller; group the statements instead.
	${RMMOD} ${NAME} || { ERROR="Failed to unload ${NAME}"; return 1; }

	return 0
}
# Fragment of unload_modules(); modules are unloaded in the reverse of
# their load order.
local MODULES_REVERSE=( $(echo ${MODULES[@]} |
${AWK} '{for (i=NF;i>=1;i--) printf $i" "} END{print ""}') )

for MOD in ${MODULES_REVERSE[*]}; do
local NAME=`basename ${MOD} .ko`
# Third lsmod column is the module's use count.
local USE_COUNT=`${LSMOD} |
egrep "^${NAME} "| ${AWK} '{print $3}'`

# Only unload modules which are no longer in use.
if [ "${USE_COUNT}" = 0 ] ; then

# Capture the spl debug log before the module is removed.
if [ "${DUMP_LOG}" -a ${NAME} = "spl" ]; then

unload_module ${MOD} || return 1

if [ ${VERBOSE} ]; then
echo "Successfully unloaded ZFS module stack"

# Check that the loopback utilities are installed.
test -f ${LOSETUP} || die "${LOSETUP} utility must be installed"
# Find and return an unused loopback device.
# Outputs: the path of the first unbound /dev/loop* node on stdout.
# Exits via die() when every loop device is in use.
unused_loop_device() {
	local DEVICE

	# Iterate the existing loop nodes directly via a glob instead of
	# parsing 'ls' output (fragile; ShellCheck SC2045).
	for DEVICE in /dev/loop*; do
		# An unmatched glob leaves the literal pattern; skip it.
		[ -e "${DEVICE}" ] || continue

		# 'losetup <dev>' exits non-zero when the device is unbound.
		${LOSETUP} ${DEVICE} &>/dev/null
		if [ $? -ne 0 ]; then
			echo ${DEVICE}
			return
		fi
	done

	die "Error: Unable to find unused loopback device"
}
# This can be slightly dangerous because the loop devices we are
# cleaning up may not be ours.  However, if the devices are currently
# in use we will not be able to remove them, and we only remove
# devices which include 'zpool' in the name.  So any damage we might
# do should be limited to other zfs related testing.
cleanup_loop_devices() {
	local TMP_FILE=`mktemp`

	# Snapshot 'losetup -a' output once; strip parens so awk can use
	# the backing file path as a plain field.
	${LOSETUP} -a | tr -d '()' >${TMP_FILE}
	# Detach every loop device whose backing file mentions 'zpool'.
	# Fix: use the awk variable 'losetup' (passed via -v) instead of a
	# hard-coded 'losetup' command so ${LOSETUP} overrides are honored.
	${AWK} -F":" -v losetup="$LOSETUP" \
	    '/zpool/ { system(losetup" -d "$1) }' ${TMP_FILE}
	# Remove the now-detached backing files (field 3 of losetup -a).
	${AWK} -F" " '/zpool/ { system("rm -f "$3) }' ${TMP_FILE}
	# Fix: remove the temporary snapshot file (previously leaked).
	rm -f ${TMP_FILE}
}
# Destroy the passed loopback devices, this is used when you know
# the names of the loopback devices.
# Arguments: one or more loop device paths.
destroy_loop_devices() {
	local LODEVICES="$*"

	msg "Destroying ${LODEVICES}"
	# Fix: report the devices actually being detached; the previous
	# message interpolated ${FILE} and ${DEVICE}, which are never set
	# in this function and expanded empty.
	${LOSETUP} -d ${LODEVICES} || \
		die "Error $? destroying ${LODEVICES} loopback"

	return 0
}
# Check that the mdadm utilities are installed.
test -f ${MDADM} || die "${MDADM} utility must be installed"
test -f ${PARTED} || die "${PARTED} utility must be installed"

# Probe whether md devices on this system can be partitioned by
# building a faulty-level md device on a loopback file and re-reading
# its partition table.
check_md_partitionable() {
local LOFILE=`mktemp -p /tmp zpool-lo.XXXXXXXX`
local LODEVICE=`unused_loop_device`
local MDDEVICE=`unused_md_device`
# NOTE(review): several lines of this function (RESULT handling, the
# parted partitioning step, and closing 'fi'/'}' lines) are not
# visible in this excerpt.

# Create a sparse 16M backing file.
dd if=/dev/zero of=${LOFILE} bs=1M count=0 seek=16 \
&>/dev/null || return ${RESULT}

msg "Creating ${LODEVICE} using ${LOFILE}"
${LOSETUP} ${LODEVICE} ${LOFILE}
if [ $? -ne 0 ]; then

msg "Creating ${MDDEVICE} using ${LODEVICE}"
${MDADM} --build ${MDDEVICE} --level=faulty \
--raid-devices=1 ${LODEVICE} &>/dev/null
if [ $? -ne 0 ]; then
# Unwind the loop device when md creation fails.
destroy_loop_devices ${LODEVICE}

# Give udev time to create the md device node.
wait_udev ${MDDEVICE} 30

${BLOCKDEV} --rereadpt ${MDDEVICE} 2>/dev/null

# Cleanup in reverse creation order.
destroy_md_devices ${MDDEVICE}
destroy_loop_devices ${LODEVICE}
# Find and return an unused md device.
# Outputs: the path of the first available /dev/md<N> on stdout.
# Exits via die() when no device in md0..md31 is available.
unused_md_device() {
	for (( i=0; i<32; i++ )); do
		MDDEVICE=md${i}

		# Skip active devices in /proc/mdstat.
		grep -q "${MDDEVICE} " /proc/mdstat && continue

		# Device doesn't exist, use it.
		# Fix: the path test was garbled as '$/dev/{MDDEVICE}', which
		# never names an existing file; use /dev/${MDDEVICE} as the
		# surrounding code does.
		if [ ! -e /dev/${MDDEVICE} ]; then
			echo /dev/${MDDEVICE}
			return
		fi

		# Device exists but may not be in use; mdadm --detail exits
		# with 1 for a device that is not an active array.
		if [ -b /dev/${MDDEVICE} ]; then
			${MDADM} --detail /dev/${MDDEVICE} &>/dev/null
			if [ $? -eq 1 ]; then
				echo /dev/${MDDEVICE}
				return
			fi
		fi
	done

	die "Error: Unable to find unused md device"
}
# This can be slightly dangerous because it is possible the md devices
# we are cleaning up may not be ours.  However, if the devices are
# currently in use we will not be able to remove them, and even if
# we remove devices which were not ours we do not zero the super block
# so you should be able to reconstruct them.
cleanup_md_devices() {
# Skip partition nodes (names containing 'p').
destroy_md_devices "`ls /dev/md* 2>/dev/null | grep -v p`"
# NOTE(review): the closing '}' of this function is not visible in
# this excerpt.

# Destroy the passed md devices, this is used when you know
# the names of the md devices.
destroy_md_devices() {
# NOTE(review): the assignment of MDDEVICES from "$*" is not visible
# in this excerpt.
msg "Destroying ${MDDEVICES}"
for MDDEVICE in ${MDDEVICES}; do
${MDADM} --stop ${MDDEVICE} &>/dev/null
${MDADM} --remove ${MDDEVICE} &>/dev/null
# Status of --detail is discarded; presumably invoked to let the
# removal settle -- confirm intent.
${MDADM} --detail ${MDDEVICE} &>/dev/null

# Check that the scsi utilities are installed.
${INFOMOD} scsi_debug &>/dev/null || die "scsi_debug module required"
test -f ${LSSCSI} || die "${LSSCSI} utility must be installed"

# Rescan the scsi bus for scsi_debug devices.  It is preferable to use
# the scsi-rescan tool if it is installed, but if it's not we can fall
# back to removing and re-adding the device manually.  This rescan will
# only affect the first scsi_debug device if scsi-rescan is missing.
local AWK_SCRIPT="/scsi_debug/ { print \$1; exit }"

if [ -f ${SCSIRESCAN} ]; then
${SCSIRESCAN} --forcerescan --remove &>/dev/null
# Manual fallback: delete the first scsi_debug device and trigger a
# host-wide rescan via sysfs.
local SCSIID=`${LSSCSI} | ${AWK} "${AWK_SCRIPT}" | tr -d '[]'`
local SCSIHOST=`echo ${SCSIID} | cut -f1 -d':'`
echo 1 >"/sys/class/scsi_device/${SCSIID}/device/delete"
echo "- - -" >/sys/class/scsi_host/host${SCSIHOST}/scan
# Trigger udev and wait for it to settle.
if [ -f ${UDEVADM} ]; then

# The following udev helper functions assume that the provided
# udev rules file will create a /dev/disk/zpool/<CHANNEL><RANK>
# disk mapping.  In this mapping each CHANNEL is represented by
# the letters a-z, and the RANK is represented by the numbers
# 1-n.  A CHANNEL should identify a group of RANKS which are all
# attached to a single controller, each RANK represents a disk.
# This provides a simple mechanism to locate a specific drive
# given a known hardware configuration.

# When running in tree manually construct symlinks in tree to
# the proper devices.  Symlinks are installed for all entries
# in the config file regardless of if that device actually
# exists.  When installed as a package udev can be relied on for
# this and it will only create links for devices which exist.
if [ ${INTREE} ]; then
# For each non-comment, non-blank config line create symlinks for
# the whole disk and its first/ninth partitions.
${AWK} '!/^#/ && /./ { system( \
"ln -f -s /dev/disk/by-path/"$2" "$1";" \
"ln -f -s /dev/disk/by-path/"$2"-part1 "$1"p1;" \
"ln -f -s /dev/disk/by-path/"$2"-part9 "$1"p9;" \

# Installed case: copy the rules file into /etc/zfs, refusing to
# clobber an existing config.
DST_FILE=`basename ${SRC_PATH} | cut -f1-2 -d'.'`
DST_PATH=/etc/zfs/${DST_FILE}

if [ -e ${DST_PATH} ]; then
die "Error: Config ${DST_PATH} already exists"

cp ${SRC_PATH} ${DST_PATH}

# udev_cleanup: remove the in-tree symlinks created above.
if [ ${INTREE} ]; then
${AWK} '!/^#/ && /./ { system( \
"rm -f "$1" "$1"p1 "$1"p9") }' $SRC_PATH
# Convert a (channel, rank) pair into a <letter><number> disk name,
# e.g. channel 1 rank 2 -> "a2".  Channels 1-26 map to 'a'-'z'.
# Arguments: $1 - channel number, $2 - rank number.
# Outputs:   the disk name on stdout (no trailing newline).
udev_cr2d() {
	# Hex code of the channel letter: 1 -> 0x61 ('a').  Computed with
	# builtin printf arithmetic instead of piping to bc, removing the
	# external bc dependency while producing identical output.
	local CHANNEL=$(printf '%x' $(($1 + 96)))
	local RANK=$2

	printf "\x${CHANNEL}${RANK}"
}
# udev_raid0_setup fragment: build RAID0S with one entry per disk,
# iterating every channel of every rank.
for RANK in `seq 1 ${RANKS}`; do
for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAID0S[${IDX}]="${DEVDIR}/${DISK}"

# Build RAID10S from mirrored pairs of adjacent channels.
udev_raid10_setup() {
# NOTE(review): argument handling and IDX initialization are not
# visible in this excerpt.
for RANK in `seq 1 ${RANKS}`; do
for CHANNEL1 in `seq 1 2 ${CHANNELS}`; do
let CHANNEL2=CHANNEL1+1
DISK1=`udev_cr2d ${CHANNEL1} ${RANK}`
DISK2=`udev_cr2d ${CHANNEL2} ${RANK}`
GROUP="${DEVDIR}/${DISK1} ${DEVDIR}/${DISK2}"
RAID10S[${IDX}]="mirror ${GROUP}"

# udev_raidz_setup fragment: one raidz group per rank, spanning all
# channels of that rank.
for RANK in `seq 1 ${RANKS}`; do

for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAIDZ[${CHANNEL}]="${DEVDIR}/${DISK}"

RAIDZS[${RANK}]="${RAIDZ[*]}"

# Same per-rank layout, accumulated into RAIDZ2S for raidz2 pools.
udev_raidz2_setup() {
for RANK in `seq 1 ${RANKS}`; do

for CHANNEL in `seq 1 ${CHANNELS}`; do
DISK=`udev_cr2d ${CHANNEL} ${RANK}`
RAIDZ2[${CHANNEL}]="${DEVDIR}/${DISK}"

RAIDZ2S[${RANK}]="${RAIDZ2[*]}"

# Test runner fragments: print a "<num> <name>" header before running
# or skipping a test.
printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"

printf "%-4d %-34s " ${TEST_NUM} "${TEST_NAME}"

# Skip the test when its number appears in TESTS_SKIP.
for i in ${TESTS_SKIP[@]}; do
if [[ $i == ${TEST_NUM} ]] ; then
skip_one_test ${TEST_NUM} "${TEST_NAME}"

# Run everything when TESTS_RUN is '*', otherwise only the numbers
# listed in TESTS_RUN; anything else falls through to a skip.
if [ "${TESTS_RUN[0]}" = "*" ]; then
run_one_test ${TEST_NUM} "${TEST_NAME}"
for i in ${TESTS_RUN[@]}; do
if [[ $i == ${TEST_NUM} ]] ; then
run_one_test ${TEST_NUM} "${TEST_NAME}"

skip_one_test ${TEST_NUM} "${TEST_NAME}"

# wait_udev fragment: poll until the device node appears or the DELAY
# timeout expires.
while [ ! -e ${DEVICE} ]; do
if [ ${COUNT} -gt ${DELAY} ]; then