3 # ZFS/ZPOOL configuration test script.
# Locate this script's directory so the shared helper library can be
# sourced regardless of the caller's working directory.
5 basedir="$(dirname $0)"
7 SCRIPT_COMMON=common.sh
# Source common.sh, which is expected to provide the helpers used below
# (die, fail, run_test, wait_udev, ${ZFS_SH}, ${ZPOOL_CREATE_SH}, etc.)
# -- TODO(review): confirm against common.sh, not visible in this chunk.
8 if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
9 . "${basedir}/${SCRIPT_COMMON}"
# Fallback branch: report the missing helper and abort with status 1.
11 echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
22 ZFS/ZPOOL configuration tests
27 -c Cleanup lo+file devices at start
# Parse command-line options; the per-option case handlers are elided
# from this view.  '-c' sets the CLEANUP flag consumed further below.
32 while getopts 'hvct:s:?' OPTION; do
# These tests load/unload kernel modules and create loop/scsi devices,
# so they must run with root privileges.
57 if [ $(id -u) != 0 ]; then
58 die "Must run as root"
61 # Perform pre-cleanup if requested (-c): remove stale temporary
# zpool cache files left behind by earlier aborted runs.
62 if [ ${CLEANUP} ]; then
65 rm -f /tmp/zpool.cache.*
68 # Check if we need to skip the tests that require scsi_debug and lsscsi.
# SCSI_DEBUG is set when modinfo (${INFOMOD}) can find the scsi_debug
# module; HAVE_LSSCSI when the ${LSSCSI} utility exists.  Both flags are
# presumably initialized to 0 earlier -- TODO(review): confirm, the
# initialization lines are not visible in this chunk.
70 ${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
72 test -f ${LSSCSI} && HAVE_LSSCSI=1
73 if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
74 echo "Skipping test 10 which requires the scsi_debug " \
75 "module and the ${LSSCSI} utility"
# Partition a block device with sfdisk; output is captured to a temp
# file.  The sfdisk heredoc input (partition table description) and the
# enclosing function definition are elided from this view.
82 local TMP_FILE=`mktemp`
84 /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
94 # Validate persistent zpool.cache configuration.
# Test 1: create a pool with a private spa_config_path cache file,
# unload/reload the module stack, and verify 'zpool status' output is
# unchanged -- i.e. the pool configuration persisted via the cache.
# Each step uses a unique 'fail N' code to pinpoint the failing step.
97 local TMP_FILE1=`mktemp`
98 local TMP_FILE2=`mktemp`
99 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
101 # Create a pool and save its status for comparison.
102 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
103 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
104 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
106 # Unload/load the module stack and verify the pool persists.
107 ${ZFS_SH} -u || fail 4
108 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
109 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
110 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
112 # Cleanup the test pool and temporary files.
113 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
114 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
115 ${ZFS_SH} -u || fail 10
119 run_test 1 "persistent zpool.cache"
121 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
# Test 2: create a pool, then delete the cache file while the modules
# are unloaded.  On reload the pool must be discoverable by scanning
# ('zpool import' listing) and importable; its status must match the
# pre-export status exactly.
123 local POOL_NAME=test2
124 local TMP_FILE1=`mktemp`
125 local TMP_FILE2=`mktemp`
126 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
128 # Create a pool and save its status for comparison.
129 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
130 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
131 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
133 # Unload the module stack, remove the cache file, load the module
134 # stack and attempt to probe the disks to import the pool. As
135 # a cross check verify the old pool state against the imported.
136 ${ZFS_SH} -u || fail 4
137 rm -f ${TMP_CACHE} || fail 5
138 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
139 ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
140 ${ZPOOL} import ${POOL_NAME} || fail 8
141 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
142 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10
144 # Cleanup the test pool and temporary files.
145 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
146 rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
147 ${ZFS_SH} -u || fail 13
151 run_test 2 "scan disks for pools to import"
# Count how many of the expected /dev nodes for a pool's zvol, its two
# partitions, a snapshot, and a clone currently exist, and compare the
# count against the expected value.
# Arguments: $1 - expected device count (EXPECT; assignment elided from
#                 this view -- TODO(review): confirm), $2 - pool name,
#            $3 - volume name, $4 - snapshot name, $5 - clone name.
# NOTE(review): despite the POOL_NAME/ZVOL_NAME/... names, these locals
# hold /dev/ paths, not dataset names.
153 zconfig_zvol_device_stat() {
155 local POOL_NAME=/dev/$2
156 local ZVOL_NAME=/dev/$3
157 local SNAP_NAME=/dev/$4
158 local CLONE_NAME=/dev/$5
161 # Briefly delay for udev to settle before stat'ing device nodes.
# Pool device directory (1 node).
165 stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
167 # Volume and partitions (3 nodes).
168 stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
169 stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
170 stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1
172 # Snapshot with partitions (3 nodes).
173 stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
174 stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
175 stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1
177 # Clone with partitions (3 nodes).
178 stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
179 stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
180 stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1
# Fail when the observed count differs from the expected count.
182 if [ $EXPECT -ne $COUNT ]; then
189 # zpool import/export device check
190 # (1 volume, 2 partitions, 1 snapshot, 1 clone)
# Test 3: verify /dev nodes for a zvol, its snapshot, and its clone are
# created on pool import and removed on export/destroy.  The expected
# count of 10 matches the 10 stat calls in zconfig_zvol_device_stat
# (1 pool dir + 3 nodes each for volume, snapshot, and clone).
193 local ZVOL_NAME=volume
195 local CLONE_NAME=clone
196 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
197 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
198 local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
199 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
201 # Create a pool, volume, partition, snapshot, and clone.
202 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
203 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
204 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
205 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
206 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
207 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
209 # Verify the devices were created.
210 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
211 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
# Export the pool; all of its device nodes should be torn down.
214 ${ZPOOL} export ${POOL_NAME} || fail 8
216 # Verify the devices were removed.
217 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
218 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
220 # Import the pool, wait 1 second for udev.
221 ${ZPOOL} import ${POOL_NAME} || fail 10
223 # Verify the devices were created.
224 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
225 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
227 # Destroy the pool and consequently the devices.
228 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
230 # Verify the devices were removed.
231 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
232 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
234 ${ZFS_SH} -u || fail 14
235 rm -f ${TMP_CACHE} || fail 15
239 run_test 3 "zpool import/export device"
241 # zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
# Test 4: same device-node lifecycle check as test 3, but driven by
# module unload/load instead of pool export/import.
# NOTE(review): these assignments lack 'local' (unlike test 3's) so the
# variables leak into the global scope -- likely unintentional, but the
# enclosing function header is not visible here to confirm.
247 FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
248 FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
249 FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
250 TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
252 # Create a pool, volume, snapshot, and clone.
253 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
254 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
255 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
256 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
257 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
258 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
260 # Verify the devices were created.
261 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
262 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
# Unload the module stack; all device nodes should disappear.
265 ${ZFS_SH} -u || fail 8
267 # Verify the devices were removed.
268 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
269 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
271 # Load the modules, wait 1 second for udev.
272 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
274 # Verify the devices were created.
275 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
276 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
278 # Destroy the pool and consequently the devices.
279 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
281 # Verify the devices were removed.
282 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
283 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
285 ${ZFS_SH} -u || fail 14
286 rm -f ${TMP_CACHE} || fail 15
290 run_test 4 "zpool insmod/rmmod device"
292 # ZVOL volume sanity check
# Test 5: create a zvol, partition it, format the first partition with
# ext3, mount it, copy a source tree onto it, and verify the copy with
# diff before tearing everything down.
296 local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
298 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
300 # Create a pool and volume.
301 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
302 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
303 ${ZFS} create -V 400M ${FULL_NAME} || fail 3
305 # Partition the volume, for a 400M volume there will be
306 # 812 cylinders, 16 heads, and 63 sectors per track.
# NOTE(review): unlike every other step, this call has no '|| fail 4'
# (the fail numbering jumps from 3 to 5) -- failure here goes unnoticed
# until mkfs; likely an oversight worth fixing.
307 zconfig_partition /dev/${FULL_NAME} 0 812
309 # Format the partition with ext3.
310 /sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5
312 # Mount the ext3 filesystem and copy some data to it.
313 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
314 mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
315 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8
318 # Verify the copied files match the original files.
319 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9
321 # Remove the files, umount, destroy the volume and pool.
322 rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
323 umount /tmp/${ZVOL_NAME}1 || fail 11
324 rmdir /tmp/${ZVOL_NAME}1 || fail 12
326 ${ZFS} destroy ${FULL_NAME} || fail 13
327 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
328 ${ZFS_SH} -u || fail 15
329 rm -f ${TMP_CACHE} || fail 16
333 run_test 5 "zvol+ext3 volume"
335 # ZVOL snapshot sanity check
# Test 6: snapshot a pristine ext2 filesystem on a zvol, then write new
# data to the live volume and verify the snapshot remains unchanged
# (the new files appear on the volume but NOT in the snapshot mount).
339 local SNAP_NAME=pristine
340 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
341 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
343 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
345 # Create a pool and volume.
346 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
347 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
348 ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3
350 # Partition the volume, for a 400M volume there will be
351 # 812 cylinders, 16 heads, and 63 sectors per track.
# NOTE(review): missing '|| fail 4' (numbering jumps 3 -> 5), so a
# partitioning failure is not reported at this step.
352 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812
354 # Format the partition with ext2 (no journal).
355 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
357 # Mount the ext2 filesystem and copy some data to it.
358 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
359 mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
361 # Snapshot the pristine ext2 filesystem and mount it read-only.
# NOTE(review): 'fail 8' is reused for both the snapshot and the
# wait_udev step, making the two failures indistinguishable.
362 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
363 wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
364 mkdir -p /tmp/${SNAP_NAME}1 || fail 9
365 mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
367 # Copy to original volume.
368 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
371 # Verify the copied files match the original files,
372 # and the copied files do NOT appear in the snapshot.
373 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
374 diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
376 # umount, destroy the snapshot, volume, and pool.
377 umount /tmp/${SNAP_NAME}1 || fail 14
378 rmdir /tmp/${SNAP_NAME}1 || fail 15
379 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16
381 umount /tmp/${ZVOL_NAME}1 || fail 17
382 rmdir /tmp/${ZVOL_NAME}1 || fail 18
383 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19
385 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
386 ${ZFS_SH} -u || fail 21
387 rm -f ${TMP_CACHE} || fail 22
391 run_test 6 "zvol+ext2 snapshot"
393 # ZVOL clone sanity check
# Test 7: extends test 6 -- clone the pristine snapshot and verify the
# clone initially matches the snapshot (not the modified volume), is
# independently writable, and after a copy matches the modified volume.
397 local SNAP_NAME=pristine
398 local CLONE_NAME=clone
399 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
400 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
401 local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
403 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
405 # Create a pool and volume.
406 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
407 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
408 ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3
410 # Partition the volume, for a 300M volume there will be
411 # 609 cylinders, 16 heads, and 63 sectors per track.
# NOTE(review): missing '|| fail 4' (numbering jumps 3 -> 5).
412 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 609
414 # Format the partition with ext2 (no journal).
415 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
417 # Mount the ext2 filesystem and copy some data to it.
418 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
419 mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
421 # Snapshot the pristine ext2 filesystem and mount it read-only.
# NOTE(review): 'fail 8' (and 'fail 14' below) are each reused for two
# consecutive steps, making those failures indistinguishable.
422 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
423 wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
424 mkdir -p /tmp/${SNAP_NAME}1 || fail 9
425 mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
427 # Copy to original volume.
428 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
431 # Verify the copied files match the original files,
432 # and the copied files do NOT appear in the snapshot.
433 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
434 diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
436 # Clone from the original pristine snapshot.
437 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
438 wait_udev /dev/${FULL_CLONE_NAME}1 30 || fail 14
439 mkdir -p /tmp/${CLONE_NAME}1 || fail 15
440 mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16
442 # Verify the clone matches the pristine snapshot,
443 # and the files copied to the original volume are NOT there.
444 diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
445 diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18
447 # Copy to cloned volume.
448 cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19
451 # Verify the clone matches the modified original volume.
452 diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20
454 # umount, destroy the clone, snapshot, volume, and pool.
455 umount /tmp/${CLONE_NAME}1 || fail 21
456 rmdir /tmp/${CLONE_NAME}1 || fail 22
457 ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23
459 umount /tmp/${SNAP_NAME}1 || fail 24
460 rmdir /tmp/${SNAP_NAME}1 || fail 25
461 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26
463 umount /tmp/${ZVOL_NAME}1 || fail 27
464 rmdir /tmp/${ZVOL_NAME}1 || fail 28
465 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29
467 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
468 ${ZFS_SH} -u || fail 31
469 rm -f ${TMP_CACHE} || fail 32
473 run_test 7 "zvol+ext2 clone"
475 # Send/Receive sanity check
# Test 8: create two pools; snapshot an ext2 filesystem on a zvol in
# pool 1, 'zfs send' it into pool 2, mount the received copy, and
# verify the two filesystem trees are identical.
477 local POOL_NAME1=tank1
478 local POOL_NAME2=tank2
481 local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
482 local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
483 local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
484 local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
486 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
488 # Create two pools and a volume.
489 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
490 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
491 ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
492 ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4
494 # Partition the volume, for a 300M volume there will be
495 # 609 cylinders, 16 heads, and 63 sectors per track.
# NOTE(review): missing '|| fail' on this step, consistent with the
# same omission in tests 5-7.
496 zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 609
498 # Format the partition with ext2.
499 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5
501 # Mount the ext2 filesystem and copy some data to it.
502 mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
503 mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
504 cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8
507 # Snapshot the ext2 filesystem so it may be sent.
# NOTE(review): wait_udev is given the snapshot node without the '1'
# partition suffix here, unlike tests 6/7 which wait on the partition
# device -- confirm which node udev actually creates first.
508 ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
509 wait_udev /dev/${FULL_SNAP_NAME1} 30 || fail 11
511 # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2.
512 (${ZFS} send ${FULL_SNAP_NAME1} | \
513 ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
514 wait_udev /dev/${FULL_ZVOL_NAME2}1 30 || fail 12
516 # Mount the sent ext2 filesystem.
517 mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
518 mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14
520 # Verify the contents of the volumes match.
521 diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
522 &>/dev/null || fail 15
524 # Umount, destroy the volumes, snapshots (the received snapshot
# FULL_SNAP_NAME2 exists on pool 2 as a side effect of 'zfs receive'),
# and both pools.
525 umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
526 umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
527 rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
528 rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
529 rmdir /tmp/${POOL_NAME1} || fail 20
530 rmdir /tmp/${POOL_NAME2} || fail 21
532 ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
533 ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
534 ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
535 ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
536 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
537 ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
538 ${ZFS_SH} -u || fail 28
539 rm -f ${TMP_CACHE} || fail 29
543 run_test 8 "zfs send/receive"
545 # zpool event sanity check
# Test 9: after creating a pool and volume, 'zpool events' must report
# at least 5 lines of events; after 'zpool events -c' the log must be
# (nearly) empty -- at most 1 line (the output header) may remain.
549 local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
550 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
551 local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`
553 # Create a pool and volume.
554 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
555 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
556 ${ZFS} create -V 300M ${FULL_NAME} || fail 3
558 # Dump the events, there should be at least 5 lines.
559 ${ZPOOL} events >${TMP_EVENTS} || fail 4
560 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
561 [ $EVENTS -lt 5 ] && fail 5
563 # Clear the events and ensure there are none.
564 ${ZPOOL} events -c >/dev/null || fail 6
565 ${ZPOOL} events >${TMP_EVENTS} || fail 7
566 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
567 [ $EVENTS -gt 1 ] && fail 8
# Cleanup: destroy the volume, pool, and temporary files.
569 ${ZFS} destroy ${FULL_NAME} || fail 9
570 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
571 ${ZFS_SH} -u || fail 11
572 rm -f ${TMP_CACHE} || fail 12
573 rm -f ${TMP_EVENTS} || fail 13
577 run_test 9 "zpool events"
# Helper: add a vdev of a given TYPE (e.g. cache/log) to POOL_NAME and
# verify via a 'zpool status' diff that exactly one device appeared,
# ONLINE, under the expected parent vdev.  The function header and the
# POOL_NAME/TYPE/DEVICE assignments (presumably from $1..$3) are elided
# from this view -- TODO(review): confirm.  Returns 1 on any mismatch.
583 local TMP_FILE1=`mktemp`
584 local TMP_FILE2=`mktemp`
585 local TMP_FILE3=`mktemp`
587 BASE_DEVICE=`basename ${DEVICE}`
# Capture status before and after the add; the diff isolates the change.
589 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
590 ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
591 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
592 diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}
# Exactly one added line (plus diff's 2 header lines) is expected.
594 [ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1
# The parent vdev label is the last field of the second-to-last line.
596 PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
# For most types the parent matches TYPE literally; 'zpool status'
# reports log vdevs under a "logs" heading instead.
599 [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
602 [ "${PARENT_VDEV}" = "logs" ] || return 1
# The added line must show the device ONLINE.
606 if ! tail -1 ${TMP_FILE3} |
607 egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
610 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}
615 # zpool add and remove sanity check
# Test 10: exercise 'zpool add'/'zpool remove' for cache and log vdevs
# backed by a scsi_debug device, addressed both by full path and by
# shorthand (basename) path.  Skipped when scsi_debug/lsscsi are not
# available (see the SCSI_DEBUG/HAVE_LSSCSI probe near the top).
618 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
619 local TMP_FILE1=`mktemp`
620 local TMP_FILE2=`mktemp`
622 if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
# Unload any pre-existing scsi_debug module, then load it with a small
# 128MB backing device for the test.
627 test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
628 (${RMMOD} scsi_debug || exit 1)
630 /sbin/modprobe scsi_debug dev_size_mb=128 ||
631 die "Error $? creating scsi_debug device"
# Locate the scsi_debug block device node via lsscsi (6th field).
634 SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
635 BASE_SDDEVICE=`basename $SDDEVICE`
# Create the pool and record its baseline status for later comparison.
638 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
639 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
640 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
642 # Add and remove a cache vdev by full path.
643 zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
644 ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
645 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
646 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
648 # Add and remove a cache vdev by shorthand path.
649 zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
650 ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
651 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
652 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
654 # Add and remove a log vdev.
655 zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
656 ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
657 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
658 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15
# Cleanup: destroy the pool, unload the modules and scsi_debug.
660 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
661 ${ZFS_SH} -u || fail 17
662 ${RMMOD} scsi_debug || fail 18
664 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19
668 run_test 10 "zpool add/remove vdev"