3 # ZFS/ZPOOL configuration test script.
5 basedir="$(dirname $0)"
7 SCRIPT_COMMON=common.sh
8 if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
9 . "${basedir}/${SCRIPT_COMMON}"
11 echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
22 ZFS/ZPOOL configuration tests
27 -c Cleanup lo+file devices at start
32 while getopts 'hvct:s:?' OPTION; do
57 if [ $(id -u) != 0 ]; then
58 die "Must run as root"
# Perform pre-cleanup if requested
62 if [ ${CLEANUP} ]; then
66 rm -f /tmp/zpool.cache.*
# Check if we need to skip the tests that require scsi_debug and lsscsi.
# NOTE(review): SCSI_DEBUG and HAVE_LSSCSI are presumably initialized to 0
# in elided lines above -- confirm; [ -eq 0 ] errors if they are unset.
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
echo "Skipping test 10 which requires the scsi_debug " \
"module and the ${LSSCSI} utility"
83 local TMP_FILE=`mktemp`
85 /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
95 # Validate persistent zpool.cache configuration.
98 local TMP_FILE1=`mktemp`
99 local TMP_FILE2=`mktemp`
100 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
102 # Create a pool save its status for comparison.
103 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
104 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
105 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
107 # Unload/load the module stack and verify the pool persists.
108 ${ZFS_SH} -u || fail 4
109 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
110 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
111 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
113 # Cleanup the test pool and temporary files
114 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
115 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
116 ${ZFS_SH} -u || fail 10
120 run_test 1 "persistent zpool.cache"
122 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
124 local POOL_NAME=test2
125 local TMP_FILE1=`mktemp`
126 local TMP_FILE2=`mktemp`
127 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
129 # Create a pool save its status for comparison.
130 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
131 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
132 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
134 # Unload the module stack, remove the cache file, load the module
135 # stack and attempt to probe the disks to import the pool. As
136 # a cross check verify the old pool state against the imported.
137 ${ZFS_SH} -u || fail 4
138 rm -f ${TMP_CACHE} || fail 5
139 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
140 ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
141 ${ZPOOL} import ${POOL_NAME} || fail 8
142 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
143 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10
145 # Cleanup the test pool and temporary files
146 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
147 rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
148 ${ZFS_SH} -u || fail 13
152 run_test 2 "scan disks for pools to import"
# Count the zvol-related device nodes present under /dev and compare the
# total against the expected number: pool dir, volume + 2 partitions,
# snapshot + 2 partitions, clone + 2 partitions (10 when all exist).
# NOTE(review): fragment -- EXPECT is presumably $1 and COUNT is
# initialized to 0 in elided lines; confirm against the full script.
zconfig_zvol_device_stat() {
local POOL_NAME=/dev/$2
local ZVOL_NAME=/dev/$3
local SNAP_NAME=/dev/$4
local CLONE_NAME=/dev/$5

# Briefly delay for udev
# Pool device directory
stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

# Volume and partitions
stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1

# Snapshot with partitions
stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1

# Clone with partitions
stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1

# Report a mismatch between observed and expected device counts.
if [ $EXPECT -ne $COUNT ]; then
190 # zpool import/export device check
191 # (1 volume, 2 partitions, 1 snapshot, 1 clone)
194 local ZVOL_NAME=volume
196 local CLONE_NAME=clone
197 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
198 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
199 local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
200 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
202 # Create a pool, volume, partition, snapshot, and clone.
203 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
204 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
205 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
206 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
207 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
208 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
210 # Verify the devices were created
211 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
212 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
215 ${ZPOOL} export ${POOL_NAME} || fail 8
217 # verify the devices were removed
218 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
219 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
221 # Import the pool, wait 1 second for udev
222 ${ZPOOL} import ${POOL_NAME} || fail 10
224 # Verify the devices were created
225 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
226 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
228 # Destroy the pool and consequently the devices
229 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
231 # verify the devices were removed
232 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
233 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
235 ${ZFS_SH} -u || fail 14
236 rm -f ${TMP_CACHE} || fail 15
240 run_test 3 "zpool import/export device"
242 # zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
248 FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
249 FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
250 FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
251 TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
253 # Create a pool, volume, snapshot, and clone
254 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
255 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
256 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
257 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
258 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
259 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
261 # Verify the devices were created
262 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
263 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
266 ${ZFS_SH} -u || fail 8
268 # Verify the devices were removed
269 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
270 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
272 # Load the modules, wait 1 second for udev
273 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
275 # Verify the devices were created
276 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
277 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
279 # Destroy the pool and consequently the devices
280 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
282 # Verify the devices were removed
283 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
284 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
286 ${ZFS_SH} -u || fail 14
287 rm -f ${TMP_CACHE} || fail 15
291 run_test 4 "zpool insmod/rmmod device"
# ZVOL volume sanity check: create a zvol, partition it, format the
# partition ext3, copy data in, and verify it round-trips.
# NOTE(review): fragment -- function header and POOL_NAME/ZVOL_NAME
# assignments are elided.
local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

# Create a pool and volume.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
${ZFS} create -V 400M ${FULL_NAME} || fail 3

# Partition the volume, for a 400M volume there will be
# 812 cylinders, 16 heads, and 63 sectors per track.
# Check the result explicitly (fail 4 was skipped in the numbering),
# consistent with the zconfig_partition calls in tests 3 and 4.
zconfig_partition /dev/${FULL_NAME} 0 812 || fail 4

# Format the partition with ext3.
/sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5

# Mount the ext3 filesystem and copy some data to it.
mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8

# Verify the copied files match the original files.
diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9

# Remove the files, umount, destroy the volume and pool.
rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
umount /tmp/${ZVOL_NAME}1 || fail 11
rmdir /tmp/${ZVOL_NAME}1 || fail 12

${ZFS} destroy ${FULL_NAME} || fail 13
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
${ZFS_SH} -u || fail 15
rm -f ${TMP_CACHE} || fail 16

run_test 5 "zvol+ext3 volume"
# ZVOL snapshot sanity check: snapshot a pristine ext2 filesystem, then
# confirm writes to the live volume never appear in the snapshot.
# NOTE(review): fragment -- function header and POOL_NAME/ZVOL_NAME
# assignments are elided.
local SNAP_NAME=pristine
local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

# Create a pool and volume.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3

# Partition the volume, for a 400M volume there will be
# 812 cylinders, 16 heads, and 63 sectors per track.
# Check the result explicitly (fail 4 was skipped in the numbering),
# consistent with the zconfig_partition calls in tests 3 and 4.
zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4

# Format the partition with ext2 (no journal).
/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

# Mount the ext2 filesystem.
mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

# Snapshot the pristine ext2 filesystem and mount it read-only.
${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
mkdir -p /tmp/${SNAP_NAME}1 || fail 9
mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

# Copy to original volume
cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11

# Verify the copied files match the original files,
# and the copied files do NOT appear in the snapshot.
diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

# umount, destroy the snapshot, volume, and pool.
umount /tmp/${SNAP_NAME}1 || fail 14
rmdir /tmp/${SNAP_NAME}1 || fail 15
${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

umount /tmp/${ZVOL_NAME}1 || fail 17
rmdir /tmp/${ZVOL_NAME}1 || fail 18
${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
${ZFS_SH} -u || fail 21
rm -f ${TMP_CACHE} || fail 22

run_test 6 "zvol+ext2 snapshot"
# ZVOL clone sanity check: clone a pristine snapshot, verify the clone
# initially matches the snapshot (not the modified volume), then verify
# the clone is independently writable.
# NOTE(review): fragment -- function header and POOL_NAME/ZVOL_NAME
# assignments are elided.
local SNAP_NAME=pristine
local CLONE_NAME=clone
local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

# Create a pool and volume.
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3

# Partition the volume, for a 300M volume there will be
# 609 cylinders, 16 heads, and 63 sectors per track.
# Check the result explicitly (fail 4 was skipped in the numbering),
# consistent with the zconfig_partition calls in tests 3 and 4.
zconfig_partition /dev/${FULL_ZVOL_NAME} 0 609 || fail 4

# Format the partition with ext2 (no journal).
/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

# Mount the ext2 filesystem.
mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

# Snapshot the pristine ext2 filesystem and mount it read-only.
${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
mkdir -p /tmp/${SNAP_NAME}1 || fail 9
mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

# Copy to original volume.
cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11

# Verify the copied files match the original files,
# and the copied files do NOT appear in the snapshot.
diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

# Clone from the original pristine snapshot
${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
wait_udev /dev/${FULL_CLONE_NAME}1 30 || fail 14
mkdir -p /tmp/${CLONE_NAME}1 || fail 15
mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16

# Verify the clone matches the pristine snapshot,
# and the files copied to the original volume are NOT there.
diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18

# Copy to cloned volume.
cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19

# Verify the clone matches the modified original volume.
diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20

# umount, destroy the clone, snapshot, volume, and pool.
umount /tmp/${CLONE_NAME}1 || fail 21
rmdir /tmp/${CLONE_NAME}1 || fail 22
${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

umount /tmp/${SNAP_NAME}1 || fail 24
rmdir /tmp/${SNAP_NAME}1 || fail 25
${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

umount /tmp/${ZVOL_NAME}1 || fail 27
rmdir /tmp/${ZVOL_NAME}1 || fail 28
${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
${ZFS_SH} -u || fail 31
rm -f ${TMP_CACHE} || fail 32

run_test 7 "zvol+ext2 clone"
# Send/Receive sanity check: send a snapshot of a populated zvol from
# one pool to another and verify the received contents match.
# NOTE(review): fragment -- function header and ZVOL_NAME/SNAP_NAME
# assignments are elided.
local POOL_NAME1=tank1
local POOL_NAME2=tank2
local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

# Create two pools and a volume
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4

# Partition the volume, for a 300M volume there will be
# 609 cylinders, 16 heads, and 63 sectors per track.
# Check the result explicitly; reusing the previous fail number
# follows this file's wait_udev convention.
zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 609 || fail 4

# Format the partition with ext2.
/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5

# Mount the ext2 filesystem and copy some data to it.
mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8

# Snapshot the ext2 filesystem so it may be sent.
${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
wait_udev /dev/${FULL_SNAP_NAME1} 30 || fail 11

# Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
(${ZFS} send ${FULL_SNAP_NAME1} | \
	${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
wait_udev /dev/${FULL_ZVOL_NAME2}1 30 || fail 12

# Mount the received ext2 filesystem.
mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14

# Verify the contents of the volumes match
diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
	&>/dev/null || fail 15

# Umount, destroy the volumes, snapshots, and pools.
umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
rmdir /tmp/${POOL_NAME1} || fail 20
rmdir /tmp/${POOL_NAME2} || fail 21

${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
${ZFS_SH} -u || fail 28
rm -f ${TMP_CACHE} || fail 29

run_test 8 "zfs send/receive"
546 # zpool event sanity check
550 local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
551 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
552 local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`
554 # Create a pool and volume.
555 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
556 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
557 ${ZFS} create -V 300M ${FULL_NAME} || fail 3
559 # Dump the events, there should be at least 5 lines.
560 ${ZPOOL} events >${TMP_EVENTS} || fail 4
561 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
562 [ $EVENTS -lt 5 ] && fail 5
564 # Clear the events and ensure there are none.
565 ${ZPOOL} events -c >/dev/null || fail 6
566 ${ZPOOL} events >${TMP_EVENTS} || fail 7
567 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
568 [ $EVENTS -gt 1 ] && fail 8
570 ${ZFS} destroy ${FULL_NAME} || fail 9
571 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
572 ${ZFS_SH} -u || fail 11
573 rm -f ${TMP_CACHE} || fail 12
574 rm -f ${TMP_EVENTS} || fail 13
578 run_test 9 "zpool events"
# Fragment of zconfig_add_vdev: add TYPE DEVICE to POOL_NAME and verify
# via a 'zpool status' diff that the expected parent vdev and an ONLINE
# device line appeared. Returns 1 on any mismatch.
# NOTE(review): function header elided -- POOL_NAME/TYPE/DEVICE are
# presumably positional parameters; the if/elif selecting between the
# TYPE and "logs" parent-vdev checks below is also elided.
local TMP_FILE1=`mktemp`
local TMP_FILE2=`mktemp`
local TMP_FILE3=`mktemp`

BASE_DEVICE=`basename ${DEVICE}`

# Capture status before/after the add and diff the two.
${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

# The diff must be exactly 3 lines.
[ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1

# Second-to-last diff line names the parent vdev that was added.
PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`

[ "${PARENT_VDEV}" = "${TYPE}" ] || return 1

# Log devices are grouped under a "logs" parent vdev.
[ "${PARENT_VDEV}" = "logs" ] || return 1

# Last diff line must show the new device in the ONLINE state.
if ! tail -1 ${TMP_FILE3} |
egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then

rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}
616 # zpool add and remove sanity check
619 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
620 local TMP_FILE1=`mktemp`
621 local TMP_FILE2=`mktemp`
623 if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
628 test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
629 (${RMMOD} scsi_debug || exit 1)
631 /sbin/modprobe scsi_debug dev_size_mb=128 ||
632 die "Error $? creating scsi_debug device"
635 SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
636 BASE_SDDEVICE=`basename $SDDEVICE`
639 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
640 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
641 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
643 # Add and remove a cache vdev by full path
644 zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
645 ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
646 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
647 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
649 # Add and remove a cache vdev by shorthand path
650 zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
651 ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
652 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
653 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
655 # Add and remove a log vdev
656 zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
657 ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
658 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
659 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15
661 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
662 ${ZFS_SH} -u || fail 17
663 ${RMMOD} scsi_debug || fail 18
665 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19
669 run_test 10 "zpool add/remove vdev"