3 # ZFS/ZPOOL configuration test script.
# Locate and source the shared helper library common.sh (which defines the
# ZFS, ZPOOL, ZFS_SH, ZPOOL_CREATE_SH, fail, die, run_test, ... helpers);
# abort if it is not found next to this script.
# NOTE(review): intermediate lines (the 'else'/'fi', usage text, and the
# getopts case body) are elided from this excerpt -- see the gaps in the
# embedded line numbering.
5 basedir="$(dirname $0)"
7 SCRIPT_COMMON=common.sh
8 if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
9 . "${basedir}/${SCRIPT_COMMON}"
11 echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
# Usage text fragment followed by option parsing: -h help, -v verbose,
# -c pre-cleanup (handler bodies elided).
22 ZFS/ZPOOL configuration tests
27 -c Cleanup lo+file devices at start
32 while getopts 'hvc?' OPTION; do
# The whole suite loads/unloads kernel modules and partitions devices, so
# it must run as root.
51 if [ $(id -u) != 0 ]; then
52 die "Must run as root"
# Optional pre-cleanup: remove stale per-run zpool.cache files from /tmp.
55 # Perform pre-cleanup is requested
56 if [ ${CLEANUP} ]; then
58 rm -f /tmp/zpool.cache.*
# Body fragment of the partitioning helper (presumably zconfig_partition;
# the function header is elided).  It partitions ${DEVICE} by feeding a
# here-document of partition definitions to sfdisk, capturing all sfdisk
# output in a temp file; a non-zero sfdisk status reports failure code 4.
# NOTE(review): the heredoc body and 'EOF' terminator follow in elided
# lines -- do not insert anything after the sfdisk line.
65 local TMP_FILE=`mktemp`
67 /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
77 # Validate persistent zpool.cache configuration.
# Test 1: create a pool using a private spa_config_path cache file, capture
# 'zpool status', unload and reload the module stack, and verify the pool
# is restored from the cache identically (cmp of the two status dumps).
# NOTE(review): the enclosing function header and closer are elided from
# this excerpt.
80 local TMP_FILE1=`mktemp`
81 local TMP_FILE2=`mktemp`
82 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
84 echo -n "test 1 - persistent zpool.cache: "
86 # Create a pool save its status for comparison.
87 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
88 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
89 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
91 # Unload/load the module stack and verify the pool persists.
92 ${ZFS_SH} -u || fail 4
93 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
94 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
95 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
97 # Cleanup the test pool and temporary files
98 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
99 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
100 ${ZFS_SH} -u || fail 10
106 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
# Test 2: create a pool, delete its cache file, reload the module stack,
# then rediscover the pool via 'zpool import' device scanning and confirm
# the imported pool's status matches the pre-export status exactly.
# NOTE(review): the enclosing function header and closer are elided from
# this excerpt.
108 local POOL_NAME=test2
109 local TMP_FILE1=`mktemp`
110 local TMP_FILE2=`mktemp`
111 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
113 echo -n "test 2 - scan disks for pools to import: "
115 # Create a pool save its status for comparison.
116 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
117 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
118 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
120 # Unload the module stack, remove the cache file, load the module
121 # stack and attempt to probe the disks to import the pool. As
122 # a cross check verify the old pool state against the imported.
123 ${ZFS_SH} -u || fail 4
124 rm -f ${TMP_CACHE} || fail 5
125 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
126 ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
127 ${ZPOOL} import ${POOL_NAME} || fail 8
128 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
129 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10
131 # Cleanup the test pool and temporary files
132 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
133 rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
134 ${ZFS_SH} -u || fail 13
# Count how many of the expected /dev nodes currently exist and compare
# the count against $1.  $2-$5 are the pool, volume, snapshot and clone
# names; the volume/snapshot/clone are also checked for partition nodes
# (suffixes 1 and 2), giving a maximum count of 10.
# NOTE(review): the initialization lines are elided from this excerpt --
# presumably 'local EXPECT=$1' and 'local COUNT=0' plus a short udev
# settle delay; confirm against the full file.  The mismatch branch and
# closing brace are also elided.
140 zconfig_zvol_device_stat() {
142 local POOL_NAME=/dev/$2
143 local ZVOL_NAME=/dev/$3
144 local SNAP_NAME=/dev/$4
145 local CLONE_NAME=/dev/$5
148 # Briefly delay for udev
# Pool-level device node.
152 stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
154 # Volume and partitions
155 stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
156 stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
157 stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1
159 # Snapshot with partitions
160 stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
161 stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
162 stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1
164 # Clone with partitions
165 stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
166 stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
167 stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1
# Report a mismatch between the expected and observed device count.
169 if [ $EXPECT -ne $COUNT ]; then
176 # zpool import/export device check
177 # (1 volume, 2 partitions, 1 snapshot, 1 clone)
# Test 3: build a pool with a partitioned 100M zvol, a snapshot, and a
# clone, then verify via zconfig_zvol_device_stat that all 10 device
# nodes appear after creation and import, and disappear after export and
# after pool destruction.
# NOTE(review): the enclosing function header and closer are elided from
# this excerpt.
180 local ZVOL_NAME=volume
182 local CLONE_NAME=clone
183 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
184 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
185 local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
186 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
188 echo -n "test 3 - zpool import/export device: "
190 # Create a pool, volume, partition, snapshot, and clone.
191 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
192 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
193 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
194 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
195 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
196 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
198 # Verify the devices were created
199 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
200 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
203 ${ZPOOL} export ${POOL_NAME} || fail 8
205 # verify the devices were removed
206 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
207 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
209 # Import the pool, wait 1 second for udev
210 ${ZPOOL} import ${POOL_NAME} || fail 10
212 # Verify the devices were created
213 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
214 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
216 # Destroy the pool and consequently the devices
217 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
219 # verify the devices were removed
220 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
221 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
223 ${ZFS_SH} -u || fail 14
224 rm -f ${TMP_CACHE} || fail 15
230 # zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
# Test 4: same device-node accounting as test 3, but the devices are made
# to appear/disappear by loading and unloading the module stack instead of
# importing/exporting the pool.
# NOTE(review): unlike the sibling tests, these assignments are not
# declared 'local' -- possibly intentional, possibly the 'local' keywords
# were lost; the function header is elided, so confirm against the full
# file before changing.
236 FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
237 FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
238 FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
239 TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
241 echo -n "test 4 - zpool insmod/rmmod device: "
243 # Create a pool, volume, snapshot, and clone
244 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
245 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
246 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
247 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
248 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
249 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
251 # Verify the devices were created
252 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
253 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
256 ${ZFS_SH} -u || fail 8
258 # Verify the devices were removed
259 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
260 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
262 # Load the modules, wait 1 second for udev
263 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
265 # Verify the devices were created
266 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
267 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
269 # Destroy the pool and consequently the devices
270 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
272 # Verify the devices were removed
273 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
274 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
276 ${ZFS_SH} -u || fail 14
277 rm -f ${TMP_CACHE} || fail 15
283 # ZVOL volume sanity check
# Test 5: create a pool and a 400M zvol, partition it, format partition 1
# with ext3, mount it, copy ${SRC_DIR} in, verify the copy with diff, then
# unmount and tear everything down.  Each step has a unique failure code.
# NOTE(review): the enclosing function header and closer are elided from
# this excerpt.
287 local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
289 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
291 echo -n "test 5 - zvol+ext3 volume: "
293 # Create a pool and volume.
294 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
295 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
296 ${ZFS} create -V 400M ${FULL_NAME} || fail 3
298 # Partition the volume, for a 400M volume there will be
299 # 812 cylinders, 16 heads, and 63 sectors per track.
# Fix: check the partition step.  The failure-code sequence skipped 4
# (fail 3 -> fail 5), indicating this error check was accidentally
# dropped; an unpartitioned volume would otherwise fail confusingly at
# the mkfs step.
300 zconfig_partition /dev/${FULL_NAME} 0 812 || fail 4
302 # Format the partition with ext3.
303 /sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5
305 # Mount the ext3 filesystem and copy some data to it.
306 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
307 mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
308 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8
311 # Verify the copied files match the original files.
312 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9
314 # Remove the files, umount, destroy the volume and pool.
315 rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
316 umount /tmp/${ZVOL_NAME}1 || fail 11
317 rmdir /tmp/${ZVOL_NAME}1 || fail 12
319 ${ZFS} destroy ${FULL_NAME} || fail 13
320 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
321 ${ZFS_SH} -u || fail 15
322 rm -f ${TMP_CACHE} || fail 16
328 # ZVOL snapshot sanity check
# Test 6: create a pool and a 400M zvol, format partition 1 with ext2,
# mount it, snapshot the pristine filesystem and mount the snapshot, copy
# data into the live volume, then verify the data appears in the volume
# but NOT in the snapshot.  Each step has a unique failure code.
# NOTE(review): the enclosing function header and closer are elided from
# this excerpt.
332 local SNAP_NAME=pristine
333 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
334 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
336 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
338 echo -n "test 6 - zvol+ext2 snapshot: "
340 # Create a pool and volume.
341 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
342 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
343 ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3
345 # Partition the volume, for a 400M volume there will be
346 # 812 cylinders, 16 heads, and 63 sectors per track.
# Fix: check the partition step.  The failure-code sequence skipped 4
# (fail 3 -> fail 5), indicating this error check was accidentally
# dropped.
347 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4
349 # Format the partition with ext2 (no journal).
350 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
352 # Mount the ext3 filesystem and copy some data to it.
353 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
354 mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
356 # Snapshot the pristine ext2 filesystem and mount it read-only.
# (The sleep gives udev time to create the snapshot's device nodes.)
357 ${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
358 mkdir -p /tmp/${SNAP_NAME}1 || fail 9
359 mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
361 # Copy to original volume
362 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
365 # Verify the copied files match the original files,
366 # and the copied files do NOT appear in the snapshot.
367 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
368 diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
370 # umount, destroy the snapshot, volume, and pool.
371 umount /tmp/${SNAP_NAME}1 || fail 14
372 rmdir /tmp/${SNAP_NAME}1 || fail 15
373 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16
375 umount /tmp/${ZVOL_NAME}1 || fail 17
376 rmdir /tmp/${ZVOL_NAME}1 || fail 18
377 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19
379 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
380 ${ZFS_SH} -u || fail 21
381 rm -f ${TMP_CACHE} || fail 22
387 # ZVOL clone sanity check
# Test 7: extends test 6 -- after verifying the snapshot is pristine,
# clone it, verify the clone matches the snapshot (and not the modified
# volume), copy the same data into the clone, and verify the clone now
# matches the modified volume.  Each step has a unique failure code.
# NOTE(review): the enclosing function header and closer are elided from
# this excerpt.
391 local SNAP_NAME=pristine
392 local CLONE_NAME=clone
393 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
394 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
395 local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
397 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
399 echo -n "test 7 - zvol+ext2 clone: "
401 # Create a pool and volume.
402 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
403 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
404 ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3
406 # Partition the volume, for a 400M volume there will be
407 # 812 cylinders, 16 heads, and 63 sectors per track.
# Fix: check the partition step.  The failure-code sequence skipped 4
# (fail 3 -> fail 5), indicating this error check was accidentally
# dropped.
408 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4
410 # Format the partition with ext2 (no journal).
411 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
413 # Mount the ext3 filesystem and copy some data to it.
414 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
415 mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
417 # Snapshot the pristine ext2 filesystem and mount it read-only.
# (The sleep gives udev time to create the snapshot's device nodes.)
418 ${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
419 mkdir -p /tmp/${SNAP_NAME}1 || fail 9
420 mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
422 # Copy to original volume.
423 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
426 # Verify the copied files match the original files,
427 # and the copied files do NOT appear in the snapshot.
428 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
429 diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
431 # Clone from the original pristine snapshot
432 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} && sleep 1 || fail 14
433 mkdir -p /tmp/${CLONE_NAME}1 || fail 15
434 mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16
436 # Verify the clone matches the pristine snapshot,
437 # and the files copied to the original volume are NOT there.
438 diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
439 diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18
441 # Copy to cloned volume.
442 cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19
445 # Verify the clone matches the modified original volume.
446 diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20
448 # umount, destroy the snapshot, volume, and pool.
449 umount /tmp/${CLONE_NAME}1 || fail 21
450 rmdir /tmp/${CLONE_NAME}1 || fail 22
451 ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23
453 umount /tmp/${SNAP_NAME}1 || fail 24
454 rmdir /tmp/${SNAP_NAME}1 || fail 25
455 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26
457 umount /tmp/${ZVOL_NAME}1 || fail 27
458 rmdir /tmp/${ZVOL_NAME}1 || fail 28
459 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29
461 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
462 ${ZFS_SH} -u || fail 31
463 rm -f ${TMP_CACHE} || fail 32
469 # Send/Receive sanity check
# Test 8: create two pools; build an ext2-formatted zvol on pool 1, copy
# data in, snapshot it, 'zfs send' the snapshot into pool 2, mount the
# received volume, and diff the two mounted trees.
# NOTE(review): the enclosing function header/closer and a few interior
# lines (including failure codes 9-10) are elided from this excerpt.
# NOTE(review): unlike tests 5-7, the zconfig_partition call below is
# unchecked, but no failure code is free here (4 then 5 are both used) --
# confirm against the full file before adding a check.
471 local POOL_NAME1=tank1
472 local POOL_NAME2=tank2
475 local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
476 local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
477 local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
478 local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
480 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
482 # Create two pools and a volume
483 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
484 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
485 ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
486 ${ZFS} create -V 400M ${FULL_ZVOL_NAME1} || fail 4
488 # Partition the volume, for a 400M volume there will be
489 # 812 cylinders, 16 heads, and 63 sectors per track.
490 zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 812
492 # Format the partition with ext2.
493 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5
495 # Mount the ext3 filesystem and copy some data to it.
496 mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
497 mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
498 cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8
501 # Snapshot the ext3 filesystem so it may be sent.
502 ${ZFS} snapshot ${FULL_SNAP_NAME1} && sleep 1 || fail 11
504 # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
505 (${ZFS} send ${FULL_SNAP_NAME1} | \
506 ${ZFS} receive ${FULL_ZVOL_NAME2}) && sleep 1 || fail 12
508 # Mount the sent ext3 filesystem.
509 mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
510 mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14
512 # Verify the contents of the volumes match
513 diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
514 &>/dev/null || fail 15
516 # Umount, destroy the volume and pool.
# The extra rmdirs remove the /tmp/tank{1,2} parent dirs that 'mkdir -p'
# created for the mount points.
517 umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
518 umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
519 rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
520 rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
521 rmdir /tmp/${POOL_NAME1} || fail 20
522 rmdir /tmp/${POOL_NAME2} || fail 21
524 ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
525 ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
526 ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
527 ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
528 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
529 ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
530 ${ZFS_SH} -u || fail 28
531 rm -f ${TMP_CACHE} || fail 29
535 run_test 8 "zfs send/receive"
537 # zpool event sanity check
# Test 9: after creating a pool and volume, 'zpool events' must report at
# least 5 lines; after 'zpool events -c' the log must be (nearly) empty
# (at most 1 line, e.g. a header).  Note the inverted checks: the test
# uses '[ ... ] && fail N' because the failure is the condition holding.
# NOTE(review): the enclosing function header and closer are elided from
# this excerpt.
541 local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
542 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
543 local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`
545 # Create a pool and volume.
546 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
547 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
548 ${ZFS} create -V 400M ${FULL_NAME} || fail 3
550 # Dump the events, there should be at least 5 lines.
551 ${ZPOOL} events >${TMP_EVENTS} || fail 4
# 'wc -l FILE' prints "COUNT FILE"; cut keeps the count field.
552 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
553 [ $EVENTS -lt 5 ] && fail 5
555 # Clear the events and ensure there are none.
556 ${ZPOOL} events -c >/dev/null || fail 6
557 ${ZPOOL} events >${TMP_EVENTS} || fail 7
558 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
559 [ $EVENTS -gt 1 ] && fail 8
561 ${ZFS} destroy ${FULL_NAME} || fail 9
562 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
563 ${ZFS_SH} -u || fail 11
564 rm -f ${TMP_CACHE} || fail 12
565 rm -f ${TMP_EVENTS} || fail 13
569 run_test 9 "zpool events"