3 # ZFS/ZPOOL configuration test script.
# Locate the shared helper library relative to this script and source it;
# common.sh provides fail/run_test/wait_udev and the ${ZFS*}/${ZPOOL*} paths
# used throughout — TODO confirm, helper file not visible here.
5 basedir="$(dirname $0)"
7 SCRIPT_COMMON=common.sh
8 if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
9 . "${basedir}/${SCRIPT_COMMON}"
# Failure path: the helper library is mandatory, abort immediately.
11 echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
# Usage text fragment followed by option parsing and privilege/cleanup setup.
22 ZFS/ZPOOL configuration tests
27 -c Cleanup lo+file devices at start
# Parse command-line flags; -c sets CLEANUP (handled below).
32 while getopts 'hvct:s:?' OPTION; do
# Loading kernel modules and creating loopback pools requires root.
57 if [ $(id -u) != 0 ]; then
58 die "Must run as root"
61 # Perform pre-cleanup if requested
# NOTE(review): ${CLEANUP} is unquoted; the test relies on it being empty
# or a single word — confirm before tightening the quoting.
62 if [ ${CLEANUP} ]; then
# Remove stale cache files left behind by earlier aborted runs.
65 rm -f /tmp/zpool.cache.*
# Partition helper (body truncated in this view): drives sfdisk with a
# here-document, capturing its output in a temp file; presumably the
# caller passes DEVICE/START/END as $1..$3 — TODO confirm against the
# lines not shown here.
72 local TMP_FILE=`mktemp`
74 /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
84 # Validate persistent zpool.cache configuration.
87 local TMP_FILE1=`mktemp`
88 local TMP_FILE2=`mktemp`
89 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
91 # Create a pool and save its status for comparison.
92 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
93 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
94 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
96 # Unload/load the module stack and verify the pool persists.
97 ${ZFS_SH} -u || fail 4
98 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
99 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
# The pool state after reload must be byte-identical to the state before;
# a difference means the zpool.cache file did not persist the config.
100 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
102 # Cleanup the test pool and temporary files
103 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
104 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
105 ${ZFS_SH} -u || fail 10
109 run_test 1 "persistent zpool.cache"
111 # Validate ZFS disk scanning and import w/out zpool.cache configuration.
113 local POOL_NAME=test2
114 local TMP_FILE1=`mktemp`
115 local TMP_FILE2=`mktemp`
116 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
118 # Create a pool and save its status for comparison.
119 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
120 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
121 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3
123 # Unload the module stack, remove the cache file, load the module
124 # stack and attempt to probe the disks to import the pool. As
125 # a cross check verify the old pool state against the imported.
126 ${ZFS_SH} -u || fail 4
# Deleting the cache before reload is deliberate: with no cache file the
# pool can only reappear via a device scan + explicit import.
127 rm -f ${TMP_CACHE} || fail 5
128 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
129 ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
130 ${ZPOOL} import ${POOL_NAME} || fail 8
131 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
132 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10
134 # Cleanup the test pool and temporary files
135 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
136 rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
137 ${ZFS_SH} -u || fail 13
141 run_test 2 "scan disks for pools to import"
# Count how many of the zvol-related device nodes currently exist in /dev
# and compare the tally against the caller's expectation.
# $1 - expected node count (EXPECT — presumably assigned from $1 on a line
#      not shown here; TODO confirm), $2 - pool, $3 - volume,
# $4 - snapshot, $5 - clone; names are passed without the /dev/ prefix.
143 zconfig_zvol_device_stat() {
145 local POOL_NAME=/dev/$2
146 local ZVOL_NAME=/dev/$3
147 local SNAP_NAME=/dev/$4
148 local CLONE_NAME=/dev/$5
151 # Briefly delay for udev
# Pool device directory under /dev
155 stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1
157 # Volume and partitions
158 stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
159 stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
160 stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1
162 # Snapshot with partitions
163 stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
164 stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
165 stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1
167 # Clone with partitions
168 stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
169 stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
170 stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1
# A mismatch means udev created too few (or failed to remove) device nodes.
172 if [ $EXPECT -ne $COUNT ]; then
179 # zpool import/export device check
180 # (1 volume, 2 partitions, 1 snapshot, 1 clone)
183 local ZVOL_NAME=volume
185 local CLONE_NAME=clone
186 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
187 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
188 local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
189 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
191 # Create a pool, volume, partition, snapshot, and clone.
192 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
193 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
194 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
195 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
196 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
197 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
# 10 nodes expected: pool dir + (volume, snapshot, clone) x (whole + 2 parts).
199 # Verify the devices were created
200 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
201 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
204 ${ZPOOL} export ${POOL_NAME} || fail 8
# Export must tear down every device node (expected count 0).
206 # verify the devices were removed
207 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
208 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
210 # Import the pool, wait 1 second for udev
211 ${ZPOOL} import ${POOL_NAME} || fail 10
213 # Verify the devices were created
214 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
215 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
217 # Destroy the pool and consequently the devices
218 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
220 # verify the devices were removed
221 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
222 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
224 ${ZFS_SH} -u || fail 14
225 rm -f ${TMP_CACHE} || fail 15
229 run_test 3 "zpool import/export device"
231 # zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
# NOTE(review): unlike the sibling tests these assignments are not declared
# 'local', so they leak into the calling scope — confirm this is harmless
# before tightening (the function header is outside this view).
237 FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
238 FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
239 FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
240 TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
242 # Create a pool, volume, snapshot, and clone
243 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
244 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
245 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
246 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
247 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
248 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6
250 # Verify the devices were created
251 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
252 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7
# Unloading the module stack must remove every device node.
255 ${ZFS_SH} -u || fail 8
257 # Verify the devices were removed
258 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
259 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9
261 # Load the modules, wait 1 second for udev
# Reload with the same cache path: the pool (and its devices) must reappear.
262 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
264 # Verify the devices were created
265 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
266 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11
268 # Destroy the pool and consequently the devices
269 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12
271 # Verify the devices were removed
272 zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
273 ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13
275 ${ZFS_SH} -u || fail 14
276 rm -f ${TMP_CACHE} || fail 15
280 run_test 4 "zpool insmod/rmmod device"
282 # ZVOL volume sanity check
286 local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
288 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
290 # Create a pool and volume.
291 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
292 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
293 ${ZFS} create -V 400M ${FULL_NAME} || fail 3
295 # Partition the volume, for a 400M volume there will be
296 # 812 cylinders, 16 heads, and 63 sectors per track.
# FIX(review): '|| fail 4' was missing here — every sibling test checks
# this call, and the fail numbering jumps 3 -> 5 around this line.
297 zconfig_partition /dev/${FULL_NAME} 0 812 || fail 4
299 # Format the partition with ext3.
300 /sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5
302 # Mount the ext3 filesystem and copy some data to it.
303 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
304 mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
305 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8
308 # Verify the copied files match the original files.
309 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9
311 # Remove the files, umount, destroy the volume and pool.
312 rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
313 umount /tmp/${ZVOL_NAME}1 || fail 11
314 rmdir /tmp/${ZVOL_NAME}1 || fail 12
316 ${ZFS} destroy ${FULL_NAME} || fail 13
317 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
318 ${ZFS_SH} -u || fail 15
319 rm -f ${TMP_CACHE} || fail 16
323 run_test 5 "zvol+ext3 volume"
325 # ZVOL snapshot sanity check
329 local SNAP_NAME=pristine
330 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
331 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
333 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
335 # Create a pool and volume.
336 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
337 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
338 ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3
340 # Partition the volume, for a 400M volume there will be
341 # 812 cylinders, 16 heads, and 63 sectors per track.
# FIX(review): '|| fail 4' was missing here — sibling tests check this
# call, and the fail numbering jumps 3 -> 5 around this line.
342 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4
344 # Format the partition with ext2 (no journal).
345 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
347 # Mount the ext2 filesystem and copy some data to it.
348 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
349 mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
351 # Snapshot the pristine ext2 filesystem and mount it read-only.
# NOTE: the duplicate 'fail 8' below is pre-existing; left as-is to avoid
# renumbering every later failure code.
352 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
353 wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
354 mkdir -p /tmp/${SNAP_NAME}1 || fail 9
355 mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
357 # Copy to original volume
358 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
361 # Verify the copied files match the original files,
362 # and the copied files do NOT appear in the snapshot.
363 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
364 diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
366 # umount, destroy the snapshot, volume, and pool.
367 umount /tmp/${SNAP_NAME}1 || fail 14
368 rmdir /tmp/${SNAP_NAME}1 || fail 15
369 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16
371 umount /tmp/${ZVOL_NAME}1 || fail 17
372 rmdir /tmp/${ZVOL_NAME}1 || fail 18
373 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19
375 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
376 ${ZFS_SH} -u || fail 21
377 rm -f ${TMP_CACHE} || fail 22
381 run_test 6 "zvol+ext2 snapshot"
383 # ZVOL clone sanity check
387 local SNAP_NAME=pristine
388 local CLONE_NAME=clone
389 local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
390 local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
391 local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
393 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
395 # Create a pool and volume.
396 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
397 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
398 ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3
400 # Partition the volume, for a 300M volume there will be
401 # 609 cylinders, 16 heads, and 63 sectors per track.
# FIX(review): '|| fail 4' was missing here — sibling tests check this
# call, and the fail numbering jumps 3 -> 5 around this line.
402 zconfig_partition /dev/${FULL_ZVOL_NAME} 0 609 || fail 4
404 # Format the partition with ext2 (no journal).
405 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5
407 # Mount the ext2 filesystem and copy some data to it.
408 mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
409 mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
411 # Snapshot the pristine ext2 filesystem and mount it read-only.
412 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
413 wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
414 mkdir -p /tmp/${SNAP_NAME}1 || fail 9
415 mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10
417 # Copy to original volume.
418 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
421 # Verify the copied files match the original files,
422 # and the copied files do NOT appear in the snapshot.
423 diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
424 diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13
426 # Clone from the original pristine snapshot
427 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
428 wait_udev /dev/${FULL_CLONE_NAME}1 30 || fail 14
429 mkdir -p /tmp/${CLONE_NAME}1 || fail 15
430 mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16
432 # Verify the clone matches the pristine snapshot,
433 # and the files copied to the original volume are NOT there.
434 diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
435 diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18
437 # Copy to cloned volume.
438 cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19
441 # Verify the clone matches the modified original volume.
442 diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20
444 # umount, destroy the snapshot, volume, and pool.
445 umount /tmp/${CLONE_NAME}1 || fail 21
446 rmdir /tmp/${CLONE_NAME}1 || fail 22
447 ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23
449 umount /tmp/${SNAP_NAME}1 || fail 24
450 rmdir /tmp/${SNAP_NAME}1 || fail 25
451 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26
453 umount /tmp/${ZVOL_NAME}1 || fail 27
454 rmdir /tmp/${ZVOL_NAME}1 || fail 28
455 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29
457 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
458 ${ZFS_SH} -u || fail 31
459 rm -f ${TMP_CACHE} || fail 32
463 run_test 7 "zvol+ext2 clone"
465 # Send/Receive sanity check
467 local POOL_NAME1=tank1
468 local POOL_NAME2=tank2
471 local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
472 local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
473 local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
474 local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
476 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
478 # Create two pools and a volume
479 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
480 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
481 ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
482 ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4
484 # Partition the volume, for a 300M volume there will be
485 # 609 cylinders, 16 heads, and 63 sectors per track.
# FIX(review): this call's result was previously ignored; check it like
# the sibling tests do.  'fail 4' is reused rather than renumbering every
# later step (duplicates already occur below, e.g. fail 11 / fail 12).
486 zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 609 || fail 4
488 # Format the partition with ext2.
489 /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5
491 # Mount the ext2 filesystem and copy some data to it.
492 mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
493 mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
494 cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8
497 # Snapshot the ext2 filesystem so it may be sent.
498 ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
499 wait_udev /dev/${FULL_SNAP_NAME1} 30 || fail 11
501 # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
502 (${ZFS} send ${FULL_SNAP_NAME1} | \
503 ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
504 wait_udev /dev/${FULL_ZVOL_NAME2}1 30 || fail 12
506 # Mount the received ext2 filesystem.
507 mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
508 mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14
510 # Verify the contents of the volumes match
511 diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
512 &>/dev/null || fail 15
514 # Umount, destroy the volume and pool.
515 umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
516 umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
517 rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
518 rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
519 rmdir /tmp/${POOL_NAME1} || fail 20
520 rmdir /tmp/${POOL_NAME2} || fail 21
522 ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
523 ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
524 ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
525 ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
526 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
527 ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
528 ${ZFS_SH} -u || fail 28
529 rm -f ${TMP_CACHE} || fail 29
533 run_test 8 "zfs send/receive"
535 # zpool event sanity check
539 local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
540 local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
541 local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`
543 # Create a pool and volume.
544 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
545 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
546 ${ZFS} create -V 300M ${FULL_NAME} || fail 3
548 # Dump the events, there should be at least 5 lines.
549 ${ZPOOL} events >${TMP_EVENTS} || fail 4
# 'wc -l file' prints "<count> <name>"; cut keeps just the count.
550 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
551 [ $EVENTS -lt 5 ] && fail 5
553 # Clear the events and ensure there are none.
554 ${ZPOOL} events -c >/dev/null || fail 6
555 ${ZPOOL} events >${TMP_EVENTS} || fail 7
556 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
# Allow a single line (presumably the output header) after a clear.
557 [ $EVENTS -gt 1 ] && fail 8
559 ${ZFS} destroy ${FULL_NAME} || fail 9
560 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
561 ${ZFS_SH} -u || fail 11
562 rm -f ${TMP_CACHE} || fail 12
563 rm -f ${TMP_EVENTS} || fail 13
567 run_test 9 "zpool events"