-# zfs allow can deal with invalid arguments.(Invalid options or combination)
+# zfs allow can deal with invalid arguments. (Invalid options or combination)
#
# STRATEGY:
-# 1. Verify invalid argumets will cause error.
+# 1. Verify invalid arguments will cause error.
-# 2. Verify non-optional argument was missing will cause error.
+# 2. Verify a missing non-optional argument will cause error.
# 3. Verify invalid options cause error.
#
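# A minimal sketch of steps 1-3 (log_mustnot is the suite's negative-test
# helper; the option letters below are assumed not to be accepted by
# zfs allow):
for badopt in "-r" "-R" "-x"; do
	log_mustnot zfs allow $badopt everyone mount $TESTPOOL/$TESTFS
done
log_mustnot zfs allow everyone mount	# non-optional dataset argument missing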
typeset -i timeout=0
while true; do
if ((timeout == $MAXTIMEOUT)); then
- log_fail "Timeout occured"
+ log_fail "Timeout occurred"
fi
((timeout++))
done
# Rinse and repeat, this time faulting both devices at the same time
-# NOTE: "raidz" is exluded since it cannot survive 2 faulted devices
+# NOTE: "raidz" is excluded since it cannot survive 2 faulted devices
# NOTE: "mirror" is a 4-way mirror here and should survive this test
for type in "mirror" "raidz2" "raidz3"; do
# 1. Create a pool with two hot spares
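# A sketch of this step (the $DISKn variables are illustrative): build a
# pool of the given type with two hot spares attached.
log_must zpool create $TESTPOOL $type $DISK0 $DISK1 $DISK2 $DISK3 \
    spare $DISK4 $DISK5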
# zpool iostat
#
# STRATEGY:
-# 1. Create a test pool.
+# 1. Create a test pool
# 2. Separately invoke zpool list|status|iostat
-# 3. Verify they was not recored in pool history.
+# 3. Verify they were not recorded in pool history
#
verify_runnable "global"
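# A minimal sketch of steps 2 and 3 (grep pattern illustrative): run the
# read-only commands, then confirm none of them landed in the history.
log_must zpool list $TESTPOOL
log_must zpool status $TESTPOOL
log_must zpool iostat $TESTPOOL
log_mustnot eval "zpool history $TESTPOOL | grep -E 'zpool (list|status|iostat)'"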
# STRATEGY:
# 1. Create a test pool.
# 2. Separately invoke zfs list|get|holds|mount|unmount|share|unshare|send
-# 3. Verify they were not recored in pool history.
+# 3. Verify they were not recorded in pool history.
#
verify_runnable "global"
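# As above, a short sketch for the zfs commands (grep pattern illustrative):
log_must zfs list $TESTPOOL/$TESTFS
log_must eval "zfs get all $TESTPOOL/$TESTFS > /dev/null"
log_mustnot eval "zpool history $TESTPOOL | grep -E 'zfs (list|get)'"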
TZ=$TIMEZONE zpool history $migratedpoolname | grep -v "^$" \
>$migrated_cmds_f
RET=$?
- (( $RET != 0 )) && log_fail "zpool histroy $migratedpoolname fails."
+ (( $RET != 0 )) && log_fail "zpool history $migratedpoolname fails."
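# A sketch of the comparison described below ($origin_cmds_f is a
# hypothetical file holding the history captured before migration): once
# the expected 'export' and 'import' entries are ignored, nothing may differ.
typeset -i extra=$(diff $origin_cmds_f $migrated_cmds_f | grep '^[<>]' | \
    grep -cvE 'export|import')
(( extra == 0 )) || log_fail "histories differ beyond export/import"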
-# The migrated history file should differ with original history file on
+# The migrated history file should differ from the original history file in
# two commands -- 'export' and 'import', which are included in migrated
#
# Here, we determine three things:
- # - Whether we're operating on a set or an indivdual permission (which
+ # - Whether we're operating on a set or an individual permission (which
# dictates the case of the first character in the code)
# - The name of the dataset we're operating on.
# - Whether the operation applies locally or to descendent datasets (or
[[ -z "$dumpdev" ]] && log_untested "No dump device has been configured"
[[ "$dumpdev" != "$diskslice" ]] && \
- log_untested "Dump device has not been been configured to $diskslice"
+ log_untested "Dump device has not been configured to $diskslice"
log_note "Attempt to create a pool on the dump device"
unset NOINUSE_CHECK
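# A hedged sketch of the attempt (with NOINUSE_CHECK unset above, the
# device in-use safeguard stays active): pool creation on the dump device
# must fail.
log_mustnot zpool create $TESTPOOL $dumpdev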
function cleanup
{
#
- # Essentailly this is the default_cleanup routine but I cannot get it
- # to work correctly. So its reproduced below. Still need to full
+ # Essentially this is the default_cleanup routine but I cannot get it
+ # to work correctly. So it's reproduced below. Still need to fully
# understand why default_cleanup does not work correctly from here.
#
log_must zfs umount $TESTPOOL/$TESTFS
echo "y" | newfs -v $t > /dev/null 2>&1
-(( $? !=0 )) && \
+(( $? != 0 )) && \
log_fail "newfs over exported pool " \
- "failes unexpected."
+ "fails unexpectedly."
done
return 0
# STRATEGY:
# 1. largest_file will write to a file and increase its size
# to the maximum allowable.
-# 2. The last byte of the file should be accessbile without error.
+# 2. The last byte of the file should be accessible without error.
# 3. Writing beyond the maximum file size generates an 'errno' of
# EFBIG.
#
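# A sketch of the invocation (largest_file is the suite's helper; the
# path is illustrative). Success covers steps 1 and 2, and the helper is
# assumed to verify the EFBIG behavior of step 3 itself.
log_must largest_file $TESTDIR/$TESTFILE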
#
# DESCRIPTION:
-# Writing to a file and mmaping that file at the
+# Writing to a file and mmapping that file at the
# same time does not result in a deadlock.
#
# STRATEGY:
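# A hedged sketch of the core check (mmapwrite stands in for a helper that
# keeps a write(2) writer and an mmap'd writer busy on one file; GNU stat
# is assumed): if the file keeps growing, the two paths did not deadlock.
mmapwrite $TESTDIR/$TESTFILE &
typeset wpid=$!
sleep 10
typeset -i size1=$(stat -c %s $TESTDIR/$TESTFILE)
sleep 10
typeset -i size2=$(stat -c %s $TESTDIR/$TESTFILE)
kill $wpid
(( size2 > size1 )) || log_fail "file stopped growing; possible deadlock"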
log_note "Uberblock changed $UBER_CHANGES times"
if [ $UBER_CHANGES -lt $MIN_UB_WRITES ]; then
- log_fail "Fewer uberblock writes occured than expected ($EXPECTED)"
+ log_fail "Fewer uberblock writes occurred than expected ($EXPECTED)"
fi
if [ $UBER_CHANGES -gt $MAX_UB_WRITES ]; then
- log_fail "More uberblock writes occured than expected ($EXPECTED)"
+ log_fail "More uberblock writes occurred than expected ($EXPECTED)"
fi
log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN
# Ensure the pool root filesystem shows in df output.
# If the pool was full (available == 0) and the pool
-# root filesytem had very little in it (used < 1 block),
+# root filesystem had very little in it (used < 1 block),
# the size reported to df was zero (issue #8253) and
# df skipped the filesystem in its output.
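# A hedged sketch of reaching that state (names illustrative): fill a
# child dataset so the pool root keeps used < 1 block, then the check
# below must still find the root filesystem in df.
log_must zfs create $TESTPOOL/fill
dd if=/dev/zero of=/$TESTPOOL/fill/f bs=1M >/dev/null 2>&1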
log_must eval "df -h | grep $TESTPOOL"
#
# Strategy:
# 1. Create an origin fs with compression and sha256.
-# 2. Clone origin such that it inherits the properies.
+# 2. Clone origin such that it inherits the properties.
# 3. Use dd with the sync flag to test the sync write path.
#
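# A minimal sketch of the setup (dataset and file names illustrative):
log_must zfs create -o compression=on -o checksum=sha256 $TESTPOOL/origin
log_must zfs snapshot $TESTPOOL/origin@snap
log_must zfs clone $TESTPOOL/origin@snap $TESTPOOL/clone
# oflag=sync (GNU dd) drives step 3 through the synchronous write path.
log_must dd if=/dev/urandom of=/$TESTPOOL/clone/sync_file bs=128k \
    count=8 oflag=sync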