case OP_MOUNT:
if (zfs_unmount(node->un_zhp,
- node->un_mountp, flags) != 0)
+ node->un_zhp->zfs_name, flags) != 0)
ret = 1;
break;
}
tests/zfs-tests/tests/functional/clean_mirror/Makefile
tests/zfs-tests/tests/functional/cli_root/Makefile
tests/zfs-tests/tests/functional/cli_root/zdb/Makefile
+ tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_change-key/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_clone/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_copies/Makefile
if (hdl->libzfs_sharetab == NULL)
return (SHARED_NOT_SHARED);
+ /* Reopen ZFS_SHARETAB to prevent reading stale data from open file */
+ if (freopen(ZFS_SHARETAB, "r", hdl->libzfs_sharetab) == NULL)
+ return (SHARED_NOT_SHARED);
+
(void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
* then get freed later. We strdup it to play it safe.
*/
if (mountpoint == NULL)
- mntpt = zfs_strdup(hdl, entry.mnt_mountp);
+ mntpt = zfs_strdup(hdl, entry.mnt_special);
else
mntpt = zfs_strdup(hdl, mountpoint);
#
# By default preserve any existing pools
+# NOTE: Since 'zpool list' outputs a newline-delimited list, convert $KEEP from
+# space-delimited to newline-delimited.
#
if [ -z "${KEEP}" ]; then
- KEEP=$(sudo "$ZPOOL" list -H -o name)
+ KEEP="$(sudo "$ZPOOL" list -H -o name)"
if [ -z "${KEEP}" ]; then
KEEP="rpool"
fi
+else
+ KEEP="$(echo -e "${KEEP//[[:blank:]]/\n}")"
fi
-__ZFS_POOL_EXCLUDE="$(echo $KEEP | sed ':a;N;s/\n/ /g;ba')"
+#
+# NOTE: The following environment variables are undocumented
+# and should be used for testing purposes only:
+#
+# __ZFS_POOL_EXCLUDE - don't iterate over the pools it lists
+# __ZFS_POOL_RESTRICT - iterate only over the pools it lists
+#
+# See libzfs/libzfs_config.c for more information.
+#
+__ZFS_POOL_EXCLUDE="$(echo "$KEEP" | sed ':a;N;s/\n/ /g;ba')"
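As a rough illustration (pool names are hypothetical), starting from a user-supplied KEEP="rpool tank" the two conversions above are expected to leave:

    KEEP=$'rpool\ntank'               # newline-delimited, for exact per-line matching
    __ZFS_POOL_EXCLUDE="rpool tank"   # space-delimited, the same form as before the change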
. "$STF_SUITE/include/default.cfg"
export STF_SUITE
export STF_PATH
export DISKS
+export FILEDIR
export KEEP
export __ZFS_POOL_EXCLUDE
export TESTFAIL_CALLBACKS
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos', 'zfs_003_neg']
+[tests/functional/cli_root/zfs_bookmark]
+tests = ['zfs_bookmark_cliargs']
+
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
- 'zpool_scrub_encrypted_unloaded']
+ 'zpool_scrub_encrypted_unloaded', 'zpool_scrub_offline_device']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg']
-pre =
-post =
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos','zpool_status_003_pos',
# pattern to ignore from 'zfs list'.
export NO_DATASETS="no datasets available"
-export TEST_BASE_DIR="/var/tmp"
+# Default directory used for test files
+# NOTE: strip any trailing "/"; some functions rely on this for pattern matching
+export TEST_BASE_DIR="$(dirname ${FILEDIR:-/var/tmp}/.)"
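For illustration (the FILEDIR values are hypothetical), the dirname expression above should normalize away any trailing "/":

    dirname /mnt/tests//.    # FILEDIR=/mnt/tests/  ->  /mnt/tests
    dirname /var/tmp/.       # FILEDIR unset        ->  /var/tmp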
# Default to compression ON
export COMPRESSION_PROP=on
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
- log_must zpool create -f $TESTPOOL raidz $1 $2 $3
+ log_must zpool create -f $TESTPOOL raidz $disklist
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
log_pass
}
+#
+# Utility function used to list all available pool names.
+#
+# NOTE: $KEEP is a newline-separated list of pool names that must be
+# excluded from the returned list.
+#
+function get_all_pools
+{
+ zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
+}
+
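A minimal sketch of the filtering above, with hypothetical pool names: because $KEEP holds one name per line, 'grep -Fvx' drops only exact full-line matches and keeps everything else:

    KEEP=$'rpool\nbackup'                                    # hypothetical
    printf '%s\n' rpool backup testpool | grep -Fvx "$KEEP"  # -> testpool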
function default_cleanup_noexit
{
- typeset exclude=""
typeset pool=""
#
# Destroying the pool will also destroy any
#
if is_global_zone; then
zfs unmount -a > /dev/null 2>&1
- exclude=`eval echo \"'(${KEEP})'\"`
- ALL_POOLS=$(zpool list -H -o name \
- | grep -v "$NO_POOLS" | egrep -v "$exclude")
+ ALL_POOLS=$(get_all_pools)
# Here, we loop through the pools we're allowed to
# destroy, only destroying them if it's safe to do
# so.
then
destroy_pool $pool
fi
- ALL_POOLS=$(zpool list -H -o name \
- | grep -v "$NO_POOLS" \
- | egrep -v "$exclude")
+ ALL_POOLS=$(get_all_pools)
done
done
# If the current system supports slog, add a slog device to the pool
#
if verify_slog_support ; then
- typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
+ typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
log_must mkfile $MINVDEVSIZE $sdevs
log_must zpool add $pool_name log mirror $sdevs
fi
#
function verify_slog_support
{
- typeset dir=/tmp/disk.$$
+ typeset dir=$TEST_BASE_DIR/disk.$$
typeset pool=foo.$$
typeset vdev=$dir/a
typeset sdev=$dir/b
if poolexists $POOL ; then
log_must zpool destroy $POOL
fi
- rm /bootfs_003.$$.dat
+ rm $TESTDIR/bootfs_003.$$.dat
}
if poolexists $POOL; then
log_must zpool destroy $POOL
fi
- rm /bootfs_004.$$.dat
+ rm $TESTDIR/bootfs_004.$$.dat
}
((ret |= $?))
typeset mntpnt=$(get_prop mountpoint $pool)
- dd if=/dev/random of=$mntpnt/testfile.$$ &
+ dd if=/dev/urandom of=$mntpnt/testfile.$$ &
typeset pid=$!
zpool iostat -v 1 3 > /dev/null
((ret |= $?))
kill -9 $pid
+ wait
return $ret
}
SUBDIRS = \
zdb \
zfs \
+ zfs_bookmark \
zfs_change-key \
zfs_clone \
zfs_copies \
function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+ rm -f $TEMPFILE
}
verify_runnable "global"
--- /dev/null
+pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zfs_bookmark
+dist_pkgdata_SCRIPTS = \
+ setup.ksh \
+ cleanup.ksh \
+ zfs_bookmark_cliargs.ksh
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+default_cleanup
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+DISK=${DISKS%% *}
+
+default_volume_setup $DISK
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# 'zfs bookmark' should work with both full and short arguments.
+#
+# STRATEGY:
+# 1. Create initial snapshot
+# 2. Verify we can create a bookmark specifying snapshot and bookmark full paths
+# 3. Verify we can create a bookmark specifying the snapshot name
+# 4. Verify we can create a bookmark specifying the bookmark name
+#
+
+verify_runnable "both"
+
+function cleanup
+{
+ if snapexists "$DATASET@$TESTSNAP"; then
+ log_must zfs destroy "$DATASET@$TESTSNAP"
+ fi
+ if bkmarkexists "$DATASET#$TESTBM"; then
+ log_must zfs destroy "$DATASET#$TESTBM"
+ fi
+}
+
+log_assert "'zfs bookmark' works as expected when passed valid arguments."
+log_onexit cleanup
+
+DATASET="$TESTPOOL/$TESTFS"
+TESTSNAP='snapshot'
+TESTBM='bookmark'
+
+# Create initial snapshot
+log_must zfs snapshot "$DATASET@$TESTSNAP"
+
+# Verify we can create a bookmark specifying snapshot and bookmark full paths
+log_must zfs bookmark "$DATASET@$TESTSNAP" "$DATASET#$TESTBM"
+log_must eval "bkmarkexists $DATASET#$TESTBM"
+log_must zfs destroy "$DATASET#$TESTBM"
+
+# Verify we can create a bookmark specifying the snapshot name
+log_must zfs bookmark "@$TESTSNAP" "$DATASET#$TESTBM"
+log_must eval "bkmarkexists $DATASET#$TESTBM"
+log_must zfs destroy "$DATASET#$TESTBM"
+
+# Verify we can create a bookmark specifying the bookmark name
+log_must zfs bookmark "$DATASET@$TESTSNAP" "#$TESTBM"
+log_must eval "bkmarkexists $DATASET#$TESTBM"
+log_must zfs destroy "$DATASET#$TESTBM"
+
+log_pass "'zfs bookmark' works as expected when passed valid arguments."
verify_runnable "global"
-# See issue: https://github.com/zfsonlinux/zfs/issues/6145
-if is_linux; then
- log_unsupported "Test case occasionally fails"
-fi
-
function cleanup
{
if poolexists $ZPOOL_VERSION_1_NAME; then
destroy_pool $ZPOOL_VERSION_1_NAME
fi
- if [[ -f $TESTDIR/$ZPOOL_VERSION_1_FILES ]]; then
- rm -f $TESTDIR/$ZPOOL_VERSION_1_FILES
+ if [[ -f $TEST_BASE_DIR/$ZPOOL_VERSION_1_FILES ]]; then
+ rm -f $TEST_BASE_DIR/$ZPOOL_VERSION_1_FILES
+ fi
+
+ if [[ -f $TEST_BASE_DIR/${ZPOOL_VERSION_1_FILES%.*} ]]; then
+ rm -f $TEST_BASE_DIR/${ZPOOL_VERSION_1_FILES%.*}
fi
}
log_assert "Verify that copies cannot be set with pool version 1"
log_onexit cleanup
-cp $STF_SUITE/tests/functional/cli_root/zpool_upgrade/$ZPOOL_VERSION_1_FILES $TESTDIR
-bunzip2 $TESTDIR/$ZPOOL_VERSION_1_FILES
-log_must zpool import -d $TESTDIR $ZPOOL_VERSION_1_NAME
+log_must cp $STF_SUITE/tests/functional/cli_root/zpool_upgrade/$ZPOOL_VERSION_1_FILES $TEST_BASE_DIR
+log_must bunzip2 $TEST_BASE_DIR/$ZPOOL_VERSION_1_FILES
+log_must zpool import -d $TEST_BASE_DIR $ZPOOL_VERSION_1_NAME
log_must zfs create $ZPOOL_VERSION_1_NAME/$TESTFS
log_must zfs create -V 1m $ZPOOL_VERSION_1_NAME/$TESTVOL
block_device_wait
log_must snapexists $TESTPOOL/$TESTFS1@snap3
log_note "zfs destroy for snapshots from different pools"
-VIRTUAL_DISK=/var/tmp/disk
+VIRTUAL_DISK=$TEST_BASE_DIR/disk
log_must mkfile $MINVDEVSIZE $VIRTUAL_DISK
log_must zpool create $TESTPOOL2 $VIRTUAL_DISK
log_must poolexists $TESTPOOL2
function cleanup
{
- log_must zpool export $TESTPOOL
+ log_must_busy zpool export $TESTPOOL
log_must zpool import $TESTPOOL
snapexists $TESTSNAP && log_must zfs destroy $TESTSNAP
[[ -d $MNTPSNAP ]] && log_must rmdir $MNTPSNAP
function cleanup
{
zfs destroy -Rf $TESTPOOL/$TESTFS/base
- rm /tmp/zr010p*
+ rm $TESTDIR/zr010p*
}
log_assert "zfs receive of full send as clone should work"
log_must zfs snapshot $fs@s1
log_must zfs snapshot $fs2@s1
-log_must zfs send $fs@s1 > /tmp/zr010p
-log_must zfs send $fs2@s1 > /tmp/zr010p2
+log_must zfs send $fs@s1 > $TESTDIR/zr010p
+log_must zfs send $fs2@s1 > $TESTDIR/zr010p2
#
# Test that, when we receive a full send as a clone of itself,
# nop-write saves us all the space used by data blocks.
#
-cat /tmp/zr010p | log_must zfs receive -o origin=$fs@s1 $rfs
+cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs@s1 $rfs
size=$(get_prop used $rfs)
size2=$(get_prop used $fs)
if [[ $size -ge $(($size2 / 10)) ]]; then
log_must zfs destroy -fr $rfs
# Correctness testing: receive each full send as a clone of the other filesystem.
-cat /tmp/zr010p | log_must zfs receive -o origin=$fs2@s1 $rfs
+cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs2@s1 $rfs
mntpnt_old=$(get_prop mountpoint $fs)
mntpnt_new=$(get_prop mountpoint $rfs)
log_must diff -r $mntpnt_old $mntpnt_new
log_must zfs destroy -r $rfs
-cat /tmp/zr010p2 | log_must zfs receive -o origin=$fs@s1 $rfs
+cat $TESTDIR/zr010p2 | log_must zfs receive -o origin=$fs@s1 $rfs
mntpnt_old=$(get_prop mountpoint $fs2)
mntpnt_new=$(get_prop mountpoint $rfs)
log_must diff -r $mntpnt_old $mntpnt_new
dest=$TESTPOOL/$TESTFS2
typeset userprop=$(valid_user_property 8)
typeset userval=$(user_property_value 8)
-typeset streamfile_full=/var/tmp/streamfile_full.$$
-typeset streamfile_incr=/var/tmp/streamfile_incr.$$
-typeset streamfile_trun=/var/tmp/streamfile_trun.$$
+typeset streamfile_full=$TESTDIR/streamfile_full.$$
+typeset streamfile_incr=$TESTDIR/streamfile_incr.$$
+typeset streamfile_trun=$TESTDIR/streamfile_trun.$$
function cleanup
{
# Generate random data
#
BS=512 ; CNT=2048
-SRC_FILE=/tmp/srcfile.$$
-DST_FILE=/tmp/dstfile.$$
+SRC_FILE=$TESTDIR/srcfile.$$
+DST_FILE=$TESTDIR/dstfile.$$
log_must dd if=/dev/urandom of=$SRC_FILE bs=$BS count=$CNT
fs=$TESTPOOL/$TESTFS/fs.$$
function cleanup
{
- typeset snaps=$(zfs list -H -t snapshot -o name)
- typeset exclude
- typeset snap
- typeset pool_name
-
- if [[ -n $KEEP ]]; then
- exclude=`eval echo \"'(${KEEP})'\"`
- fi
-
- for snap in $snaps; do
- pool_name=$(echo "$snap" | awk -F/ '{print $1}')
- if [[ -n $exclude ]]; then
- echo "$pool_name" | egrep -v "$exclude" > /dev/null 2>&1
- if [[ $? -eq 0 ]]; then
- log_must zfs destroy $snap
- fi
- else
+ for poolname in $(get_all_pools); do
+ for snap in $(zfs list -H -t snapshot -o name -r $poolname); do
log_must zfs destroy $snap
- fi
+ done
done
}
verify_runnable "both"
-# See issue: https://github.com/zfsonlinux/zfs/issues/6421
-if is_linux; then
- log_unsupported "Test often runs for longer than 10 minutes."
-fi
-
function cleanup
{
zfs destroy -rf $TESTPOOL/fs
log_assert "Verify that 'zfs send' drills appropriate holes"
log_onexit cleanup
-streamfile=$(mktemp /var/tmp/file.XXXXXX)
-vdev=$(mktemp /var/tmp/file.XXXXXX)
+streamfile=$(mktemp $TESTDIR/file.XXXXXX)
+vdev=$(mktemp $TEST_BASE_DIR/file.XXXXXX)
test_pool ()
log_must zfs create -o recordsize=512 $POOL/fs
mntpnt=$(get_prop mountpoint "$POOL/fs")
log_must dd if=/dev/urandom of=${mntpnt}/file bs=512 count=1 2>/dev/null
- first_object=$(ls -i $mntpnt | awk '{print $1}')
+ object=$(ls -i $mntpnt | awk '{print $1}')
log_must zfs snapshot $POOL/fs@a
while true; do
- log_must find $mntpnt/* -delete
+ log_must find $mntpnt/ -type f -delete
sync
log_must mkfiles "$mntpnt/" 4000
- FILE=$(ls -i $mntpnt | awk \
- '{if ($1 == '$first_object') {print $2}}')
- if [[ -n "$FILE" ]]; then
+ sync
+ # check if we started reusing objects
+ object=$(ls -i $mntpnt | sort -n | awk -v object=$object \
+ '{if ($1 <= object) {exit 1}} END {print $1}')
+ if [[ $? -ne 0 ]]; then
break
fi
done
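A minimal sketch of the reuse check above (inode numbers are made up): awk exits non-zero as soon as any listed inode is at or below the previous high-water mark, otherwise it prints the new maximum for the next pass:

    printf '%s\n' 130 131 132 | awk -v object=129 \
        '{if ($1 <= object) {exit 1}} END {print $1}'
    # prints 132 and exits 0: no object numbers reused yet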
clean_blockfile "$TESTDIR0 $TESTDIR1"
- for file in /var/tmp/$FILEDISK0 /var/tmp/$FILEDISK1 /var/tmp/$FILEDISK2
+ for file in $FILEDISK0 $FILEDISK1 $FILEDISK2
do
-		if [[ -e $file ]]; then
+		if [[ -e $TEST_BASE_DIR/$file ]]; then
- rm -rf $file
+ rm -f $TEST_BASE_DIR/$file
fi
done
${DEV_RDSKDIR}/${disk}${SLICE_PREFIX}${SLICE1} >/dev/null 2>&1
create_blockfile $FILESIZE $TESTDIR0/$FILEDISK0 ${disk}${SLICE_PREFIX}${SLICE4}
create_blockfile $FILESIZE1 $TESTDIR1/$FILEDISK1 ${disk}${SLICE_PREFIX}${SLICE5}
-log_must truncate -s $SIZE /var/tmp/$FILEDISK0
-log_must truncate -s $SIZE /var/tmp/$FILEDISK1
-log_must truncate -s $SIZE /var/tmp/$FILEDISK2
+log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK0
+log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK1
+log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK2
unset NOINUSE_CHECK
log_must zpool export $TESTPOOL
log_note "'zpool create' mirror without '-f' will fail " \
"while devices are of different types."
-log_mustnot zpool create "$TESTPOOL4" "mirror" /var/tmp/$FILEDISK0 \
+log_mustnot zpool create "$TESTPOOL4" "mirror" $TEST_BASE_DIR/$FILEDISK0 \
${disk}${SLICE_PREFIX}${SLICE3}
create_pool "$TESTPOOL4" "mirror" \
- /var/tmp/$FILEDISK0 ${disk}${SLICE_PREFIX}${SLICE3}
+ $TEST_BASE_DIR/$FILEDISK0 ${disk}${SLICE_PREFIX}${SLICE3}
log_must poolexists $TESTPOOL4
log_note "'zpool create' without '-f' will fail " \
"while device is part of potentially active pool."
-create_pool "$TESTPOOL5" "mirror" /var/tmp/$FILEDISK1 \
- /var/tmp/$FILEDISK2
-log_must zpool offline $TESTPOOL5 /var/tmp/$FILEDISK2
+create_pool "$TESTPOOL5" "mirror" $TEST_BASE_DIR/$FILEDISK1 \
+ $TEST_BASE_DIR/$FILEDISK2
+log_must zpool offline $TESTPOOL5 $TEST_BASE_DIR/$FILEDISK2
log_must zpool export $TESTPOOL5
-log_mustnot zpool create "$TESTPOOL6" /var/tmp/$FILEDISK2
-create_pool $TESTPOOL6 /var/tmp/$FILEDISK2
+log_mustnot zpool create "$TESTPOOL6" $TEST_BASE_DIR/$FILEDISK2
+create_pool $TESTPOOL6 $TEST_BASE_DIR/$FILEDISK2
log_must poolexists $TESTPOOL6
log_pass "'zpool create -f <pool> <vspec> ...' success."
set -A options "" "-R $ALTER_ROOT"
+typeset -A testpools
typeset -i number=0
-typeset -i id=1
typeset -i i=0
typeset checksum1
-typeset unwantedpool
+typeset poolname
function setup_single_disk #disk #pool #fs #mtpt
{
#
for disk in $DEVICE_FILES
do
-
+ poolname="${TESTPOOL}-$number"
setup_single_disk "$disk" \
- "${TESTPOOL}-$number" \
+ "$poolname" \
"$TESTFS" \
"$TESTDIR.$number"
-
+ testpools[$poolname]=$poolname
(( number = number + 1 ))
done
log_must zpool import -d $DEVICE_DIR ${options[i]} -a -f
- # destroy unintentional imported pools
- typeset exclude=`eval echo \"'(${KEEP})'\"`
- for unwantedpool in $(zpool list -H -o name \
- | egrep -v "$exclude" | grep -v $TESTPOOL); do
- log_must zpool export $unwantedpool
+ # export unintentionally imported pools
+ for poolname in $(get_all_pools); do
+ if [[ -z ${testpools[$poolname]} ]]; then
+ log_must_busy zpool export $poolname
+ fi
done
if [[ -n ${options[i]} ]]; then
checksum_all
fi
- id=1
- while (( id < number )); do
- if poolexists ${TESTPOOL}-$id ; then
- log_must zpool export ${TESTPOOL}-$id
+ for poolname in ${testpools[@]}; do
+ if poolexists $poolname ; then
+ log_must_busy zpool export $poolname
fi
- (( id = id + 1 ))
done
(( i = i + 1 ))
((i = i + 1))
done
-VDEV_FILE=$(mktemp /tmp/tmp.XXXXXX)
+VDEV_FILE=$(mktemp $TEST_BASE_DIR/tmp.XXXXXX)
log_must mkfile -n 128M $VDEV_FILE
log_must zpool create overflow $VDEV_FILE
log_must zfs create overflow/testfs
ID=$(zpool get -Ho value guid overflow)
log_must zpool export overflow
-log_mustnot zpool import -d /tmp $(echo id) \
+log_mustnot zpool import -d $TEST_BASE_DIR $(echo $ID) \
$(printf "%*s\n" 250 "" | tr ' ' 'c')
log_pass "Successfully imported and renamed a ZPOOL"
zpool_scrub_003_pos.ksh \
zpool_scrub_004_pos.ksh \
zpool_scrub_005_pos.ksh \
- zpool_scrub_encrypted_unloaded.ksh
+ zpool_scrub_encrypted_unloaded.ksh \
+ zpool_scrub_offline_device.ksh
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_scrub/zpool_scrub.cfg
+
+#
+# DESCRIPTION:
+# Scrubbing a pool with offline devices correctly preserves DTL entries
+#
+# STRATEGY:
+# 1. Create the pool
+# 2. Offline the first device
+# 3. Write to the pool
+# 4. Scrub the pool
+# 5. Online the first device and offline the second device
+# 6. Scrub the pool again
+# 7. Verify data integrity
+#
+# NOTE:
+# Ported from the script used to reproduce issue #5806
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+ log_must rm -f $DISK1 $DISK2 $DISK3
+}
+
+#
+# Synchronously set $device on $pool to [online|offline], waiting for the new state
+#
+function zpool_do_sync # <status> <pool> <device>
+{
+ status="$1"
+ pool="$2"
+ device="$3"
+
+ if [[ $status != "online" && $status != "offline" ]]; then
+ log_fail "zpool_do_sync: invalid status $status"
+ fi
+
+ log_must zpool $status $pool $device
+ for i in {1..10}; do
+ check_state $pool $device $status && return 0
+ done
+ log_fail "Failed to $status device $device"
+}
+
+#
+# Start a scrub on $pool and wait for its completion
+#
+function zpool_scrub_sync # <pool>
+{
+ pool="$1"
+
+ log_must zpool scrub $pool
+ while ! is_pool_scrubbed $pool; do
+ sleep 1
+ done
+}
+
+log_assert "Scrubbing a pool with offline devices correctly preserves DTLs"
+log_onexit cleanup
+
+DEVSIZE='128m'
+FILESIZE='100m'
+TESTDIR="$TEST_BASE_DIR/zpool_scrub_offline_device"
+DISK1="$TEST_BASE_DIR/zpool_disk1.dat"
+DISK2="$TEST_BASE_DIR/zpool_disk2.dat"
+DISK3="$TEST_BASE_DIR/zpool_disk3.dat"
+
+# 1. Create the pool
+log_must truncate -s $DEVSIZE $DISK1
+log_must truncate -s $DEVSIZE $DISK2
+log_must truncate -s $DEVSIZE $DISK3
+poolexists $TESTPOOL && destroy_pool $TESTPOOL
+log_must zpool create -O mountpoint=$TESTDIR $TESTPOOL \
+ raidz1 $DISK1 $DISK2 $DISK3
+
+# 2. Offline the first device
+zpool_do_sync 'offline' $TESTPOOL $DISK1
+
+# 3. Write to the pool
+log_must mkfile $FILESIZE "$TESTDIR/data.bin"
+
+# 4. Scrub the pool
+zpool_scrub_sync $TESTPOOL
+
+# 5. Online the first device and offline the second device
+zpool_do_sync 'online' $TESTPOOL $DISK1
+zpool_do_sync 'offline' $TESTPOOL $DISK2
+
+# 6. Scrub the pool again
+zpool_scrub_sync $TESTPOOL
+
+# 7. Verify data integrity
+cksum=$(zpool status $TESTPOOL | awk 'L{print $NF;L=0} /CKSUM$/{L=1}')
+if [[ $cksum != 0 ]]; then
+ log_fail "Unexpected CKSUM errors found on $TESTPOOL ($cksum)"
+fi
+
+log_pass "Scrubbing a pool with offline devices correctly preserves DTLs"
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zpool_set
dist_pkgdata_SCRIPTS = \
+ setup.ksh \
+ cleanup.ksh \
zpool_set_001_pos.ksh \
zpool_set_002_neg.ksh \
zpool_set_003_neg.ksh \
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+default_cleanup
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+DISK=${DISKS%% *}
+
+default_setup $DISK
function cleanup {
zpool destroy bootfs
- rm /tmp/zpool_set_002.$$.dat
+ rm $FILEVDEV
}
log_assert "Malformed zpool set commands are rejected"
# Create a pool called bootfs (so named to trip any clashes between the
# property name and the pool name)
# Also create a filesystem in this pool
-log_must mkfile $MINVDEVSIZE /tmp/zpool_set_002.$$.dat
-log_must zpool create bootfs /tmp/zpool_set_002.$$.dat
+FILEVDEV="$TEST_BASE_DIR/zpool_set_002.$$.dat"
+log_must mkfile $MINVDEVSIZE $FILEVDEV
+log_must zpool create bootfs $FILEVDEV
log_must zfs create bootfs/root
typeset -i i=0;
function cleanup
{
- zpool destroy $TESTPOOL
- rm /tmp/zpool_set_003.$$.dat
+ zpool destroy $TESTPOOL1
+ rm $FILEVDEV
}
set -A props "available" "capacity" "guid" "health" "size" "used"
log_assert "zpool set cannot set a readonly property"
-log_must mkfile $MINVDEVSIZE /tmp/zpool_set_003.$$.dat
-log_must zpool create $TESTPOOL /tmp/zpool_set_003.$$.dat
+FILEVDEV="$TEST_BASE_DIR/zpool_set_003.$$.dat"
+log_must mkfile $MINVDEVSIZE $FILEVDEV
+log_must zpool create $TESTPOOL1 $FILEVDEV
typeset -i i=0;
while [ $i -lt "${#props[@]}" ]
do
# try to set each property in the prop list with its corresponding value
- log_mustnot eval "zpool set ${props[$i]}=${vals[$i]} $TESTPOOL \
+ log_mustnot eval "zpool set ${props[$i]}=${vals[$i]} $TESTPOOL1 \
> /dev/null 2>&1"
i=$(( $i + 1))
done
verify_runnable "global"
-# See issue: https://github.com/zfsonlinux/zfs/issues/5658
-if is_linux; then
- log_unsupported "Test case occasionally fails"
-fi
-
function cleanup
{
if datasetexists $root_testfs; then
log_onexit cleanup
-typeset exclude=`eval echo \"'(${KEEP})'\"`
-for pool in $(zpool list -H -o name | \
- egrep -v "$exclude" | \
- grep -v "$TESTPOOL" | \
- egrep -v "$NO_POOLS"); do
- log_must zpool destroy $pool
+for pool in $(get_all_pools); do
+ if [[ "$pool" != "$TESTPOOL" ]]; then
+ log_must zpool destroy $pool
+ fi
done
DISK=${DISKS%% *}
log_must zpool destroy $TESTPOOL
log_must zpool destroy $TESTPOOL2
-log_must rm /tmp/zpool_version_1.dat
-log_must rm /tmp/zpool2_version_1.dat
+log_must rm $TEST_BASE_DIR/zpool_version_1.dat
+log_must rm $TEST_BASE_DIR/zpool2_version_1.dat
default_cleanup
verify_runnable "global"
# create a version 1 pool
-log_must mkfile $MINVDEVSIZE /tmp/zpool_version_1.dat
-log_must zpool create -o version=1 $TESTPOOL /tmp/zpool_version_1.dat
+log_must mkfile $MINVDEVSIZE $TEST_BASE_DIR/zpool_version_1.dat
+log_must zpool create -o version=1 $TESTPOOL $TEST_BASE_DIR/zpool_version_1.dat
# create another version 1 pool
-log_must mkfile $MINVDEVSIZE /tmp/zpool2_version_1.dat
-log_must zpool create -o version=1 $TESTPOOL2 /tmp/zpool2_version_1.dat
+log_must mkfile $MINVDEVSIZE $TEST_BASE_DIR/zpool2_version_1.dat
+log_must zpool create -o version=1 $TESTPOOL2 $TEST_BASE_DIR/zpool2_version_1.dat
log_pass
function cleanup
{
for obj in $OBJ_LIST; do
- datasetexists $obj && log_must zfs destroy -f $obj
+ datasetexists $obj && log_must_busy zfs destroy -f $obj
done
log_must zero_reservation $TESTPOOL/$TESTFS
verify_runnable "both"
# See issue: https://github.com/zfsonlinux/zfs/issues/6086
-if is_linux; then
+if is_32bit; then
log_unsupported "Test case occasionally fails"
fi
((ret |= $?))
typeset mntpnt=$(get_prop mountpoint $pool)
- dd if=/dev/random of=$mntpnt/testfile.$$ &
+ dd if=/dev/urandom of=$mntpnt/testfile.$$ &
typeset pid=$!
zpool iostat -v 1 3 > /dev/null
((ret |= $?))
kill -9 $pid
+ wait
return $ret
}
verify_runnable "both"
-# https://github.com/zfsonlinux/zfs/issues/6143
-if is_linux; then
- log_unsupported "Test case occasionally fails"
-fi
-
function cleanup
{
typeset snap=""
log_assert "Ensure zeroed file gets written correctly during a sync operation"
-srcfile="/tmp/cosmo.$$"
+srcfile="$TESTDIR/cosmo.$$"
log_must dd if=/dev/urandom of=$srcfile bs=1024k count=1
log_onexit cleanup
function cleanup
{
- [[ -f /tmp/$TESTFILE ]] && log_must rm -f /tmp/$TESTFILE
+ [[ -f $TESTDIR/$TESTFILE ]] && log_must rm -f $TESTDIR/$TESTFILE
[[ -f $NEW_VFSTAB_FILE ]] && log_must rm -f $NEW_VFSTAB_FILE
[[ -f $PREV_VFSTAB_FILE ]] && \
log_must mv $PREV_VFSTAB_FILE $VFSTAB_FILE
log_must cp $NEW_VFSTAB_FILE $VFSTAB_FILE
log_must swapadd $VFSTAB_FILE
-log_must file_write -o create -f /tmp/$TESTFILE \
+log_must file_write -o create -f $TESTDIR/$TESTFILE \
-b $BLOCKSZ -c $NUM_WRITES -d $DATA
-[[ ! -f /tmp/$TESTFILE ]] &&
- log_fail "Unable to create file under /tmp"
+[[ ! -f $TESTDIR/$TESTFILE ]] &&
+ log_fail "Unable to create file under $TESTDIR"
-filesize=`ls -l /tmp/$TESTFILE | awk '{print $5}'`
+filesize=`ls -l $TESTDIR/$TESTFILE | awk '{print $5}'`
tf_size=$((BLOCKSZ * NUM_WRITES))
(($tf_size != $filesize)) && \
log_fail "testfile is ($filesize bytes), expected ($tf_size bytes)"