Fix some ZFS Test Suite issues
author     LOLi <loli10K@users.noreply.github.com>
           Mon, 25 Sep 2017 17:32:34 +0000 (19:32 +0200)
committer  Brian Behlendorf <behlendorf1@llnl.gov>
           Mon, 25 Sep 2017 17:32:34 +0000 (10:32 -0700)
 * Add 'zfs bookmark' coverage (zfs_bookmark_cliargs)

 * Add OpenZFS 8166 coverage (zpool_scrub_offline_device)

 * Fix "busy" zfs_mount_remount failures

 * Fix bootfs_003_pos, bootfs_004_neg, zdb_005_pos local cleanup

 * Update usage of $KEEP variable, add get_all_pools() function

 * Enable history_008_pos and rsend_019_pos (non-32bit builders)

 * Enable zfs_copies_005_neg, update local cleanup

 * Fix zfs_send_007_pos (large_dnode + OpenZFS 8199)

 * Fix rollback_003_pos (use dataset name, not mountpoint, to unmount)

 * Update default_raidz_setup() to work properly with more than 3 disks

 * Use $TEST_BASE_DIR instead of hardcoded (/var)/tmp for file VDEVs

 * Update usage of /dev/random to /dev/urandom

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: loli10K <ezomori.nozomu@gmail.com>
Issue #6086
Closes #5658
Closes #6143
Closes #6421
Closes #6627
Closes #6632

44 files changed:
cmd/zfs/zfs_main.c
configure.ac
lib/libzfs/libzfs_mount.c
scripts/zfs-tests.sh
tests/runfiles/linux.run
tests/zfs-tests/include/default.cfg.in
tests/zfs-tests/include/libtest.shlib
tests/zfs-tests/tests/functional/bootfs/bootfs_003_pos.ksh
tests/zfs-tests/tests/functional/bootfs/bootfs_004_neg.ksh
tests/zfs-tests/tests/functional/cache/cache.kshlib
tests/zfs-tests/tests/functional/cli_root/Makefile.am
tests/zfs-tests/tests/functional/cli_root/zdb/zdb_005_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/Makefile.am [new file with mode: 0644]
tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/cleanup.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/setup.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_005_neg.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_remount.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_010_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_014_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_007_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_009_neg.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_007_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_002_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_all_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_rename_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_scrub/Makefile.am
tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/cli_root/zpool_set/Makefile.am
tests/zfs-tests/tests/functional/cli_root/zpool_set/cleanup.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/cli_root/zpool_set/setup.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_002_neg.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_003_neg.ksh
tests/zfs-tests/tests/functional/history/history_008_pos.ksh
tests/zfs-tests/tests/functional/pool_names/pool_names_002_neg.ksh
tests/zfs-tests/tests/functional/poolversion/cleanup.ksh
tests/zfs-tests/tests/functional/poolversion/setup.ksh
tests/zfs-tests/tests/functional/reservation/reservation_002_pos.sh
tests/zfs-tests/tests/functional/rsend/rsend_019_pos.ksh
tests/zfs-tests/tests/functional/slog/slog.kshlib
tests/zfs-tests/tests/functional/snapshot/rollback_003_pos.ksh
tests/zfs-tests/tests/functional/truncate/truncate_002_pos.ksh
tests/zfs-tests/tests/functional/zvol/zvol_swap/zvol_swap_003_pos.ksh

index 4331df40324a339a254124697daf73142a3a67cb..ef131f9b5f1c7f8b850b2c382cc980be984c413f 100644 (file)
@@ -6729,7 +6729,7 @@ unshare_unmount(int op, int argc, char **argv)
 
                        case OP_MOUNT:
                                if (zfs_unmount(node->un_zhp,
-                                   node->un_mountp, flags) != 0)
+                                   node->un_zhp->zfs_name, flags) != 0)
                                        ret = 1;
                                break;
                        }
index e83c5397dd144922f5be86c0bb839ff40abb95e5..37babef54cfc3a99f2c36304f64e6a0eac07accc 100644 (file)
@@ -182,6 +182,7 @@ AC_CONFIG_FILES([
        tests/zfs-tests/tests/functional/clean_mirror/Makefile
        tests/zfs-tests/tests/functional/cli_root/Makefile
        tests/zfs-tests/tests/functional/cli_root/zdb/Makefile
+       tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/Makefile
        tests/zfs-tests/tests/functional/cli_root/zfs_change-key/Makefile
        tests/zfs-tests/tests/functional/cli_root/zfs_clone/Makefile
        tests/zfs-tests/tests/functional/cli_root/zfs_copies/Makefile
index b2d082e17c4660228b6605ddb932a4e1092a8df3..59b98a28450e7f20c7cd9545822cc3ef7c00bac5 100644 (file)
@@ -136,6 +136,10 @@ is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto)
        if (hdl->libzfs_sharetab == NULL)
                return (SHARED_NOT_SHARED);
 
+       /* Reopen ZFS_SHARETAB to prevent reading stale data from open file */
+       if (freopen(ZFS_SHARETAB, "r", hdl->libzfs_sharetab) == NULL)
+               return (SHARED_NOT_SHARED);
+
        (void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
 
        while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
@@ -660,7 +664,7 @@ zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
                 * then get freed later. We strdup it to play it safe.
                 */
                if (mountpoint == NULL)
-                       mntpt = zfs_strdup(hdl, entry.mnt_mountp);
+                       mntpt = zfs_strdup(hdl, entry.mnt_special);
                else
                        mntpt = zfs_strdup(hdl, mountpoint);
 
index 3345fb5a57879db62ecc1f83be7d05658abb4f12..9b8d166bf6ee3bab3e7997ed6ac17d18eec39bc9 100755 (executable)
@@ -438,15 +438,28 @@ fi
 
 #
 # By default preserve any existing pools
+# NOTE: Since 'zpool list' outputs a newline-delimited list convert $KEEP from
+# space-delimited to newline-delimited.
 #
 if [ -z "${KEEP}" ]; then
-       KEEP=$(sudo "$ZPOOL" list -H -o name)
+       KEEP="$(sudo "$ZPOOL" list -H -o name)"
        if [ -z "${KEEP}" ]; then
                KEEP="rpool"
        fi
+else
+       KEEP="$(echo -e "${KEEP//[[:blank:]]/\n}")"
 fi
 
-__ZFS_POOL_EXCLUDE="$(echo $KEEP | sed ':a;N;s/\n/ /g;ba')"
+#
+# NOTE: The following environment variables are undocumented
+# and should be used for testing purposes only:
+#
+# __ZFS_POOL_EXCLUDE - don't iterate over the pools it lists
+# __ZFS_POOL_RESTRICT - iterate only over the pools it lists
+#
+# See libzfs/libzfs_config.c for more information.
+#
+__ZFS_POOL_EXCLUDE="$(echo "$KEEP" | sed ':a;N;s/\n/ /g;ba')"
 
 . "$STF_SUITE/include/default.cfg"
 
@@ -524,6 +537,7 @@ export STF_TOOLS
 export STF_SUITE
 export STF_PATH
 export DISKS
+export FILEDIR
 export KEEP
 export __ZFS_POOL_EXCLUDE
 export TESTFAIL_CALLBACKS
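The NOTE in the first hunk above marks __ZFS_POOL_EXCLUDE and __ZFS_POOL_RESTRICT as test-only knobs handled by libzfs_config.c, while $KEEP itself may now be passed space-delimited and is normalized to a newline-delimited list before __ZFS_POOL_EXCLUDE is derived from it. A minimal invocation sketch (the pool names are placeholders):

    # Preserve two pre-existing pools while running the suite; zfs-tests.sh
    # normalizes the space-delimited $KEEP and derives __ZFS_POOL_EXCLUDE from it.
    KEEP="rpool tank" ./scripts/zfs-tests.sh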
index bee3a6b5524af82f450ca1120779c3ca93acc2c2..a83d1d68db21870b2d1da073e33e3c37caa2941e 100644 (file)
@@ -70,6 +70,9 @@ post =
 [tests/functional/cli_root/zfs]
 tests = ['zfs_001_neg', 'zfs_002_pos', 'zfs_003_neg']
 
+[tests/functional/cli_root/zfs_bookmark]
+tests = ['zfs_bookmark_cliargs']
+
 [tests/functional/cli_root/zfs_change-key]
 tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
     'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
@@ -293,12 +296,10 @@ tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
 [tests/functional/cli_root/zpool_scrub]
 tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
     'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
-    'zpool_scrub_encrypted_unloaded']
+    'zpool_scrub_encrypted_unloaded', 'zpool_scrub_offline_device']
 
 [tests/functional/cli_root/zpool_set]
 tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg']
-pre =
-post =
 
 [tests/functional/cli_root/zpool_status]
 tests = ['zpool_status_001_pos', 'zpool_status_002_pos','zpool_status_003_pos',
index 9ac74f29c453363b1d22cbc011e395562fe5b829..7b2ef4d3071850e4c7fd941bec9afc7d35399aad 100644 (file)
@@ -61,7 +61,9 @@ export NO_POOLS="no pools available"
 # pattern to ignore from 'zfs list'.
 export NO_DATASETS="no datasets available"
 
-export TEST_BASE_DIR="/var/tmp"
+# Default directory used for test files
+# NOTE: remove trailing "/", some functions rely on this to do pattern matching
+export TEST_BASE_DIR="$(dirname ${FILEDIR:-/var/tmp}/.)"
 
 # Default to compression ON
 export COMPRESSION_PROP=on
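The dirname idiom above is what guarantees TEST_BASE_DIR never carries a trailing "/", whether or not $FILEDIR is set. For illustration (paths are examples only):

    FILEDIR=/mnt/ramdisk/ ; dirname ${FILEDIR:-/var/tmp}/.   # -> /mnt/ramdisk
    unset FILEDIR ;         dirname ${FILEDIR:-/var/tmp}/.   # -> /var/tmp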
index 3248ceb70810af2c2d336823ac2ee1284f146898..d72ebc8edc0e40b3467724a0d2cac3af55ee360d 100644 (file)
@@ -481,7 +481,7 @@ function default_raidz_setup
        fi
 
        [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
-       log_must zpool create -f $TESTPOOL raidz $1 $2 $3
+       log_must zpool create -f $TESTPOOL raidz $disklist
        log_must zfs create $TESTPOOL/$TESTFS
        log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
 
@@ -503,9 +503,19 @@ function default_cleanup
        log_pass
 }
 
+#
+# Utility function used to list all available pool names.
+#
+# NOTE: $KEEP is a variable containing pool names, separated by a newline
+# character, that must be excluded from the returned list.
+#
+function get_all_pools
+{
+       zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
+}
+
 function default_cleanup_noexit
 {
-       typeset exclude=""
        typeset pool=""
        #
        # Destroying the pool will also destroy any
@@ -513,9 +523,7 @@ function default_cleanup_noexit
        #
        if is_global_zone; then
                zfs unmount -a > /dev/null 2>&1
-               exclude=`eval echo \"'(${KEEP})'\"`
-               ALL_POOLS=$(zpool list -H -o name \
-                   | grep -v "$NO_POOLS" | egrep -v "$exclude")
+               ALL_POOLS=$(get_all_pools)
                # Here, we loop through the pools we're allowed to
                # destroy, only destroying them if it's safe to do
                # so.
@@ -527,9 +535,7 @@ function default_cleanup_noexit
                                then
                                        destroy_pool $pool
                                fi
-                               ALL_POOLS=$(zpool list -H -o name \
-                                   | grep -v "$NO_POOLS" \
-                                   | egrep -v "$exclude")
+                               ALL_POOLS=$(get_all_pools)
                        done
                done
 
@@ -1617,7 +1623,7 @@ function zfs_zones_setup #zone_name zone_root zone_ip
        # If current system support slog, add slog device for pool
        #
        if verify_slog_support ; then
-               typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
+               typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
                log_must mkfile $MINVDEVSIZE $sdevs
                log_must zpool add $pool_name log mirror $sdevs
        fi
@@ -2615,7 +2621,7 @@ function random_get
 #
 function verify_slog_support
 {
-       typeset dir=/tmp/disk.$$
+       typeset dir=$TEST_BASE_DIR/disk.$$
        typeset pool=foo.$$
        typeset vdev=$dir/a
        typeset sdev=$dir/b
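With get_all_pools() available, cleanup code no longer has to rebuild an egrep exclusion from $KEEP by hand; the pattern used throughout the updated tests looks roughly like this (a simplified sketch — default_cleanup_noexit above additionally checks that each pool is safe to destroy):

    # Destroy every pool except those protected by the newline-delimited $KEEP.
    for pool in $(get_all_pools); do
            destroy_pool $pool
    done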
index 83a582396dab44fd4f092791a6c43d37fb6c7340..e719b94e27635846f7774441b0f38ac4f0081049 100755 (executable)
@@ -50,7 +50,7 @@ function cleanup {
        if poolexists $POOL ; then
                log_must zpool destroy $POOL
        fi
-       rm /bootfs_003.$$.dat
+       rm $TESTDIR/bootfs_003.$$.dat
 }
 
 
index 9a5d7507a0380c0d840deee296dc751a26e541dd..97b456aade3eea1e15738b3cd86edc61e3c6942a 100755 (executable)
@@ -51,7 +51,7 @@ function cleanup {
        if poolexists $POOL; then
                log_must zpool destroy $POOL
        fi
-       rm /bootfs_004.$$.dat
+       rm $TESTDIR/bootfs_004.$$.dat
 }
 
 
index 26b56f68e579a26af9c5b19f4cc502ca8bbecea9..2e258e22cd3974382d50719584c0836a5081ca59 100644 (file)
@@ -57,13 +57,14 @@ function display_status
        ((ret |= $?))
 
        typeset mntpnt=$(get_prop mountpoint $pool)
-       dd if=/dev/random of=$mntpnt/testfile.$$ &
+       dd if=/dev/urandom of=$mntpnt/testfile.$$ &
        typeset pid=$!
 
        zpool iostat -v 1 3 > /dev/null
        ((ret |= $?))
 
        kill -9 $pid
+       wait
 
        return $ret
 }
index 9abaa8f4fd977b18a939e5c228413ebcb8818ed8..c6c394e2b841ad87c193361e1957252082c725e9 100644 (file)
@@ -5,6 +5,7 @@ dist_pkgdata_SCRIPTS = \
 SUBDIRS = \
        zdb \
        zfs \
+       zfs_bookmark \
        zfs_change-key \
        zfs_clone \
        zfs_copies \
index 60bbb5615602be68d30b74ce96fb3d2828e9f54c..c98caed426f6ca03de7341ccbf954cd66f73a1d8 100755 (executable)
@@ -36,6 +36,7 @@ log_onexit cleanup
 function cleanup
 {
        datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+       rm -f $TEMPFILE
 }
 
 verify_runnable "global"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/Makefile.am b/tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/Makefile.am
new file mode 100644 (file)
index 0000000..e71fbc8
--- /dev/null
@@ -0,0 +1,5 @@
+pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zfs_bookmark
+dist_pkgdata_SCRIPTS = \
+       setup.ksh \
+       cleanup.ksh \
+       zfs_bookmark_cliargs.ksh
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/cleanup.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/cleanup.ksh
new file mode 100755 (executable)
index 0000000..6a4e7cf
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+default_cleanup
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/setup.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/setup.ksh
new file mode 100755 (executable)
index 0000000..2a9de05
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+DISK=${DISKS%% *}
+
+default_volume_setup $DISK
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh
new file mode 100755 (executable)
index 0000000..3851831
--- /dev/null
@@ -0,0 +1,77 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# 'zfs bookmark' should work with both full and short arguments.
+#
+# STRATEGY:
+# 1. Create initial snapshot
+# 2. Verify we can create a bookmark specifying snapshot and bookmark full paths
+# 3. Verify we can create a bookmark specifying the snapshot name
+# 4. Verify we can create a bookmark specifying the bookmark name
+#
+
+verify_runnable "both"
+
+function cleanup
+{
+       if snapexists "$DATASET@$TESTSNAP"; then
+               log_must zfs destroy "$DATASET@$TESTSNAP"
+       fi
+       if bkmarkexists "$DATASET#$TESTBM"; then
+               log_must zfs destroy "$DATASET#$TESTBM"
+       fi
+}
+
+log_assert "'zfs bookmark' works as expected when passed valid arguments."
+log_onexit cleanup
+
+DATASET="$TESTPOOL/$TESTFS"
+TESTSNAP='snapshot'
+TESTBM='bookmark'
+
+# Create initial snapshot
+log_must zfs snapshot "$DATASET@$TESTSNAP"
+
+# Verify we can create a bookmark specifying snapshot and bookmark full paths
+log_must zfs bookmark "$DATASET@$TESTSNAP" "$DATASET#$TESTBM"
+log_must eval "bkmarkexists $DATASET#$TESTBM"
+log_must zfs destroy "$DATASET#$TESTBM"
+
+# Verify we can create a bookmark specifying the snapshot name
+log_must zfs bookmark "@$TESTSNAP" "$DATASET#$TESTBM"
+log_must eval "bkmarkexists $DATASET#$TESTBM"
+log_must zfs destroy "$DATASET#$TESTBM"
+
+# Verify we can create a bookmark specifying the bookmark name
+log_must zfs bookmark "$DATASET@$TESTSNAP" "#$TESTBM"
+log_must eval "bkmarkexists $DATASET#$TESTBM"
+log_must zfs destroy "$DATASET#$TESTBM"
+
+log_pass "'zfs bookmark' works as expected when passed valid arguments."
index 608d07413efd27b350292662e98feeafc7bab444..9e5a5c66112b0b69de774ad64dd7d9842e4db904 100755 (executable)
 
 verify_runnable "global"
 
-# See issue: https://github.com/zfsonlinux/zfs/issues/6145
-if is_linux; then
-       log_unsupported "Test case occasionally fails"
-fi
-
 function cleanup
 {
        if poolexists $ZPOOL_VERSION_1_NAME; then
                destroy_pool $ZPOOL_VERSION_1_NAME
        fi
 
-       if [[ -f $TESTDIR/$ZPOOL_VERSION_1_FILES ]]; then
-               rm -f $TESTDIR/$ZPOOL_VERSION_1_FILES
+       if [[ -f $TEST_BASE_DIR/$ZPOOL_VERSION_1_FILES ]]; then
+               rm -f $TEST_BASE_DIR/$ZPOOL_VERSION_1_FILES
+       fi
+
+       if [[ -f $TEST_BASE_DIR/${ZPOOL_VERSION_1_FILES%.*} ]]; then
+               rm -f $TEST_BASE_DIR/${ZPOOL_VERSION_1_FILES%.*}
        fi
 }
 
 log_assert "Verify that copies cannot be set with pool version 1"
 log_onexit cleanup
 
-cp $STF_SUITE/tests/functional/cli_root/zpool_upgrade/$ZPOOL_VERSION_1_FILES $TESTDIR
-bunzip2 $TESTDIR/$ZPOOL_VERSION_1_FILES
-log_must zpool import -d $TESTDIR $ZPOOL_VERSION_1_NAME
+log_must cp $STF_SUITE/tests/functional/cli_root/zpool_upgrade/$ZPOOL_VERSION_1_FILES $TEST_BASE_DIR
+log_must bunzip2 $TEST_BASE_DIR/$ZPOOL_VERSION_1_FILES
+log_must zpool import -d $TEST_BASE_DIR $ZPOOL_VERSION_1_NAME
 log_must zfs create $ZPOOL_VERSION_1_NAME/$TESTFS
 log_must zfs create -V 1m $ZPOOL_VERSION_1_NAME/$TESTVOL
 block_device_wait
index 487c7a64227e4d3346aa66f7a1fe26817137d38e..f399ad270634b9a5bd16382909144d058b0533d1 100755 (executable)
@@ -138,7 +138,7 @@ log_must snapexists $TESTPOOL/$TESTFS1/$TESTFS2@fs12snap
 log_must snapexists $TESTPOOL/$TESTFS1@snap3
 
 log_note "zfs destroy for snapshots from different pools"
-VIRTUAL_DISK=/var/tmp/disk
+VIRTUAL_DISK=$TEST_BASE_DIR/disk
 log_must mkfile $MINVDEVSIZE $VIRTUAL_DISK
 log_must zpool create $TESTPOOL2 $VIRTUAL_DISK
 log_must poolexists $TESTPOOL2
index ab51020aa853a738ac863f65b0fb9cb22ef5c356..a83e2d117c8e6a2bf25584dc8e7bb73125a9e212 100755 (executable)
@@ -44,7 +44,7 @@ verify_runnable "both"
 
 function cleanup
 {
-       log_must zpool export $TESTPOOL
+       log_must_busy zpool export $TESTPOOL
        log_must zpool import $TESTPOOL
        snapexists $TESTSNAP && log_must zfs destroy $TESTSNAP
        [[ -d $MNTPSNAP ]] && log_must rmdir $MNTPSNAP
index 820e05656f68416ff006d48dae1a0d35fb431917..84485977aa235944097885e8bd30b19d6fad2e4f 100755 (executable)
@@ -77,7 +77,7 @@ function create_pair
 function cleanup
 {
        zfs destroy -Rf $TESTPOOL/$TESTFS/base
-       rm /tmp/zr010p*
+       rm $TESTDIR/zr010p*
 }
 
 log_assert "zfs receive of full send as clone should work"
@@ -145,15 +145,15 @@ done
 log_must zfs snapshot $fs@s1
 log_must zfs snapshot $fs2@s1
 
-log_must zfs send $fs@s1 > /tmp/zr010p
-log_must zfs send $fs2@s1 > /tmp/zr010p2
+log_must zfs send $fs@s1 > $TESTDIR/zr010p
+log_must zfs send $fs2@s1 > $TESTDIR/zr010p2
 
 
 #
 # Test that, when we receive a full send as a clone of itself,
 # nop-write saves us all the space used by data blocks.
 #
-cat /tmp/zr010p | log_must zfs receive -o origin=$fs@s1 $rfs
+cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs@s1 $rfs
 size=$(get_prop used $rfs)
 size2=$(get_prop used $fs)
 if [[ $size -ge $(($size2 / 10)) ]] then
@@ -163,13 +163,13 @@ fi
 log_must zfs destroy -fr $rfs
 
 # Correctness testing: receive each full send as a clone of the other fiesystem.
-cat /tmp/zr010p | log_must zfs receive -o origin=$fs2@s1 $rfs
+cat $TESTDIR/zr010p | log_must zfs receive -o origin=$fs2@s1 $rfs
 mntpnt_old=$(get_prop mountpoint $fs)
 mntpnt_new=$(get_prop mountpoint $rfs)
 log_must diff -r $mntpnt_old $mntpnt_new
 log_must zfs destroy -r $rfs
 
-cat /tmp/zr010p2 | log_must zfs receive -o origin=$fs@s1 $rfs
+cat $TESTDIR/zr010p2 | log_must zfs receive -o origin=$fs@s1 $rfs
 mntpnt_old=$(get_prop mountpoint $fs2)
 mntpnt_new=$(get_prop mountpoint $rfs)
 log_must diff -r $mntpnt_old $mntpnt_new
index b92e712e9e32aa8200c9657f9791dae5f35ac6de..be04aed2b24c7a493303327651d9a11d68c1afd6 100755 (executable)
@@ -42,9 +42,9 @@ orig=$TESTPOOL/$TESTFS1
 dest=$TESTPOOL/$TESTFS2
 typeset userprop=$(valid_user_property 8)
 typeset userval=$(user_property_value 8)
-typeset streamfile_full=/var/tmp/streamfile_full.$$
-typeset streamfile_incr=/var/tmp/streamfile_incr.$$
-typeset streamfile_trun=/var/tmp/streamfile_trun.$$
+typeset streamfile_full=$TESTDIR/streamfile_full.$$
+typeset streamfile_incr=$TESTDIR/streamfile_incr.$$
+typeset streamfile_trun=$TESTDIR/streamfile_trun.$$
 
 function cleanup
 {
index f7faa3b36afca0a0b5a788b2e7e92e3c7f641cd9..3623d2bca1c976e305cad2a332d2bce085a94e4d 100755 (executable)
@@ -76,8 +76,8 @@ log_onexit cleanup
 # Generate random data
 #
 BS=512 ; CNT=2048
-SRC_FILE=/tmp/srcfile.$$
-DST_FILE=/tmp/dstfile.$$
+SRC_FILE=$TESTDIR/srcfile.$$
+DST_FILE=$TESTDIR/dstfile.$$
 log_must dd if=/dev/urandom of=$SRC_FILE bs=$BS count=$CNT
 
 fs=$TESTPOOL/$TESTFS/fs.$$
index d19c9485e652d908d770b859dc851254c7241c16..1bdaebdb03fcdda523f2030098a144d7933dd100 100755 (executable)
@@ -47,25 +47,10 @@ verify_runnable "both"
 
 function cleanup
 {
-       typeset snaps=$(zfs list -H -t snapshot -o name)
-       typeset exclude
-       typeset snap
-       typeset pool_name
-
-       if [[ -n $KEEP ]]; then
-               exclude=`eval echo \"'(${KEEP})'\"`
-       fi
-
-       for snap in $snaps; do
-               pool_name=$(echo "$snap" | awk -F/ '{print $1}')
-               if [[ -n $exclude ]]; then
-                       echo "$pool_name" | egrep -v "$exclude" > /dev/null 2>&1
-                       if [[ $? -eq 0 ]]; then
-                               log_must zfs destroy $snap
-                       fi
-               else
+       for poolname in $(get_all_pools); do
+               for snap in $(zfs list -H -t snapshot -o name -r $poolname); do
                        log_must zfs destroy $snap
-               fi
+               done
        done
 }
 
index ae352d4c0a61425aed00222765f6cf7cd00de248..5fdb125bca0a53254d1cfa83d71057e7a79e3763 100755 (executable)
 
 verify_runnable "both"
 
-# See issue: https://github.com/zfsonlinux/zfs/issues/6421
-if is_linux; then
-       log_unsupported "Test often runs for longer than 10 minutes."
-fi
-
 function cleanup
 {
        zfs destroy -rf $TESTPOOL/fs
@@ -53,8 +48,8 @@ function cleanup
 
 log_assert "Verify that 'zfs send' drills appropriate holes"
 log_onexit cleanup
-streamfile=$(mktemp /var/tmp/file.XXXXXX)
-vdev=$(mktemp /var/tmp/file.XXXXXX)
+streamfile=$(mktemp $TESTDIR/file.XXXXXX)
+vdev=$(mktemp $TEST_BASE_DIR/file.XXXXXX)
 
 
 test_pool ()
@@ -63,15 +58,17 @@ test_pool ()
        log_must zfs create -o recordsize=512 $POOL/fs
        mntpnt=$(get_prop mountpoint "$POOL/fs")
        log_must dd if=/dev/urandom of=${mntpnt}/file bs=512 count=1 2>/dev/null
-       first_object=$(ls -i $mntpnt | awk '{print $1}')
+       object=$(ls -i $mntpnt | awk '{print $1}')
        log_must zfs snapshot $POOL/fs@a
        while true; do
-               log_must find $mntpnt/* -delete
+               log_must find $mntpnt/ -type f -delete
                sync
                log_must mkfiles "$mntpnt/" 4000
-               FILE=$(ls -i $mntpnt | awk \
-                       '{if ($1 == '$first_object') {print $2}}')
-               if [[ -n "$FILE" ]]; then
+               sync
+               # check if we started reusing objects
+               object=$(ls -i $mntpnt | sort -n | awk -v object=$object \
+                   '{if ($1 <= object) {exit 1}} END {print $1}')
+               if [[ $? -ne 0 ]]; then
                        break
                fi
        done
index 95375778dd9a6bb54c38218bf4b9f048c847be62..bc50b65cc6d3793ee6bb5c5606ea2b21cc73d178 100755 (executable)
@@ -55,10 +55,10 @@ function cleanup
 
        clean_blockfile "$TESTDIR0 $TESTDIR1"
 
-       for file in /var/tmp/$FILEDISK0 /var/tmp/$FILEDISK1 /var/tmp/$FILEDISK2
+       for file in $FILEDISK0 $FILEDISK1 $FILEDISK2
        do
                if [[ -e $file ]]; then
-                       rm -rf $file
+                       rm -f $TEST_BASE_DIR/$file
                fi
        done
 
@@ -80,9 +80,9 @@ log_must echo "y" | newfs \
        ${DEV_RDSKDIR}/${disk}${SLICE_PREFIX}${SLICE1} >/dev/null 2>&1
 create_blockfile $FILESIZE $TESTDIR0/$FILEDISK0 ${disk}${SLICE_PREFIX}${SLICE4}
 create_blockfile $FILESIZE1 $TESTDIR1/$FILEDISK1 ${disk}${SLICE_PREFIX}${SLICE5}
-log_must truncate -s $SIZE /var/tmp/$FILEDISK0
-log_must truncate -s $SIZE /var/tmp/$FILEDISK1
-log_must truncate -s $SIZE /var/tmp/$FILEDISK2
+log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK0
+log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK1
+log_must truncate -s $SIZE $TEST_BASE_DIR/$FILEDISK2
 
 unset NOINUSE_CHECK
 log_must zpool export $TESTPOOL
@@ -107,20 +107,20 @@ log_must poolexists $TESTPOOL3
 
 log_note "'zpool create' mirror without '-f' will fail " \
        "while devices are of different types."
-log_mustnot zpool create "$TESTPOOL4" "mirror" /var/tmp/$FILEDISK0 \
+log_mustnot zpool create "$TESTPOOL4" "mirror" $TEST_BASE_DIR/$FILEDISK0 \
        ${disk}${SLICE_PREFIX}${SLICE3}
 create_pool "$TESTPOOL4" "mirror" \
-       /var/tmp/$FILEDISK0 ${disk}${SLICE_PREFIX}${SLICE3}
+       $TEST_BASE_DIR/$FILEDISK0 ${disk}${SLICE_PREFIX}${SLICE3}
 log_must poolexists $TESTPOOL4
 
 log_note "'zpool create' without '-f' will fail " \
        "while device is part of potentially active pool."
-create_pool "$TESTPOOL5"  "mirror" /var/tmp/$FILEDISK1 \
-       /var/tmp/$FILEDISK2
-log_must zpool offline $TESTPOOL5 /var/tmp/$FILEDISK2
+create_pool "$TESTPOOL5"  "mirror" $TEST_BASE_DIR/$FILEDISK1 \
+       $TEST_BASE_DIR/$FILEDISK2
+log_must zpool offline $TESTPOOL5 $TEST_BASE_DIR/$FILEDISK2
 log_must zpool export $TESTPOOL5
-log_mustnot zpool create "$TESTPOOL6" /var/tmp/$FILEDISK2
-create_pool $TESTPOOL6 /var/tmp/$FILEDISK2
+log_mustnot zpool create "$TESTPOOL6" $TEST_BASE_DIR/$FILEDISK2
+create_pool $TESTPOOL6 $TEST_BASE_DIR/$FILEDISK2
 log_must poolexists $TESTPOOL6
 
 log_pass "'zpool create -f <pool> <vspec> ...' success."
index 884c896bda3a45d408d62854a58c3954fa44e690..b384ec9b1fce19a7cf280871e6ff6278f337a2f7 100755 (executable)
@@ -48,11 +48,11 @@ verify_runnable "global"
 
 set -A options "" "-R $ALTER_ROOT"
 
+typeset -A testpools
 typeset -i number=0
-typeset -i id=1
 typeset -i i=0
 typeset checksum1
-typeset unwantedpool
+typeset poolname
 
 function setup_single_disk #disk #pool #fs #mtpt
 {
@@ -130,12 +130,12 @@ number=1
 #
 for disk in $DEVICE_FILES
 do
-
+       poolname="${TESTPOOL}-$number"
        setup_single_disk "$disk" \
-               "${TESTPOOL}-$number" \
+               "$poolname" \
                "$TESTFS" \
                "$TESTDIR.$number"
-
+       testpools[$poolname]=$poolname
        (( number = number + 1 ))
 done
 
@@ -143,11 +143,11 @@ while (( i < ${#options[*]} )); do
 
        log_must zpool import -d $DEVICE_DIR ${options[i]} -a -f
 
-       # destroy unintentional imported pools
-       typeset exclude=`eval echo \"'(${KEEP})'\"`
-       for unwantedpool in $(zpool list -H -o name \
-            | egrep -v "$exclude" | grep -v $TESTPOOL); do
-               log_must zpool export $unwantedpool
+       # export unintentionally imported pools
+       for poolname in $(get_all_pools); do
+               if [[ -z ${testpools[$poolname]} ]]; then
+                       log_must_busy zpool export $poolname
+               fi
        done
 
        if [[ -n ${options[i]} ]]; then
@@ -156,12 +156,10 @@ while (( i < ${#options[*]} )); do
                checksum_all
        fi
 
-       id=1
-       while (( id < number )); do
-               if poolexists ${TESTPOOL}-$id ; then
-                       log_must zpool export ${TESTPOOL}-$id
+       for poolname in ${testpools[@]}; do
+               if poolexists $poolname ; then
+                       log_must_busy zpool export $poolname
                fi
-               (( id = id + 1 ))
        done
 
        (( i = i + 1 ))
index 6d8db3b674961f93dd61bf4acffd39abfe3eabae..bb6bf86d788124eec13712aa89436cd67ba25f29 100755 (executable)
@@ -161,14 +161,14 @@ while (( i < ${#pools[*]} )); do
        ((i = i + 1))
 done
 
-VDEV_FILE=$(mktemp /tmp/tmp.XXXXXX)
+VDEV_FILE=$(mktemp $TEST_BASE_DIR/tmp.XXXXXX)
 
 log_must mkfile -n 128M $VDEV_FILE
 log_must zpool create overflow $VDEV_FILE
 log_must zfs create overflow/testfs
 ID=$(zpool get -Ho value guid overflow)
 log_must zpool export overflow
-log_mustnot zpool import -d /tmp $(echo id) \
+log_mustnot zpool import -d $TEST_BASE_DIR $(echo id) \
     $(printf "%*s\n" 250 "" | tr ' ' 'c')
 
 log_pass "Successfully imported and renamed a ZPOOL"
index ccca437eba95081bb1f9954bbdca2159056ed8eb..5b0d3978eee5f746a9dbac3f790bdc54a4272ed3 100644 (file)
@@ -8,4 +8,5 @@ dist_pkgdata_SCRIPTS = \
        zpool_scrub_003_pos.ksh \
        zpool_scrub_004_pos.ksh \
        zpool_scrub_005_pos.ksh \
-       zpool_scrub_encrypted_unloaded.ksh
+       zpool_scrub_encrypted_unloaded.ksh \
+       zpool_scrub_offline_device.ksh
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh
new file mode 100755 (executable)
index 0000000..8404744
--- /dev/null
@@ -0,0 +1,128 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_scrub/zpool_scrub.cfg
+
+#
+# DESCRIPTION:
+# Scrubbing a pool with offline devices correctly preserves DTL entries
+#
+# STRATEGY:
+# 1. Create the pool
+# 2. Offline the first device
+# 3. Write to the pool
+# 4. Scrub the pool
+# 5. Online the first device and offline the second device
+# 6. Scrub the pool again
+# 7. Verify data integrity
+#
+# NOTE:
+# Ported from script used to reproduce issue #5806
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+       poolexists $TESTPOOL && destroy_pool $TESTPOOL
+       log_must rm -f $DISK1 $DISK2 $DISK3
+}
+
+#
+# Update to [online|offline] $device status on $pool synchronously
+#
+function zpool_do_sync # <status> <pool> <device>
+{
+       status="$1"
+       pool="$2"
+       device="$3"
+
+       if [[ $status != "online" && $status != "offline" ]]; then
+               log_fail "zpool_do_sync: invalid status $status"
+       fi
+
+       log_must zpool $status $pool $device
+       for i in {1..10}; do
+               check_state $pool $device $status && return 0
+       done
+       log_fail "Failed to $status device $device"
+}
+
+#
+# Start a scrub on $pool and wait for its completion
+#
+function zpool_scrub_sync # <pool>
+{
+       pool="$1"
+
+       log_must zpool scrub $pool
+       while ! is_pool_scrubbed $pool; do
+               sleep 1
+       done
+}
+
+log_assert "Scrubbing a pool with offline devices correctly preserves DTLs"
+log_onexit cleanup
+
+DEVSIZE='128m'
+FILESIZE='100m'
+TESTDIR="$TEST_BASE_DIR/zpool_scrub_offline_device"
+DISK1="$TEST_BASE_DIR/zpool_disk1.dat"
+DISK2="$TEST_BASE_DIR/zpool_disk2.dat"
+DISK3="$TEST_BASE_DIR/zpool_disk3.dat"
+
+# 1. Create the pool
+log_must truncate -s $DEVSIZE $DISK1
+log_must truncate -s $DEVSIZE $DISK2
+log_must truncate -s $DEVSIZE $DISK3
+poolexists $TESTPOOL && destroy_pool $TESTPOOL
+log_must zpool create -O mountpoint=$TESTDIR $TESTPOOL \
+    raidz1 $DISK1 $DISK2 $DISK3
+
+# 2. Offline the first device
+zpool_do_sync 'offline' $TESTPOOL $DISK1
+
+# 3. Write to the pool
+log_must mkfile $FILESIZE "$TESTDIR/data.bin"
+
+# 4. Scrub the pool
+zpool_scrub_sync $TESTPOOL
+
+# 5. Online the first device and offline the second device
+zpool_do_sync 'online' $TESTPOOL $DISK1
+zpool_do_sync 'offline' $TESTPOOL $DISK2
+
+# 6. Scrub the pool again
+zpool_scrub_sync $TESTPOOL
+
+# 7. Verify data integrity
+cksum=$(zpool status $TESTPOOL | awk 'L{print $NF;L=0} /CKSUM$/{L=1}')
+if [[ $cksum != 0 ]]; then
+       log_fail "Unexpected CKSUM errors found on $TESTPOOL ($cksum)"
+fi
+
+log_pass "Scrubbing a pool with offline devices correctly preserves DTLs"
index e01dfde6b60ecb6e1701417dc3794f4f4208327d..77471688b6259ea4d13198ebfa3ef4c0e2d38d0a 100644 (file)
@@ -1,5 +1,7 @@
 pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zpool_set
 dist_pkgdata_SCRIPTS = \
+       setup.ksh \
+       cleanup.ksh \
        zpool_set_001_pos.ksh \
        zpool_set_002_neg.ksh \
        zpool_set_003_neg.ksh \
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_set/cleanup.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_set/cleanup.ksh
new file mode 100755 (executable)
index 0000000..79cd6e9
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+default_cleanup
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_set/setup.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_set/setup.ksh
new file mode 100755 (executable)
index 0000000..6a9af3b
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+DISK=${DISKS%% *}
+
+default_setup $DISK
index dbe33f9c8325f1d0841ea4d15494b1aaee903999..34d7fd8214640a587ff12c984fce2afc39ff577b 100755 (executable)
@@ -49,7 +49,7 @@ verify_runnable "global"
 function cleanup {
 
        zpool destroy bootfs
-       rm /tmp/zpool_set_002.$$.dat
+       rm $FILEVDEV
 }
 
 log_assert "Malformed zpool set commands are rejected"
@@ -103,8 +103,9 @@ arguments[${#arguments[@]}]="bootfs=$bigname"
 # Create a pool called bootfs (so-called, so as to trip any clashes between
 # property name, and pool name)
 # Also create a filesystem in this pool
-log_must mkfile $MINVDEVSIZE /tmp/zpool_set_002.$$.dat
-log_must zpool create bootfs /tmp/zpool_set_002.$$.dat
+FILEVDEV="$TEST_BASE_DIR/zpool_set_002.$$.dat"
+log_must mkfile $MINVDEVSIZE $FILEVDEV
+log_must zpool create bootfs $FILEVDEV
 log_must zfs create bootfs/root
 
 typeset -i i=0;
index c705ef33978bd12d2edf1642d7c645424c4ef1a8..1d637b77af0026adeecfd6aae932b48a7b58681c 100755 (executable)
@@ -45,8 +45,8 @@ verify_runnable "global"
 
 function cleanup
 {
-        zpool destroy $TESTPOOL
-        rm /tmp/zpool_set_003.$$.dat
+        zpool destroy $TESTPOOL1
+        rm $FILEVDEV
 }
 
 set -A props "available" "capacity" "guid"  "health"  "size" "used"
@@ -56,14 +56,15 @@ log_onexit cleanup
 
 log_assert "zpool set cannot set a readonly property"
 
-log_must mkfile $MINVDEVSIZE /tmp/zpool_set_003.$$.dat
-log_must zpool create $TESTPOOL /tmp/zpool_set_003.$$.dat
+FILEVDEV="$TEST_BASE_DIR/zpool_set_003.$$.dat"
+log_must mkfile $MINVDEVSIZE $FILEVDEV
+log_must zpool create $TESTPOOL1 $FILEVDEV
 
 typeset -i i=0;
 while [ $i -lt "${#props[@]}" ]
 do
        # try to set each property in the prop list with it's corresponding val
-        log_mustnot eval "zpool set ${props[$i]}=${vals[$i]} $TESTPOOL \
+        log_mustnot eval "zpool set ${props[$i]}=${vals[$i]} $TESTPOOL1 \
  > /dev/null 2>&1"
         i=$(( $i + 1))
 done
index f77fa2fc3621f65837ecda2cf52fc8ab2f879c0a..996c7658c32c6ad3b09bda8252c39ba793d5e83b 100755 (executable)
 
 verify_runnable "global"
 
-# See issue: https://github.com/zfsonlinux/zfs/issues/5658
-if is_linux; then
-       log_unsupported "Test case occasionally fails"
-fi
-
 function cleanup
 {
        if datasetexists $root_testfs; then
index 4d5c83f07bec7254fb5c6406d552010ce01fe58d..0c96e1999ef43333bb6c4b7a50395d70c6c0b82f 100755 (executable)
@@ -63,12 +63,10 @@ function cleanup
 
 log_onexit cleanup
 
-typeset exclude=`eval echo \"'(${KEEP})'\"`
-for pool in $(zpool list -H -o name | \
-       egrep -v "$exclude" | \
-       grep -v "$TESTPOOL" | \
-       egrep -v "$NO_POOLS"); do
-       log_must zpool destroy $pool
+for pool in $(get_all_pools); do
+       if [[ "$pool" != "$TESTPOOL" ]]; then
+               log_must zpool destroy $pool
+       fi
 done
 
 DISK=${DISKS%% *}
index 1a7c3ea4cfeae2c7d303435d81ff7218d564df9f..c9777b27e40664a84f2ed9cc6305faf548f29b61 100755 (executable)
@@ -36,7 +36,7 @@ verify_runnable "global"
 log_must zpool destroy $TESTPOOL
 log_must zpool destroy $TESTPOOL2
 
-log_must rm /tmp/zpool_version_1.dat
-log_must rm /tmp/zpool2_version_1.dat
+log_must rm $TEST_BASE_DIR/zpool_version_1.dat
+log_must rm $TEST_BASE_DIR/zpool2_version_1.dat
 
 default_cleanup
index 4c85b90f5f8872d8a61f677a6f0a35fe09103a60..660083b9fc00e44e870415172596c11ec6e51698 100755 (executable)
 verify_runnable "global"
 
 # create a version 1 pool
-log_must mkfile $MINVDEVSIZE /tmp/zpool_version_1.dat
-log_must zpool create -o version=1 $TESTPOOL /tmp/zpool_version_1.dat
+log_must mkfile $MINVDEVSIZE $TEST_BASE_DIR/zpool_version_1.dat
+log_must zpool create -o version=1 $TESTPOOL $TEST_BASE_DIR/zpool_version_1.dat
 
 
 # create another version 1 pool
-log_must mkfile $MINVDEVSIZE /tmp/zpool2_version_1.dat
-log_must zpool create -o version=1 $TESTPOOL2 /tmp/zpool2_version_1.dat
+log_must mkfile $MINVDEVSIZE $TEST_BASE_DIR/zpool2_version_1.dat
+log_must zpool create -o version=1 $TESTPOOL2 $TEST_BASE_DIR/zpool2_version_1.dat
 
 log_pass
index 202393d4d58c22558111404d156dba18abae3dad..8ae3593613f0df17f95bcfb5403ab46e90b9d8c9 100755 (executable)
@@ -54,7 +54,7 @@ verify_runnable "both"
 function cleanup
 {
        for obj in $OBJ_LIST; do
-               datasetexists $obj && log_must zfs destroy -f $obj
+               datasetexists $obj && log_must_busy zfs destroy -f $obj
        done
 
        log_must zero_reservation $TESTPOOL/$TESTFS
index 771d7e2f04a03160c4842b4534d1affb76ceffcb..f76bce9d04bfd10dd0f28fae3f0e0ed2ceb95c02 100755 (executable)
@@ -36,7 +36,7 @@
 verify_runnable "both"
 
 # See issue: https://github.com/zfsonlinux/zfs/issues/6086
-if is_linux; then
+if is_32bit; then
        log_unsupported "Test case occasionally fails"
 fi
 
index ca1b5ed2171044e655cfd6e55a9c9a14cf9aa99a..b32d18f2eac7aa4dde4cc268b4f7d2239dbb0b22 100644 (file)
@@ -59,13 +59,14 @@ function display_status
        ((ret |= $?))
 
        typeset mntpnt=$(get_prop mountpoint $pool)
-       dd if=/dev/random of=$mntpnt/testfile.$$ &
+       dd if=/dev/urandom of=$mntpnt/testfile.$$ &
        typeset pid=$!
 
        zpool iostat -v 1 3 > /dev/null
        ((ret |= $?))
 
        kill -9 $pid
+       wait
 
        return $ret
 }
index 0a21a8ae01decd3ab782ac8a62b00e28f0fcaf9b..9b1e400db56e075a3c7e84fd8d556b077ca99593 100755 (executable)
 
 verify_runnable "both"
 
-# https://github.com/zfsonlinux/zfs/issues/6143
-if is_linux; then
-       log_unsupported "Test case occasionally fails"
-fi
-
 function cleanup
 {
        typeset snap=""
index 9708b818e68f32c129715c890b550956e9677850..2a4996a1d926f2f3e607263272da3f69858dbb3e 100755 (executable)
@@ -52,7 +52,7 @@ function cleanup
 
 log_assert "Ensure zeroed file gets written correctly during a sync operation"
 
-srcfile="/tmp/cosmo.$$"
+srcfile="$TESTDIR/cosmo.$$"
 log_must dd if=/dev/urandom of=$srcfile bs=1024k count=1
 
 log_onexit cleanup
index 41e099492489669c8786a1b4abab02e10d1d9bef..a6a562cbb96d6347ff603f561ddfc642fff87dea 100755 (executable)
@@ -52,7 +52,7 @@ fi
 
 function cleanup
 {
-       [[ -f /tmp/$TESTFILE ]] && log_must rm -f /tmp/$TESTFILE
+       [[ -f $TESTDIR/$TESTFILE ]] && log_must rm -f $TESTDIR/$TESTFILE
        [[ -f $NEW_VFSTAB_FILE ]] && log_must rm -f $NEW_VFSTAB_FILE
        [[ -f $PREV_VFSTAB_FILE ]] && \
            log_must mv $PREV_VFSTAB_FILE $VFSTAB_FILE
@@ -86,13 +86,13 @@ log_must cp $VFSTAB_FILE $PREV_VFSTAB_FILE
 log_must cp $NEW_VFSTAB_FILE $VFSTAB_FILE
 log_must swapadd $VFSTAB_FILE
 
-log_must file_write -o create -f /tmp/$TESTFILE \
+log_must file_write -o create -f $TESTDIR/$TESTFILE \
     -b $BLOCKSZ -c $NUM_WRITES -d $DATA
 
-[[ ! -f /tmp/$TESTFILE ]] &&
-    log_fail "Unable to create file under /tmp"
+[[ ! -f $TESTDIR/$TESTFILE ]] &&
+    log_fail "Unable to create file under $TESTDIR"
 
-filesize=`ls -l /tmp/$TESTFILE | awk '{print $5}'`
+filesize=`ls -l $TESTDIR/$TESTFILE | awk '{print $5}'`
 tf_size=$((BLOCKSZ * NUM_WRITES))
 (($tf_size != $filesize)) && \
     log_fail "testfile is ($filesize bytes), expected ($tf_size bytes)"