granicus.if.org Git - zfs/commitdiff
Enable remaining tests
authorBrian Behlendorf <behlendorf1@llnl.gov>
Fri, 19 May 2017 00:21:15 +0000 (20:21 -0400)
committerBrian Behlendorf <behlendorf1@llnl.gov>
Mon, 22 May 2017 16:34:32 +0000 (12:34 -0400)
Enable most of the remaining test cases which were previously
disabled.  The required fixes are as follows:

* cache_001_pos - No changes required.

* cache_010_neg - Updated to use losetup under Linux.  Loopback
  cache devices are allowed, ZVOLs as cache devices are not.
  Disabled until all the builders pass reliably.

* cachefile_001_pos, cachefile_002_pos, cachefile_003_pos,
  cachefile_004_pos - Set set_device_dir path in cachefile.cfg,
  updated CPATH1 and CPATH2 to reference unique files.

* zfs_clone_005_pos - Wait for udev to create volumes.

* zfs_mount_007_pos - Updated mount options to expected Linux names.

* zfs_mount_009_neg, zfs_mount_all_001_pos - No changes required.

* zfs_unmount_005_pos, zfs_unmount_009_pos, zfs_unmount_all_001_pos -
  Updated to expect -f to not unmount busy mount points under Linux.

* rsend_019_pos - Observed to occasionally take a long time on both
  32-bit systems and the kmemleak builder.

* zfs_written_property_001_pos - Switched sync(1) to sync_pool.

* devices_001_pos, devices_002_neg - Updated create_dev_file() helper
  for Linux.

* exec_002_neg.ksh - Fixed mmap_exec.c to preserve errno.  Updated
  test case to expect EPERM from Linux as described by mmap(2).

* grow_pool_001_pos - Added missing setup.ksh and cleanup.ksh
  scripts from OpenZFS.

* grow_replicas_001_pos.ksh - Added missing $SLICE_* variables.

* history_004_pos, history_006_neg, history_008_pos - Fixed by
  previous commits and were not enabled.  No changes required.

* zfs_allow_010_pos - Added missing spaces after assorted zfs
  commands in delegate_common.kshlib.

* inuse_* - Illumos dump device tests skipped.  Remaining test
  cases updated to correctly create required partitions.

* large_files_001_pos - Fixed largest_file.c to accept EINVAL
  as well as EFBIG as described in write(2).

* link_count_001 - Added nproc to required commands.

* umountall_001 - Updated to use umount -a.

* online_offline_001_* - Pull in OpenZFS change to file_trunc.c
  to make the '-c 0' option run the test in a loop.  Included
  online_offline.cfg file in all test cases.

* rename_dirs_001_pos - Updated to use the rename_dir test binary,
  pkill restricted to exact matches and total runtime reduced.

* slog_013_neg, write_dirs_002_pos - No changes required.

* slog_013_pos.ksh - Updated to use losetup under Linux.

* slog_014_pos.ksh - ZED will not be running, manually degrade
  the damaged vdev as expected.

* nopwrite_varying_compression, nopwrite_volume - Forced pool
  sync with sync_pool to ensure up to date property values.

* Fixed typos in ZED log messages.  Refactored zed_* helper
  functions to resolve all-syslog exit=1 errors in zedlog.

* zfs_copies_005_neg, zfs_get_004_pos, zpool_add_004_pos,
  zpool_destroy_001_pos, largest_pool_001_pos,
  clone_001_pos - Skip until layering pools on zvols is solid.

* largest_pool_001_pos - Limited to 7eb pool, maximum
  supported size is 8eb-1 on Linux.

* zpool_expand_001_pos, zpool_expand_003_neg - Requires
  additional support from the ZED, updated skip reason.

* zfs_rollback_001_pos, zfs_rollback_002_pos - Properly cleanup
  busy mount points under Linux between test loops.

* privilege_001_pos, privilege_003_pos, rollback_003_pos,
  threadsappend_001_pos - Skip with log_unsupported.

* snapshot_016_pos - No changes required.

* snapshot_008_pos - Increased LIMIT from 512K to 2M and added
  sync_pool to avoid false positives.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #6128

85 files changed:
cmd/zed/agents/zfs_mod.c
cmd/zed/zed_disk_event.c
tests/runfiles/linux.run
tests/zfs-tests/cmd/file_common.h
tests/zfs-tests/cmd/file_trunc/file_trunc.c
tests/zfs-tests/cmd/largest_file/largest_file.c
tests/zfs-tests/cmd/mmap_exec/mmap_exec.c
tests/zfs-tests/include/commands.cfg
tests/zfs-tests/include/default.cfg
tests/zfs-tests/include/libtest.shlib
tests/zfs-tests/tests/functional/cache/cache_010_neg.ksh
tests/zfs-tests/tests/functional/cachefile/Makefile.am
tests/zfs-tests/tests/functional/cachefile/cachefile.cfg
tests/zfs-tests/tests/functional/cachefile/cleanup.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/cachefile/setup.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/cli_root/zfs_clone/zfs_clone_005_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_005_neg.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_006_neg.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_007_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_property/zfs_written_property_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib
tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_005_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_005_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_009_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_unmount/zfs_unmount_all_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_002_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zfs_unshare/zfs_unshare_006_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_004_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_destroy/zpool_destroy_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_expand/cleanup.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_expand/setup.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_003_neg.ksh
tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/zpool_upgrade_004_pos.ksh
tests/zfs-tests/tests/functional/cli_user/misc/zfs_share_001_neg.ksh
tests/zfs-tests/tests/functional/cli_user/misc/zfs_unshare_001_neg.ksh
tests/zfs-tests/tests/functional/delegate/delegate_common.kshlib
tests/zfs-tests/tests/functional/devices/devices_001_pos.ksh
tests/zfs-tests/tests/functional/devices/devices_002_neg.ksh
tests/zfs-tests/tests/functional/devices/devices_common.kshlib
tests/zfs-tests/tests/functional/exec/exec_001_pos.ksh
tests/zfs-tests/tests/functional/exec/exec_002_neg.ksh
tests/zfs-tests/tests/functional/fault/cleanup.ksh
tests/zfs-tests/tests/functional/fault/fault.cfg
tests/zfs-tests/tests/functional/fault/setup.ksh
tests/zfs-tests/tests/functional/grow_pool/Makefile.am
tests/zfs-tests/tests/functional/grow_pool/cleanup.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/grow_pool/grow_pool.cfg
tests/zfs-tests/tests/functional/grow_pool/setup.ksh [new file with mode: 0755]
tests/zfs-tests/tests/functional/grow_replicas/grow_replicas.cfg
tests/zfs-tests/tests/functional/grow_replicas/grow_replicas_001_pos.ksh
tests/zfs-tests/tests/functional/history/history_006_neg.ksh
tests/zfs-tests/tests/functional/history/history_008_pos.ksh
tests/zfs-tests/tests/functional/inuse/inuse.cfg
tests/zfs-tests/tests/functional/inuse/inuse_001_pos.ksh
tests/zfs-tests/tests/functional/inuse/inuse_003_pos.ksh
tests/zfs-tests/tests/functional/inuse/inuse_005_pos.ksh
tests/zfs-tests/tests/functional/inuse/inuse_006_pos.ksh
tests/zfs-tests/tests/functional/inuse/inuse_007_pos.ksh
tests/zfs-tests/tests/functional/inuse/inuse_008_pos.ksh
tests/zfs-tests/tests/functional/inuse/inuse_009_pos.ksh
tests/zfs-tests/tests/functional/large_files/large_files_001_pos.ksh
tests/zfs-tests/tests/functional/largest_pool/largest_pool.cfg
tests/zfs-tests/tests/functional/largest_pool/largest_pool_001_pos.ksh
tests/zfs-tests/tests/functional/link_count/link_count_001.ksh
tests/zfs-tests/tests/functional/mmap/mmap_write_001_pos.ksh
tests/zfs-tests/tests/functional/mount/umountall_001.ksh
tests/zfs-tests/tests/functional/nopwrite/nopwrite.shlib
tests/zfs-tests/tests/functional/online_offline/online_offline_001_pos.ksh
tests/zfs-tests/tests/functional/online_offline/online_offline_002_neg.ksh
tests/zfs-tests/tests/functional/privilege/privilege_001_pos.ksh
tests/zfs-tests/tests/functional/privilege/privilege_002_pos.ksh
tests/zfs-tests/tests/functional/redundancy/redundancy_004_neg.ksh
tests/zfs-tests/tests/functional/rename_dirs/rename_dirs_001_pos.ksh
tests/zfs-tests/tests/functional/rsend/rsend_019_pos.ksh
tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh
tests/zfs-tests/tests/functional/slog/slog_014_pos.ksh
tests/zfs-tests/tests/functional/snapshot/clone_001_pos.ksh
tests/zfs-tests/tests/functional/snapshot/rollback_003_pos.ksh
tests/zfs-tests/tests/functional/snapshot/snapshot.cfg
tests/zfs-tests/tests/functional/snapshot/snapshot_008_pos.ksh
tests/zfs-tests/tests/functional/snapshot/snapshot_016_pos.ksh
tests/zfs-tests/tests/functional/threadsappend/threadsappend_001_pos.ksh

index 8f2f60bbfb94bab747c707e71102d522c4ee937c..a906decab406dcfb2890874133e5dd48e3342330 100644 (file)
@@ -763,12 +763,12 @@ zfs_deliver_dle(nvlist_t *nvl)
        char *devname;
 
        if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
-               zed_log_msg(LOG_INFO, "zfs_deliver_event: no physpath");
+               zed_log_msg(LOG_INFO, "zfs_deliver_dle: no physpath");
                return (-1);
        }
 
        if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
-               zed_log_msg(LOG_INFO, "zfs_deliver_event: device '%s' not "
+               zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
                    "found", devname);
                return (1);
        }
index 32eefb2f386c4d2d4a811d99280623020212fde2..a778f9a240fe9e23e8b05ffc15a2307bc3ef3247 100644 (file)
@@ -161,7 +161,7 @@ zed_udev_monitor(void *arg)
        struct udev_monitor *mon = arg;
        char *tmp, *tmp2;
 
-       zed_log_msg(LOG_INFO, "Waiting for new uduev disk events...");
+       zed_log_msg(LOG_INFO, "Waiting for new udev disk events...");
 
        while (1) {
                struct udev_device *dev;
index d2148d7adb584652024304f03b44a4c2a60dcf01..55fdd5dbeb047d1930812fc90db7d2126f022f04 100644 (file)
@@ -19,7 +19,7 @@ post_user = root
 post = cleanup
 outputdir = /var/tmp/test_results
 
-# DISABLED: update to use ZFS_ACL_* variables and user_run helper.
+# Update to use ZFS_ACL_* variables and user_run helper.
 # posix_001_pos
 # posix_002_pos
 [tests/functional/acl/posix]
@@ -33,22 +33,15 @@ tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
     'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
     'bootfs_008_pos']
 
-# DISABLED:
-# cache_001_pos - needs investigation
-# cache_010_neg - needs investigation
 [tests/functional/cache]
-tests = ['cache_002_pos', 'cache_003_pos', 'cache_004_neg',
+tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
     'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
-    'cache_009_pos', 'cache_011_pos']
+    'cache_009_pos', 'cache_010_neg', 'cache_011_pos']
 
-# DISABLED: needs investigation
-#[tests/functional/cachefile]
-#tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
-#    'cachefile_004_pos']
-#pre =
-#post =
+[tests/functional/cachefile]
+tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
+    'cachefile_004_pos']
 
-# DISABLED: needs investigation
 # 'sensitive_none_lookup', 'sensitive_none_delete',
 # 'sensitive_formd_lookup', 'sensitive_formd_delete',
 # 'insensitive_none_lookup', 'insensitive_none_delete',
@@ -77,20 +70,15 @@ post =
 [tests/functional/cli_root/zfs]
 tests = ['zfs_001_neg', 'zfs_002_pos', 'zfs_003_neg']
 
-# DISABLED:
-# zfs_clone_005_pos - busy unmount
 [tests/functional/cli_root/zfs_clone]
 tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
-    'zfs_clone_004_pos', 'zfs_clone_006_pos',
+    'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
     'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
     'zfs_clone_010_pos']
 
-# DISABLED:
-# zfs_copies_003_pos - https://github.com/zfsonlinux/zfs/issues/3484
-# zfs_copies_005_neg - https://github.com/zfsonlinux/zfs/issues/3484
 [tests/functional/cli_root/zfs_copies]
-tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_004_neg',
-    'zfs_copies_006_pos']
+tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
+    'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
 
 [tests/functional/cli_root/zfs_create]
 tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
@@ -107,38 +95,29 @@ tests = ['zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
     'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
     'zfs_destroy_016_pos']
 
-# DISABLED:
-# zfs_get_004_pos - https://github.com/zfsonlinux/zfs/issues/3484
-# zfs_get_006_neg - needs investigation
 [tests/functional/cli_root/zfs_get]
 tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
-    'zfs_get_005_neg', 'zfs_get_007_neg', 'zfs_get_008_pos',
-    'zfs_get_009_pos', 'zfs_get_010_neg']
+    'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
+    'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
 
 [tests/functional/cli_root/zfs_inherit]
 tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos']
 
-# DISABLED:
 # zfs_mount_006_pos - https://github.com/zfsonlinux/zfs/issues/4990
-# zfs_mount_007_pos - needs investigation
-# zfs_mount_009_neg - needs investigation
-# zfs_mount_all_001_pos - needs investigation
 [tests/functional/cli_root/zfs_mount]
 tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
-    'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_008_pos',
-    'zfs_mount_010_neg', 'zfs_mount_011_neg', 'zfs_mount_012_neg']
+    'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
+    'zfs_mount_008_pos', 'zfs_mount_009_neg', 'zfs_mount_010_neg',
+    'zfs_mount_011_neg', 'zfs_mount_012_neg', 'zfs_mount_all_001_pos']
 
 [tests/functional/cli_root/zfs_promote]
 tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
     'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
     'zfs_promote_007_neg', 'zfs_promote_008_pos']
 
-# DISABLED:
-# zfs_written_property_001_pos - https://github.com/zfsonlinux/zfs/issues/2441
 [tests/functional/cli_root/zfs_property]
-tests = []
+tests = ['zfs_written_property_001_pos']
 
-# DISABLED:
 # zfs_receive_004_neg - Fails for OpenZFS on illumos
 [tests/functional/cli_root/zfs_receive]
 tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
@@ -148,7 +127,6 @@ tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
     'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
     'receive-o-x_props_override']
 
-# DISABLED:
 # zfs_rename_006_pos - https://github.com/zfsonlinux/zfs/issues/5647
 # zfs_rename_009_neg - https://github.com/zfsonlinux/zfs/issues/5648
 [tests/functional/cli_root/zfs_rename]
@@ -161,11 +139,9 @@ tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
 [tests/functional/cli_root/zfs_reservation]
 tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
 
-# DISABLED:
-# zfs_rollback_001_pos - busy mountpoint behavior
-# zfs_rollback_002_pos - busy mountpoint behavior
 [tests/functional/cli_root/zfs_rollback]
-tests = ['zfs_rollback_003_neg', 'zfs_rollback_004_neg']
+tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
+    'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
 
 [tests/functional/cli_root/zfs_send]
 tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
@@ -195,21 +171,15 @@ tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
     'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
     'zfs_snapshot_009_pos']
 
-# DISABLED:
-# zfs_unmount_005_pos - needs investigation
-# zfs_unmount_009_pos - needs investigation
-# zfs_unmount_all_001_pos - needs investigation
 [tests/functional/cli_root/zfs_unmount]
 tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
-    'zfs_unmount_004_pos', 'zfs_unmount_006_pos',
-    'zfs_unmount_007_neg', 'zfs_unmount_008_neg']
+    'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
+    'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
+    'zfs_unmount_all_001_pos']
 
-# DISABLED:
-# zfs_unshare_002_pos - zfs set sharenfs=off won't unshare if it was already off
-# zfs_unshare_006_pos - some distros come with Samba "user shares" disabled
 [tests/functional/cli_root/zfs_unshare]
-tests = ['zfs_unshare_001_pos', 'zfs_unshare_003_pos',
-    'zfs_unshare_004_neg', 'zfs_unshare_005_neg']
+tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
+    'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos']
 
 [tests/functional/cli_root/zfs_upgrade]
 tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
@@ -312,6 +282,9 @@ post =
 tests = ['zpool_status_001_pos', 'zpool_status_002_pos','zpool_status_003_pos']
 user =
 
+[tests/functional/cli_root/zpool_sync]
+tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
+
 [tests/functional/cli_root/zpool_upgrade]
 tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
     'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
@@ -319,20 +292,14 @@ tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
     'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
     'zpool_upgrade_009_neg']
 
-[tests/functional/cli_root/zpool_sync]
-tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
-
-# DISABLED:
-# zfs_share_001_neg - requires additional dependencies
-# zfs_unshare_001_neg - requires additional dependencies
 [tests/functional/cli_user/misc]
 tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
     'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
     'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
     'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
     'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
-    'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
-    'zfs_unmount_001_neg', 'zfs_upgrade_001_neg',
+    'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
+    'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
     'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
     'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
     'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
@@ -365,27 +332,20 @@ tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
 [tests/functional/ctime]
 tests = ['ctime_001_pos' ]
 
-# DISABLED:
-# zfs_allow_010_pos - https://github.com/zfsonlinux/zfs/issues/5646
 [tests/functional/delegate]
 tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos',
     'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
     'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
-    'zfs_allow_011_neg', 'zfs_allow_012_neg',
+    'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
     'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
     'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
     'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
 
-# DISABLED:
-# devices_001_pos - needs investigation
-# devices_002_neg - needs investigation
 [tests/functional/devices]
-tests = ['devices_003_pos']
+tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
 
-# DISABLED:
-# exec_002_neg - needs investigation
 [tests/functional/exec]
-tests = ['exec_001_pos']
+tests = ['exec_001_pos', 'exec_002_neg']
 
 [tests/functional/fault]
 tests = ['auto_online_001_pos', 'auto_replace_001_pos']
@@ -398,56 +358,40 @@ tests = ['large_dnode_001_pos', 'large_dnode_002_pos', 'large_dnode_003_pos',
          'large_dnode_004_neg', 'large_dnode_005_pos', 'large_dnode_006_pos',
          'large_dnode_007_neg']
 
-# DISABLED: needs investigation
-#[tests/functional/grow_pool]
-#tests = ['grow_pool_001_pos']
-#pre =
-#post =
-
-# DISABLED: needs investigation
-#[tests/functional/grow_replicas]
-#tests = ['grow_replicas_001_pos']
-#pre =
-#post =
-
-# DISABLED:
-# history_004_pos - https://github.com/zfsonlinux/zfs/issues/5664
-# history_006_neg - https://github.com/zfsonlinux/zfs/issues/5657
-# history_008_pos - https://github.com/zfsonlinux/zfs/issues/5658
+[tests/functional/grow_pool]
+tests = ['grow_pool_001_pos']
+
+[tests/functional/grow_replicas]
+tests = ['grow_replicas_001_pos']
+pre =
+post =
+
 [tests/functional/history]
 tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
-    'history_005_neg',
-    'history_007_pos', 'history_009_pos',
+    'history_004_pos', 'history_005_neg', 'history_006_neg',
+    'history_007_pos', 'history_008_pos', 'history_009_pos',
     'history_010_pos']
 
 [tests/functional/inheritance]
 tests = ['inherit_001_pos']
 pre =
 
-# DISABLED:
-# inuse_001_pos, inuse_007_pos - no dumpadm command
-# inuse_005_pos - partition issue
-# inuse_006_pos - partition issue
-# inuse_008_pos - partition issue
-# inuse_009_pos - partition issue
 [tests/functional/inuse]
-tests = ['inuse_004_pos']
+tests = ['inuse_001_pos', 'inuse_003_pos', 'inuse_004_pos',
+    'inuse_005_pos', 'inuse_006_pos', 'inuse_007_pos', 'inuse_008_pos',
+    'inuse_009_pos']
 post =
 
-# DISABLED: needs investigation
-# large_files_001_pos
 [tests/functional/large_files]
-tests = ['large_files_002_pos']
+tests = ['large_files_001_pos', 'large_files_002_pos']
 
-# DISABLED: needs investigation
-#[tests/functional/largest_pool]
-#tests = ['largest_pool_001_pos']
-#pre =
-#post =
+[tests/functional/largest_pool]
+tests = ['largest_pool_001_pos']
+pre =
+post =
 
-# DISABLED: needs investigation
-#[tests/functional/link_count]
-#tests = ['link_count_001']
+[tests/functional/link_count]
+tests = ['link_count_001']
 
 [tests/functional/migration]
 tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
@@ -455,15 +399,11 @@ tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
     'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
     'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
 
-# DISABLED:
-# mmap_write_001_pos - needs investigation
 [tests/functional/mmap]
-tests = ['mmap_read_001_pos']
+tests = ['mmap_write_001_pos', 'mmap_read_001_pos']
 
-# DISABLED:
-# umountall_001 - requires umountall command.
 [tests/functional/mount]
-tests = ['umount_001']
+tests = ['umount_001', 'umountall_001']
 
 [tests/functional/mv_files]
 tests = ['mv_files_001_pos', 'mv_files_002_pos']
@@ -474,17 +414,14 @@ tests = ['nestedfs_001_pos']
 [tests/functional/no_space]
 tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos']
 
-# DISABLED:
-# nopwrite_volume - https://github.com/zfsonlinux/zfs/issues/5510
-# nopwrite_varying_compression - needs investigation
 [tests/functional/nopwrite]
 tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
-    'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync']
+    'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
+    'nopwrite_varying_compression', 'nopwrite_volume']
 
-# DISABLED: needs investigation
-#[tests/functional/online_offline]
-#tests = ['online_offline_001_pos', 'online_offline_002_neg',
-#    'online_offline_003_neg']
+[tests/functional/online_offline]
+tests = ['online_offline_001_pos', 'online_offline_002_neg',
+    'online_offline_003_neg']
 
 [tests/functional/pool_names]
 tests = ['pool_names_001_pos', 'pool_names_002_neg']
@@ -494,9 +431,8 @@ post =
 [tests/functional/poolversion]
 tests = ['poolversion_001_pos', 'poolversion_002_pos']
 
-# DISABLED: requires pfexec command or 'RBAC profile'
-#[tests/functional/privilege]
-#tests = ['privilege_001_pos', 'privilege_002_pos']
+[tests/functional/privilege]
+tests = ['privilege_001_pos', 'privilege_002_pos']
 
 [tests/functional/quota]
 tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
@@ -513,20 +449,17 @@ tests = ['redundancy_001_pos', 'redundancy_002_pos', 'redundancy_003_pos',
 tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
     'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg']
 
-# DISABLED:
-# refreserv_004_pos - needs investigation
+# refreserv_004_pos - Fails for OpenZFS on illumos
 [tests/functional/refreserv]
 tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
     'refreserv_005_pos']
 
-# DISABLED:
-#[tests/functional/rename_dirs]
-#tests = ['rename_dirs_001_pos']
+[tests/functional/rename_dirs]
+tests = ['rename_dirs_001_pos']
 
 [tests/functional/replacement]
 tests = ['replacement_001_pos', 'replacement_002_pos', 'replacement_003_pos']
 
-# DISABLED:
 # reservation_001_pos - https://github.com/zfsonlinux/zfs/issues/4445
 # reservation_013_pos - https://github.com/zfsonlinux/zfs/issues/4444
 # reservation_018_pos - https://github.com/zfsonlinux/zfs/issues/5642
@@ -541,7 +474,6 @@ tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
 [tests/functional/rootpool]
 tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
 
-# DISABLED:
 # rsend_008_pos - https://github.com/zfsonlinux/zfs/issues/6066
 # rsend_009_pos - https://github.com/zfsonlinux/zfs/issues/5887
 [tests/functional/rsend]
@@ -562,30 +494,21 @@ tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos',
 tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
     'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
 
-# DISABLED: Scripts need to be updated.
-# slog_012_neg - needs investigation
-# slog_013_pos - requires 'lofiadm' command.
-# slog_014_pos - needs investigation
 [tests/functional/slog]
 tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
     'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
-    'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_015_pos']
+    'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
+    'slog_013_pos', 'slog_014_pos', 'slog_015_pos']
 
-# DISABLED:
-# clone_001_pos - https://github.com/zfsonlinux/zfs/issues/3484
-# rollback_003_pos - Hangs in unmount and spins.
-# snapshot_016_pos - Problem with automount
-# snapshot_008_pos - https://github.com/zfsonlinux/zfs/issues/5784
 [tests/functional/snapshot]
-tests = ['rollback_001_pos', 'rollback_002_pos',
-    'snapshot_001_pos', 'snapshot_002_pos',
+tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
+    'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
     'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
-    'snapshot_006_pos', 'snapshot_007_pos',
+    'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
     'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
     'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
-    'snapshot_015_pos', 'snapshot_017_pos']
+    'snapshot_015_pos', 'snapshot_016_pos', 'snapshot_017_pos']
 
-# DISABLED:
 # snapused_004_pos - https://github.com/zfsonlinux/zfs/issues/5513
 [tests/functional/snapused]
 tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
@@ -594,9 +517,8 @@ tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
 [tests/functional/sparse]
 tests = ['sparse_001_pos']
 
-# DISABLED: needs investigation
-#[tests/functional/threadsappend]
-#tests = ['threadsappend_001_pos']
+[tests/functional/threadsappend]
+tests = ['threadsappend_001_pos']
 
 [tests/functional/tmpfile]
 tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos']
@@ -617,18 +539,14 @@ tests = [
     'userspace_001_pos', 'userspace_002_pos', 'userspace_003_pos',
     'groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos' ]
 
-# DISABLED:
 # vdev_zaps_007_pos -- fails due to a pre-existing issue with zpool split
 [tests/functional/vdev_zaps]
 tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
     'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos']
 
-# DISABLED:
-# write_dirs_002_pos - needs investigation
 [tests/functional/write_dirs]
-tests = ['write_dirs_001_pos']
+tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
 
-# DISABLED: No 'runat' command, replace the Linux equivalent and add xattrtest
 #[tests/functional/xattr]
 #tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
 #    'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg', 'xattr_008_pos',
index 759889e70d82de56cf504c107f785cb60c2dae1f..64b1777a9ae8db37b54af6f6d8f602d7f87d52d9 100644 (file)
 extern "C" {
 #endif
 
+#ifndef _FILE_OFFSET_BITS
+#define        _FILE_OFFSET_BITS 64
+#endif
+
+#ifndef _LARGEFILE64_SOURCE
+#define        _LARGEFILE64_SOURCE
+#endif
+
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <errno.h>
index f9431300beb6f7821965017147eb87927022ceb0..69096752efa2a7d9ffd702ab56053f984a37beb0 100644 (file)
@@ -25,7 +25,7 @@
  */
 
 /*
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
  */
 
 #include <stdio.h>
@@ -87,11 +87,9 @@ main(int argc, char *argv[])
                exit(3);
        }
 
-       while (i < count) {
+       for (i = 0; count == 0 || i < count; i++) {
                (void) do_write(fd);
                (void) do_trunc(fd);
-
-               i++;
        }
 
        (void) close(fd);
@@ -188,7 +186,7 @@ do_write(int fd)
                exit(5);
        }
 
-       strcpy(buf, "ZFS Test Suite Truncation Test");
+       (void) strcpy(buf, "ZFS Test Suite Truncation Test");
        if (write(fd, buf, bsize) < bsize) {
                perror("write");
                exit(6);
index 286232da5b1919ddbdd0b3ad2f115841ae333a34..5e6a1866059b1aed0a2dbe41c8625e69695b5155 100644 (file)
@@ -108,8 +108,8 @@ main(int argc, char **argv)
 
        write_ret = write(fd, mybuf, 1);
        if (write_ret < 0) {
-               if (errno == EFBIG) {
-                       (void) printf("write errno=EFBIG: success\n");
+               if (errno == EFBIG || errno == EINVAL) {
+                       (void) printf("write errno=EFBIG|EINVAL: success\n");
                        err = 0;
                } else {
                        err = errno;
index 6a48a9c0459eb939bcf0e9fabeacbae383c9359e..db90adbdca10dc9cea12b21583b2a3f14837676c 100644 (file)
@@ -38,7 +38,7 @@
 int
 main(int argc, char *argv[])
 {
-       int fd;
+       int error, fd;
        struct stat statbuf;
 
        if (argc != 2) {
@@ -51,18 +51,21 @@ main(int argc, char *argv[])
        errno = 0;
 
        if ((fd = open(argv[1], O_RDONLY)) < 0) {
+               error = errno;
                perror("open");
-               return (errno);
+               return (error);
        }
        if (fstat(fd, &statbuf) < 0) {
+               error = errno;
                perror("fstat");
-               return (errno);
+               return (error);
        }
 
        if (mmap(0, statbuf.st_size,
            PROT_EXEC, MAP_SHARED, fd, 0) == MAP_FAILED) {
+               error = errno;
                perror("mmap");
-               return (errno);
+               return (error);
        }
 
        return (0);
index e458f4197511634afc45b9f354b2ee3045b29660..968ab3cd1a5b0aa0111f32894df23a6ed5934a21 100644 (file)
@@ -44,6 +44,7 @@ export SYSTEM_FILES='arp
     getconf
     getent
     getfacl
+    getfattr
     grep
     groupadd
     groupdel
@@ -58,6 +59,7 @@ export SYSTEM_FILES='arp
     ksh
     ln
     logname
+    losetup
     ls
     lsblk
     lsmod
@@ -72,6 +74,7 @@ export SYSTEM_FILES='arp
     mpstat
     mv
     net
+    nproc
     openssl
     parted
     pax
index 4556b2cf684436470e234bd08a12d201ae662292..84edce7377bc9ccbed76a601332ec90a0550fef7 100644 (file)
@@ -172,7 +172,11 @@ if is_linux; then
        ZVOL_RDEVDIR="/dev/zvol"
        DEV_RDSKDIR="/dev"
        DEV_MPATHDIR="/dev/mapper"
+
        ZEDLET_DIR="/var/tmp/zed"
+       VDEVID_CONF="$ZEDLET_DIR/vdev_id.conf"
+       VDEVID_CONF_ETC="/etc/zfs/vdev_id.conf"
+
 
        NEWFS_DEFAULT_FS="ext2"
 else
@@ -191,4 +195,4 @@ else
 fi
 export unpack_opts pack_opts verbose unpack_preserve pack_preserve \
        ZVOL_DEVDIR ZVOL_RDEVDIR NEWFS_DEFAULT_FS DEV_RDSKDIR DEV_MPATHDIR \
-       ZEDLET_DIR
+       ZEDLET_DIR VDEVID_CONF VDEVID_CONF_ETC
index 5a9da2735074494e92965fce243d898e1db8244b..56f765d204c245bad0bff735ddd6814b1ffceaf0 100644 (file)
@@ -751,6 +751,8 @@ function zero_partitions #<whole_disk_name>
                        set_partition $i "" 0mb $diskname
                done
        fi
+
+       return 0
 }
 
 #
@@ -3252,32 +3254,84 @@ function wait_replacing #pool
        done
 }
 
+#
+# Setup custom environment for the ZED.
+#
+function zed_setup
+{
+       if ! is_linux; then
+               return
+       fi
+
+       if [[ ! -d $ZEDLET_DIR ]]; then
+               log_must mkdir $ZEDLET_DIR
+       fi
+
+       if [[ ! -e $VDEVID_CONF ]]; then
+               log_must touch $VDEVID_CONF
+       fi
+
+       if [[ -e $VDEVID_CONF_ETC ]]; then
+               log_fail "Must not have $VDEVID_CONF_ETC file present on system"
+       fi
+
+       # Create a symlink for /etc/zfs/vdev_id.conf file.
+       log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
+
+       # Setup minimal ZED configuration.  Individual test cases should
+       # add additional ZEDLETs as needed for their specific test.
+       log_must cp ${ZEDLETDIR}/zed.rc $ZEDLET_DIR
+       log_must cp ${ZEDLETDIR}/zed-functions.sh $ZEDLET_DIR
+       log_must cp ${ZEDLETDIR}/all-syslog.sh $ZEDLET_DIR
+
+       log_must zpool events -c
+}
+
+#
+# Cleanup custom ZED environment.
+#
+function zed_cleanup
+{
+       if ! is_linux; then
+               return
+       fi
+
+       log_must rm -f ${ZEDLET_DIR}/zed.rc
+       log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
+       log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
+       log_must rm -f ${ZEDLET_DIR}/zed.pid
+       log_must rm -f ${ZEDLET_DIR}/zedlog
+       log_must rm -f ${ZEDLET_DIR}/state
+       log_must rm -f $VDEVID_CONF_ETC
+       log_must rm -f $VDEVID_CONF
+       rmdir $ZEDLET_DIR
+}
+
 #
 # Check if ZED is currently running, if not start ZED.
 #
 function zed_start
 {
-       if is_linux; then
-               # ZEDLET_DIR=/var/tmp/zed
-               if [[ ! -d $ZEDLET_DIR ]]; then
-                       log_must mkdir $ZEDLET_DIR
-               fi
-
-               # Verify the ZED is not already running.
-               pgrep -x zed > /dev/null
-               if (($? == 0)); then
-                       log_fail "ZED already running"
-               fi
+       if ! is_linux; then
+               return
+       fi
 
-               # ZEDLETDIR=</etc/zfs/zed.d | ${SRCDIR}/cmd/zed/zed.d>
-               log_must cp ${ZEDLETDIR}/all-syslog.sh $ZEDLET_DIR
+       # ZEDLET_DIR=/var/tmp/zed
+       if [[ ! -d $ZEDLET_DIR ]]; then
+               log_must mkdir $ZEDLET_DIR
+       fi
 
-               log_note "Starting ZED"
-               # run ZED in the background and redirect foreground logging
-               # output to zedlog
-               log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
-                   "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"
+       # Verify the ZED is not already running.
+       pgrep -x zed > /dev/null
+       if (($? == 0)); then
+               log_fail "ZED already running"
        fi
+
+       log_note "Starting ZED"
+       # run ZED in the background and redirect foreground logging
+       # output to zedlog
+       log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
+           "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"
 }
 
 #
@@ -3285,16 +3339,13 @@ function zed_start
 #
 function zed_stop
 {
-       if is_linux; then
-               if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
-                       zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
-                       log_must kill $zedpid
-               fi
-               log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
-               log_must rm -f ${ZEDLET_DIR}/zed.pid
-               log_must rm -f ${ZEDLET_DIR}/zedlog
-               log_must rm -f ${ZEDLET_DIR}/state
-               log_must rmdir $ZEDLET_DIR
+       if ! is_linux; then
+               return
+       fi
+
+       if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
+               zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
+               log_must kill $zedpid
        fi
 }
 
index 42fa0ebf66cf6a76f4f410d540f862c6f7e6dbd8..98d6c4545a7e424a9a762fe388e7755858484731 100755 (executable)
 
 verify_runnable "global"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 function cleanup_testenv
 {
        cleanup
        if [[ -n $lofidev ]]; then
-               log_must lofiadm -d $lofidev
+               if is_linux; then
+                       losetup -d $lofidev
+               else
+                       lofiadm -d $lofidev
+               fi
        fi
 }
 
@@ -59,32 +68,38 @@ TESTVOL=testvol1$$
 dsk1=${DISKS%% *}
 log_must zpool create $TESTPOOL ${DISKS#$dsk1}
 
-if is_linux; then
-       SLICE="p1"
-else
-       SLICE="s0"
-fi
-
 # Add normal ${DEV_RDSKDIR} device
-log_mustnot zpool add $TESTPOOL cache ${DEV_RDSKDIR}/${dsk1}${SLICE}
-#log_must verify_cache_device $TESTPOOL $dsk1 'ONLINE'
+log_must zpool add $TESTPOOL cache \
+    ${DEV_RDSKDIR}/${dsk1}${SLICE_PREFIX}${SLICE0}
+log_must verify_cache_device $TESTPOOL $dsk1 'ONLINE'
 
-# Add nomal file
+# Add normal file
 log_mustnot zpool add $TESTPOOL cache $VDEV2
 
-# Add /dev/rlofi device
-lofidev=${VDEV2%% *}
-log_must lofiadm -a $lofidev
-lofidev=$(lofiadm $lofidev)
-log_mustnot zpool add $TESTPOOL cache "/dev/rlofi/${lofidev#/dev/lofi/}"
-if [[ -n $lofidev ]]; then
+# Add loopback device (allowed under Linux; /dev/rlofi on illumos)
+if is_linux; then
+       lofidev=$(losetup -f)
+       lofidev=${lofidev##*/}
+       log_must losetup $lofidev ${VDEV2%% *}
+       log_must zpool add $TESTPOOL cache $lofidev
+       log_must zpool remove $TESTPOOL $lofidev
+       log_must losetup -d $lofidev
+       lofidev=""
+else
+       lofidev=${VDEV2%% *}
+       log_must lofiadm -a $lofidev
+       lofidev=$(lofiadm $lofidev)
+       log_mustnot zpool add $TESTPOOL cache "/dev/rlofi/${lofidev#/dev/lofi/}"
        log_must lofiadm -d $lofidev
        lofidev=""
 fi
 
-# Add ${ZVOL_RDEVDIR} device
-log_must zpool create $TESTPOOL2 $VDEV2
-log_must zfs create -V $SIZE $TESTPOOL2/$TESTVOL
-log_mustnot zpool add $TESTPOOL cache ${ZVOL_RDEVDIR}/$TESTPOOL2/$TESTVOL
+# Add /dev/zvol/rdsk device (allowed under Linux)
+if ! is_linux; then
+       log_must zpool create $TESTPOOL2 $VDEV2
+       log_must zfs create -V $SIZE $TESTPOOL2/$TESTVOL
+       log_mustnot zpool add $TESTPOOL cache \
+           ${ZVOL_RDEVDIR}/$TESTPOOL2/$TESTVOL
+fi
 
 log_pass "Cache device can only be block devices."
index 4252c70b421797b8bd5bac4ba14c0b7d628629a3..89d6a9b860a31a1091a70f6fb026f18108e12eaa 100644 (file)
@@ -1,5 +1,7 @@
 pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cachefile
 dist_pkgdata_SCRIPTS = \
+       cleanup.ksh \
+       setup.ksh \
        cachefile.cfg \
        cachefile.kshlib \
        cachefile_001_pos.ksh \
index 13791981c653339c6012a4e1f46764827caec67f..d93ec3ed6f3b122b6bfa7fabe04842f2fd55857b 100644 (file)
@@ -29,5 +29,9 @@
 #
 
 export CPATH="/etc/zfs/zpool.cache"
-export CPATH1=/var/tmp/cachefile.$$
-export CPATH2=$TEST_BASE_DIR/cachefile.$$
+export CPATH1=$TEST_BASE_DIR/cachefile.1.$$
+export CPATH2=$TEST_BASE_DIR/cachefile.2.$$
+
+export DISKSARRAY=$DISKS
+export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
+set_device_dir
diff --git a/tests/zfs-tests/tests/functional/cachefile/cleanup.ksh b/tests/zfs-tests/tests/functional/cachefile/cleanup.ksh
new file mode 100755 (executable)
index 0000000..79cd6e9
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+default_cleanup
diff --git a/tests/zfs-tests/tests/functional/cachefile/setup.ksh b/tests/zfs-tests/tests/functional/cachefile/setup.ksh
new file mode 100755 (executable)
index 0000000..47c7893
--- /dev/null
@@ -0,0 +1,46 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cachefile/cachefile.cfg
+. $STF_SUITE/tests/functional/cachefile/cachefile.kshlib
+
+for pool in "$TESTPOOL" "$TESTPOOL2" "$TESTPOOL3" ; do
+       if poolexists $pool ; then
+               destroy_pool $pool
+       fi
+done
+
+for file in $CPATH1 $CPATH2 ; do
+       if [[ -f $file ]] ; then
+               log_must rm $file
+       fi
+done
+
+if pool_in_cache $TESTPOOL; then
+       log_unsupported "Skipping test group due to existing pool"
+fi
index 879f837a6688bc569d3b5921e68214c667e63fd2..afa8b46a6f7c2facfeba68dbc082f928a15a8b19 100755 (executable)
@@ -61,7 +61,10 @@ log_assert "'zfs clone -o property=value -V size volume' can successfully" \
 log_must zfs snapshot $SNAPFS1
 typeset -i i=0
 while (( $i < ${#RW_VOL_CLONE_PROP[*]} )); do
-       log_must zfs clone -o ${RW_VOL_CLONE_PROP[$i]} $SNAPFS1 $TESTPOOL/$TESTCLONE
+       log_must zfs clone -o ${RW_VOL_CLONE_PROP[$i]} $SNAPFS1 \
+           $TESTPOOL/$TESTCLONE
+       block_device_wait
+
        datasetexists $TESTPOOL/$TESTCLONE || \
                log_fail "zfs clone $TESTPOOL/$TESTCLONE fail."
        propertycheck $TESTPOOL/$TESTCLONE ${RW_VOL_CLONE_PROP[i]} || \
index ae81ae964e7bd0812f64f83b7a7eccbefeec7f50..608d07413efd27b350292662e98feeafc7bab444 100755 (executable)
 
 verify_runnable "global"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 function cleanup
 {
        if poolexists $ZPOOL_VERSION_1_NAME; then
@@ -57,11 +62,12 @@ function cleanup
 log_assert "Verify that copies cannot be set with pool version 1"
 log_onexit cleanup
 
-cp $STF_SUITE/tests/functional/cli_root/zpool_upgrade/blockfiles/$ZPOOL_VERSION_1_FILES $TESTDIR
+cp $STF_SUITE/tests/functional/cli_root/zpool_upgrade/$ZPOOL_VERSION_1_FILES $TESTDIR
 bunzip2 $TESTDIR/$ZPOOL_VERSION_1_FILES
 log_must zpool import -d $TESTDIR $ZPOOL_VERSION_1_NAME
 log_must zfs create $ZPOOL_VERSION_1_NAME/$TESTFS
 log_must zfs create -V 1m $ZPOOL_VERSION_1_NAME/$TESTVOL
+block_device_wait
 
 for val in 3 2 1; do
        for ds in $ZPOOL_VERSION_1_NAME/$TESTFS $ZPOOL_VERSION_1_NAME/$TESTVOL; do
index f57628c163f5bd61aac2e1d25b50ffcf0716f8de..609d346cbbcc187d9b325fa39f7d264bb726fb99 100755 (executable)
 
 verify_runnable "both"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 function cleanup
 {
        [[ -e $propfile ]] && rm -f $propfile
index d006748e55aba1e15affb2d93df971c49cd9aac6..a916858490690275bc1001bc2f7743e186652c40 100755 (executable)
@@ -53,6 +53,8 @@ set -A  bad_combine "ALL" "\-R all" "-P all" "-h all" "-rph all" "-RpH all" "-Pr
                "all -rph $TESTPOOL" "all,available,reservation $TESTPOOL" \
                "all $TESTPOOL?" "all $TESTPOOL*" "all nonexistpool"
 
+export POSIXLY_CORRECT=1
+
 typeset -i i=0
 while (( i < ${#bad_combine[*]} ))
 do
@@ -61,4 +63,6 @@ do
        (( i = i + 1 ))
 done
 
+unset POSIXLY_CORRECT
+
 log_pass "'zfs get all' fails with invalid combinations scenarios as expected."
index fe275672e32b3adcf1d298544748c7dd630822f5..e2ef0bf00db0f5332cc9368162061f2758532814 100755 (executable)
@@ -76,11 +76,19 @@ function get_reverse_option
        typeset prop=$2
 
        # Define property value: "reverse if value=on" "reverse if value=off"
-       set -A values "noatime"   "atime" \
-                     "nodevices" "devices" \
-                     "noexec"    "exec" \
-                     "rw"        "ro" \
-                     "nosetuid"  "setuid"
+       if is_linux; then
+               set -A values "noatime"   "atime" \
+                             "nodev"     "dev" \
+                             "noexec"    "exec" \
+                             "rw"        "ro" \
+                             "nosuid"    "suid"
+       else
+               set -A values "noatime"   "atime" \
+                             "nodevices" "devices" \
+                             "noexec"    "exec" \
+                             "rw"        "ro" \
+                             "nosetuid"  "setuid"
+       fi
 
        typeset -i i=0
        while (( i < ${#properties[@]} )); do
index 6d6365ddb001421a0e55e6d7be1da749a7b77e52..bf94274ddbf85283923066c2ae4087c411471b43 100755 (executable)
@@ -72,8 +72,7 @@ for i in 1 2 3; do
        log_must zfs snapshot $TESTPOOL/$TESTFS1@snap$i
        log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/testfile.$i bs=1M \
            count=$blocks
-       log_must sync
-       log_must sleep 10
+       sync_pool
        written=$(get_prop written $TESTPOOL/$TESTFS1)
        ((expected_written=blocks * mb_block))
        within_percent $written $expected_written 99.5 || \
@@ -117,8 +116,7 @@ log_note "delete data"
 before_written=$(get_prop written $TESTPOOL/$TESTFS1)
 log_must rm /$TESTPOOL/$TESTFS1/testfile.3
 snap3_size=0
-log_must sync
-log_must sleep 10
+sync_pool
 written=$(get_prop written $TESTPOOL/$TESTFS1)
 writtenat3=$(get_prop written@snap3 $TESTPOOL/$TESTFS1)
 [[ $written -eq $writtenat3 ]] || \
@@ -140,8 +138,7 @@ log_note "write data"
 blocks=20
 log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/testfile.3 bs=1M \
     count=$blocks
-log_must sync
-log_must sleep 10
+sync_pool
 written=$(get_prop written $TESTPOOL/$TESTFS1)
 writtenat1=$(get_prop written@snap1 $TESTPOOL/$TESTFS1)
 writtenat2=$(get_prop written@snap2 $TESTPOOL/$TESTFS1)
@@ -167,7 +164,7 @@ log_must zfs clone $TESTPOOL/$TESTFS1@snap1 $TESTPOOL/$TESTFS1/snap1.clone
 log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/snap1.clone/testfile bs=1M \
     count=40
 after_clone=$(get_prop written $TESTPOOL/$TESTFS1)
-[[ $before_clone -eq $after_clone ]] || \
+within_percent $before_clone $after_clone 99.5 || \
     log_fail "unexpected written for clone $before_clone $after_clone"
 
 log_note "deleted snapshot"
@@ -177,8 +174,7 @@ typeset -l snap_before_written2=$(get_prop_mb written $TESTPOOL/$TESTFS1@snap2)
 typeset -l snap_before_written3=$(get_prop_mb written $TESTPOOL/$TESTFS1@snap3)
 log_must zfs destroy $TESTPOOL/$TESTFS1@snap2
 log_mustnot snapexists $TESTPOOL/$TESTFS1@snap2
-log_must sync
-log_must sleep 10
+sync_pool
 written1=$(get_prop_mb written@snap1 $TESTPOOL/$TESTFS1)
 written3=$(get_prop_mb written@snap3 $TESTPOOL/$TESTFS1)
 [[ $before_written1 -eq $written1 && $before_written3 -eq $written3 ]] || \
@@ -204,8 +200,7 @@ for ds in $datasets; do
        [[ $writtenat -ne 0 ]] && \
            log_fail "Unexpected written@ value"
        log_must dd if=/dev/urandom of=/$ds/testfile bs=1M count=$blocks
-       log_must sync
-       log_must sleep 10
+       sync_pool
        writtenat=$(get_prop written@now $ds)
        ((expected_writtenat = blocks * mb_block))
        within_percent $writtenat $expected_writtenat 0.1 || \
@@ -219,8 +214,7 @@ for ds in $datasets; do
        log_must zfs snapshot $ds@current
        log_must dd if=/dev/urandom of=/$ds/testfile bs=1M \
            count=$blocks
-       log_must sync
-       log_must sleep 10
+       sync_pool
 done
 recursive_output=$(zfs get -r written@current $TESTPOOL | \
     grep -v $TESTFS1@ | grep -v $TESTFS2@ | grep -v $TESTFS3@ | \
index 90e7b8340a039d98d0780e2c9d706b41904bcecd..5511f6ad6db6531a2a79c99c032f7af6d9c29a95 100755 (executable)
@@ -72,10 +72,15 @@ function test_n_check #opt num_snap_clone num_rollback
                log_fail "Unsupported testing condition."
 
        # Clean up the test environment
+       if pgrep -x dd 2>/dev/null; then
+               pkill -x dd
+       fi
+
        datasetexists $FS && log_must zfs destroy -Rf $FS
        if datasetexists $VOL; then
-               df -lhF ufs "$ZVOL_DEVDIR/$VOL" > /dev/null 2>&1
-               (( $? == 0 )) && log_must umount -f $TESTDIR1
+               if ismounted $TESTDIR1 $NEWFS_DEFAULT_FS; then
+                       log_must umount -f $TESTDIR1
+               fi
 
                log_must zfs destroy -Rf $VOL
        fi
@@ -117,7 +122,9 @@ function test_n_check #opt num_snap_clone num_rollback
                fi
 
                if [[ $dtst == $VOL ]]; then
-                       log_must umount -f $TESTDIR1
+                       if ismounted $TESTDIR1 $NEWFS_DEFAULT_FS; then
+                               log_must umount -f $TESTDIR1
+                       fi
                        log_must zfs rollback $opt $dtst@$snap_point
                        log_must mount \
                                $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL $TESTDIR1
index 8f79113dcf8e6ffecaafbda792c5a3080ed6d3e4..6097afa81e9a3a29295a0852eccaa2b7282a21d8 100644 (file)
@@ -83,7 +83,7 @@ function setup_snap_env
 
                # Make sure the ufs|ext2 filesystem hasn't been mounted,
                # then mount the new ufs|ext2 filesystem.
-               if ! ismounted "$ZVOL_DEVDIR/$VOL" $NEWFS_DEFAULT_FS; then
+               if ! ismounted $TESTDIR1 $NEWFS_DEFAULT_FS; then
                        log_must mount \
                                $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL $TESTDIR1
                fi
@@ -133,6 +133,7 @@ function setup_snap_env
                        if [[ $createclone == "true" ]]; then
                                if datasetnonexists $clone; then
                                        log_must zfs clone $snap $clone
+                                       block_device_wait
                                fi
                        fi
                        (( ind += 1 ))
@@ -157,7 +158,7 @@ function cleanup_env
        typeset dtst
        typeset snap
 
-       pkill ${DD##*/}
+       pkill -x dd
 
        if ismounted $TESTDIR1 $NEWFS_DEFAULT_FS; then
                log_must umount -f $TESTDIR1
index f24ce7f922ca06a93827dbe5a5bb2ad15b80aa52..9f369e372dee283e0be124d026a61838ad36f00a 100755 (executable)
@@ -46,7 +46,7 @@ verify_runnable "both"
 
 function cleanup
 {
-       poolexists $TESTPOOL && log_must zpool export $TESTPOOL
+       poolexists $TESTPOOL && log_must_busy zpool export $TESTPOOL
        log_must zpool import $TESTPOOL
 
        datasetexists $TESTPOOL@snap && \
index e21a2954d0c61f6f004d59955591707752af9244..eaea4696dfb45208a54b666c18707d40a3a74125 100755 (executable)
@@ -101,7 +101,11 @@ typeset -i i=0
 
 while (( i <  ${#options[*]} )); do
        if [[ ${options[i]} == "-f" ]]; then
-               do_unmount_multiple "${options[i]}"
+               if is_linux; then
+                       do_unmount_multiple "${options[i]}" 1
+               else
+                       do_unmount_multiple "${options[i]}"
+               fi
        else
                do_unmount_multiple "${options[i]}" 1
        fi
index b1d98e68bc4f7148e952e363389edc77317140db..0ed14a99fc27062f12f67630c47067e980e2b8fa 100755 (executable)
@@ -96,8 +96,13 @@ for fs in $TESTPOOL/$TESTFS $TESTPOOL ; do
        log_must cd .zfs/snapshot/$TESTSNAP
 
        log_mustnot zfs unmount -a
-       log_must zfs unmount -fa
-       log_mustnot ls
+       if is_linux; then
+               log_mustnot zfs unmount -fa
+               log_must ls
+       else
+               log_must zfs unmount -fa
+               log_mustnot ls
+       fi
        log_must cd /
 
        log_must zfs mount -a
@@ -105,8 +110,13 @@ for fs in $TESTPOOL/$TESTFS $TESTPOOL ; do
        log_must cd .zfs/snapshot/$TESTSNAP
 
        if is_global_zone || [[ $fs != $TESTPOOL ]] ; then
-               log_must zfs destroy -rf $fs
-               log_mustnot ls
+               if is_linux; then
+                       log_mustnot zfs destroy -rf $fs
+                       log_must ls
+               else
+                       log_must zfs destroy -rf $fs
+                       log_mustnot ls
+               fi
                log_must cd /
        fi
 
@@ -114,8 +124,13 @@ for fs in $TESTPOOL/$TESTFS $TESTPOOL ; do
 done
 
 if is_global_zone ; then
-       log_must zpool destroy -f $TESTPOOL
-       log_mustnot ls
+       if is_linux; then
+               log_mustnot zpool destroy -f $TESTPOOL
+               log_must ls
+       else
+               log_must zpool destroy -f $TESTPOOL
+               log_mustnot ls
+       fi
        log_must cd /
 fi
 
index b1be2093eda8894204d021deec6ee05ff269206c..b5a01b53aeab934ba68ca198b074de17f3871fb7 100755 (executable)
@@ -181,6 +181,10 @@ for opt in "-a" "-fa"; do
        fi
 
        export __ZFS_POOL_RESTRICT="$TESTPOOL"
+       if [[ $opt == "-fa" ]] && is_linux; then
+               log_mustnot zfs unmount $opt
+               cd /tmp
+       fi
        log_must zfs unmount $opt
        unset __ZFS_POOL_RESTRICT
 
index 1620c0bd7161d2bcefda9e76393952245cc57a38..6a9c72311c74a95cc123e35506137ab5cbb6f4c8 100755 (executable)
 
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "zfs set sharenfs=off won't unshare if already off"
+fi
+
 function cleanup
 {
        typeset -i i=0
index a6ff2010234d0904949cd2aa330ed085d22dacf2..b4318020cc7f1fcb65aa442c52ee34755e1a8871 100755 (executable)
 
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "some distros come with Samba "user shares" disabled"
+fi
+
 function cleanup
 {
        log_must zfs unshare -a
index 7b6a029dc9d8a8255455cbe04a4b499b99ff5b72..43756ba4046fd2c856847cf762cea64e9313b8ce 100755 (executable)
@@ -45,9 +45,9 @@
 
 verify_runnable "global"
 
-# See issue: https://github.com/zfsonlinux/zfs/issues/6065
+# https://github.com/zfsonlinux/zfs/issues/6145
 if is_linux; then
-       log_unsupported "Creating a pool containing a zvol may deadlock"
+       log_unsupported "Test case occasionally fails"
 fi
 
 function cleanup
index 428765e2ebdddb102db82628e1b511a2232696e9..687eef5972e8c4d65a38e57e6130833a01971b42 100755 (executable)
 
 verify_runnable "global"
 
+# https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 function cleanup
 {
        poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2
@@ -72,15 +77,10 @@ log_onexit cleanup
 partition_disk $SLICE_SIZE $DISK 2
 
 create_pool "$TESTPOOL" "${DISK}${SLICE_PREFIX}${SLICE0}"
-
-if is_linux; then
-       # Layering a pool on a zvol can deadlock and isn't supported.
-       create_pool "$TESTPOOL2" "${DISK}${SLICE_PREFIX}${SLICE1}"
-else
-       create_pool "$TESTPOOL1" "${DISK}${SLICE_PREFIX}${SLICE1}"
-       log_must zfs create -s -V $VOLSIZE $TESTPOOL1/$TESTVOL
-       create_pool "$TESTPOOL2" "${ZVOL_DEVDIR}/$TESTPOOL1/$TESTVOL"
-fi
+create_pool "$TESTPOOL1" "${DISK}${SLICE_PREFIX}${SLICE1}"
+log_must zfs create -s -V $VOLSIZE $TESTPOOL1/$TESTVOL
+block_device_wait
+create_pool "$TESTPOOL2" "${ZVOL_DEVDIR}/$TESTPOOL1/$TESTVOL"
 
 typeset -i i=0
 while (( i < ${#datasets[*]} )); do
index d00826bb276f6f9eba23e8be9a35b8b676a0f937..059c3839050b5189dc6f3c66f8811c4886c4dff6 100755 (executable)
@@ -29,4 +29,7 @@
 
 verify_runnable "global"
 
+zed_stop
+zed_cleanup
+
 default_cleanup
index 2069e97017eb9284df939c78ad01a78dbef49158..7d6a43ef5280dfdd69322b03d01eeb481f5525c0 100755 (executable)
@@ -29,6 +29,9 @@
 
 verify_runnable "global"
 
+zed_setup
+zed_start
+
 DISK=${DISKS%% *}
 
 default_setup $DISK
index 8cdef954e538f46114bfbf17f8572883141bb274..7d0f32b9e269f877200720f29a795bcfb64050fc 100755 (executable)
@@ -48,9 +48,9 @@
 
 verify_runnable "global"
 
-# See issue: https://github.com/zfsonlinux/zfs/issues/6065
+# See issue: https://github.com/zfsonlinux/zfs/issues/5771
 if is_linux; then
-       log_unsupported "Creating a pool containing a zvol may deadlock"
+       log_unsupported "Requires additional ZED support"
 fi
 
 function cleanup
@@ -73,6 +73,7 @@ log_assert "zpool can be autoexpanded after set autoexpand=on on LUN expansion"
 for i in 1 2 3; do
        log_must zfs create -V $org_size $VFS/vol$i
 done
+block_device_wait
 
 for type in " " mirror raidz raidz2; do
 
index 002b07cee0040256935c67efe5889d77e69fe635..0134db3496a09f0a821f533ce5a2734fb1ef9e5e 100755 (executable)
@@ -48,9 +48,9 @@
 
 verify_runnable "global"
 
-# See issue: https://github.com/zfsonlinux/zfs/issues/6065
+# See issue: https://github.com/zfsonlinux/zfs/issues/5771
 if is_linux; then
-       log_unsupported "Creating a pool containing a zvol may deadlock"
+       log_unsupported "Requires additional ZED support"
 fi
 
 function cleanup
@@ -73,6 +73,7 @@ log_assert "zpool can not expand if set autoexpand=off after LUN expansion"
 for i  in 1 2 3; do
        log_must zfs create -V $org_size $VFS/vol$i
 done
+block_device_wait
 
 for type in " " mirror raidz raidz2; do
        log_must zpool create $TESTPOOL1 $type ${ZVOL_DEVDIR}/$VFS/vol1 \
index f04b66413632704017e79453a0c6480e5cb898d0..97325e9b1f9db75ea753f25f55d725430529b3f6 100755 (executable)
 
 verify_runnable "global"
 
+# https://github.com/zfsonlinux/zfs/issues/6141
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 function cleanup
 {
        for config in $CONFIGS; do
index 1cc1783c8abb9c9bb067c429a9e8fe11b74230a2..3f120c2438f7306e3d84017eaf1766dac5137a8e 100755 (executable)
 
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "Requires additional dependencies"
+fi
+
 log_assert "zfs share returns an error when run as a user"
 
 if is_shared $TESTDIR/unshared
index 48d4d5294e73684b6364a20cd0b410fdd494d7e4..72ed1f5d3e9fdd5c1c4a41b440549e5c65304646 100755 (executable)
 
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "Requires additional dependencies"
+fi
+
 log_assert "zfs unshare returns an error when run as a user"
 
 #  verify that the filesystem was shared initially
index 8e6bd46a27614c44719c31ad7d0f13b59150ceff..8dd601850884146fc5b250aaed41a03ea091190f 100644 (file)
@@ -431,7 +431,7 @@ function verify_fs_receive
                log_must zfs snapshot $dtstsnap
 
                log_must eval "zfs send $dtstsnap > $bak_root"
-               log_must zfs destroy -rf $dtst
+               log_must_busy zfs destroy -rf $dtst
 
                user_run $user eval "zfs receive $dtst < $bak_root"
                if datasetexists $dtstsnap ; then
@@ -461,10 +461,10 @@ function verify_fs_receive
 
                # check the data integrity
                log_must eval "zfs send $dtstsnap > $bak_user"
-               log_must zfs destroy -rf $dtst
+               log_must_busy zfs destroy -rf $dtst
                log_must eval "zfs receive $dtst < $bak_root"
                log_must eval "zfs send $dtstsnap > $bak_root"
-               log_must zfs destroy -rf $dtst
+               log_must_busy zfs destroy -rf $dtst
                if [[ $(checksum $bak_user) != $(checksum $bak_root) ]]; then
                        return 1
                fi
@@ -612,26 +612,26 @@ function verify_fs_create
                        return 1
                fi
 
-               log_must zfsallow $user mount $fs
-               user_run $user zfscreate -V 150m $newvol
+               log_must zfs allow $user mount $fs
+               user_run $user zfs create -V 150m $newvol
                block_device_wait
-               log_must zfsunallow $user mount $fs
+               log_must zfs unallow $user mount $fs
                if datasetexists $newvol ; then
                        return 1
                fi
 
-               log_must zfsallow $user reservation $fs
-               user_run $user zfscreate -V 150m $newvol
+               log_must zfs allow $user reservation $fs
+               user_run $user zfs create -V 150m $newvol
                block_device_wait
-               log_must zfsunallow $user reservation $fs
+               log_must zfs unallow $user reservation $fs
                if datasetexists $newvol ; then
                        return 1
                fi
 
-               log_must zfsallow $user refreservation $fs
-               user_run $user zfscreate -V 150m $newvol
+               log_must zfs allow $user refreservation $fs
+               user_run $user zfs create -V 150m $newvol
                block_device_wait
-               log_must zfsunallow $user refreservation $fs
+               log_must zfs unallow $user refreservation $fs
                if datasetexists $newvol ; then
                        return 1
                fi
@@ -648,7 +648,7 @@ function verify_fs_create
                fi
 
                block_device_wait
-               log_must zfsdestroy $newvol
+               log_must zfs destroy $newvol
                block_device_wait
        fi
 
@@ -1123,7 +1123,7 @@ function verify_fs_dnodesize
        typeset fs=$3
        value="2k"
 
-       user_run $user zfsset dnodesize=$value $fs
+       user_run $user zfs set dnodesize=$value $fs
        if [[ $value != $(get_prop dnodesize $fs) ]]; then
                return 1
        fi
index ac031ed6a52f300defc0731a2dcd0fb5de2cc39d..c02aa2d9fbd944f01f7ebc7542436c7a476d8489 100755 (executable)
 
 verify_runnable "global"
 
+if is_32bit; then
+       log_unsupported "Test case fails on 32-bit systems"
+fi
+
 log_assert "Setting devices=on on file system, the devices files in this file" \
        "system can be used."
 log_onexit cleanup
index ce25502b818b9c99e90f2c6086b4c07d50483ce4..1a645cc3ccf0a7bde5c9433db0f8742ccc81125d 100755 (executable)
 
 verify_runnable "global"
 
+if is_32bit; then
+       log_unsupported "Test case fails on 32-bit systems"
+fi
+
 log_assert "Setting devices=off on file system, the devices files in this file"\
        "system can not be used."
 log_onexit cleanup
index fe575eed8b2132a97ef048ec4b58dc00e9fb2136..2c7df8d058c34651a0387b9b9a39d604ddb5807f 100644 (file)
@@ -45,10 +45,15 @@ function create_dev_file
        case $filetype in
                b)
                        if is_linux; then
-                               devtype=$(df -T / | awk '{print $2}')
-                       else
-                               devtype=$(df -n / | awk '{print $3}')
+                               major=$(awk '/[hsv]d/ { print $1; exit }' \
+                                   /proc/partitions)
+                               minor=$(awk '/[hsv]d/ { print $2; exit }' \
+                                   /proc/partitions)
+                               log_must mknod $filename b $major $minor
+                               return 0
                        fi
+
+                       devtype=$(df -n / | awk '{print $3}')
                        case $devtype in
                                zfs)
                                        rootpool=$(df / | \
@@ -76,9 +81,6 @@ function create_dev_file
                                        [[ -z $devstr ]] && \
                                                log_fail "Can not get block device file."
                                        ;;
-                               ext2)
-                                       # TODO: Linux version
-                                       ;;
                                *)
                                        log_unsupported "Unsupported fstype " \
                                                "for / ($devtype)," \
@@ -106,7 +108,13 @@ function create_dev_file
                        #
                        # Create device file '/dev/null'
                        #
-                       log_must mknod $filename c $(getmajor mm) 2
+                       if is_linux; then
+                               major=$(stat -c %t /dev/null)
+                               minor=$(stat -c %T /dev/null)
+                               log_must mknod $filename c $major $minor
+                       else
+                               log_must mknod $filename c $(getmajor mm) 2
+                       fi
                        ;;
                *)
                        log_fail "'$filetype' is wrong."
index d76a265e2472a9f263e69c045c1a609505274e04..5de8a79ea30ffe3d3fcb41467f2b2074006bbbef 100755 (executable)
@@ -58,6 +58,6 @@ log_onexit cleanup
 log_must cp $STF_PATH/ls $TESTDIR/myls
 log_must zfs set exec=on $TESTPOOL/$TESTFS
 log_must $TESTDIR/myls
-log_must $MMAP_EXEC $TESTDIR/myls
+log_must mmap_exec $TESTDIR/myls
 
 log_pass "Setting exec=on on filesystem testing passed."
index eaf2d0b62d888ecd112ddcdaafc5626a4994c21e..c11bf8442bcd98b72b48ef4c3425685d617ccc1e 100755 (executable)
@@ -75,10 +75,15 @@ log_assert "Setting exec=off on a filesystem, processes can not be executed " \
        "from this file system."
 log_onexit cleanup
 
-log_must cp /usr/bin/ls $TESTDIR/myls
+log_must cp  $STF_PATH/ls $TESTDIR/myls
 log_must zfs set exec=off $TESTPOOL/$TESTFS
 
-log_must exec_n_check 126 $TESTDIR/myls
-log_must exec_n_check 13 $MMAP_EXEC $TESTDIR/myls
+if is_linux; then
+       log_must exec_n_check 126 $TESTDIR/myls
+       log_must exec_n_check 1 mmap_exec $TESTDIR/myls # EPERM
+else
+       log_must exec_n_check 126 $TESTDIR/myls
+       log_must exec_n_check 13 mmap_exec $TESTDIR/myls # EACCES
+fi
 
 log_pass "Setting exec=off on filesystem testing passed."
index 88bf5e28a75a00099083125f5e44a540770aae7c..f39f05d6fe8ed3f54356ba6d914e2793958e3e7a 100755 (executable)
@@ -30,10 +30,8 @@ verify_runnable "global"
 
 cleanup_devices $DISKS
 
-# Remove symlink and vdev_id.conf in-tree file
-rm -f $VDEVID_CONF_ETC
-rm -f $VDEVID_CONF
 zed_stop
+zed_cleanup
 
 SD=$(lsscsi | nawk '/scsi_debug/ {print $6; exit}')
 SDDEVICE=$(echo $SD | nawk -F / '{print $3}')
index 6a05b8cc497766facdfab6c18ba3b2a862f3b911..e6e4fe58266dcd910a54e6083a8e75253692ea02 100644 (file)
@@ -41,9 +41,6 @@ export DISK1=$(echo $DISKS | nawk '{print $1}')
 export DISK2=$(echo $DISKS | nawk '{print $2}')
 export DISK3=$(echo $DISKS | nawk '{print $3}')
 
-export VDEVID_CONF=$ZEDLET_DIR/vdev_id.conf
-export VDEVID_CONF_ETC=/etc/zfs/vdev_id.conf
-
 if is_linux; then
        set_slice_prefix
        set_device_dir
index 2b9fb61c0ecad1da60c6ee1efd00c2b9d4950344..3d54d4f217546e8a61303961fb661e9444a603d5 100755 (executable)
 
 verify_runnable "global"
 
-if [[ ! -d $ZEDLET_DIR ]]; then
-       log_must mkdir $ZEDLET_DIR
-fi
-
-if [[ ! -e $VDEVID_CONF ]]; then
-       log_must touch $VDEVID_CONF
-fi
-
-if [[ -e $VDEVID_CONF_ETC ]]; then
-       log_fail "Must not have $VDEVID_CONF_ETC file present on system"
-fi
-
-# Create a symlink for /etc/zfs/vdev_id.conf file
-log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
-
+zed_setup
 zed_start
 
 # Create a scsi_debug device to be used with auto-online (if using loop devices)
index 4fc98ffc3c44f00074d66cd9dfbfa746981c5c99..cdad61757f4a4f68e4fb8ff42e5a8565f23e9651 100644 (file)
@@ -1,4 +1,6 @@
 pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/grow_pool
 dist_pkgdata_SCRIPTS = \
+       setup.ksh \
+       cleanup.ksh \
        grow_pool.cfg \
        grow_pool_001_pos.ksh
diff --git a/tests/zfs-tests/tests/functional/grow_pool/cleanup.ksh b/tests/zfs-tests/tests/functional/grow_pool/cleanup.ksh
new file mode 100755 (executable)
index 0000000..91cf675
--- /dev/null
@@ -0,0 +1,53 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/grow_pool/grow_pool.cfg
+
+verify_runnable "global"
+
+ismounted $TESTFS && \
+       log_must zfs umount $TESTDIR
+destroy_pool "$TESTPOOL"
+
+#
+# Here we create & destroy a zpool using the disks
+# because this resets the partitions to normal
+#
+if [[ -z $DISK ]]; then
+       create_pool ZZZ "$DISK0 $DISK1"
+       destroy_pool ZZZ
+else
+       create_pool ZZZ "$DISK"
+       destroy_pool ZZZ
+fi
+
+log_pass
index 8b0563201c7afba73e160fb86d173dbaa2eefd3a..082472301b4328e4c44f18bc1dbca77f7d54c6ce 100644 (file)
@@ -30,6 +30,9 @@
 
 . $STF_SUITE/include/libtest.shlib
 
+export DISKSARRAY=$DISKS
+export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
+
 function set_disks
 {
        set -A disk_array $(find_disks $DISKS)
diff --git a/tests/zfs-tests/tests/functional/grow_pool/setup.ksh b/tests/zfs-tests/tests/functional/grow_pool/setup.ksh
new file mode 100755 (executable)
index 0000000..4c12579
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# Copyright (c) 2013 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/grow_pool/grow_pool.cfg
+
+verify_runnable "global"
+
+if ! $(is_physical_device $DISKS) ; then
+       log_unsupported "This directory cannot be run on raw files."
+fi
+
+if [[ -n $DISK ]]; then
+       log_note "No spare disks available. Using slices on $DISK"
+       for i in $SLICE0 $SLICE1 ; do
+               log_must set_partition $i "$cyl" $SIZE $DISK
+               cyl=$(get_endslice $DISK $i)
+       done
+       tmp=$DISK"s"$SLICE0
+else
+       log_must set_partition $SLICE "" $SIZE $DISK0
+       log_must set_partition $SLICE "" $SIZE $DISK1
+       tmp=$DISK0$SLICE_PREFIX$SLICE
+fi
+
+default_setup $tmp
index ebf32260211047f95aa83cda2b7b82e1951189ed..fd94555baf3e220169b0d8724b376ce88dc5f02c 100644 (file)
@@ -31,6 +31,9 @@
 
 . $STF_SUITE/include/libtest.shlib
 
+export DISKSARRAY=$DISKS
+export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
+
 function set_disks
 {
        set -A disk_array $(find_disks $DISKS)
index 6a5b9d9f96f940e5a5cc5c5261d400a6a190a8e2..d0075cafd323c0ac8ae4a558b3c561bf53ddfd48 100755 (executable)
 
 verify_runnable "global"
 
+if is_32bit; then
+       log_unsupported "Test case fails on 32-bit systems"
+fi
+
 if ! is_physical_device $DISKS; then
        log_unsupported "This test case cannot be run on raw files"
 fi
@@ -96,7 +100,9 @@ for pooltype in "mirror" "raidz"; do
 
        # $DISK will be set if we're using slices on one disk
        if [[ -n $DISK ]]; then
-               log_must zpool add $TESTPOOL $pooltype ${DISK}s3 ${DISK}s4
+               log_must zpool add $TESTPOOL $pooltype \
+                   ${DISK}${SLICE_PREFIX}${SLICE3} \
+                   ${DISK}${SLICE_PREFIX}${SLICE4}
        else
                [[ -z $DISK2 || -z $DISK3 ]] && 
                    log_unsupported "No spare disks available"
index a2da831c5cce7b0f9fa8cf6710ea355940e595a6..865d72086fa0c911e89813e19899c5e7e7cb93f1 100755 (executable)
 
 verify_runnable "global"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/5657
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 function cleanup
 {
        if datasetexists $fs ; then
index 996c7658c32c6ad3b09bda8252c39ba793d5e83b..f77fa2fc3621f65837ecda2cf52fc8ab2f879c0a 100755 (executable)
 
 verify_runnable "global"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/5658
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 function cleanup
 {
        if datasetexists $root_testfs; then
index a0891fde19caaddee95ae3cbaca3181fad464a91..bbc32f1f108d8914ad5c8cc999ba7c2226e5cc0d 100644 (file)
@@ -31,7 +31,8 @@
 . $STF_SUITE/include/libtest.shlib
 
 if is_linux; then
-       DISK_ARRAY_NUM=2
+       export DISKSARRAY=$DISKS
+       export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
        set_device_dir
        set_slice_prefix
        export SLICE0=1
index 4ba57c3627a7d24ea8c5f65093d78648b17ef898..63c68e66e4e448961a7742bfb05c09b823dcb930 100755 (executable)
 
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "Test case isn't applicable to Linux"
+fi
+
 function cleanup
 {
        #
index 932955b4fb10f33d997040473d349150d3ae414e..bdd79d9c4c4fe10c778987a0142d9f24480adfee 100755 (executable)
 
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "Test case isn't applicable to Linux"
+fi
+
 function cleanup
 {
        poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
@@ -94,12 +98,14 @@ typeset restored_files="${UFSMP}/restored_files"
 typeset -i dirnum=0
 typeset -i filenum=0
 typeset cwd=""
+typeset cyl=""
 
 for num in 0 1 2; do
        eval typeset slice=\${FS_SIDE$num}
        disk=${slice%s*}
        slice=${slice##*${SLICE_PREFIX}}
-       log_must set_partition $slice "" $FS_SIZE $disk
+       log_must set_partition $slice "$cyl" $FS_SIZE $disk
+       cyl=$(get_endslice $disk $slice)
 done
 
 log_note "Make a ufs filesystem on source $rawdisk1"
index 04fa30b27216f69b4eaff8a55522939c48ba987f..9dd1e25bf408fcd68ed0b414f8b05e00db364a94 100755 (executable)
 
 verify_runnable "global"
 
+if ! is_physical_device $FS_DISK0; then
+       log_unsupported "This directory cannot be run on raw files."
+fi
+
 function cleanup
 {
        poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
@@ -79,11 +83,18 @@ typeset -i i=0
 unset NOINUSE_CHECK
 while (( i < ${#vdevs[*]} )); do
 
+       for num in 0 1 2 3 ; do
+               eval typeset disk=\${FS_DISK$num}
+               zero_partitions $disk
+       done
+
+       typeset cyl=""
        for num in 0 1 2 3 ; do
                eval typeset slice=\${FS_SIDE$num}
                disk=${slice%${SLICE_PREFIX}*}
                slice=${slice##*${SLICE_PREFIX}}
-               log_must set_partition $slice "" $FS_SIZE $disk
+               log_must set_partition $slice "$cyl" $FS_SIZE $disk
+               cyl=$(get_endslice $disk $slice)
        done
 
        if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
index 6d2e88d91d50de59a7dd51fc3fabfa6f48c28224..0ce45a661c59cb4ea2f971195998b07723ba9710 100755 (executable)
 
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "Test case isn't applicable to Linux"
+fi
+
 function cleanup
 {
        if [[ -n $PREVDUMPDEV ]]; then
@@ -82,6 +86,11 @@ PREVDUMPDEV=`dumpadm | grep "Dump device" | awk '{print $3}'`
 unset NOINUSE_CHECK
 while (( i < ${#vdevs[*]} )); do
 
+       for num in 0 1 2 3 ; do
+               eval typeset disk=\${FS_DISK$num}
+               zero_partitions $disk
+       done
+
        for num in 0 1 2 3 ; do
                eval typeset slice=\${FS_SIDE$num}
                disk=${slice%${SLICE_PREFIX}*}
index d4c6d0a0b2f0d3b01da933155fa4ae6407654f64..22ac064ef3e51e99b1db3b455b22b40ae87eed6c 100755 (executable)
 
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "Test case isn't applicable to Linux"
+fi
+
 function cleanup
 {
        if [[ -n $PREVDUMPDEV ]]; then
@@ -86,6 +90,11 @@ PREVDUMPDEV=`dumpadm | grep "Dump device" | awk '{print $3}'`
 
 while (( i < ${#vdevs[*]} )); do
 
+       for num in 0 1 2 3 ; do
+               eval typeset disk=\${FS_DISK$num}
+               zero_partitions $disk
+       done
+
        for num in 0 1 2 3 ; do
                eval typeset slice=\${FS_SIDE$num}
                disk=${slice%${SLICE_PREFIX}*}
index 5144054a976a07e4d8ccc22fb9647ead4f278d51..95d39d958c3fcd8f8af4deb3bf560854c0b8f055 100755 (executable)
 
 verify_runnable "global"
 
+if ! is_physical_device $FS_DISK0; then
+       log_unsupported "This directory cannot be run on raw files."
+fi
+
 function cleanup
 {
        poolexists $TESTPOOL1 || zpool import $TESTPOOL1 >/dev/null 2>&1
@@ -78,12 +82,19 @@ log_onexit cleanup
 set -A vdevs "" "mirror" "raidz" "raidz1" "raidz2"
 
 typeset -i i=0
+typeset cyl=""
+
+for num in 0 1 2 3 ; do
+       eval typeset disk=\${FS_DISK$num}
+       zero_partitions $disk
+done
 
 for num in 0 1 2 3 ; do
        eval typeset slice=\${FS_SIDE$num}
        disk=${slice%${SLICE_PREFIX}*}
        slice=${slice##*${SLICE_PREFIX}}
-       log_must set_partition $slice "" $FS_SIZE $disk
+       log_must set_partition $slice "$cyl" $FS_SIZE $disk
+       cyl=$(get_endslice $disk $slice)
 done
 
 while (( i < ${#vdevs[*]} )); do
index f76913fb55cec844da1878956bead768a956b30e..6a9b9623cc878170bdbabc5570b868ebd2610de2 100755 (executable)
 
 verify_runnable "global"
 
+if ! is_physical_device $FS_DISK0; then
+       log_unsupported "This directory cannot be run on raw files."
+fi
+
 function cleanup
 {
        poolexists $TESTPOOL1 || zpool import $TESTPOOL1 >/dev/null 2>&1
@@ -62,7 +66,7 @@ function verify_assertion #disks
        typeset targets=$1
 
        for t in $targets; do
-               log_must set_partition 0 "" 0mb $t
+               log_must zero_partitions $t
        done
 
        return 0
@@ -78,11 +82,13 @@ typeset -i i=0
 
 while (( i < ${#vdevs[*]} )); do
 
+       typeset cyl=""
        for num in 0 1 2 3 ; do
                eval typeset slice=\${FS_SIDE$num}
                disk=${slice%${SLICE_PREFIX}*}
                slice=${slice##*${SLICE_PREFIX}}
-               log_must set_partition $slice "" $FS_SIZE $disk
+               log_must set_partition $slice "$cyl" $FS_SIZE $disk
+               cyl=$(get_endslice $disk $slice)
        done
 
        if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
index 3be20356ea0ee7ab1707ac829212a6b5795de403..277add1390242b8e1c53dbb7c9760a8df736ed78 100755 (executable)
 
 . $STF_SUITE/include/libtest.shlib
 
+if is_32bit; then
+       log_unsupported "Test case fails on 32-bit systems"
+fi
+
 #
 # DESCRIPTION:
 # Write a file to the allowable ZFS fs size.
index bcb93d47d55fcea705726ee62bbd3c13b86e3933..13c157789dd7a1e5a9cbc2df43d629af0e828570 100644 (file)
@@ -30,7 +30,7 @@
 
 export TESTVOL=testvol$$
 export VOL_PATH=${ZVOL_DEVDIR}/${TESTPOOL2}/$TESTVOL
-export VOLSIZES=${VOLSIZES-"2pb 5pb 10pb 2eb 5eb 8eb 9eb"}
+export VOLSIZES=${VOLSIZES-"2pb 5pb 10pb 2eb 5eb 7eb"}
 
 # There're 3 different prompt messages while create
 # a volume that great than 1TB on 32-bit
index 4b25cf6163b45daeee28351b6286db4f6bf792e2..16bd45ef16e237a7223d2fbd44334edccc005a99 100755 (executable)
 
 verify_runnable "global"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 #
 # Parse the results of zpool & zfs creation with specified size
 #
@@ -137,6 +142,7 @@ for volsize in $VOLSIZES; do
                        log_fail "zfs create -sV $volsize $TESTPOOL2/$TESTVOL"
                fi
        fi
+       block_device_wait
 
        log_note "Create the largest pool allowed using the volume vdev"
        create_pool $TESTPOOL "$VOL_PATH"
index 71e617ecf5e1393df43b6aa8792f8644057c1a47..e121787cab6547906b6fcc0f87cef86f9041aa31 100755 (executable)
@@ -50,7 +50,9 @@ export ITERS=10
 export NUMFILES=10000
 
 # Detect and make sure this test must be executed on a multi-process system
-is_mp || log_fail "This test requires a multi-processor system."
+if ! is_mp; then
+       log_unsupported "This test requires a multi-processor system."
+fi
 
 log_must mkdir -p ${TESTDIR}/tmp
 
index f65a4c1c4a4d84007978d5810e4d573240ada4ed..1eda971041dc73a2a9005b2cef35b2478a55f336 100755 (executable)
@@ -48,7 +48,9 @@ log_assert "write()s to a file and mmap() that file at the same time does not "\
        "result in a deadlock."
 
 # Detect and make sure this test must be executed on a multi-process system
-is_mp || log_fail "This test requires a multi-processor system."
+if ! is_mp; then
+       log_unsupported "This test requires a multi-processor system."
+fi
 
 log_must chmod 777 $TESTDIR
 mmapwrite $TESTDIR/test-write-file &
index b5dc01e9bd05c5ccb2d92034ac073dd9adb70e89..b8c89c623ae6222b088129f1fc2a8bc97f4d9357 100755 (executable)
@@ -43,17 +43,34 @@ done
 zfs_list="/ /lib /sbin /tmp /usr /var /var/adm /var/run"
 
 # Append our ZFS filesystems to the list, not worrying about duplicates.
-for fs in $(mount -p | awk '{if ($4 == "zfs") print $3}'); do
+if is_linux; then
+       typeset mounts=$(mount | awk '{if ($5 == "zfs") print $3}')
+else
+       typeset mounts=$(mount -p | awk '{if ($4 == "zfs") print $3}')
+fi
+
+for fs in $mounts; do
        zfs_list="$zfs_list $fs"
 done
 
+if is_linux; then
+       mounts=$(umount --fake -av -t zfs 2>&1 | \
+           grep "successfully umounted" | awk '{print $1}')
+       # Fallback to /proc/mounts for umount(8) (util-linux-ng 2.17.2)
+       if [[ -z $mounts ]]; then
+               mounts=$(awk '/zfs/ { print $2 }' /proc/mounts)
+       fi
+else
+       mounts=$(umountall -n -F zfs 2>&1 | awk '{print $2}')
+fi
+
 fs=''
-for fs in $(umountall -n -F zfs 2>&1 | awk '{print $2}'); do
+for fs in $mounts; do
        for i in $zfs_list; do
                [[ $fs = $i ]] && continue 2
        done
        log_fail "umountall -n -F zfs tried to unmount $fs"
 done
-[[ -n $fs ]] || log_fail "umountall -n -F zfs produced no output"
+[[ -n $mounts ]] || log_fail "umountall -n -F zfs produced no output"
 
 log_pass "All ZFS file systems would have been unmounted"
index 1c0f0517db551303a2aba7e209fd44e0c0abc1d7..139b4b26e5ed0f46931b177e6593e85d7f3795ee 100644 (file)
@@ -27,7 +27,7 @@ function verify_nopwrite
        typeset low=1
        typeset high=99
 
-       sync
+       sync_pool
        for i in origin snap clone; do
                for j in used refer usedbychildren written; do
                        typeset ${i}_$j=$(get_prop $j $(eval echo \$$i))
index 9682767c0ab80e82166c28de9c7747544d32d51c..2fd913f40338b9fcf7ebb793c813266c506beccf 100755 (executable)
@@ -30,6 +30,7 @@
 #
 
 . $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/online_offline/online_offline.cfg
 
 #
 # DESCRIPTION:
@@ -70,19 +71,22 @@ file_trunc -f $((64 * 1024 * 1024)) -b 8192 -c 0 -r $TESTDIR/$TESTFILE1 &
 typeset killpid="$! "
 
 for disk in $DISKLIST; do
-        for i in 'do_offline' 'do_offline_while_already_offline'; do
+       for i in 'do_offline' 'do_offline_while_already_offline'; do
                log_must zpool offline $TESTPOOL $disk
                check_state $TESTPOOL $disk "offline"
-                if [[ $? != 0 ]]; then
-                        log_fail "$disk of $TESTPOOL is not offline."
-                fi
-        done
-
-        log_must zpool online $TESTPOOL $disk
-        check_state $TESTPOOL $disk "online"
-        if [[ $? != 0 ]]; then
-                log_fail "$disk of $TESTPOOL did not match online state"
-        fi
+               if [[ $? != 0 ]]; then
+                       log_fail "$disk of $TESTPOOL is not offline."
+               fi
+       done
+
+       log_must zpool online $TESTPOOL $disk
+       check_state $TESTPOOL $disk "online"
+       if [[ $? != 0 ]]; then
+               log_fail "$disk of $TESTPOOL did not match online state"
+       fi
+
+       # Delay for resilver to complete
+       sleep 3
 done
 
 log_must kill $killpid
index 2f32e21e90118f25aab160afcf94373a4885786e..99b9d6bf1eaf13d31c3c2078e950604be474faac 100755 (executable)
@@ -30,6 +30,7 @@
 #
 
 . $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/online_offline/online_offline.cfg
 
 #
 # DESCRIPTION:
@@ -89,6 +90,11 @@ while [[ $i -lt ${#disks[*]} ]]; do
                log_must zpool online $TESTPOOL ${disks[$i]}
                check_state $TESTPOOL ${disks[$i]} "online" || \
                    log_fail "Failed to set ${disks[$i]} online"
+               # Delay for resilver to complete
+               while ! is_pool_resilvered $TESTPOOL; do
+                       log_must sleep 1
+               done
+               log_must zpool clear $TESTPOOL
                while [[ $j -lt ${#disks[*]} ]]; do
                        if [[ $j -eq $i ]]; then
                                ((j++))
@@ -119,6 +125,11 @@ while [[ $i -lt ${#disks[*]} ]]; do
                log_must zpool online $TESTPOOL ${disks[$i]}
                check_state $TESTPOOL ${disks[$i]} "online" || \
                    log_fail "Failed to set ${disks[$i]} online"
+               # Delay for resilver to complete
+               while ! is_pool_resilvered $TESTPOOL; do
+                       log_must sleep 1
+               done
+               log_must zpool clear $TESTPOOL
        fi
        ((i++))
 done
index 7a2fe609500770cb3f1bca3246221d92258d3c3e..5e7ddc189d35da93102a475fca99222c91c468e4 100755 (executable)
 # We can only run this in the global zone
 verify_runnable "global"
 
+if is_linux; then
+       log_unsupported "Requires pfexec command"
+fi
+
 log_assert "The RBAC profile \"ZFS Storage Management\" works"
 
 ZFS_USER=$(cat /tmp/zfs-privs-test-user.txt)
index dcccb5401f39025dd140d5e1274d85567fc07127..5f68857026ee830f1062290f8b2602e43022b4b4 100755 (executable)
 
 verify_runnable "both"
 
+if is_linux; then
+       log_unsupported "Requires pfexec command"
+fi
+
 log_assert "The RBAC profile \"ZFS File System Management\" works"
 
 ZFS_USER=$(cat /tmp/zfs-privs-test-user.txt)
index 4332736a1ae53e7fe9ca3814a89b8e823b8fdb75..01b819dc62f93eaab142f14d087e670b9ce1fcb1 100755 (executable)
@@ -54,11 +54,11 @@ typeset -i cnt=$(random 2 5)
 setup_test_env $TESTPOOL "" $cnt
 
 damage_devs $TESTPOOL 1 "keep_label"
-log_must zpool clear $TESTPOOL
+log_must zpool scrub $TESTPOOL
 
-# Wait for the scrub intiated by the clear to wrap, or is_healthy will be wrong.
+# Wait for the scrub to wrap, or is_healthy will be wrong.
 while ! is_pool_scrubbed $TESTPOOL; do
-       sync
+       sleep 1
 done
 
 log_mustnot is_healthy $TESTPOOL
index 12224deec6830aacb53dafcd3bd931f7bd4ebb61..d7b6de1a2e0199a4790006c35111c1a57522d5d5 100755 (executable)
@@ -58,14 +58,14 @@ log_onexit cleanup
 cd $TESTDIR
 mkdir -p 1/2/3/4/5 a/b/c/d/e
 
-$RENAME_DIRS &
+rename_dir &
 
-sleep 500
+sleep 10
 typeset -i retval=1
-pgrep $RENAME_DIRS >/dev/null 2>&1
+pgrep -x rename_dir >/dev/null 2>&1
 retval=$?
 if (( $retval == 0 )); then
-       pkill -9 $RENAME_DIRS >/dev/null 2>&1
+       pkill -9 -x rename_dir >/dev/null 2>&1
 fi
 
 log_pass "ZFS handle race directory rename operation as expected."
index 8969ba9e8972d515e2c131028e286330d667a2f3..771d7e2f04a03160c4842b4534d1affb76ceffcb 100755 (executable)
 
 verify_runnable "both"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/6086
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 log_assert "Verify resumability of a full and incremental ZFS send/receive " \
     "in the presence of a corrupted stream"
 log_onexit resume_cleanup $sendfs $streamfs
index 0ccea49921548a1f7154c2c8b46c13eb1a00a106..1436feab923741e7bbac52edb241cd781495b6b6 100755 (executable)
 
 verify_runnable "global"
 
+if ! $(is_physical_device $DISKS) ; then
+       log_unsupported "This directory cannot be run on raw files."
+fi
+
 function cleanup_testenv
 {
        cleanup
@@ -51,7 +55,11 @@ function cleanup_testenv
                log_must zpool destroy -f $TESTPOOL2
        fi
        if [[ -n $lofidev ]]; then
-               lofiadm -d $lofidev
+               if is_linux; then
+                       losetup -d $lofidev
+               else
+                       lofiadm -d $lofidev
+               fi
        fi
 }
 
@@ -72,9 +80,15 @@ ldev=$(random_get $LDEV)
 log_must verify_slog_device $TESTPOOL $ldev 'ONLINE'
 
 # Add lofi device
-lofidev=${LDEV2%% *}
-log_must lofiadm -a $lofidev
-lofidev=$(lofiadm $lofidev)
+if is_linux; then
+       lofidev=$(losetup -f)
+       lofidev=${lofidev##*/}
+       log_must losetup $lofidev ${LDEV2%% *}
+else
+       lofidev=${LDEV2%% *}
+       log_must lofiadm -a $lofidev
+       lofidev=$(lofiadm $lofidev)
+fi
 log_must zpool add $TESTPOOL log $lofidev
 log_must verify_slog_device $TESTPOOL $lofidev 'ONLINE'
 
@@ -89,4 +103,4 @@ log_must zpool add $TESTPOOL $mntpnt/vdev
 # Add ZFS volume
 vol=$TESTPOOL/vol
 log_must zpool create -V $MINVDEVSIZE $vol
-log_must zpool add $TESTPOOL ${ZVOL_DEVDIR}/$vol
\ No newline at end of file
+log_must zpool add $TESTPOOL ${ZVOL_DEVDIR}/$vol
index 2d5407849ade51c2c06d6ba08bcf5f31c88a7afa..bc0c2fec55d6649ba77b9f887383825f30d0e0cf 100755 (executable)
@@ -65,6 +65,7 @@ do
                log_must zpool scrub $TESTPOOL
                log_must display_status $TESTPOOL
                log_must zpool status $TESTPOOL 2>&1 >/dev/null
+               log_must zpool offline $TESTPOOL $VDIR/a
 
                zpool status -v $TESTPOOL | \
                        grep "state: DEGRADED" 2>&1 >/dev/null
@@ -78,6 +79,7 @@ do
                        log_fail "log device should display correct status"
                fi
 
+               log_must zpool online $TESTPOOL $VDIR/a
                log_must zpool destroy -f $TESTPOOL
        done
 done
index 4ba853162cb563620a4e0affef437d10258832ab..1a2078b2dd48b689dca424e1b93048cdbd54176a 100755 (executable)
 
 verify_runnable "both"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 # Setup array, 4 elements as a group, refer to:
 # i+0: name of a snapshot
 # i+1: mountpoint of the snapshot
index 9b1e400db56e075a3c7e84fd8d556b077ca99593..0a21a8ae01decd3ab782ac8a62b00e28f0fcaf9b 100755 (executable)
 
 verify_runnable "both"
 
+# https://github.com/zfsonlinux/zfs/issues/6143
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 function cleanup
 {
        typeset snap=""
index fa31645551012e301480dcc621f3fdd4e5dbb910..265903fe14699ef2ac3ea2953ec3834a316356d2 100644 (file)
@@ -47,7 +47,7 @@ export VOLSIZE=1gb
 export BLOCKSZ=8192
 export NUM_WRITES=20
 export DATA=0
-export LIMIT=524288 # tolerance measured in bytes, 512K
+export LIMIT=2097152 # tolerance measured in bytes, 2M
 export FSQUOTA=500m
 export FILESIZE=400m
 export FILESIZE1=200m
index 0f7b0621a4829c703899b382a2005c824112049c..d0ecb77fe0a0a41c1cb5db1c88b19fd29f7471dd 100755 (executable)
@@ -89,6 +89,7 @@ while [[ $i -lt $COUNT ]]; do
 done
 
 wait_freeing $TESTPOOL
+sync_pool
 
 new_size=`get_prop available $TESTPOOL`
 
index 66bf64833150bd505183f4eb418c1fd36a076fe2..b460c2b0c5dcc061425448118e67666d69483611 100755 (executable)
@@ -51,6 +51,8 @@ function cleanup
                log_must zfs destroy -Rf $SNAPFS
        datasetexists $TESTPOOL/$TESTFS@snap_a && \
                log_must zfs destroy -Rf $TESTPOOL/$TESTFS@snap_a
+       datasetexists $TESTPOOL/$TESTFS@snap_b && \
+               log_must zfs destroy -Rf $TESTPOOL/$TESTFS@snap_b
        datasetexists $TESTPOOL/$TESTCLONE@snap_a && \
                log_must zfs destroy -Rf $TESTPOOL/$TESTCLONE@snap_a
 
@@ -79,15 +81,15 @@ log_must zfs destroy $TESTPOOL/$TESTFS@snap_a
 
 log_must zfs snapshot $SNAPFS
 log_must zfs clone $SNAPFS $TESTPOOL/$TESTCLONE
-log_must mv $TESTDIR/$SNAPROOT/$TESTSNAP $TESTDIR/$SNAPROOT/snap_a
+log_must mv $TESTDIR/$SNAPROOT/$TESTSNAP $TESTDIR/$SNAPROOT/snap_b
 
-datasetexists $TESTPOOL/$TESTFS@snap_a || \
+datasetexists $TESTPOOL/$TESTFS@snap_b || \
         log_fail "rename snapshot via mv in .zfs/snapshot fails."
 log_must zfs promote $TESTPOOL/$TESTCLONE
 # promote back to $TESTPOOL/$TESTFS for scenario 3
 log_must zfs promote $TESTPOOL/$TESTFS
 log_must zfs destroy $TESTPOOL/$TESTCLONE
-log_must zfs destroy $TESTPOOL/$TESTFS@snap_a
+log_must zfs destroy $TESTPOOL/$TESTFS@snap_b
 
 # scenario 3
 
index b3e99d8611a5448fb043e79118978f6f4b6901c4..48c468b5661cce78e0749cf8f3d175626f29b656 100755 (executable)
 
 verify_runnable "both"
 
+# See issue: https://github.com/zfsonlinux/zfs/issues/6136
+if is_linux; then
+       log_unsupported "Test case occasionally fails"
+fi
+
 log_assert "Ensure multiple threads performing write appends to the same" \
        "ZFS file succeed"
 
@@ -67,7 +72,7 @@ fi
 # zfs_threadsappend tries to append to $TESTFILE using threads
 # so that the resulting file is $FILE_SIZE bytes in size
 #
-log_must $THREADSAPPEND ${TESTDIR}/${TESTFILE}
+log_must threadsappend ${TESTDIR}/${TESTFILE}
 
 #
 # Check the size of the resulting file