char *devname;
if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
- zed_log_msg(LOG_INFO, "zfs_deliver_event: no physpath");
+ zed_log_msg(LOG_INFO, "zfs_deliver_dle: no physpath");
return (-1);
}
if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
- zed_log_msg(LOG_INFO, "zfs_deliver_event: device '%s' not "
+ zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
"found", devname);
return (1);
}
struct udev_monitor *mon = arg;
char *tmp, *tmp2;
- zed_log_msg(LOG_INFO, "Waiting for new uduev disk events...");
+ zed_log_msg(LOG_INFO, "Waiting for new udev disk events...");
while (1) {
struct udev_device *dev;
post = cleanup
outputdir = /var/tmp/test_results
-# DISABLED: update to use ZFS_ACL_* variables and user_run helper.
+# Update to use ZFS_ACL_* variables and user_run helper.
# posix_001_pos
# posix_002_pos
[tests/functional/acl/posix]
'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
'bootfs_008_pos']
-# DISABLED:
-# cache_001_pos - needs investigation
-# cache_010_neg - needs investigation
[tests/functional/cache]
-tests = ['cache_002_pos', 'cache_003_pos', 'cache_004_neg',
+tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
- 'cache_009_pos', 'cache_011_pos']
+ 'cache_009_pos', 'cache_010_neg', 'cache_011_pos']
-# DISABLED: needs investigation
-#[tests/functional/cachefile]
-#tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
-# 'cachefile_004_pos']
-#pre =
-#post =
+[tests/functional/cachefile]
+tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
+ 'cachefile_004_pos']
-# DISABLED: needs investigation
# 'sensitive_none_lookup', 'sensitive_none_delete',
# 'sensitive_formd_lookup', 'sensitive_formd_delete',
# 'insensitive_none_lookup', 'insensitive_none_delete',
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos', 'zfs_003_neg']
-# DISABLED:
-# zfs_clone_005_pos - busy unmount
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
- 'zfs_clone_004_pos', 'zfs_clone_006_pos',
+ 'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_010_pos']
-# DISABLED:
-# zfs_copies_003_pos - https://github.com/zfsonlinux/zfs/issues/3484
-# zfs_copies_005_neg - https://github.com/zfsonlinux/zfs/issues/3484
[tests/functional/cli_root/zfs_copies]
-tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_004_neg',
- 'zfs_copies_006_pos']
+tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
+ 'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
'zfs_destroy_016_pos']
-# DISABLED:
-# zfs_get_004_pos - https://github.com/zfsonlinux/zfs/issues/3484
-# zfs_get_006_neg - needs investigation
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
- 'zfs_get_005_neg', 'zfs_get_007_neg', 'zfs_get_008_pos',
- 'zfs_get_009_pos', 'zfs_get_010_neg']
+ 'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
+ 'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos']
-# DISABLED:
# zfs_mount_006_pos - https://github.com/zfsonlinux/zfs/issues/4990
-# zfs_mount_007_pos - needs investigation
-# zfs_mount_009_neg - needs investigation
-# zfs_mount_all_001_pos - needs investigation
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
- 'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_008_pos',
- 'zfs_mount_010_neg', 'zfs_mount_011_neg', 'zfs_mount_012_neg']
+ 'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
+ 'zfs_mount_008_pos', 'zfs_mount_009_neg', 'zfs_mount_010_neg',
+ 'zfs_mount_011_neg', 'zfs_mount_012_neg', 'zfs_mount_all_001_pos']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos']
-# DISABLED:
-# zfs_written_property_001_pos - https://github.com/zfsonlinux/zfs/issues/2441
[tests/functional/cli_root/zfs_property]
-tests = []
+tests = ['zfs_written_property_001_pos']
-# DISABLED:
# zfs_receive_004_neg - Fails for OpenZFS on illumos
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'receive-o-x_props_override']
-# DISABLED:
# zfs_rename_006_pos - https://github.com/zfsonlinux/zfs/issues/5647
# zfs_rename_009_neg - https://github.com/zfsonlinux/zfs/issues/5648
[tests/functional/cli_root/zfs_rename]
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
-# DISABLED:
-# zfs_rollback_001_pos - busy mountpoint behavior
-# zfs_rollback_002_pos - busy mountpoint behavior
[tests/functional/cli_root/zfs_rollback]
-tests = ['zfs_rollback_003_neg', 'zfs_rollback_004_neg']
+tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
+ 'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
'zfs_snapshot_009_pos']
-# DISABLED:
-# zfs_unmount_005_pos - needs investigation
-# zfs_unmount_009_pos - needs investigation
-# zfs_unmount_all_001_pos - needs investigation
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
- 'zfs_unmount_004_pos', 'zfs_unmount_006_pos',
- 'zfs_unmount_007_neg', 'zfs_unmount_008_neg']
+ 'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
+ 'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
+ 'zfs_unmount_all_001_pos']
-# DISABLED:
-# zfs_unshare_002_pos - zfs set sharenfs=off won't unshare if it was already off
-# zfs_unshare_006_pos - some distros come with Samba "user shares" disabled
[tests/functional/cli_root/zfs_unshare]
-tests = ['zfs_unshare_001_pos', 'zfs_unshare_003_pos',
- 'zfs_unshare_004_neg', 'zfs_unshare_005_neg']
+tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
+ 'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
tests = ['zpool_status_001_pos', 'zpool_status_002_pos', 'zpool_status_003_pos']
user =
+[tests/functional/cli_root/zpool_sync]
+tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
+
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
'zpool_upgrade_009_neg']
-[tests/functional/cli_root/zpool_sync]
-tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
-
-# DISABLED:
-# zfs_share_001_neg - requires additional dependencies
-# zfs_unshare_001_neg - requires additional dependencies
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
- 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
- 'zfs_unmount_001_neg', 'zfs_upgrade_001_neg',
+ 'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
+ 'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
[tests/functional/ctime]
tests = ['ctime_001_pos' ]
-# DISABLED:
-# zfs_allow_010_pos - https://github.com/zfsonlinux/zfs/issues/5646
[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos',
'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
- 'zfs_allow_011_neg', 'zfs_allow_012_neg',
+ 'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
-# DISABLED:
-# devices_001_pos - needs investigation
-# devices_002_neg - needs investigation
[tests/functional/devices]
-tests = ['devices_003_pos']
+tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
-# DISABLED:
-# exec_002_neg - needs investigation
[tests/functional/exec]
-tests = ['exec_001_pos']
+tests = ['exec_001_pos', 'exec_002_neg']
[tests/functional/fault]
tests = ['auto_online_001_pos', 'auto_replace_001_pos']
'large_dnode_004_neg', 'large_dnode_005_pos', 'large_dnode_006_pos',
'large_dnode_007_neg']
-# DISABLED: needs investigation
-#[tests/functional/grow_pool]
-#tests = ['grow_pool_001_pos']
-#pre =
-#post =
-
-# DISABLED: needs investigation
-#[tests/functional/grow_replicas]
-#tests = ['grow_replicas_001_pos']
-#pre =
-#post =
-
-# DISABLED:
-# history_004_pos - https://github.com/zfsonlinux/zfs/issues/5664
-# history_006_neg - https://github.com/zfsonlinux/zfs/issues/5657
-# history_008_pos - https://github.com/zfsonlinux/zfs/issues/5658
+[tests/functional/grow_pool]
+tests = ['grow_pool_001_pos']
+
+[tests/functional/grow_replicas]
+tests = ['grow_replicas_001_pos']
+pre =
+post =
+
[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
- 'history_005_neg',
- 'history_007_pos', 'history_009_pos',
+ 'history_004_pos', 'history_005_neg', 'history_006_neg',
+ 'history_007_pos', 'history_008_pos', 'history_009_pos',
'history_010_pos']
[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
-# DISABLED:
-# inuse_001_pos, inuse_007_pos - no dumpadm command
-# inuse_005_pos - partition issue
-# inuse_006_pos - partition issue
-# inuse_008_pos - partition issue
-# inuse_009_pos - partition issue
[tests/functional/inuse]
-tests = ['inuse_004_pos']
+tests = ['inuse_001_pos', 'inuse_003_pos', 'inuse_004_pos',
+ 'inuse_005_pos', 'inuse_006_pos', 'inuse_007_pos', 'inuse_008_pos',
+ 'inuse_009_pos']
post =
-# DISABLED: needs investigation
-# large_files_001_pos
[tests/functional/large_files]
-tests = ['large_files_002_pos']
+tests = ['large_files_001_pos', 'large_files_002_pos']
-# DISABLED: needs investigation
-#[tests/functional/largest_pool]
-#tests = ['largest_pool_001_pos']
-#pre =
-#post =
+[tests/functional/largest_pool]
+tests = ['largest_pool_001_pos']
+pre =
+post =
-# DISABLED: needs investigation
-#[tests/functional/link_count]
-#tests = ['link_count_001']
+[tests/functional/link_count]
+tests = ['link_count_001']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
-# DISABLED:
-# mmap_write_001_pos - needs investigation
[tests/functional/mmap]
-tests = ['mmap_read_001_pos']
+tests = ['mmap_write_001_pos', 'mmap_read_001_pos']
-# DISABLED:
-# umountall_001 - requires umountall command.
[tests/functional/mount]
-tests = ['umount_001']
+tests = ['umount_001', 'umountall_001']
[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos']
[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos']
-# DISABLED:
-# nopwrite_volume - https://github.com/zfsonlinux/zfs/issues/5510
-# nopwrite_varying_compression - needs investigation
[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
- 'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync']
+ 'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
+ 'nopwrite_varying_compression', 'nopwrite_volume']
-# DISABLED: needs investigation
-#[tests/functional/online_offline]
-#tests = ['online_offline_001_pos', 'online_offline_002_neg',
-# 'online_offline_003_neg']
+[tests/functional/online_offline]
+tests = ['online_offline_001_pos', 'online_offline_002_neg',
+ 'online_offline_003_neg']
[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
-# DISABLED: requires pfexec command or 'RBAC profile'
-#[tests/functional/privilege]
-#tests = ['privilege_001_pos', 'privilege_002_pos']
+[tests/functional/privilege]
+tests = ['privilege_001_pos', 'privilege_002_pos']
[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg']
-# DISABLED:
-# refreserv_004_pos - needs investigation
+# refreserv_004_pos - Fails for OpenZFS on illumos
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_005_pos']
-# DISABLED:
-#[tests/functional/rename_dirs]
-#tests = ['rename_dirs_001_pos']
+[tests/functional/rename_dirs]
+tests = ['rename_dirs_001_pos']
[tests/functional/replacement]
tests = ['replacement_001_pos', 'replacement_002_pos', 'replacement_003_pos']
-# DISABLED:
# reservation_001_pos - https://github.com/zfsonlinux/zfs/issues/4445
# reservation_013_pos - https://github.com/zfsonlinux/zfs/issues/4444
# reservation_018_pos - https://github.com/zfsonlinux/zfs/issues/5642
[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
-# DISABLED:
# rsend_008_pos - https://github.com/zfsonlinux/zfs/issues/6066
# rsend_009_pos - https://github.com/zfsonlinux/zfs/issues/5887
[tests/functional/rsend]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
-# DISABLED: Scripts need to be updated.
-# slog_012_neg - needs investigation
-# slog_013_pos - requires 'lofiadm' command.
-# slog_014_pos - needs investigation
[tests/functional/slog]
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
- 'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_015_pos']
+ 'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
+ 'slog_013_pos', 'slog_014_pos', 'slog_015_pos']
-# DISABLED:
-# clone_001_pos - https://github.com/zfsonlinux/zfs/issues/3484
-# rollback_003_pos - Hangs in unmount and spins.
-# snapshot_016_pos - Problem with automount
-# snapshot_008_pos - https://github.com/zfsonlinux/zfs/issues/5784
[tests/functional/snapshot]
-tests = ['rollback_001_pos', 'rollback_002_pos',
- 'snapshot_001_pos', 'snapshot_002_pos',
+tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
+ 'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
- 'snapshot_006_pos', 'snapshot_007_pos',
+ 'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
- 'snapshot_015_pos', 'snapshot_017_pos']
+ 'snapshot_015_pos', 'snapshot_016_pos', 'snapshot_017_pos']
-# DISABLED:
# snapused_004_pos - https://github.com/zfsonlinux/zfs/issues/5513
[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
[tests/functional/sparse]
tests = ['sparse_001_pos']
-# DISABLED: needs investigation
-#[tests/functional/threadsappend]
-#tests = ['threadsappend_001_pos']
+[tests/functional/threadsappend]
+tests = ['threadsappend_001_pos']
[tests/functional/tmpfile]
tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos']
'userspace_001_pos', 'userspace_002_pos', 'userspace_003_pos',
'groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos' ]
-# DISABLED:
# vdev_zaps_007_pos -- fails due to a pre-existing issue with zpool split
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos']
-# DISABLED:
-# write_dirs_002_pos - needs investigation
[tests/functional/write_dirs]
-tests = ['write_dirs_001_pos']
+tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
-# DISABLED: No 'runat' command, replace the Linux equivalent and add xattrtest
#[tests/functional/xattr]
#tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
# 'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg', 'xattr_008_pos',
extern "C" {
#endif
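+/*
+ * Request 64-bit file offsets and the LFS64 interfaces so the large-file
+ * tests behave sensibly on 32-bit systems.
+ */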
+#ifndef _FILE_OFFSET_BITS
+#define _FILE_OFFSET_BITS 64
+#endif
+
+#ifndef _LARGEFILE64_SOURCE
+#define _LARGEFILE64_SOURCE
+#endif
+
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
*/
/*
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
*/
#include <stdio.h>
exit(3);
}
- while (i < count) {
+ for (i = 0; count == 0 || i < count; i++) {
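+		/* A count of 0 requests an unbounded run. */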
(void) do_write(fd);
(void) do_trunc(fd);
-
- i++;
}
(void) close(fd);
exit(5);
}
- strcpy(buf, "ZFS Test Suite Truncation Test");
+ (void) strcpy(buf, "ZFS Test Suite Truncation Test");
if (write(fd, buf, bsize) < bsize) {
perror("write");
exit(6);
write_ret = write(fd, mybuf, 1);
if (write_ret < 0) {
- if (errno == EFBIG) {
- (void) printf("write errno=EFBIG: success\n");
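+		/* Linux may return EINVAL rather than EFBIG here; accept both. */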
+ if (errno == EFBIG || errno == EINVAL) {
+ (void) printf("write errno=EFBIG|EINVAL: success\n");
err = 0;
} else {
err = errno;
int
main(int argc, char *argv[])
{
- int fd;
+ int error, fd;
struct stat statbuf;
if (argc != 2) {
errno = 0;
if ((fd = open(argv[1], O_RDONLY)) < 0) {
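+		/* Save errno before perror(), which may clobber it. */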
+ error = errno;
perror("open");
- return (errno);
+ return (error);
}
if (fstat(fd, &statbuf) < 0) {
+ error = errno;
perror("fstat");
- return (errno);
+ return (error);
}
if (mmap(0, statbuf.st_size,
PROT_EXEC, MAP_SHARED, fd, 0) == MAP_FAILED) {
+ error = errno;
perror("mmap");
- return (errno);
+ return (error);
}
return (0);
getconf
getent
getfacl
+ getfattr
grep
groupadd
groupdel
ksh
ln
logname
+ losetup
ls
lsblk
lsmod
mpstat
mv
net
+ nproc
openssl
parted
pax
ZVOL_RDEVDIR="/dev/zvol"
DEV_RDSKDIR="/dev"
DEV_MPATHDIR="/dev/mapper"
+
ZEDLET_DIR="/var/tmp/zed"
+ VDEVID_CONF="$ZEDLET_DIR/vdev_id.conf"
+ VDEVID_CONF_ETC="/etc/zfs/vdev_id.conf"
+
NEWFS_DEFAULT_FS="ext2"
else
fi
export unpack_opts pack_opts verbose unpack_preserve pack_preserve \
ZVOL_DEVDIR ZVOL_RDEVDIR NEWFS_DEFAULT_FS DEV_RDSKDIR DEV_MPATHDIR \
- ZEDLET_DIR
+ ZEDLET_DIR VDEVID_CONF VDEVID_CONF_ETC
set_partition $i "" 0mb $diskname
done
fi
+
+ return 0
}
#
done
}
+#
+# Setup custom environment for the ZED.
+#
+function zed_setup
+{
+ if ! is_linux; then
+ return
+ fi
+
+ if [[ ! -d $ZEDLET_DIR ]]; then
+ log_must mkdir $ZEDLET_DIR
+ fi
+
+ if [[ ! -e $VDEVID_CONF ]]; then
+ log_must touch $VDEVID_CONF
+ fi
+
+ if [[ -e $VDEVID_CONF_ETC ]]; then
+ log_fail "Must not have $VDEVID_CONF_ETC file present on system"
+ fi
+
+ # Create a symlink for /etc/zfs/vdev_id.conf file.
+ log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
+
+ # Setup minimal ZED configuration. Individual test cases should
+ # add additional ZEDLETs as needed for their specific test.
+ log_must cp ${ZEDLETDIR}/zed.rc $ZEDLET_DIR
+ log_must cp ${ZEDLETDIR}/zed-functions.sh $ZEDLET_DIR
+ log_must cp ${ZEDLETDIR}/all-syslog.sh $ZEDLET_DIR
+
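+	# Discard any events generated before the test begins.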
+ log_must zpool events -c
+}
+
+#
+# Cleanup custom ZED environment.
+#
+function zed_cleanup
+{
+ if ! is_linux; then
+ return
+ fi
+
+ log_must rm -f ${ZEDLET_DIR}/zed.rc
+ log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
+ log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
+ log_must rm -f ${ZEDLET_DIR}/zed.pid
+ log_must rm -f ${ZEDLET_DIR}/zedlog
+ log_must rm -f ${ZEDLET_DIR}/state
+ log_must rm -f $VDEVID_CONF_ETC
+ log_must rm -f $VDEVID_CONF
+ rmdir $ZEDLET_DIR
+}
+
#
# Check if ZED is currently running, if not start ZED.
#
function zed_start
{
- if is_linux; then
- # ZEDLET_DIR=/var/tmp/zed
- if [[ ! -d $ZEDLET_DIR ]]; then
- log_must mkdir $ZEDLET_DIR
- fi
-
- # Verify the ZED is not already running.
- pgrep -x zed > /dev/null
- if (($? == 0)); then
- log_fail "ZED already running"
- fi
+ if ! is_linux; then
+ return
+ fi
- # ZEDLETDIR=</etc/zfs/zed.d | ${SRCDIR}/cmd/zed/zed.d>
- log_must cp ${ZEDLETDIR}/all-syslog.sh $ZEDLET_DIR
+ # ZEDLET_DIR=/var/tmp/zed
+ if [[ ! -d $ZEDLET_DIR ]]; then
+ log_must mkdir $ZEDLET_DIR
+ fi
- log_note "Starting ZED"
- # run ZED in the background and redirect foreground logging
- # output to zedlog
- log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
- "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"
+ # Verify the ZED is not already running.
+ pgrep -x zed > /dev/null
+ if (($? == 0)); then
+ log_fail "ZED already running"
fi
+
+ log_note "Starting ZED"
+ # run ZED in the background and redirect foreground logging
+ # output to zedlog
+ log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid" \
+ "-s $ZEDLET_DIR/state 2>${ZEDLET_DIR}/zedlog &"
}
#
#
function zed_stop
{
- if is_linux; then
- if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
- zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
- log_must kill $zedpid
- fi
- log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
- log_must rm -f ${ZEDLET_DIR}/zed.pid
- log_must rm -f ${ZEDLET_DIR}/zedlog
- log_must rm -f ${ZEDLET_DIR}/state
- log_must rmdir $ZEDLET_DIR
+ if ! is_linux; then
+ return
+ fi
+
+ if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
+ zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
+ log_must kill $zedpid
fi
}
verify_runnable "global"
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
function cleanup_testenv
{
cleanup
if [[ -n $lofidev ]]; then
- log_must lofiadm -d $lofidev
+ if is_linux; then
+ losetup -d $lofidev
+ else
+ lofiadm -d $lofidev
+ fi
fi
}
dsk1=${DISKS%% *}
log_must zpool create $TESTPOOL ${DISKS#$dsk1}
-if is_linux; then
- SLICE="p1"
-else
- SLICE="s0"
-fi
-
# Add normal ${DEV_RDSKDIR} device
-log_mustnot zpool add $TESTPOOL cache ${DEV_RDSKDIR}/${dsk1}${SLICE}
-#log_must verify_cache_device $TESTPOOL $dsk1 'ONLINE'
+log_must zpool add $TESTPOOL cache \
+ ${DEV_RDSKDIR}/${dsk1}${SLICE_PREFIX}${SLICE0}
+log_must verify_cache_device $TESTPOOL $dsk1 'ONLINE'
-# Add nomal file
+# Add normal file
log_mustnot zpool add $TESTPOOL cache $VDEV2
-# Add /dev/rlofi device
-lofidev=${VDEV2%% *}
-log_must lofiadm -a $lofidev
-lofidev=$(lofiadm $lofidev)
-log_mustnot zpool add $TESTPOOL cache "/dev/rlofi/${lofidev#/dev/lofi/}"
-if [[ -n $lofidev ]]; then
+# Add /dev/rlofi device (allowed under Linux)
+if is_linux; then
+ lofidev=$(losetup -f)
+ lofidev=${lofidev##*/}
+ log_must losetup $lofidev ${VDEV2%% *}
+ log_must zpool add $TESTPOOL cache $lofidev
+ log_must zpool remove $TESTPOOL $lofidev
+ log_must losetup -d $lofidev
+ lofidev=""
+else
+ lofidev=${VDEV2%% *}
+ log_must lofiadm -a $lofidev
+ lofidev=$(lofiadm $lofidev)
+ log_mustnot zpool add $TESTPOOL cache "/dev/rlofi/${lofidev#/dev/lofi/}"
log_must lofiadm -d $lofidev
lofidev=""
fi
-# Add ${ZVOL_RDEVDIR} device
-log_must zpool create $TESTPOOL2 $VDEV2
-log_must zfs create -V $SIZE $TESTPOOL2/$TESTVOL
-log_mustnot zpool add $TESTPOOL cache ${ZVOL_RDEVDIR}/$TESTPOOL2/$TESTVOL
+# Add /dev/zvol/rdsk device (allowed under Linux)
+if ! is_linux; then
+ log_must zpool create $TESTPOOL2 $VDEV2
+ log_must zfs create -V $SIZE $TESTPOOL2/$TESTVOL
+ log_mustnot zpool add $TESTPOOL cache \
+ ${ZVOL_RDEVDIR}/$TESTPOOL2/$TESTVOL
+fi
log_pass "Cache device can only be block devices."
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cachefile
dist_pkgdata_SCRIPTS = \
+ cleanup.ksh \
+ setup.ksh \
cachefile.cfg \
cachefile.kshlib \
cachefile_001_pos.ksh \
#
export CPATH="/etc/zfs/zpool.cache"
-export CPATH1=/var/tmp/cachefile.$$
-export CPATH2=$TEST_BASE_DIR/cachefile.$$
+export CPATH1=$TEST_BASE_DIR/cachefile.1.$$
+export CPATH2=$TEST_BASE_DIR/cachefile.2.$$
+
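+# Export the disk list and count expected by the shared device helpers.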
+export DISKSARRAY=$DISKS
+export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
+set_device_dir
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+default_cleanup
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cachefile/cachefile.cfg
+. $STF_SUITE/tests/functional/cachefile/cachefile.kshlib
+
+for pool in "$TESTPOOL" "$TESTPOOL2" "$TESTPOOL3" ; do
+ if poolexists $pool ; then
+ destroy_pool $pool
+ fi
+done
+
+for file in $CPATH1 $CPATH2 ; do
+ if [[ -f $file ]] ; then
+ log_must rm $file
+ fi
+done
+
+if pool_in_cache $TESTPOOL; then
+ log_unsupported "Skipping test group due to existing pool"
+fi
log_must zfs snapshot $SNAPFS1
typeset -i i=0
while (( $i < ${#RW_VOL_CLONE_PROP[*]} )); do
- log_must zfs clone -o ${RW_VOL_CLONE_PROP[$i]} $SNAPFS1 $TESTPOOL/$TESTCLONE
+ log_must zfs clone -o ${RW_VOL_CLONE_PROP[$i]} $SNAPFS1 \
+ $TESTPOOL/$TESTCLONE
+ block_device_wait
+
datasetexists $TESTPOOL/$TESTCLONE || \
log_fail "zfs clone $TESTPOOL/$TESTCLONE fail."
propertycheck $TESTPOOL/$TESTCLONE ${RW_VOL_CLONE_PROP[i]} || \
verify_runnable "global"
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
function cleanup
{
if poolexists $ZPOOL_VERSION_1_NAME; then
log_assert "Verify that copies cannot be set with pool version 1"
log_onexit cleanup
-cp $STF_SUITE/tests/functional/cli_root/zpool_upgrade/blockfiles/$ZPOOL_VERSION_1_FILES $TESTDIR
+cp $STF_SUITE/tests/functional/cli_root/zpool_upgrade/$ZPOOL_VERSION_1_FILES $TESTDIR
bunzip2 $TESTDIR/$ZPOOL_VERSION_1_FILES
log_must zpool import -d $TESTDIR $ZPOOL_VERSION_1_NAME
log_must zfs create $ZPOOL_VERSION_1_NAME/$TESTFS
log_must zfs create -V 1m $ZPOOL_VERSION_1_NAME/$TESTVOL
+block_device_wait
for val in 3 2 1; do
for ds in $ZPOOL_VERSION_1_NAME/$TESTFS $ZPOOL_VERSION_1_NAME/$TESTVOL; do
verify_runnable "both"
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
function cleanup
{
[[ -e $propfile ]] && rm -f $propfile
"all -rph $TESTPOOL" "all,available,reservation $TESTPOOL" \
"all $TESTPOOL?" "all $TESTPOOL*" "all nonexistpool"
+export POSIXLY_CORRECT=1
+
typeset -i i=0
while (( i < ${#bad_combine[*]} ))
do
(( i = i + 1 ))
done
+unset POSIXLY_CORRECT
+
log_pass "'zfs get all' fails with invalid combinations scenarios as expected."
typeset prop=$2
# Define property value: "reverse if value=on" "reverse if value=off"
- set -A values "noatime" "atime" \
- "nodevices" "devices" \
- "noexec" "exec" \
- "rw" "ro" \
- "nosetuid" "setuid"
+ if is_linux; then
+ set -A values "noatime" "atime" \
+ "nodev" "dev" \
+ "noexec" "exec" \
+ "rw" "ro" \
+ "nosuid" "suid"
+ else
+ set -A values "noatime" "atime" \
+ "nodevices" "devices" \
+ "noexec" "exec" \
+ "rw" "ro" \
+ "nosetuid" "setuid"
+ fi
typeset -i i=0
while (( i < ${#properties[@]} )); do
log_must zfs snapshot $TESTPOOL/$TESTFS1@snap$i
log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/testfile.$i bs=1M \
count=$blocks
- log_must sync
- log_must sleep 10
+ sync_pool
written=$(get_prop written $TESTPOOL/$TESTFS1)
((expected_written=blocks * mb_block))
within_percent $written $expected_written 99.5 || \
before_written=$(get_prop written $TESTPOOL/$TESTFS1)
log_must rm /$TESTPOOL/$TESTFS1/testfile.3
snap3_size=0
-log_must sync
-log_must sleep 10
+sync_pool
written=$(get_prop written $TESTPOOL/$TESTFS1)
writtenat3=$(get_prop written@snap3 $TESTPOOL/$TESTFS1)
[[ $written -eq $writtenat3 ]] || \
blocks=20
log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/testfile.3 bs=1M \
count=$blocks
-log_must sync
-log_must sleep 10
+sync_pool
written=$(get_prop written $TESTPOOL/$TESTFS1)
writtenat1=$(get_prop written@snap1 $TESTPOOL/$TESTFS1)
writtenat2=$(get_prop written@snap2 $TESTPOOL/$TESTFS1)
log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/snap1.clone/testfile bs=1M \
count=40
after_clone=$(get_prop written $TESTPOOL/$TESTFS1)
-[[ $before_clone -eq $after_clone ]] || \
+within_percent $before_clone $after_clone 99.5 || \
log_fail "unexpected written for clone $before_clone $after_clone"
log_note "deleted snapshot"
typeset -l snap_before_written3=$(get_prop_mb written $TESTPOOL/$TESTFS1@snap3)
log_must zfs destroy $TESTPOOL/$TESTFS1@snap2
log_mustnot snapexists $TESTPOOL/$TESTFS1@snap2
-log_must sync
-log_must sleep 10
+sync_pool
written1=$(get_prop_mb written@snap1 $TESTPOOL/$TESTFS1)
written3=$(get_prop_mb written@snap3 $TESTPOOL/$TESTFS1)
[[ $before_written1 -eq $written1 && $before_written3 -eq $written3 ]] || \
[[ $writtenat -ne 0 ]] && \
log_fail "Unexpected written@ value"
log_must dd if=/dev/urandom of=/$ds/testfile bs=1M count=$blocks
- log_must sync
- log_must sleep 10
+ sync_pool
writtenat=$(get_prop written@now $ds)
((expected_writtenat = blocks * mb_block))
within_percent $writtenat $expected_writtenat 0.1 || \
log_must zfs snapshot $ds@current
log_must dd if=/dev/urandom of=/$ds/testfile bs=1M \
count=$blocks
- log_must sync
- log_must sleep 10
+ sync_pool
done
recursive_output=$(zfs get -r written@current $TESTPOOL | \
grep -v $TESTFS1@ | grep -v $TESTFS2@ | grep -v $TESTFS3@ | \
log_fail "Unsupported testing condition."
# Clean up the test environment
+ if pgrep -x dd 2>/dev/null; then
+ pkill -x dd
+ fi
+
datasetexists $FS && log_must zfs destroy -Rf $FS
if datasetexists $VOL; then
- df -lhF ufs "$ZVOL_DEVDIR/$VOL" > /dev/null 2>&1
- (( $? == 0 )) && log_must umount -f $TESTDIR1
+ if ismounted $TESTDIR1 $NEWFS_DEFAULT_FS; then
+ log_must umount -f $TESTDIR1
+ fi
log_must zfs destroy -Rf $VOL
fi
fi
if [[ $dtst == $VOL ]]; then
- log_must umount -f $TESTDIR1
+ if ismounted $TESTDIR1 $NEWFS_DEFAULT_FS; then
+ log_must umount -f $TESTDIR1
+ fi
log_must zfs rollback $opt $dtst@$snap_point
log_must mount \
$ZVOL_DEVDIR/$TESTPOOL/$TESTVOL $TESTDIR1
# Make sure the ufs|ext2 filesystem hasn't been mounted,
# then mount the new ufs|ext2 filesystem.
- if ! ismounted "$ZVOL_DEVDIR/$VOL" $NEWFS_DEFAULT_FS; then
+ if ! ismounted $TESTDIR1 $NEWFS_DEFAULT_FS; then
log_must mount \
$ZVOL_DEVDIR/$TESTPOOL/$TESTVOL $TESTDIR1
fi
if [[ $createclone == "true" ]]; then
if datasetnonexists $clone; then
log_must zfs clone $snap $clone
+ block_device_wait
fi
fi
(( ind += 1 ))
typeset dtst
typeset snap
- pkill ${DD##*/}
+ pkill -x dd
if ismounted $TESTDIR1 $NEWFS_DEFAULT_FS; then
log_must umount -f $TESTDIR1
function cleanup
{
- poolexists $TESTPOOL && log_must zpool export $TESTPOOL
+ poolexists $TESTPOOL && log_must_busy zpool export $TESTPOOL
log_must zpool import $TESTPOOL
datasetexists $TESTPOOL@snap && \
while (( i < ${#options[*]} )); do
if [[ ${options[i]} == "-f" ]]; then
- do_unmount_multiple "${options[i]}"
+ if is_linux; then
+ do_unmount_multiple "${options[i]}" 1
+ else
+ do_unmount_multiple "${options[i]}"
+ fi
else
do_unmount_multiple "${options[i]}" 1
fi
log_must cd .zfs/snapshot/$TESTSNAP
log_mustnot zfs unmount -a
- log_must zfs unmount -fa
- log_mustnot ls
+ if is_linux; then
+ log_mustnot zfs unmount -fa
+ log_must ls
+ else
+ log_must zfs unmount -fa
+ log_mustnot ls
+ fi
log_must cd /
log_must zfs mount -a
log_must cd .zfs/snapshot/$TESTSNAP
if is_global_zone || [[ $fs != $TESTPOOL ]] ; then
- log_must zfs destroy -rf $fs
- log_mustnot ls
+ if is_linux; then
+ log_mustnot zfs destroy -rf $fs
+ log_must ls
+ else
+ log_must zfs destroy -rf $fs
+ log_mustnot ls
+ fi
log_must cd /
fi
done
if is_global_zone ; then
- log_must zpool destroy -f $TESTPOOL
- log_mustnot ls
+ if is_linux; then
+ log_mustnot zpool destroy -f $TESTPOOL
+ log_must ls
+ else
+ log_must zpool destroy -f $TESTPOOL
+ log_mustnot ls
+ fi
log_must cd /
fi
fi
export __ZFS_POOL_RESTRICT="$TESTPOOL"
+ if [[ $opt == "-fa" ]] && is_linux; then
+ log_mustnot zfs unmount $opt
+ cd /tmp
+ fi
log_must zfs unmount $opt
unset __ZFS_POOL_RESTRICT
verify_runnable "global"
+if is_linux; then
+ log_unsupported "zfs set sharenfs=off won't unshare if already off"
+fi
+
function cleanup
{
typeset -i i=0
verify_runnable "global"
+if is_linux; then
+	log_unsupported "some distros come with Samba \"user shares\" disabled"
+fi
+
function cleanup
{
log_must zfs unshare -a
verify_runnable "global"
-# See issue: https://github.com/zfsonlinux/zfs/issues/6065
+# https://github.com/zfsonlinux/zfs/issues/6145
if is_linux; then
- log_unsupported "Creating a pool containing a zvol may deadlock"
+ log_unsupported "Test case occasionally fails"
fi
function cleanup
verify_runnable "global"
+# https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
function cleanup
{
poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2
partition_disk $SLICE_SIZE $DISK 2
create_pool "$TESTPOOL" "${DISK}${SLICE_PREFIX}${SLICE0}"
-
-if is_linux; then
- # Layering a pool on a zvol can deadlock and isn't supported.
- create_pool "$TESTPOOL2" "${DISK}${SLICE_PREFIX}${SLICE1}"
-else
- create_pool "$TESTPOOL1" "${DISK}${SLICE_PREFIX}${SLICE1}"
- log_must zfs create -s -V $VOLSIZE $TESTPOOL1/$TESTVOL
- create_pool "$TESTPOOL2" "${ZVOL_DEVDIR}/$TESTPOOL1/$TESTVOL"
-fi
+create_pool "$TESTPOOL1" "${DISK}${SLICE_PREFIX}${SLICE1}"
+log_must zfs create -s -V $VOLSIZE $TESTPOOL1/$TESTVOL
+block_device_wait
+create_pool "$TESTPOOL2" "${ZVOL_DEVDIR}/$TESTPOOL1/$TESTVOL"
typeset -i i=0
while (( i < ${#datasets[*]} )); do
verify_runnable "global"
+zed_stop
+zed_cleanup
+
default_cleanup
verify_runnable "global"
+zed_setup
+zed_start
+
DISK=${DISKS%% *}
default_setup $DISK
verify_runnable "global"
-# See issue: https://github.com/zfsonlinux/zfs/issues/6065
+# See issue: https://github.com/zfsonlinux/zfs/issues/5771
if is_linux; then
- log_unsupported "Creating a pool containing a zvol may deadlock"
+ log_unsupported "Requires additional ZED support"
fi
function cleanup
for i in 1 2 3; do
log_must zfs create -V $org_size $VFS/vol$i
done
+block_device_wait
for type in " " mirror raidz raidz2; do
verify_runnable "global"
-# See issue: https://github.com/zfsonlinux/zfs/issues/6065
+# See issue: https://github.com/zfsonlinux/zfs/issues/5771
if is_linux; then
- log_unsupported "Creating a pool containing a zvol may deadlock"
+ log_unsupported "Requires additional ZED support"
fi
function cleanup
for i in 1 2 3; do
log_must zfs create -V $org_size $VFS/vol$i
done
+block_device_wait
for type in " " mirror raidz raidz2; do
log_must zpool create $TESTPOOL1 $type ${ZVOL_DEVDIR}/$VFS/vol1 \
verify_runnable "global"
+# https://github.com/zfsonlinux/zfs/issues/6141
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
function cleanup
{
for config in $CONFIGS; do
verify_runnable "global"
+if is_linux; then
+ log_unsupported "Requires additional dependencies"
+fi
+
log_assert "zfs share returns an error when run as a user"
if is_shared $TESTDIR/unshared
verify_runnable "global"
+if is_linux; then
+ log_unsupported "Requires additional dependencies"
+fi
+
log_assert "zfs unshare returns an error when run as a user"
# verify that the filesystem was shared initially
log_must zfs snapshot $dtstsnap
log_must eval "zfs send $dtstsnap > $bak_root"
- log_must zfs destroy -rf $dtst
+ log_must_busy zfs destroy -rf $dtst
user_run $user eval "zfs receive $dtst < $bak_root"
if datasetexists $dtstsnap ; then
# check the data integrity
log_must eval "zfs send $dtstsnap > $bak_user"
- log_must zfs destroy -rf $dtst
+ log_must_busy zfs destroy -rf $dtst
log_must eval "zfs receive $dtst < $bak_root"
log_must eval "zfs send $dtstsnap > $bak_root"
- log_must zfs destroy -rf $dtst
+ log_must_busy zfs destroy -rf $dtst
if [[ $(checksum $bak_user) != $(checksum $bak_root) ]]; then
return 1
fi
return 1
fi
- log_must zfsallow $user mount $fs
- user_run $user zfscreate -V 150m $newvol
+ log_must zfs allow $user mount $fs
+ user_run $user zfs create -V 150m $newvol
block_device_wait
- log_must zfsunallow $user mount $fs
+ log_must zfs unallow $user mount $fs
if datasetexists $newvol ; then
return 1
fi
- log_must zfsallow $user reservation $fs
- user_run $user zfscreate -V 150m $newvol
+ log_must zfs allow $user reservation $fs
+ user_run $user zfs create -V 150m $newvol
block_device_wait
- log_must zfsunallow $user reservation $fs
+ log_must zfs unallow $user reservation $fs
if datasetexists $newvol ; then
return 1
fi
- log_must zfsallow $user refreservation $fs
- user_run $user zfscreate -V 150m $newvol
+ log_must zfs allow $user refreservation $fs
+ user_run $user zfs create -V 150m $newvol
block_device_wait
- log_must zfsunallow $user refreservation $fs
+ log_must zfs unallow $user refreservation $fs
if datasetexists $newvol ; then
return 1
fi
fi
block_device_wait
- log_must zfsdestroy $newvol
+ log_must zfs destroy $newvol
block_device_wait
fi
typeset fs=$3
value="2k"
- user_run $user zfsset dnodesize=$value $fs
+ user_run $user zfs set dnodesize=$value $fs
if [[ $value != $(get_prop dnodesize $fs) ]]; then
return 1
fi
verify_runnable "global"
+if is_32bit; then
+ log_unsupported "Test case fails on 32-bit systems"
+fi
+
log_assert "Setting devices=on on file system, the devices files in this file" \
"system can be used."
log_onexit cleanup
verify_runnable "global"
+if is_32bit; then
+ log_unsupported "Test case fails on 32-bit systems"
+fi
+
log_assert "Setting devices=off on file system, the devices files in this file"\
"system can not be used."
log_onexit cleanup
case $filetype in
b)
if is_linux; then
- devtype=$(df -T / | awk '{print $2}')
- else
- devtype=$(df -n / | awk '{print $3}')
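+			# Build the node from the major/minor of the first
+			# [hsv]d entry in /proc/partitions.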
+ major=$(awk '/[hsv]d/ { print $1; exit }' \
+ /proc/partitions)
+ minor=$(awk '/[hsv]d/ { print $2; exit }' \
+ /proc/partitions)
+ log_must mknod $filename b $major $minor
+ return 0
fi
+
+ devtype=$(df -n / | awk '{print $3}')
case $devtype in
zfs)
rootpool=$(df / | \
[[ -z $devstr ]] && \
log_fail "Can not get block device file."
;;
- ext2)
- # TODO: Linux version
- ;;
*)
log_unsupported "Unsupported fstype " \
"for / ($devtype)," \
#
# Create device file '/dev/null'
#
- log_must mknod $filename c $(getmajor mm) 2
+ if is_linux; then
+ major=$(stat -c %t /dev/null)
+ minor=$(stat -c %T /dev/null)
+ log_must mknod $filename c $major $minor
+ else
+ log_must mknod $filename c $(getmajor mm) 2
+ fi
;;
*)
log_fail "'$filetype' is wrong."
log_must cp $STF_PATH/ls $TESTDIR/myls
log_must zfs set exec=on $TESTPOOL/$TESTFS
log_must $TESTDIR/myls
-log_must $MMAP_EXEC $TESTDIR/myls
+log_must mmap_exec $TESTDIR/myls
log_pass "Setting exec=on on filesystem testing passed."
"from this file system."
log_onexit cleanup
-log_must cp /usr/bin/ls $TESTDIR/myls
+log_must cp $STF_PATH/ls $TESTDIR/myls
log_must zfs set exec=off $TESTPOOL/$TESTFS
-log_must exec_n_check 126 $TESTDIR/myls
-log_must exec_n_check 13 $MMAP_EXEC $TESTDIR/myls
+if is_linux; then
+ log_must exec_n_check 126 $TESTDIR/myls
+ log_must exec_n_check 1 mmap_exec $TESTDIR/myls # EPERM
+else
+ log_must exec_n_check 126 $TESTDIR/myls
+ log_must exec_n_check 13 mmap_exec $TESTDIR/myls # EACCES
+fi
log_pass "Setting exec=off on filesystem testing passed."
cleanup_devices $DISKS
-# Remove symlink and vdev_id.conf in-tree file
-rm -f $VDEVID_CONF_ETC
-rm -f $VDEVID_CONF
zed_stop
+zed_cleanup
SD=$(lsscsi | nawk '/scsi_debug/ {print $6; exit}')
SDDEVICE=$(echo $SD | nawk -F / '{print $3}')
export DISK2=$(echo $DISKS | nawk '{print $2}')
export DISK3=$(echo $DISKS | nawk '{print $3}')
-export VDEVID_CONF=$ZEDLET_DIR/vdev_id.conf
-export VDEVID_CONF_ETC=/etc/zfs/vdev_id.conf
-
if is_linux; then
set_slice_prefix
set_device_dir
verify_runnable "global"
-if [[ ! -d $ZEDLET_DIR ]]; then
- log_must mkdir $ZEDLET_DIR
-fi
-
-if [[ ! -e $VDEVID_CONF ]]; then
- log_must touch $VDEVID_CONF
-fi
-
-if [[ -e $VDEVID_CONF_ETC ]]; then
- log_fail "Must not have $VDEVID_CONF_ETC file present on system"
-fi
-
-# Create a symlink for /etc/zfs/vdev_id.conf file
-log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
-
+zed_setup
zed_start
# Create a scsi_debug device to be used with auto-online (if using loop devices)
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/grow_pool
dist_pkgdata_SCRIPTS = \
+ setup.ksh \
+ cleanup.ksh \
grow_pool.cfg \
grow_pool_001_pos.ksh
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/grow_pool/grow_pool.cfg
+
+verify_runnable "global"
+
+ismounted $TESTFS && \
+ log_must zfs umount $TESTDIR
+destroy_pool "$TESTPOOL"
+
+#
+# Here we create & destroy a zpool using the disks
+# because this resets the partitions to normal
+#
+if [[ -z $DISK ]]; then
+ create_pool ZZZ "$DISK0 $DISK1"
+ destroy_pool ZZZ
+else
+ create_pool ZZZ "$DISK"
+ destroy_pool ZZZ
+fi
+
+log_pass
. $STF_SUITE/include/libtest.shlib
+export DISKSARRAY=$DISKS
+export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
+
function set_disks
{
set -A disk_array $(find_disks $DISKS)
--- /dev/null
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# Copyright (c) 2013 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/grow_pool/grow_pool.cfg
+
+verify_runnable "global"
+
+if ! $(is_physical_device $DISKS) ; then
+ log_unsupported "This directory cannot be run on raw files."
+fi
+
+if [[ -n $DISK ]]; then
+ log_note "No spare disks available. Using slices on $DISK"
+ for i in $SLICE0 $SLICE1 ; do
+ log_must set_partition $i "$cyl" $SIZE $DISK
+ cyl=$(get_endslice $DISK $i)
+ done
+ tmp=$DISK"s"$SLICE0
+else
+ log_must set_partition $SLICE "" $SIZE $DISK0
+ log_must set_partition $SLICE "" $SIZE $DISK1
+ tmp=$DISK0$SLICE_PREFIX$SLICE
+fi
+
+default_setup $tmp
. $STF_SUITE/include/libtest.shlib
+export DISKSARRAY=$DISKS
+export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
+
function set_disks
{
set -A disk_array $(find_disks $DISKS)
verify_runnable "global"
+if is_32bit; then
+ log_unsupported "Test case fails on 32-bit systems"
+fi
+
if ! is_physical_device $DISKS; then
log_unsupported "This test case cannot be run on raw files"
fi
# $DISK will be set if we're using slices on one disk
if [[ -n $DISK ]]; then
- log_must zpool add $TESTPOOL $pooltype ${DISK}s3 ${DISK}s4
+ log_must zpool add $TESTPOOL $pooltype \
+ ${DISK}${SLICE_PREFIX}${SLICE3} \
+ ${DISK}${SLICE_PREFIX}${SLICE4}
else
[[ -z $DISK2 || -z $DISK3 ]] &&
log_unsupported "No spare disks available"
verify_runnable "global"
+# See issue: https://github.com/zfsonlinux/zfs/issues/5657
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
function cleanup
{
if datasetexists $fs ; then
verify_runnable "global"
+# See issue: https://github.com/zfsonlinux/zfs/issues/5658
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
function cleanup
{
if datasetexists $root_testfs; then
. $STF_SUITE/include/libtest.shlib
if is_linux; then
- DISK_ARRAY_NUM=2
+ export DISKSARRAY=$DISKS
+ export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
set_device_dir
set_slice_prefix
export SLICE0=1
verify_runnable "global"
+if is_linux; then
+ log_unsupported "Test case isn't applicable to Linux"
+fi
+
function cleanup
{
#
verify_runnable "global"
+if is_linux; then
+ log_unsupported "Test case isn't applicable to Linux"
+fi
+
function cleanup
{
poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
typeset -i dirnum=0
typeset -i filenum=0
typeset cwd=""
+typeset cyl=""
for num in 0 1 2; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%s*}
slice=${slice##*${SLICE_PREFIX}}
- log_must set_partition $slice "" $FS_SIZE $disk
+ log_must set_partition $slice "$cyl" $FS_SIZE $disk
+ cyl=$(get_endslice $disk $slice)
done
log_note "Make a ufs filesystem on source $rawdisk1"
verify_runnable "global"
+if ! is_physical_device $FS_DISK0; then
+ log_unsupported "This directory cannot be run on raw files."
+fi
+
function cleanup
{
poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
unset NOINUSE_CHECK
while (( i < ${#vdevs[*]} )); do
+ for num in 0 1 2 3 ; do
+ eval typeset disk=\${FS_DISK$num}
+ zero_partitions $disk
+ done
+
+ typeset cyl=""
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
slice=${slice##*${SLICE_PREFIX}}
- log_must set_partition $slice "" $FS_SIZE $disk
+ log_must set_partition $slice "$cyl" $FS_SIZE $disk
+ cyl=$(get_endslice $disk $slice)
done
if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
verify_runnable "global"
+if is_linux; then
+ log_unsupported "Test case isn't applicable to Linux"
+fi
+
function cleanup
{
if [[ -n $PREVDUMPDEV ]]; then
unset NOINUSE_CHECK
while (( i < ${#vdevs[*]} )); do
+ for num in 0 1 2 3 ; do
+ eval typeset disk=\${FS_DISK$num}
+ zero_partitions $disk
+ done
+
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
verify_runnable "global"
+if is_linux; then
+ log_unsupported "Test case isn't applicable to Linux"
+fi
+
function cleanup
{
if [[ -n $PREVDUMPDEV ]]; then
while (( i < ${#vdevs[*]} )); do
+ for num in 0 1 2 3 ; do
+ eval typeset disk=\${FS_DISK$num}
+ zero_partitions $disk
+ done
+
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
verify_runnable "global"
+if ! is_physical_device $FS_DISK0; then
+ log_unsupported "This directory cannot be run on raw files."
+fi
+
function cleanup
{
poolexists $TESTPOOL1 || zpool import $TESTPOOL1 >/dev/null 2>&1
set -A vdevs "" "mirror" "raidz" "raidz1" "raidz2"
typeset -i i=0
+typeset cyl=""
+
+for num in 0 1 2 3 ; do
+ eval typeset disk=\${FS_DISK$num}
+ zero_partitions $disk
+done
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
slice=${slice##*${SLICE_PREFIX}}
- log_must set_partition $slice "" $FS_SIZE $disk
+ log_must set_partition $slice "$cyl" $FS_SIZE $disk
+ cyl=$(get_endslice $disk $slice)
done
while (( i < ${#vdevs[*]} )); do
verify_runnable "global"
+if ! is_physical_device $FS_DISK0; then
+ log_unsupported "This directory cannot be run on raw files."
+fi
+
function cleanup
{
poolexists $TESTPOOL1 || zpool import $TESTPOOL1 >/dev/null 2>&1
typeset targets=$1
for t in $targets; do
- log_must set_partition 0 "" 0mb $t
+ log_must zero_partitions $t
done
return 0
while (( i < ${#vdevs[*]} )); do
+ typeset cyl=""
for num in 0 1 2 3 ; do
eval typeset slice=\${FS_SIDE$num}
disk=${slice%${SLICE_PREFIX}*}
slice=${slice##*${SLICE_PREFIX}}
- log_must set_partition $slice "" $FS_SIZE $disk
+ log_must set_partition $slice "$cyl" $FS_SIZE $disk
+ cyl=$(get_endslice $disk $slice)
done
if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
. $STF_SUITE/include/libtest.shlib
+if is_32bit; then
+ log_unsupported "Test case fails on 32-bit systems"
+fi
+
#
# DESCRIPTION:
# Write a file to the allowable ZFS fs size.
export TESTVOL=testvol$$
export VOL_PATH=${ZVOL_DEVDIR}/${TESTPOOL2}/$TESTVOL
-export VOLSIZES=${VOLSIZES-"2pb 5pb 10pb 2eb 5eb 8eb 9eb"}
+export VOLSIZES=${VOLSIZES-"2pb 5pb 10pb 2eb 5eb 7eb"}
# There are 3 different prompt messages when creating
# a volume greater than 1TB on 32-bit systems
verify_runnable "global"
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
#
# Parse the results of zpool & zfs creation with specified size
#
log_fail "zfs create -sV $volsize $TESTPOOL2/$TESTVOL"
fi
fi
+ block_device_wait
log_note "Create the largest pool allowed using the volume vdev"
create_pool $TESTPOOL "$VOL_PATH"
export NUMFILES=10000
# Make sure this test is executed on a multi-processor system
-is_mp || log_fail "This test requires a multi-processor system."
+if ! is_mp; then
+ log_unsupported "This test requires a multi-processor system."
+fi
log_must mkdir -p ${TESTDIR}/tmp
"result in a deadlock."
# Make sure this test is executed on a multi-processor system
-is_mp || log_fail "This test requires a multi-processor system."
+if ! is_mp; then
+ log_unsupported "This test requires a multi-processor system."
+fi
log_must chmod 777 $TESTDIR
mmapwrite $TESTDIR/test-write-file &
zfs_list="/ /lib /sbin /tmp /usr /var /var/adm /var/run"
# Append our ZFS filesystems to the list, not worrying about duplicates.
-for fs in $(mount -p | awk '{if ($4 == "zfs") print $3}'); do
+if is_linux; then
+ typeset mounts=$(mount | awk '{if ($5 == "zfs") print $3}')
+else
+ typeset mounts=$(mount -p | awk '{if ($4 == "zfs") print $3}')
+fi
+
+for fs in $mounts; do
zfs_list="$zfs_list $fs"
done
+if is_linux; then
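+	# 'umount --fake' reports what would be unmounted without unmounting,
+	# the Linux counterpart of 'umountall -n' below.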
+ mounts=$(umount --fake -av -t zfs 2>&1 | \
+ grep "successfully umounted" | awk '{print $1}')
+ # Fallback to /proc/mounts for umount(8) (util-linux-ng 2.17.2)
+ if [[ -z $mounts ]]; then
+ mounts=$(awk '/zfs/ { print $2 }' /proc/mounts)
+ fi
+else
+ mounts=$(umountall -n -F zfs 2>&1 | awk '{print $2}')
+fi
+
fs=''
-for fs in $(umountall -n -F zfs 2>&1 | awk '{print $2}'); do
+for fs in $mounts; do
for i in $zfs_list; do
[[ $fs = $i ]] && continue 2
done
log_fail "umountall -n -F zfs tried to unmount $fs"
done
-[[ -n $fs ]] || log_fail "umountall -n -F zfs produced no output"
+[[ -n $mounts ]] || log_fail "umountall -n -F zfs produced no output"
log_pass "All ZFS file systems would have been unmounted"
typeset low=1
typeset high=99
- sync
+ sync_pool
for i in origin snap clone; do
for j in used refer usedbychildren written; do
typeset ${i}_$j=$(get_prop $j $(eval echo \$$i))
#
. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/online_offline/online_offline.cfg
#
# DESCRIPTION:
typeset killpid="$! "
for disk in $DISKLIST; do
- for i in 'do_offline' 'do_offline_while_already_offline'; do
+ for i in 'do_offline' 'do_offline_while_already_offline'; do
log_must zpool offline $TESTPOOL $disk
check_state $TESTPOOL $disk "offline"
- if [[ $? != 0 ]]; then
- log_fail "$disk of $TESTPOOL is not offline."
- fi
- done
-
- log_must zpool online $TESTPOOL $disk
- check_state $TESTPOOL $disk "online"
- if [[ $? != 0 ]]; then
- log_fail "$disk of $TESTPOOL did not match online state"
- fi
+ if [[ $? != 0 ]]; then
+ log_fail "$disk of $TESTPOOL is not offline."
+ fi
+ done
+
+ log_must zpool online $TESTPOOL $disk
+ check_state $TESTPOOL $disk "online"
+ if [[ $? != 0 ]]; then
+ log_fail "$disk of $TESTPOOL did not match online state"
+ fi
+
+ # Delay for resilver to complete
+ sleep 3
done
log_must kill $killpid
#
. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/online_offline/online_offline.cfg
#
# DESCRIPTION:
log_must zpool online $TESTPOOL ${disks[$i]}
check_state $TESTPOOL ${disks[$i]} "online" || \
log_fail "Failed to set ${disks[$i]} online"
+ # Delay for resilver to complete
+ while ! is_pool_resilvered $TESTPOOL; do
+ log_must sleep 1
+ done
+ log_must zpool clear $TESTPOOL
while [[ $j -lt ${#disks[*]} ]]; do
if [[ $j -eq $i ]]; then
((j++))
log_must zpool online $TESTPOOL ${disks[$i]}
check_state $TESTPOOL ${disks[$i]} "online" || \
log_fail "Failed to set ${disks[$i]} online"
+ # Delay for resilver to complete
+ while ! is_pool_resilvered $TESTPOOL; do
+ log_must sleep 1
+ done
+ log_must zpool clear $TESTPOOL
fi
((i++))
done
# We can only run this in the global zone
verify_runnable "global"
+if is_linux; then
+ log_unsupported "Requires pfexec command"
+fi
+
log_assert "The RBAC profile \"ZFS Storage Management\" works"
ZFS_USER=$(cat /tmp/zfs-privs-test-user.txt)
verify_runnable "both"
+if is_linux; then
+ log_unsupported "Requires pfexec command"
+fi
+
log_assert "The RBAC profile \"ZFS File System Management\" works"
ZFS_USER=$(cat /tmp/zfs-privs-test-user.txt)
setup_test_env $TESTPOOL "" $cnt
damage_devs $TESTPOOL 1 "keep_label"
-log_must zpool clear $TESTPOOL
+log_must zpool scrub $TESTPOOL
-# Wait for the scrub intiated by the clear to wrap, or is_healthy will be wrong.
+# Wait for the scrub to wrap, or is_healthy will be wrong.
while ! is_pool_scrubbed $TESTPOOL; do
- sync
+ sleep 1
done
log_mustnot is_healthy $TESTPOOL
cd $TESTDIR
mkdir -p 1/2/3/4/5 a/b/c/d/e
-$RENAME_DIRS &
+rename_dir &
-sleep 500
+sleep 10
typeset -i retval=1
-pgrep $RENAME_DIRS >/dev/null 2>&1
+pgrep -x rename_dir >/dev/null 2>&1
retval=$?
if (( $retval == 0 )); then
- pkill -9 $RENAME_DIRS >/dev/null 2>&1
+ pkill -9 -x rename_dir >/dev/null 2>&1
fi
log_pass "ZFS handle race directory rename operation as expected."
verify_runnable "both"
+# See issue: https://github.com/zfsonlinux/zfs/issues/6086
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
log_assert "Verify resumability of a full and incremental ZFS send/receive " \
"in the presence of a corrupted stream"
log_onexit resume_cleanup $sendfs $streamfs
verify_runnable "global"
+if ! $(is_physical_device $DISKS) ; then
+ log_unsupported "This directory cannot be run on raw files."
+fi
+
function cleanup_testenv
{
cleanup
log_must zpool destroy -f $TESTPOOL2
fi
if [[ -n $lofidev ]]; then
- lofiadm -d $lofidev
+ if is_linux; then
+ losetup -d $lofidev
+ else
+ lofiadm -d $lofidev
+ fi
fi
}
log_must verify_slog_device $TESTPOOL $ldev 'ONLINE'
# Add lofi device
-lofidev=${LDEV2%% *}
-log_must lofiadm -a $lofidev
-lofidev=$(lofiadm $lofidev)
+if is_linux; then
+ lofidev=$(losetup -f)
+ lofidev=${lofidev##*/}
+ log_must losetup $lofidev ${LDEV2%% *}
+else
+ lofidev=${LDEV2%% *}
+ log_must lofiadm -a $lofidev
+ lofidev=$(lofiadm $lofidev)
+fi
log_must zpool add $TESTPOOL log $lofidev
log_must verify_slog_device $TESTPOOL $lofidev 'ONLINE'
# Add ZFS volume
vol=$TESTPOOL/vol
log_must zfs create -V $MINVDEVSIZE $vol
-log_must zpool add $TESTPOOL ${ZVOL_DEVDIR}/$vol
\ No newline at end of file
+log_must zpool add $TESTPOOL ${ZVOL_DEVDIR}/$vol
log_must zpool scrub $TESTPOOL
log_must display_status $TESTPOOL
log_must zpool status $TESTPOOL 2>&1 >/dev/null
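+	# Offline one log device so the pool reports DEGRADED below.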
+ log_must zpool offline $TESTPOOL $VDIR/a
zpool status -v $TESTPOOL | \
grep "state: DEGRADED" 2>&1 >/dev/null
log_fail "log device should display correct status"
fi
+ log_must zpool online $TESTPOOL $VDIR/a
log_must zpool destroy -f $TESTPOOL
done
done
verify_runnable "both"
+# See issue: https://github.com/zfsonlinux/zfs/issues/6145
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
# Setup array, 4 elements as a group, refer to:
# i+0: name of a snapshot
# i+1: mountpoint of the snapshot
verify_runnable "both"
+# https://github.com/zfsonlinux/zfs/issues/6143
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
function cleanup
{
typeset snap=""
export BLOCKSZ=8192
export NUM_WRITES=20
export DATA=0
-export LIMIT=524288 # tolerance measured in bytes, 512K
+export LIMIT=2097152 # tolerance measured in bytes, 2M
export FSQUOTA=500m
export FILESIZE=400m
export FILESIZE1=200m
done
wait_freeing $TESTPOOL
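+# Sync the pool so the freed space is reflected in 'available' below.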
+sync_pool
new_size=`get_prop available $TESTPOOL`
log_must zfs destroy -Rf $SNAPFS
datasetexists $TESTPOOL/$TESTFS@snap_a && \
log_must zfs destroy -Rf $TESTPOOL/$TESTFS@snap_a
+ datasetexists $TESTPOOL/$TESTFS@snap_b && \
+ log_must zfs destroy -Rf $TESTPOOL/$TESTFS@snap_b
datasetexists $TESTPOOL/$TESTCLONE@snap_a && \
log_must zfs destroy -Rf $TESTPOOL/$TESTCLONE@snap_a
log_must zfs snapshot $SNAPFS
log_must zfs clone $SNAPFS $TESTPOOL/$TESTCLONE
-log_must mv $TESTDIR/$SNAPROOT/$TESTSNAP $TESTDIR/$SNAPROOT/snap_a
+log_must mv $TESTDIR/$SNAPROOT/$TESTSNAP $TESTDIR/$SNAPROOT/snap_b
-datasetexists $TESTPOOL/$TESTFS@snap_a || \
+datasetexists $TESTPOOL/$TESTFS@snap_b || \
log_fail "rename snapshot via mv in .zfs/snapshot fails."
log_must zfs promote $TESTPOOL/$TESTCLONE
# promote back to $TESTPOOL/$TESTFS for scenario 3
log_must zfs promote $TESTPOOL/$TESTFS
log_must zfs destroy $TESTPOOL/$TESTCLONE
-log_must zfs destroy $TESTPOOL/$TESTFS@snap_a
+log_must zfs destroy $TESTPOOL/$TESTFS@snap_b
# scenario 3
verify_runnable "both"
+# See issue: https://github.com/zfsonlinux/zfs/issues/6136
+if is_linux; then
+ log_unsupported "Test case occasionally fails"
+fi
+
log_assert "Ensure multiple threads performing write appends to the same" \
"ZFS file succeed"
# zfs_threadsappend tries to append to $TESTFILE using threads
# so that the resulting file is $FILE_SIZE bytes in size
#
-log_must $THREADSAPPEND ${TESTDIR}/${TESTFILE}
+log_must threadsappend ${TESTDIR}/${TESTFILE}
#
# Check the size of the resulting file