From 8fe453b6718c089fdd248f45fd0adee26bd42b66 Mon Sep 17 00:00:00 2001
From: cao <cao.xuewen@zte.com.cn>
Date: Tue, 23 Aug 2016 10:12:41 +0800
Subject: [PATCH] Update zfs_destroy_004.ksh script

Issues:
Under Linux, executing `zfs destroy $fs` in zfs_destroy_004.ksh fails.
The key issue here is that the illumos kernel treats this case
differently than the Linux kernel. On illumos you can unmount and
destroy a filesystem which is busy, and all consumers of it get EIO.
On Linux the expected behavior is to prevent the unmount and destroy.

Cause analysis:
The test creates the $fs filesystem, mounts it at $mntp, and then
changes directory into $mntp. Linux does not allow destroying $fs
while its mountpoint is the current working directory, regardless of
which options are passed to `zfs destroy`.

Solution:
On Linux, expect the destroy to fail while inside the mountpoint
(log_mustnot $ZFS destroy $fs), then `cd $olddir` before destroying
$fs.

Signed-off-by: caoxuewen cao.xuewen@zte.com.cn
Signed-off-by: Brian Behlendorf
Closes #5012
---
 tests/runfiles/linux.run                                   |  7 +++----
 .../cli_root/zfs_destroy/zfs_destroy_004_pos.ksh           | 12 +++++++++---
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index a4a2686a1..5685ea21b 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -96,7 +96,6 @@ tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
 # DISABLED:
 # zfs_destroy_001_pos - busy mountpoint behavior
-# zfs_destroy_004_pos - busy mountpoint behavior
 # zfs_destroy_005_neg - busy mountpoint behavior
 # zfs_destroy_008_pos - busy mountpoint behavior
 # zfs_destroy_009_pos - busy mountpoint behavior
@@ -105,9 +104,9 @@ tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
 # zfs_destroy_012_pos - busy mountpoint behavior
 # zfs_destroy_013_neg - busy mountpoint behavior
 [tests/functional/cli_root/zfs_destroy]
-tests = ['zfs_destroy_002_pos', 'zfs_destroy_003_pos', 'zfs_destroy_006_neg',
-    'zfs_destroy_007_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
-    'zfs_destroy_016_pos']
+tests = ['zfs_destroy_002_pos', 'zfs_destroy_003_pos', 'zfs_destroy_004_pos',
+    'zfs_destroy_006_neg', 'zfs_destroy_007_neg', 'zfs_destroy_014_pos',
+    'zfs_destroy_015_pos', 'zfs_destroy_016_pos']
 
 # DISABLED:
 # zfs_get_004_pos - nested pools
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh
index 6919bdf42..317e37869 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_004_pos.ksh
@@ -110,9 +110,15 @@ for arg in "$fs1 $mntp1" "$clone $mntp2"; do
 
 	cd $mntp
 	log_mustnot $ZFS destroy $fs
-	log_must $ZFS destroy -f $fs
-	datasetexists $fs && \
-		log_fail "'zfs destroy -f' fails to destroy busy filesystem."
+	if is_linux; then
+		log_mustnot $ZFS destroy -f $fs
+		datasetnonexists $fs && \
+			log_fail "'zfs destroy -f' destroyed busy filesystem."
+	else
+		log_must $ZFS destroy -f $fs
+		datasetexists $fs && \
+			log_fail "'zfs destroy -f' fail to destroy busy filesystem."
+	fi
 	cd $olddir
 done
-- 
2.40.0