bzero(labels, sizeof (labels));
+ /*
+ * Check if we were given an absolute path and use it as is.
+ * Otherwise, if the provided vdev name doesn't point to a file,
+ * try prepending expected disk paths and partition numbers.
+ */
(void) strlcpy(path, dev, sizeof (path));
+ if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
+ int error;
+
+ error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
+ if (error == 0 && zfs_dev_is_whole_disk(path)) {
+ if (zfs_append_partition(path, MAXPATHLEN) == -1)
+ error = ENOENT;
+ }
+
+ if (error || (stat64(path, &statbuf) != 0)) {
+ (void) printf("failed to find device %s, try "
+ "specifying absolute path instead\n", dev);
+ return (1);
+ }
+ }
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
+ if (ioctl(fd, BLKFLSBUF) != 0)
+ (void) printf("failed to invalidate cache '%s' : %s\n", path,
+ strerror(errno));
+
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
}
/*
- * zpool labelclear <vdev>
+ * zpool labelclear [-f] <vdev>
+ *
+ * -f Force clearing the label for vdevs that are members of
+ * exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
int
zpool_do_labelclear(int argc, char **argv)
{
- char *vdev, *name;
+ char vdev[MAXPATHLEN];
+ char *name = NULL;
+ struct stat st;
int c, fd = -1, ret = 0;
+ nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
/* get vdev name */
if (argc < 1) {
- (void) fprintf(stderr, gettext("missing vdev device name\n"));
+ (void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
-
- vdev = argv[0];
- if ((fd = open(vdev, O_RDWR)) < 0) {
- (void) fprintf(stderr, gettext("Unable to open %s\n"), vdev);
- return (B_FALSE);
+ if (argc > 1) {
+ (void) fprintf(stderr, gettext("too many arguments\n"));
+ usage(B_FALSE);
}
- name = NULL;
- if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0) {
- if (force)
- goto wipe_label;
+ /*
+ * Check if we were given an absolute path and use it as is.
+ * Otherwise, if the provided vdev name doesn't point to a file,
+ * try prepending expected disk paths and partition numbers.
+ */
+ (void) strlcpy(vdev, argv[0], sizeof (vdev));
+ if (vdev[0] != '/' && stat(vdev, &st) != 0) {
+ int error;
+
+ error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
+ if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
+ if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
+ error = ENOENT;
+ }
- (void) fprintf(stderr,
- gettext("Unable to determine pool state for %s\n"
- "Use -f to force the clearing any label data\n"), vdev);
+ if (error || (stat(vdev, &st) != 0)) {
+ (void) fprintf(stderr, gettext(
+ "failed to find device %s, try specifying absolute "
+ "path instead\n"), argv[0]);
+ return (1);
+ }
+ }
+ if ((fd = open(vdev, O_RDWR)) < 0) {
+ (void) fprintf(stderr, gettext("failed to open %s: %s\n"),
+ vdev, strerror(errno));
return (1);
}
- if (inuse) {
- switch (state) {
- default:
- case POOL_STATE_ACTIVE:
- case POOL_STATE_SPARE:
- case POOL_STATE_L2CACHE:
- (void) fprintf(stderr,
- gettext("labelclear operation failed.\n"
- "\tVdev %s is a member (%s), of pool \"%s\".\n"
- "\tTo remove label information from this device, "
- "export or destroy\n\tthe pool, or remove %s from "
- "the configuration of this pool\n\tand retry the "
- "labelclear operation.\n"),
- vdev, zpool_pool_state_to_name(state), name, vdev);
- ret = 1;
- goto errout;
+ if (zpool_read_label(fd, &config, NULL) != 0 || config == NULL) {
+ (void) fprintf(stderr,
+ gettext("failed to check state for %s\n"), vdev);
+ return (1);
+ }
+ nvlist_free(config);
- case POOL_STATE_EXPORTED:
- if (force)
- break;
+ ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
+ if (ret != 0) {
+ (void) fprintf(stderr,
+ gettext("failed to check state for %s\n"), vdev);
+ return (1);
+ }
- (void) fprintf(stderr,
- gettext("labelclear operation failed.\n\tVdev "
- "%s is a member of the exported pool \"%s\".\n"
- "\tUse \"zpool labelclear -f %s\" to force the "
- "removal of label\n\tinformation.\n"),
- vdev, name, vdev);
- ret = 1;
- goto errout;
+ if (!inuse)
+ goto wipe_label;
- case POOL_STATE_POTENTIALLY_ACTIVE:
- if (force)
- break;
+ switch (state) {
+ default:
+ case POOL_STATE_ACTIVE:
+ case POOL_STATE_SPARE:
+ case POOL_STATE_L2CACHE:
+ (void) fprintf(stderr, gettext(
+ "%s is a member (%s) of pool \"%s\"\n"),
+ vdev, zpool_pool_state_to_name(state), name);
+ ret = 1;
+ goto errout;
- (void) fprintf(stderr,
- gettext("labelclear operation failed.\n"
- "\tVdev %s is a member of the pool \"%s\".\n"
- "\tThis pool is unknown to this system, but may "
- "be active on\n\tanother system. Use "
- "\'zpool labelclear -f %s\' to force the\n"
- "\tremoval of label information.\n"),
- vdev, name, vdev);
- ret = 1;
- goto errout;
+ case POOL_STATE_EXPORTED:
+ if (force)
+ break;
+ (void) fprintf(stderr, gettext(
+ "use '-f' to override the following error:\n"
+ "%s is a member of exported pool \"%s\"\n"),
+ vdev, name);
+ ret = 1;
+ goto errout;
- case POOL_STATE_DESTROYED:
- /* inuse should never be set for a destroyed pool... */
+ case POOL_STATE_POTENTIALLY_ACTIVE:
+ if (force)
break;
- }
+ (void) fprintf(stderr, gettext(
+ "use '-f' to override the following error:\n"
+ "%s is a member of potentially active pool \"%s\"\n"),
+ vdev, name);
+ ret = 1;
+ goto errout;
+
+ case POOL_STATE_DESTROYED:
+ /* inuse should never be set for a destroyed pool */
+ assert(0);
+ break;
}
wipe_label:
- if (zpool_clear_label(fd) != 0) {
+ ret = zpool_clear_label(fd);
+ if (ret != 0) {
(void) fprintf(stderr,
- gettext("Label clear failed on vdev %s\n"), vdev);
- ret = 1;
+ gettext("failed to clear label for %s\n"), vdev);
}
errout:
- close(fd);
- if (name != NULL)
- free(name);
+ free(name);
+ (void) close(fd);
return (ret);
}
return (error);
}
-/*
- * By "whole disk" we mean an entire physical disk (something we can
- * label, toggle the write cache on, etc.) as opposed to the full
- * capacity of a pseudo-device such as lofi or did. We act as if we
- * are labeling the disk, which should be a pretty good test of whether
- * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
- * it isn't.
- */
-static boolean_t
-is_whole_disk(const char *path)
-{
- struct dk_gpt *label;
- int fd;
-
- if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
- return (B_FALSE);
- if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
- (void) close(fd);
- return (B_FALSE);
- }
- efi_free(label);
- (void) close(fd);
- return (B_TRUE);
-}
-
/*
* This may be a shorthand device path or it could be total gibberish.
* Check to see if it is a known device available in zfs_vdev_paths.
error = zfs_resolve_shortname(arg, path, path_size);
if (error == 0) {
- *wholedisk = is_whole_disk(path);
+ *wholedisk = zfs_dev_is_whole_disk(path);
if (*wholedisk || (stat64(path, statbuf) == 0))
return (0);
}
/*
* Complete device or file path. Exact type is determined by
* examining the file descriptor afterwards. Symbolic links
- * are resolved to their real paths for the is_whole_disk()
+ * are resolved to their real paths for the whole disk
* and S_ISBLK/S_ISREG type checks. However, we are careful
* to store the given path as ZPOOL_CONFIG_PATH to ensure we
* can leverage udev's persistent device labels.
return (NULL);
}
- wholedisk = is_whole_disk(path);
+ wholedisk = zfs_dev_is_whole_disk(path);
if (!wholedisk && (stat64(path, &statbuf) != 0)) {
(void) fprintf(stderr,
gettext("cannot open '%s': %s\n"),
return (NULL);
}
- /* After is_whole_disk() check restore original passed path */
+ /* After the whole disk check, restore the original passed path */
strlcpy(path, arg, sizeof (path));
} else {
err = is_shorthand_path(arg, path, sizeof (path),
tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_history/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_import/Makefile
+ tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_offline/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_online/Makefile
extern int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, char *);
int zfs_dev_is_dm(char *dev_name);
+int zfs_dev_is_whole_disk(char *dev_name);
char *zfs_get_underlying_path(char *dev_name);
char *zfs_get_enclosure_sysfs_path(char *dev_name);
* The module will do it for us in vdev_disk_open().
*/
error = efi_use_whole_disk(fd);
+
+ /* Flush the buffers to disk and invalidate the page cache. */
+ (void) fsync(fd);
+ (void) ioctl(fd, BLKFLSBUF);
+
(void) close(fd);
if (error && error != VT_ENOSPC) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
"relabel '%s': unable to read disk capacity"), path);
return (zfs_error(hdl, EZFS_NOCAP, msg));
}
+
return (0);
}
(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
strrchr(path, '/'));
- if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
+ if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
struct dk_gpt *vtoc;
if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
struct dk_gpt *vtoc;
int fd, err;
- if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
+ if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (errno);
if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
vtoc->efi_parts[8].p_size = resv;
vtoc->efi_parts[8].p_tag = V_RESERVED;
- if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
- /*
- * Some block drivers (like pcata) may not support EFI
- * GPT labels. Print out a helpful error message dir-
- * ecting the user to manually label the disk and give
- * a specific slice.
- */
+ rval = efi_write(fd, vtoc);
+
+ /* Flush the buffers to disk and invalidate the page cache. */
+ (void) fsync(fd);
+ (void) ioctl(fd, BLKFLSBUF);
+
+ if (rval == 0)
+ rval = efi_rescan(fd);
+
+ /*
+ * Some block drivers (like pcata) may not support EFI GPT labels.
+ * Print out a helpful error message directing the user to manually
+ * label the disk and give a specific slice.
+ */
+ if (rval != 0) {
(void) close(fd);
efi_free(vtoc);
return (1);
}
+/*
+ * By "whole disk" we mean an entire physical disk (something we can
+ * label, toggle the write cache on, etc.) as opposed to the full
+ * capacity of a pseudo-device such as lofi or did. We act as if we
+ * are labeling the disk, which should be a pretty good test of whether
+ * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
+ * it isn't.
+ */
+int
+zfs_dev_is_whole_disk(char *dev_name)
+{
+ struct dk_gpt *label;
+ int fd;
+
+ if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
+ return (0);
+
+ if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
+ (void) close(fd);
+ return (0);
+ }
+
+ efi_free(label);
+ (void) close(fd);
+
+ return (1);
+}
+
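For reference, here is a minimal sketch (not part of this patch) of how the new public zfs_dev_is_whole_disk() is intended to compose with the existing zfs_resolve_shortname() and zfs_append_partition() helpers, mirroring the callers added in zdb and zpool labelclear above. The resolve_vdev_path() name is a hypothetical wrapper, and strlcpy()/stat64() are assumed to be available as elsewhere in this code:

#include <sys/stat.h>
#include <string.h>
#include <errno.h>
#include <libzfs.h>

/* Illustrative only: resolve a user-supplied vdev name to an openable path. */
static int
resolve_vdev_path(const char *arg, char *path, size_t len)
{
	struct stat64 st;
	int error;

	/* Absolute paths and names that already resolve are used as is. */
	(void) strlcpy(path, arg, len);
	if (arg[0] == '/' || stat64(path, &st) == 0)
		return (0);

	/* Otherwise try the known short device-name locations. */
	error = zfs_resolve_shortname(arg, path, len);
	if (error == 0 && zfs_dev_is_whole_disk(path)) {
		/* Whole disks get the expected partition suffix appended. */
		if (zfs_append_partition(path, len) == -1)
			error = ENOENT;
	}

	if (error != 0 || stat64(path, &st) != 0)
		return (ENOENT);

	return (0);
}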
/*
* Lookup the underlying device for a device name
*
'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
'zpool_import_rename_001_pos']
+[tests/functional/cli_root/zpool_labelclear]
+tests = ['zpool_labelclear_active', 'zpool_labelclear_exported']
+pre =
+post =
+
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg']
zpool_get \
zpool_history \
zpool_import \
+ zpool_labelclear \
zpool_offline \
zpool_online \
zpool_remove \
log_mustnot zpool add -o ashift="$badval" $disk2
log_must zpool destroy $TESTPOOL
log_must zpool labelclear $disk1
- log_must zpool labelclear $disk2
+ log_mustnot zpool labelclear $disk2
done
log_pass "zpool add -o ashift=<n>' works with different ashift values"
--- /dev/null
+pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zpool_labelclear
+dist_pkgdata_SCRIPTS = \
+ labelclear.cfg \
+ zpool_labelclear_active.ksh \
+ zpool_labelclear_exported.ksh
--- /dev/null
+#!/bin/ksh -p
+#
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2016 Nexenta Systems, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+typeset LABELCLEAR="zpool labelclear"
+typeset LABELREAD="zdb -lq"
+
+typeset disks=(${DISKS[*]})
+typeset disk1=${disks[0]}
+typeset disk2=${disks[1]}
+typeset disk3=${disks[2]}
--- /dev/null
+#!/bin/ksh -p
+#
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2016 Nexenta Systems, Inc.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_labelclear/labelclear.cfg
+
+# DESCRIPTION:
+# Check that zpool labelclear will refuse to clear the label
+# (with or without -f) on any vdev of an imported pool.
+#
+# STRATEGY:
+# 1. Create the pool with log device.
+# 2. Try clearing the label on data and log devices.
+# 3. Add auxiliary (cache/spare) vdevs.
+# 4. Try clearing the label on auxiliary vdevs.
+# 5. Check that zpool labelclear will return non-zero and
+# labels are intact.
+
+verify_runnable "global"
+
+function cleanup
+{
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+log_assert "zpool labelclear will fail on all vdevs of an imported pool"
+
+# Create simple pool, skip any mounts
+log_must zpool create -O mountpoint=none -f $TESTPOOL $disk1 log $disk2
+
+# Check that labelclear [-f] will fail on ACTIVE pool vdevs
+log_mustnot $LABELCLEAR $disk1
+log_must $LABELREAD $disk1
+log_mustnot $LABELCLEAR -f $disk1
+log_must $LABELREAD $disk1
+log_mustnot $LABELCLEAR $disk2
+log_must $LABELREAD $disk2
+log_mustnot $LABELCLEAR -f $disk2
+log_must $LABELREAD $disk2
+
+# Add a cache/spare to the pool, check that labelclear [-f] will fail
+# on the vdev and will succeed once it's removed from pool config
+for vdevtype in "cache" "spare"; do
+ log_must zpool add $TESTPOOL $vdevtype $disk3
+ log_mustnot $LABELCLEAR $disk3
+ log_must $LABELREAD $disk3
+ log_mustnot $LABELCLEAR -f $disk3
+ log_must $LABELREAD $disk3
+ log_must zpool remove $TESTPOOL $disk3
+ log_must $LABELCLEAR $disk3
+ log_mustnot $LABELREAD $disk3
+done
+
+log_pass "zpool labelclear will fail on all vdevs of an imported pool"
--- /dev/null
+#!/bin/ksh -p
+#
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2016 Nexenta Systems, Inc.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_labelclear/labelclear.cfg
+
+# DESCRIPTION:
+# Check that zpool labelclear will refuse to clear the label
+# on ACTIVE vdevs of an exported pool without -f, and will succeed with -f.
+#
+# STRATEGY:
+# 1. Create a pool with log device.
+# 2. Export the pool.
+# 3. Check that zpool labelclear returns non-zero when trying to
+# clear the label on ACTIVE vdevs, and succeeds with -f.
+# 4. Add auxiliary vdevs (cache/spare).
+# 5. Check that zpool labelclear succeeds on auxiliary vdevs of
+#    an exported pool.
+
+verify_runnable "global"
+
+function cleanup
+{
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_onexit cleanup
+log_assert "zpool labelclear will fail on ACTIVE vdevs of an exported pool and" \
+ "succeed with -f"
+
+for vdevtype in "" "cache" "spare"; do
+ # Create simple pool, skip any mounts
+ log_must zpool create -O mountpoint=none -f $TESTPOOL $disk1 log $disk2
+ # Add auxiliary vdevs (cache/spare)
+ if [[ -n $vdevtype ]]; then
+ log_must zpool add $TESTPOOL $vdevtype $disk3
+ fi
+ # Export the pool
+ log_must zpool export $TESTPOOL
+
+ # Check that labelclear will fail without -f
+ log_mustnot $LABELCLEAR $disk1
+ log_must $LABELREAD $disk1
+ log_mustnot $LABELCLEAR $disk2
+ log_must $LABELREAD $disk2
+
+ # Check that labelclear will succeed with -f
+ log_must $LABELCLEAR -f $disk1
+ log_mustnot $LABELREAD $disk1
+ log_must $LABELCLEAR -f $disk2
+ log_mustnot $LABELREAD $disk2
+
+ # Check that labelclear on auxiliary vdevs will succeed
+ if [[ -n $vdevtype ]]; then
+ log_must $LABELCLEAR $disk3
+ log_mustnot $LABELREAD $disk3
+ fi
+done
+
+log_pass "zpool labelclear will fail on ACTIVE vdevs of an exported pool and" \
+ "succeed with -f"
# Clear disk labels
for i in {0..2}
do
- log_must zpool labelclear -f /dev/disk/by-id/"${devs_id[i]}"
+ zpool labelclear -f /dev/disk/by-id/"${devs_id[i]}"
done
if is_loop_device $DISK1; then
# Clear disk labels
for i in {0..2}
do
- log_must zpool labelclear -f /dev/disk/by-id/"${devs_id[i]}"
+ zpool labelclear -f /dev/disk/by-id/"${devs_id[i]}"
done
setup