/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
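
/*
 * Callers typically construct these with designated initializers; for
 * example (as done for pool creation later in this file):
 *
 *	prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
 */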
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

zpool_get_all_props(zpool_handle_t *zhp)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
			zcmd_free_nvlists(&zc);

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);

	zcmd_free_nvlists(&zc);
zpool_props_refresh(zpool_handle_t *zhp)
	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)

	nvlist_free(old_props);

zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)

zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		return (zpool_prop_default_numeric(prop));

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
/*
 * Map VDEV STATE to printed strings.
 */

zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	return (gettext("UNKNOWN"));
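
/*
 * For example, a vdev in VDEV_STATE_CANT_OPEN with aux VDEV_AUX_CORRUPT_DATA
 * is reported as "FAULTED", while the same state with VDEV_AUX_SPLIT_POOL is
 * reported as "SPLIT"; any state not handled above falls through to "UNKNOWN".
 */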
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */

zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				zpool_get_prop_string(zhp, prop, &src),

			(void) strlcpy(buf, "-", len);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
			(void) zfs_nicenum(intval, buf, len);

		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);

		case ZPOOL_PROP_DEDUPRATIO:
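			/*
			 * The dedup ratio is maintained as a fixed-point
			 * value scaled by 100, so an intval of 321 is
			 * printed as "3.21x".
			 */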
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			(void) strlcpy(buf, zpool_state_to_name(intval,

			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		(void) strlcpy(buf, strval, len);
/*
 * Check that the bootfs name has the same pool name as the pool it is
 * being set on, assuming bootfs is a valid dataset name.
 */

bootfs_name_valid(const char *pool, char *bootfs)
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
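
	/*
	 * For example, with pool "tank", both "tank" itself and
	 * "tank/path/to/bootfs" satisfy this check, while "tanker/fs"
	 * fails it (the pool-name prefix must be followed by '/' or NUL).
	 */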
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */

pool_uses_efi(nvlist_t *config)
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
pool_is_bootable(zpool_handle_t *zhp)
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */

zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
	struct stat64 statbuf;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)

		/*
		 * Perform additional checking for specific properties.
		 */
		case ZPOOL_PROP_VERSION:
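			/*
			 * Only values between the pool's current version and
			 * the version this library was built against
			 * (SPA_VERSION) are acceptable; e.g. a pool at
			 * version 15 under a hypothetical SPA_VERSION of 28
			 * may be set to any version in [15, 28].
			 */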
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the same pool it is
			 * set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);

			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')

			if (strcmp(strval, "none") == 0)

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
		case ZPOOL_PROP_READONLY:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

	nvlist_free(retprops);
/*
 * Set zpool property : propname=propval.
 */

zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	nvlist_t *nvl = NULL;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		return (no_memory(zhp->zpool_hdl));

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);

		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

		(void) zpool_props_refresh(zhp);
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256
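
/* With 512-byte sectors, 256 blocks * 512 bytes = 128k of alignment. */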
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */

zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		    dgettext(TEXT_DOMAIN, "name is reserved"));
		case NAME_ERR_TOOLONG:
			    dgettext(TEXT_DOMAIN, "name is too long"));

		case NAME_ERR_INVALCHAR:
			    dgettext(TEXT_DOMAIN, "invalid character "
			    "'%c' in pool name"), what);

		case NAME_ERR_NOLETTER:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "name must begin with a letter"));

		case NAME_ERR_RESERVED:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "name is reserved"));

		case NAME_ERR_DISKLIKE:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool name is reserved"));

		case NAME_ERR_LEADING_SLASH:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "leading slash in name"));

		case NAME_ERR_EMPTY_COMPONENT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "empty component in name"));

		case NAME_ERR_TRAILING_SLASH:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "trailing slash in name"));

		case NAME_ERR_MULTIPLE_AT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "multiple '@' delimiters in name"));
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */

zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */

zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */

zpool_open(libzfs_handle_t *hdl, const char *pool)
	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */

zpool_close(zpool_handle_t *zhp)
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);

/*
 * Return the name of the pool.
 */

zpool_get_name(zpool_handle_t *zhp)
	return (zhp->zpool_name);

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */

zpool_get_state(zpool_handle_t *zhp)
	return (zhp->zpool_state);
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */

zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		/*
		 * This can happen if the user has specified the same
		 * device multiple times.  We can't reliably detect this
		 * until we try to add it and see we already have a
		 * label.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more vdevs refer to the same device"));
		return (zfs_error(hdl, EZFS_BADDEV, msg));

		/*
		 * This occurs when one of the devices is below
		 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
		 * device was the problem device since there's no
		 * reliable way to determine device size from userland.
		 */
			zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is less than the "
			    "minimum size (%s)"), buf);

			return (zfs_error(hdl, EZFS_BADDEV, msg));

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more devices is out of space"));
		return (zfs_error(hdl, EZFS_BADDEV, msg));

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "cache device must be a disk or disk slice"));
		return (zfs_error(hdl, EZFS_BADDEV, msg));

		return (zpool_standard_error(hdl, errno, msg));
	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),

	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */

zpool_destroy(zpool_handle_t *zhp)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			(void) zpool_standard_error(hdl, errno, msg);

	remove_mountpoint(zfp);
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */

zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {

		for (s = 0; s < nspares; s++) {

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		/*
		 * This can happen if the user has specified the same
		 * device multiple times.  We can't reliably detect this
		 * until we try to add it and see we already have a
		 * label.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more vdevs refer to the same device"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		/*
		 * This occurs when one of the devices is below
		 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
		 * device was the problem device since there's no
		 * reliable way to determine device size from userland.
		 */
			zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "device is less than the minimum "
			(void) zfs_error(hdl, EZFS_BADDEV, msg);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to add these vdevs"));
		(void) zfs_error(hdl, EZFS_BADVERSION, msg);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "root pool cannot have multiple vdevs"
		    " or separate logs"));
		(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "cache device must be a disk or disk slice"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		(void) zpool_standard_error(hdl, errno, msg);

	zcmd_free_nvlists(&zc);
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */

zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {

			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,

		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,

zpool_export(zpool_handle_t *zhp, boolean_t force)
	return (zpool_export_common(zhp, force, B_FALSE));

zpool_export_force(zpool_handle_t *zhp)
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
	nvlist_t *nv = NULL;

	if (!hdl->libzfs_printerr || config == NULL)

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),

			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
	nvlist_t *nv = NULL;
	uint64_t edata = UINT64_MAX;

	if (!hdl->libzfs_printerr)

		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),

	if (edata != 0 && edata != UINT64_MAX) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
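 *
 * For example, assuming a config obtained from zpool_find_import(), the
 * following is equivalent to zpool_import_props() with only 'altroot'
 * set and 'cachefile' set to "none":
 *
 *	ret = zpool_import(hdl, config, NULL, altroot);
 */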
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
	nvlist_t *props = NULL;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),

	ret = zpool_import_props(hdl, config, newname, props,
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,

		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */

zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
		thename = (char *)newname;

		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);

		(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
		/*
		 * Unsupported version.
		 */
		(void) zfs_error(hdl, EZFS_BADVERSION, desc);

		(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more devices is read only"));
		(void) zfs_error(hdl, EZFS_BADDEV, desc);

		if (nv && nvlist_lookup_nvlist(nv,
		    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
		    nvlist_lookup_nvlist(nvinfo,
		    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "The devices below are missing, use "
			    "'-m' to import the pool anyway:\n"));
			print_vdev_tree(hdl, NULL, missing, 2);
			(void) printf("\n");
		(void) zpool_standard_error(hdl, error, desc);

		(void) zpool_standard_error(hdl, error, desc);

		(void) zpool_standard_error(hdl, error, desc);
		zpool_explain_recover(hdl,
		    newname ? origname : thename, -error, nv);
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
		else if (zhp != NULL)

		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);

	zcmd_free_nvlists(&zc);
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		assert(!"unexpected result");

	if (errno == EBUSY) {
		pool_scan_stat_t *ps = NULL;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
		return (zpool_standard_error(hdl, errno, msg));
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string.  Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)	(str && str[0] == 'c' && isdigit(str[1]))

/*
 * A more elaborate version for strings which may begin with "/dev/dsk/".
 */

ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)

	return (CTD_CHECK(str));
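
/*
 * For example, "/dev/dsk/c0t0d0s0" passes (the last component is checked),
 * and "/dev/dsk/c0t0d0s0/old" also passes (the component before "/old" is
 * checked), while a path such as "/dev/dsk/emcpower0a" does not.
 */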
/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */

vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);
	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			if (theguid == srchval)

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
		/*
		 * Search for the requested value.  Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "s0" or "s0/old".  The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
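		 *
		 * For example, a search path of "/dev/dsk/c0t0d0" matches a
		 * whole-disk config path of "/dev/dsk/c0t0d0s0", and
		 * "/dev/dsk/c0t0d0/old" matches "/dev/dsk/c0t0d0s0/old".
		 */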
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,

				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)

			if ((p = strrchr(type, '-')) == NULL) {

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,

			vdev_id = strtoull(idx, &end, 10);

			/*
			 * Now verify that we have the correct vdev id.
			 */

		if (strcmp(srchval, val) == 0)
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */

zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

	*avail_spare = B_FALSE;

	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */

zpool_vdev_is_interior(const char *name)
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
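
/*
 * For example, "mirror-2" and "raidz-0" name interior top-level vdevs,
 * while a disk name such as "c0t0d0" does not.
 */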
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
	char buf[MAXPATHLEN];
	nvlist_t *nvroot, *search, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

	*avail_spare = B_FALSE;

	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);
vdev_online(nvlist_t *nv)
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)

/*
 * Helper function for zpool_get_physpaths().
 */
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
	size_t bytes_left, pos, rsz;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
		return (EZFS_NOSPC);
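
/*
 * Successive calls append to 'physpath' with a space separator, so a
 * mirror of two online disks yields a string of the form (hypothetical
 * devfs paths) "/pci@0,0/disk@0:a /pci@0,0/disk@1:a".
 */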
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
				return (EZFS_INVALCONFIG);

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)

	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)

	return (EZFS_POOL_INVALARG);
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */

zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
	nvlist_t *vdev_root;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * A root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,

	/* No online devices */
		return (EZFS_NODEVICE);
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */

zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */

zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
	char path[MAXPATHLEN];
	int (*_efi_use_whole_disk)(int);

	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device"), name);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left.  If so, we simply
	 * ignore that error and continue on.
	 */
	error = _efi_use_whole_disk(fd);

	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), name);
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */

zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));

			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);

	zc.zc_cookie = VDEV_STATE_ONLINE;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one.  Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		return (zpool_standard_error(hdl, errno, msg));

	*newstate = zc.zc_cookie;
/*
 * Take the specified vdev offline
 */

zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	return (zpool_standard_error(hdl, errno, msg));
/*
 * Mark the given vdev faulted.
 */

zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = VDEV_STATE_FAULTED;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	return (zpool_standard_error(hdl, errno, msg));
/*
 * Mark the given vdev degraded.
 */

zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = VDEV_STATE_DEGRADED;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

	return (zpool_standard_error(hdl, errno, msg));
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */

is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */

zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	boolean_t avail_spare, l2cache, islog;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);

		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

		return (zfs_error(hdl, EZFS_ISSPARE, msg));

		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

		/*
		 * XXX need a better way to prevent user from
		 * booting up a half-baked vdev.
		 */
		(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
		    "sure to wait until resilver is done "
		    "before rebooting.\n"));

		/*
		 * Can't attach to or replace this type of vdev.
		 */
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);

		(void) zpool_standard_error(hdl, errno, msg);
/*
 * Detach the specified device.
 */

zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

		return (zfs_error(hdl, EZFS_ISSPARE, msg));

		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)

		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);

		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);

		(void) zpool_standard_error(hdl, errno, msg);
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
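 *
 * For example (hypothetical disks), with mchild = { c0t0d0, c0t1d0 }
 * and schild = { c0t1d0 }, the function returns 1.
 */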
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
	for (mc = 0; mc < mchildren; mc++) {
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], B_FALSE);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], B_FALSE);
			boolean_t result = (strcmp(mpath, spath) == 0);
/*
 * Split a mirror pool.  If newroot points to NULL, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */

zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
2672 for (c = 0; c < children; c++) {
2673 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2675 nvlist_t **mchild, *vdev;
2680 * Unlike cache & spares, slogs are stored in the
2681 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2683 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2685 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2687 if (is_log || is_hole) {
2689 * Create a hole vdev and put it in the config.
2691 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2693 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2694 VDEV_TYPE_HOLE) != 0)
2696 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2701 varray[vcount++] = vdev;
2705 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2707 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2708 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2709 "Source pool must be composed only of mirrors\n"));
2710 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2714 verify(nvlist_lookup_nvlist_array(child[c],
2715 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2717 /* find or add an entry for this top-level vdev */
2718 if (newchildren > 0 &&
2719 (entry = find_vdev_entry(zhp, mchild, mchildren,
2720 newchild, newchildren)) >= 0) {
2721 /* We found a disk that the user specified. */
2722 vdev = mchild[entry];
2725 /* User didn't specify a disk for this vdev. */
2726 vdev = mchild[mchildren - 1];
2729 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2733 /* did we find every disk the user specified? */
2734 if (found != newchildren) {
2735 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2736 "include at most one disk from each mirror"));
2737 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2741 /* Prepare the nvlist for populating. */
2742 if (*newroot == NULL) {
2743 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2746 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2747 VDEV_TYPE_ROOT) != 0)
2750 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2753 /* Add all the children we found */
2754 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2755 lastlog == 0 ? vcount : lastlog) != 0)
2759 * If we're just doing a dry run, exit now with success.
2762 memory_err = B_FALSE;
2767 /* now build up the config list & call the ioctl */
2768 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2771 if (nvlist_add_nvlist(newconfig,
2772 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2773 nvlist_add_string(newconfig,
2774 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2775 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2779 * The new pool is automatically part of the namespace unless we
2780 * explicitly export it.
2783 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2784 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2785 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2786 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2788 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2791 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2792 retval = zpool_standard_error(hdl, errno, msg);
2797 memory_err = B_FALSE;
2800 if (varray != NULL) {
2803 for (v = 0; v < vcount; v++)
2804 nvlist_free(varray[v]);
2807 zcmd_free_nvlists(&zc);
2809 nvlist_free(zc_props);
2811 nvlist_free(newconfig);
2813 nvlist_free(*newroot);
2821 return (no_memory(hdl));
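/*
 * Example caller (an illustrative sketch; "newtank" is a placeholder and
 * error handling is elided).  Setting the dryrun flag validates the
 * request and returns the would-be vdev tree in 'newroot' without
 * issuing the ioctl:
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newtank", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */
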
/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor, *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);
	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

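/*
 * Together these helpers give the round trip used by zpool_vdev_name()
 * below: path_to_devid() asks what devid the device at a given path
 * currently has, and devid_to_path() asks where the device with a given
 * devid currently lives.  A sketch of the consistency check, assuming
 * 'devid' and 'path' hold the values stored in the vdev's config:
 *
 *	char *now = path_to_devid(path);
 *	if (now == NULL || strcmp(devid, now) != 0) {
 *		char *newpath = devid_to_path(devid);
 *		(a non-NULL 'newpath' means the disk has moved)
 *	}
 */
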
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			int pathlen = strlen(path);
			char *tmp = zfs_strdup(hdl, path);

			/*
			 * If it starts with c#, and ends with "s0", chop
			 * the "s0" off, or if it ends with "s0/old", remove
			 * the "s0" from the middle.
			 */
			if (CTD_CHECK(tmp)) {
				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
					tmp[pathlen - 2] = '\0';
				} else if (pathlen > 6 &&
				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
					(void) strcpy(&tmp[pathlen - 6],
					    "/old");
				}
			}
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	/*
	 * We identify each top-level vdev by using a <type-id>
	 * naming convention.
	 */
	if (verbose) {
		uint64_t id;

		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
		    &id) == 0);
		(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
		    (u_longlong_t)id);
		path = buf;
	}

	return (zfs_strdup(hdl, path));
}

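/*
 * Illustrative results of the naming rules above: a whole-disk vdev with
 * path "/dev/dsk/c0t0d0s0" is displayed as "c0t0d0"; a missing device is
 * displayed as its guid rendered in decimal; a raidz vdev with
 * nparity == 2 is displayed as "raidz2"; and with 'verbose' set, a
 * top-level mirror with id 0 is displayed as "mirror-0".
 */
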
/*
 * qsort() comparator: order bookmarks by raw memory contents.
 */
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}

/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

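/*
 * Worked example of the pointer arithmetic above: if the config reported
 * count == 10 errors but the ioctl() copied only 6 bookmarks, then
 * zc_nvlist_dst_size comes back as 4 (the number _not_ copied).  Because
 * the kernel fills the buffer from the end, the 6 valid bookmarks occupy
 * elements 4..9, so 'zb' is pointed at element 4 and 'count' becomes 6.
 */
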
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

/*
 * Build a single history record from the subcommand and its arguments.
 */
void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}

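/*
 * For example, given subcommand "create" and
 * argv = { "create", "tank", "mirror", "c0t0d0", "c0t1d0" }, this leaves
 * "create tank mirror c0t0d0 c0t1d0" in 'history_str', stopping early
 * rather than overflowing HIS_MAX_RECORD_LEN.
 */
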
/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}

/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}

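/*
 * Framing example for the loop above: a 300-byte packed record is framed
 * as the eight little-endian length bytes 2c 01 00 00 00 00 00 00
 * (0x12c == 300) followed by the 300 payload bytes.  The shift-and-add
 * loop reassembles reclen as 0x2c + (0x01 << 8) == 300, and the record
 * is then unpacked from the bytes that follow the length.
 */
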
#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}

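/*
 * Example caller (an illustrative sketch; error handling elided).  Each
 * record is an nvlist whose keys include ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD for commands logged by the kernel:
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrecords;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &nrecords) == 0);
 *		nvlist_free(nvhis);
 *	}
 */
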
/*
 * Convert a dataset/object pair into a human-readable path, for error
 * reporting.
 */
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}

/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"), name);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a specific
		 * slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}

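/*
 * The resulting label layout, illustratively, for a disk whose last
 * usable LBA is N:
 *
 *	slice 0 (V_USR, "zfs"):	blocks start_block .. N - EFI_MIN_RESV_SIZE
 *	slice 8 (V_RESERVED):	the EFI_MIN_RESV_SIZE blocks that follow
 *
 * Everything usable except the small reserved tail is handed to ZFS as a
 * single partition.
 */
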
/*
 * Determine whether the given vdev (and, recursively, its children) is of
 * a type that can back a dump device.
 */
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		return (1);
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		return (1);
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}