It's still not clear to me why the default value here is large
enough on Solaris. I hit this limit again when setting up 120 SATA
drives configured as 15 raidz2 groups of 8 drives each. We expect
to go bigger, so it may be worth spending a little time figuring
out how to make this all dynamic; a rough sketch of one way to do
that follows the hunks below.
 		return (no_memory(hdl));
 	}
-	if (zcmd_alloc_dst_nvlist(hdl, &zc, 32768) != 0)
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
 		return (-1);
 	for (;;) {
 zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
 {
 	if (len == 0)
-		len = 2048;
+		len = 128*1024;
 	zc->zc_nvlist_dst_size = len;
 	if ((zc->zc_nvlist_dst = (uint64_t)(uintptr_t)
 	    zfs_alloc(hdl, zc->zc_nvlist_dst_size)) == NULL)
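
For what it's worth, a dynamic approach could lean on the ENOMEM
grow-and-retry pattern that the for (;;) loop above already starts,
instead of guessing a good default up front. The sketch below is
untested and only illustrative: fetch_pool_configs() is a made-up
name, and it assumes the kernel writes the size it actually needs
back into zc_nvlist_dst_size when the ioctl fails with ENOMEM, so
the existing zcmd_expand_dst_nvlist(), zcmd_read_dst_nvlist(), and
zcmd_free_nvlists() helpers from libzfs_util.c can grow the buffer
and retry.

#include <errno.h>
#include <sys/ioctl.h>
#include "libzfs_impl.h"	/* zcmd_* helpers, hdl->libzfs_fd */

/*
 * Illustrative only: fetch the pool config nvlist, growing the
 * destination buffer on ENOMEM rather than relying on a fixed default.
 */
static int
fetch_pool_configs(libzfs_handle_t *hdl, nvlist_t **config)
{
	zfs_cmd_t zc = {"\0"};

	/* len == 0 means "use the built-in default size". */
	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	for (;;) {
		if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) == 0)
			break;

		if (errno != ENOMEM) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}

		/*
		 * Assumption: on ENOMEM the kernel has reported the size it
		 * needs in zc_nvlist_dst_size, so reallocating the buffer
		 * and retrying should eventually succeed.
		 */
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	/* Unpack the packed nvlist the kernel handed back. */
	if (zcmd_read_dst_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);
	return (0);
}

With something like that in place the bumped 128K default only
matters as a starting point; a config too large for it just costs one
extra ioctl round trip instead of a failure.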