 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * (7) Threads are created with a reduced stack size, for sanity checking.
 *     Therefore, it's important not to allocate huge buffers on the stack.
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * The -N(okill) option will suppress kills, so each child runs to completion.
 * This can be useful when you're trying to distinguish temporal incursions
 * from plain old race conditions.
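 *
 * A few illustrative invocations (added examples based on the options
 * described above; they are sketches, not an exhaustive reference):
 *
 *	ztest				# quiet five-minute smoke test
 *	ztest -V -T 86400		# verbose overnight stress run
 *	ztest -v 8 -d 16 -t 64		# more vdevs, datasets, and threads
 *	ztest -N -P 120			# no kills, two minutes per pass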
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/resource.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <stdio_ext.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>
static char cmdname[] = "ztest";
static char *zopt_pool = cmdname;

static uint64_t zopt_vdevs = 5;
static uint64_t zopt_vdevtime;
static int zopt_ashift = SPA_MINBLOCKSHIFT;
static int zopt_mirrors = 2;
static int zopt_raidz = 4;
static int zopt_raidz_parity = 1;
static size_t zopt_vdev_size = SPA_MINDEVSIZE;
static int zopt_datasets = 7;
static int zopt_threads = 23;
static uint64_t zopt_passtime = 60;	/* 60 seconds */
static uint64_t zopt_killrate = 70;	/* 70% kill rate */
static int zopt_verbose = 0;
static int zopt_init = 1;
static char *zopt_dir = "/tmp";
static uint64_t zopt_time = 300;	/* 5 minutes */
static uint64_t zopt_maxloops = 50;	/* max loops during spa_freeze() */
#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS() (MAX(zs->zs_mirrors, 1) * (zopt_raidz_parity + 1) - 1)
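/*
 * Worked example for MAXFAULTS() (added commentary, not in the original
 * source): with the defaults above, zs_mirrors = 2 and zopt_raidz_parity = 1,
 * so MAX(2, 1) * (1 + 1) - 1 = 3 -- roughly the number of leaf-vdev faults
 * the configured mirror/raidz redundancy should still survive.
 */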
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
typedef struct ztest_block_tag {

typedef struct bufwad {

 * XXX -- fix zfs range locks to be generic so we can use them here.

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

 * Object descriptor.  Used as a template for object lookup/create/remove.

typedef struct ztest_od {
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	char		od_name[MAXNAMELEN];

typedef struct ztest_ds {
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[MAXNAMELEN];
	kmutex_t	zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
 * Per-iteration state.

typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
	uint64_t	zi_call_count;	/* per-pass count */
	uint64_t	zi_call_time;	/* per-pass time */
	uint64_t	zi_call_next;	/* next time to call this function */
 * Note: these aren't static because we want dladdr() to work.

ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */
ztest_info_t ztest_info[] = {
	{ ztest_dmu_read_write,			1,	&zopt_always	},
	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
	{ ztest_zap,				30,	&zopt_always	},
	{ ztest_zap_parallel,			100,	&zopt_always	},
	{ ztest_split_pool,			1,	&zopt_always	},
	{ ztest_zil_commit,			1,	&zopt_incessant	},
	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
	{ ztest_fzap,				1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
	{ ztest_fault_inject,			1,	&zopt_sometimes	},
	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
	{ ztest_spa_rename,			1,	&zopt_rarely	},
	{ ztest_scrub,				1,	&zopt_rarely	},
	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
	{ ztest_vdev_attach_detach,		1,	&zopt_rarely	},
	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
	{ ztest_vdev_add_remove,		1,	&zopt_vdevtime	},
	{ ztest_vdev_aux_add_remove,		1,	&zopt_vdevtime	},

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))
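/*
 * Adding a test (added note) amounts to writing a ztest_func_t and adding
 * a row to the table above -- and, per item (6) of the header comment,
 * recording txg numbers where applicable -- e.g. this hypothetical entry:
 *
 *	{ ztest_my_new_test,			1,	&zopt_sometimes	},
 */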
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.

typedef struct ztest_cb_list {
	kmutex_t	zcl_callbacks_lock;
	list_t		zcl_callbacks;

 * Stuff we need to share writably between parent and child.

typedef struct ztest_shared {
	hrtime_t	zs_proc_start;
	hrtime_t	zs_proc_stop;
	hrtime_t	zs_thread_start;
	hrtime_t	zs_thread_stop;
	hrtime_t	zs_thread_kill;
	uint64_t	zs_enospc_count;
	uint64_t	zs_vdev_next_leaf;
	uint64_t	zs_vdev_aux;
	kmutex_t	zs_vdev_lock;
	krwlock_t	zs_name_lock;
	ztest_info_t	zs_info[ZTEST_FUNCS];

#define	ID_PARALLEL	-1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;
static int ztest_random_fd;
static int ztest_dump_core = 1;

static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

/* Commit cb delay */
static uint64_t zc_min_txg_delay = UINT64_MAX;
static int zc_cb_counter = 0;

 * Minimum number of commit callbacks that need to be registered for us to
 * check whether the minimum txg delay is acceptable.

#define	ZTEST_COMMIT_CB_MIN_REG	100

 * If a number of txgs equal to this threshold have been created after a
 * commit callback has been registered but not called, then we assume there
 * is an implementation bug.

#define	ZTEST_COMMIT_CB_THRESH	(TXG_CONCURRENT_STATES + 1000)
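/*
 * (Added note: TXG_CONCURRENT_STATES accounts for the txgs that may
 * legitimately still be in flight -- open, quiescing, and syncing -- and
 * the extra 1000 is generous slack so that only a genuinely stuck callback
 * trips the threshold.)
 */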
extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
static uint64_t metaslab_sz;
	ZTEST_META_DNODE = 0,

static void usage(boolean_t) __NORETURN;
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.

_umem_debug_init(void)
	return ("default,verbose");	/* $UMEM_DEBUG setting */

_umem_logging_init(void)
	return ("fail,contents");	/* $UMEM_LOGGING setting */
#define	FATAL_MSG_SZ	1024

fatal(int do_perror, char *message, ...)
	int save_errno = errno;

	(void) fflush(stdout);
	buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);

	va_start(args, message);
	(void) sprintf(buf, "ztest: ");
	(void) vsprintf(buf + strlen(buf), message, args);
		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
		    ": %s", strerror(save_errno));
	(void) fprintf(stderr, "%s\n", buf);
	fatal_msg = buf;	/* to ease debugging */
str2shift(const char *buf)
	const char *ends = "BKMGTPEZ";

	for (i = 0; i < strlen(ends); i++) {
		if (toupper(buf[0]) == ends[i])
	if (i == strlen(ends)) {
		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
nicenumtoull(const char *buf)
	val = strtoull(buf, &end, 0);
		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
	} else if (end[0] == '.') {
		double fval = strtod(buf, &end);
		fval *= pow(2, str2shift(end));
		if (fval > UINT64_MAX) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
		val = (uint64_t)fval;
		int shift = str2shift(end);
		if (shift >= 64 || (val << shift) >> shift != val) {
			(void) fprintf(stderr, "ztest: value too large: %s\n",
usage(boolean_t requested)
	char nice_vdev_size[10];
	char nice_gang_bang[10];
	FILE *fp = requested ? stdout : stderr;

	nicenum(zopt_vdev_size, nice_vdev_size);
	nicenum(metaslab_gang_bang, nice_gang_bang);

	(void) fprintf(fp, "Usage: %s\n"
	    "\t[-v vdevs (default: %llu)]\n"
	    "\t[-s size_of_each_vdev (default: %s)]\n"
	    "\t[-a alignment_shift (default: %d)] use 0 for random\n"
	    "\t[-m mirror_copies (default: %d)]\n"
	    "\t[-r raidz_disks (default: %d)]\n"
	    "\t[-R raidz_parity (default: %d)]\n"
	    "\t[-d datasets (default: %d)]\n"
	    "\t[-t threads (default: %d)]\n"
	    "\t[-g gang_block_threshold (default: %s)]\n"
	    "\t[-i init_count (default: %d)] initialize pool i times\n"
	    "\t[-k kill_percentage (default: %llu%%)]\n"
	    "\t[-p pool_name (default: %s)]\n"
	    "\t[-f dir (default: %s)] file directory for vdev files\n"
	    "\t[-V] verbose (use multiple times for ever more blather)\n"
	    "\t[-E] use existing pool instead of creating new one\n"
	    "\t[-T time (default: %llu sec)] total run time\n"
	    "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
	    "\t[-P passtime (default: %llu sec)] time per pass\n"
	    "\t[-h] (print help)\n"
	    (u_longlong_t)zopt_vdevs,		/* -v */
	    nice_vdev_size,			/* -s */
	    zopt_ashift,			/* -a */
	    zopt_mirrors,			/* -m */
	    zopt_raidz_parity,			/* -R */
	    zopt_datasets,			/* -d */
	    zopt_threads,			/* -t */
	    nice_gang_bang,			/* -g */
	    (u_longlong_t)zopt_killrate,	/* -k */
	    (u_longlong_t)zopt_time,		/* -T */
	    (u_longlong_t)zopt_maxloops,	/* -F */
	    (u_longlong_t)zopt_passtime);	/* -P */
	exit(requested ? 0 : 1);
process_options(int argc, char **argv)
	/* By default, test gang blocks for blocks 32K and greater */
	metaslab_gang_bang = 32 << 10;

	while ((opt = getopt(argc, argv,
	    "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:")) != EOF) {
			value = nicenumtoull(optarg);
			zopt_vdev_size = MAX(SPA_MINDEVSIZE, value);
			zopt_mirrors = value;
			zopt_raidz = MAX(1, value);
			zopt_raidz_parity = MIN(MAX(value, 1), 3);
			zopt_datasets = MAX(1, value);
			zopt_threads = MAX(1, value);
			metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1, value);
			zopt_killrate = value;
			zopt_pool = strdup(optarg);
			zopt_dir = strdup(optarg);
			zopt_passtime = MAX(1, value);
			zopt_maxloops = MAX(1, value);

	zopt_raidz_parity = MIN(zopt_raidz_parity, zopt_raidz - 1);

	zopt_vdevtime = (zopt_vdevs > 0 ? zopt_time * NANOSEC / zopt_vdevs :
ztest_kill(ztest_shared_t *zs)
	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(zs->zs_spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(zs->zs_spa));
	(void) kill(getpid(), SIGKILL);

ztest_random(uint64_t range)
	if (read(ztest_random_fd, &r, sizeof (r)) != sizeof (r))
		fatal(1, "short read from /dev/urandom");

ztest_record_enospc(const char *s)
	ztest_shared->zs_enospc_count++;

ztest_get_ashift(void)
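	/*
	 * (Added note: with -a 0 the ashift is randomized below;
	 * ztest_random(3) yields 0..2, so the result spans
	 * SPA_MINBLOCKSHIFT + 0..2, i.e. 512-byte to 2KB sectors.)
	 */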
	if (zopt_ashift == 0)
		return (SPA_MINBLOCKSHIFT + ztest_random(3));
	return (zopt_ashift);
make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift)
	pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

		ashift = ztest_get_ashift();

		vdev = ztest_shared->zs_vdev_aux;
		(void) sprintf(path, ztest_aux_template,
		    zopt_dir, zopt_pool, aux, vdev);
		vdev = ztest_shared->zs_vdev_next_leaf++;
		(void) sprintf(path, ztest_dev_template,
		    zopt_dir, zopt_pool, vdev);

		int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
			fatal(1, "can't open %s", path);
		if (ftruncate(fd, size) != 0)
			fatal(1, "can't ftruncate %s", path);

	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
	umem_free(pathbuf, MAXPATHLEN);
make_vdev_raidz(char *path, char *aux, size_t size, uint64_t ashift, int r)
	nvlist_t *raidz, **child;

		return (make_vdev_file(path, aux, size, ashift));

	child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < r; c++)
		child[c] = make_vdev_file(path, aux, size, ashift);

	VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_RAIDZ) == 0);
	VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
	    zopt_raidz_parity) == 0);
	VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,

	for (c = 0; c < r; c++)
		nvlist_free(child[c]);

	umem_free(child, r * sizeof (nvlist_t *));
make_vdev_mirror(char *path, char *aux, size_t size, uint64_t ashift,
	nvlist_t *mirror, **child;

		return (make_vdev_raidz(path, aux, size, ashift, r));

	child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < m; c++)
		child[c] = make_vdev_raidz(path, aux, size, ashift, r);

	VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_MIRROR) == 0);
	VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,

	for (c = 0; c < m; c++)
		nvlist_free(child[c]);

	umem_free(child, m * sizeof (nvlist_t *));
make_vdev_root(char *path, char *aux, size_t size, uint64_t ashift,
    int log, int r, int m, int t)
	nvlist_t *root, **child;

	child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);

	for (c = 0; c < t; c++) {
		child[c] = make_vdev_mirror(path, aux, size, ashift, r, m);
		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,

	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,

	for (c = 0; c < t; c++)
		nvlist_free(child[c]);

	umem_free(child, t * sizeof (nvlist_t *));
ztest_random_blocksize(void)
	return (1 << (SPA_MINBLOCKSHIFT +
	    ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));

ztest_random_ibshift(void)
	return (DN_MIN_INDBLKSHIFT +
	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));

ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

		top = ztest_random(rvd->vdev_children);
		tvd = rvd->vdev_child[top];
	} while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
	    tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
ztest_random_dsl_prop(zfs_prop_t prop)
		value = zfs_prop_random_value(prop, ztest_random(-1ULL));
	} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);

ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
	const char *propname = zfs_prop_to_name(prop);

	error = dsl_prop_set(osname, propname,
	    (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
	    sizeof (value), 1, &value);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
	ASSERT3U(error, ==, 0);

	setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
	VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
	    1, &curval, setpoint), ==, 0);

	if (zopt_verbose >= 6) {
		VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
		(void) printf("%s %s = %s at '%s'\n",
		    osname, propname, valname, setpoint);
	umem_free(setpoint, MAXPATHLEN);
ztest_spa_prop_set_uint64(ztest_shared_t *zs, zpool_prop_t prop, uint64_t value)
	spa_t *spa = zs->zs_spa;
	nvlist_t *props = NULL;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);

	error = spa_prop_set(spa, props);

	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
	ASSERT3U(error, ==, 0);
ztest_rll_init(rll_t *rll)
	rll->rll_writer = NULL;
	rll->rll_readers = 0;
	mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);

ztest_rll_destroy(rll_t *rll)
	ASSERT(rll->rll_writer == NULL);
	ASSERT(rll->rll_readers == 0);
	mutex_destroy(&rll->rll_lock);
	cv_destroy(&rll->rll_cv);

ztest_rll_lock(rll_t *rll, rl_type_t type)
	mutex_enter(&rll->rll_lock);

	if (type == RL_READER) {
		while (rll->rll_writer != NULL)
			(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
		while (rll->rll_writer != NULL || rll->rll_readers)
			(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
		rll->rll_writer = curthread;

	mutex_exit(&rll->rll_lock);

ztest_rll_unlock(rll_t *rll)
	mutex_enter(&rll->rll_lock);

	if (rll->rll_writer) {
		ASSERT(rll->rll_readers == 0);
		rll->rll_writer = NULL;
		ASSERT(rll->rll_readers != 0);
		ASSERT(rll->rll_writer == NULL);

	if (rll->rll_writer == NULL && rll->rll_readers == 0)
		cv_broadcast(&rll->rll_cv);

	mutex_exit(&rll->rll_lock);
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_lock(rll, type);

ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];

	ztest_rll_unlock(rll);

ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
    uint64_t size, rl_type_t type)
	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];

	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
	rl->rl_object = object;
	rl->rl_offset = offset;

	ztest_rll_lock(rll, type);

ztest_range_unlock(rl_t *rl)
	rll_t *rll = rl->rl_lock;

	ztest_rll_unlock(rll);

	umem_free(rl, sizeof (*rl));
ztest_zd_init(ztest_ds_t *zd, objset_t *os)
	zd->zd_zilog = dmu_objset_zil(os);

	dmu_objset_name(os, zd->zd_name);

	mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);

	for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_init(&zd->zd_object_lock[l]);

	for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_init(&zd->zd_range_lock[l]);

ztest_zd_fini(ztest_ds_t *zd)
	mutex_destroy(&zd->zd_dirobj_lock);

	for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_object_lock[l]);

	for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
		ztest_rll_destroy(&zd->zd_range_lock[l]);
#define	TXG_MIGHTWAIT	(ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
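/*
 * (Added note: ztest_random(10) == 0 holds about one time in ten, so
 * roughly 10% of assignments use TXG_NOWAIT -- and must cope with
 * ERESTART -- while the rest block in TXG_WAIT.)
 */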
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
	 * Attempt to assign tx to some transaction group.
	error = dmu_tx_assign(tx, txg_how);
	if (error == ERESTART) {
		ASSERT(txg_how == TXG_NOWAIT);
		ASSERT3U(error, ==, ENOSPC);
		ztest_record_enospc(tag);

	txg = dmu_tx_get_txg(tx);
ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);

ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
	uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);

		diff |= (value - *ip++);
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
	bt->bt_magic = BT_MAGIC;
	bt->bt_objset = dmu_objset_id(os);
	bt->bt_object = object;
	bt->bt_offset = offset;
	bt->bt_crtxg = crtxg;

ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
	ASSERT(bt->bt_magic == BT_MAGIC);
	ASSERT(bt->bt_objset == dmu_objset_id(os));
	ASSERT(bt->bt_object == object);
	ASSERT(bt->bt_offset == offset);
	ASSERT(bt->bt_gen <= gen);
	ASSERT(bt->bt_txg <= txg);
	ASSERT(bt->bt_crtxg == crtxg);

static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
	dmu_object_info_t doi;
	ztest_block_tag_t *bt;

	dmu_object_info_from_db(db, &doi);
	ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
	ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
	bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
#define	lrz_type	lr_mode
#define	lrz_blocksize	lr_uid
#define	lrz_ibshift	lr_gid
#define	lrz_bonustype	lr_rdev
#define	lrz_bonuslen	lr_crtime[1]
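/*
 * (Added note: ztest has no real users or vnodes, so, as the defines above
 * show, it repurposes otherwise-unused lr_create_t fields to carry object
 * creation parameters -- type, block size, indirect block shift, bonus
 * type/length -- through the ZIL.)
 */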
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;

	if (zil_replaying(zd->zd_zilog, tx))

	itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);

ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
	char *name = (void *)(lr + 1);		/* name follows lr */
	size_t namesize = strlen(name) + 1;

	if (zil_replaying(zd->zd_zilog, tx))

	itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) + namesize - sizeof (lr_t));

	itx->itx_oid = object;
	zil_itx_assign(zd->zd_zilog, itx, tx);
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
	itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);

	if (zil_replaying(zd->zd_zilog, tx))

	if (lr->lr_length > ZIL_MAX_LOG_DATA)
		write_state = WR_INDIRECT;

	itx = zil_itx_create(TX_WRITE,
	    sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));

	if (write_state == WR_COPIED &&
	    dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
	    ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
		zil_itx_destroy(itx);
		itx = zil_itx_create(TX_WRITE, sizeof (*lr));
		write_state = WR_NEED_COPY;

	itx->itx_private = zd;
	itx->itx_wr_state = write_state;
	itx->itx_sync = (ztest_random(8) == 0);
	itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);

	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	zil_itx_assign(zd->zd_zilog, itx, tx);
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
	if (zil_replaying(zd->zd_zilog, tx))

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);

ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
	if (zil_replaying(zd->zd_zilog, tx))

	itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
	    sizeof (*lr) - sizeof (lr_t));

	itx->itx_sync = B_FALSE;
	zil_itx_assign(zd->zd_zilog, itx, tx);
ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	ztest_block_tag_t *bbt;

		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

	ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);

	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
		if (lr->lr_foid == 0) {
			lr->lr_foid = zap_create(os,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
			error = zap_create_claim(os, lr->lr_foid,
			    lr->lrz_type, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
		if (lr->lr_foid == 0) {
			lr->lr_foid = dmu_object_alloc(os,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);
			error = dmu_object_claim(os, lr->lr_foid,
			    lr->lrz_type, 0, lr->lrz_bonustype,
			    lr->lrz_bonuslen, tx);

		ASSERT3U(error, ==, EEXIST);
		ASSERT(zd->zd_zilog->zl_replay);

	ASSERT(lr->lr_foid != 0);

	if (lr->lrz_type != DMU_OT_ZAP_OTHER)
		VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
		    lr->lrz_blocksize, lr->lrz_ibshift, tx));

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
	bbt = ztest_bt_bonus(db);
	dmu_buf_will_dirty(db, tx);
	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
	dmu_buf_rele(db, FTAG);

	VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,

	(void) ztest_log_create(zd, tx, lr);
ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
	char *name = (void *)(lr + 1);		/* name follows lr */
	objset_t *os = zd->zd_os;
	dmu_object_info_t doi;
	uint64_t object, txg;

		byteswap_uint64_array(lr, sizeof (*lr));

	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
	ASSERT(name[0] != '\0');

	    zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
	ASSERT(object != 0);

	ztest_object_lock(zd, object, RL_WRITER);

	VERIFY3U(0, ==, dmu_object_info(os, object, &doi));

	tx = dmu_tx_create(os);

	dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
		ztest_object_unlock(zd, object);

	if (doi.doi_type == DMU_OT_ZAP_OTHER) {
		VERIFY3U(0, ==, zap_destroy(os, object, tx));
		VERIFY3U(0, ==, dmu_object_free(os, object, tx));

	VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));

	(void) ztest_log_remove(zd, tx, lr, object);

	ztest_object_unlock(zd, object);
ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
	objset_t *os = zd->zd_os;
	void *data = lr + 1;			/* data follows lr */
	uint64_t offset, length;
	ztest_block_tag_t *bt = data;
	ztest_block_tag_t *bbt;
	uint64_t gen, txg, lrtxg, crtxg;
	dmu_object_info_t doi;
	arc_buf_t *abuf = NULL;

		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;

	if (bt->bt_magic == BSWAP_64(BT_MAGIC))
		byteswap_uint64_array(bt, sizeof (*bt));

	if (bt->bt_magic != BT_MAGIC)

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	dmu_object_info_from_db(db, &doi);

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, lr->lr_foid, offset, length);

	if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
	    P2PHASE(offset, length) == 0)
		abuf = dmu_request_arcbuf(db, length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
			dmu_return_arcbuf(abuf);
		dmu_buf_rele(db, FTAG);
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);

	 * Usually, verify the old data before writing new data --
	 * but not always, because we also want to verify correct
	 * behavior when the data was not recently read into cache.
	ASSERT(offset % doi.doi_data_block_size == 0);
	if (ztest_random(4) != 0) {
		int prefetch = ztest_random(2) ?
		    DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
		ztest_block_tag_t rbt;

		VERIFY(dmu_read(os, lr->lr_foid, offset,
		    sizeof (rbt), &rbt, prefetch) == 0);
		if (rbt.bt_magic == BT_MAGIC) {
			ztest_bt_verify(&rbt, os, lr->lr_foid,
			    offset, gen, txg, crtxg);

	 * Writes can appear to be newer than the bonus buffer because
	 * the ztest_get_data() callback does a dmu_read() of the
	 * open-context data, which may be different than the data
	 * as it was when the write was generated.
	if (zd->zd_zilog->zl_replay) {
		ztest_bt_verify(bt, os, lr->lr_foid, offset,
		    MAX(gen, bt->bt_gen), MAX(txg, lrtxg),

		 * Set the bt's gen/txg to the bonus buffer's gen/txg
		 * so that all of the usual ASSERTs will work.
		ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);

		dmu_write(os, lr->lr_foid, offset, length, data, tx);
		bcopy(data, abuf->b_data, length);
		dmu_assign_arcbuf(db, offset, abuf, tx);

	(void) ztest_log_write(zd, tx, lr);

	dmu_buf_rele(db, FTAG);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);
ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
	objset_t *os = zd->zd_os;

		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_READER);
	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,

	tx = dmu_tx_create(os);

	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
		ztest_range_unlock(rl);
		ztest_object_unlock(zd, lr->lr_foid);

	VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
	    lr->lr_length, tx) == 0);

	(void) ztest_log_truncate(zd, tx, lr);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, lr->lr_foid);
ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
	objset_t *os = zd->zd_os;
	ztest_block_tag_t *bbt;
	uint64_t txg, lrtxg, crtxg;

		byteswap_uint64_array(lr, sizeof (*lr));

	ztest_object_lock(zd, lr->lr_foid, RL_WRITER);

	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, lr->lr_foid);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, lr->lr_foid);

	bbt = ztest_bt_bonus(db);
	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
	crtxg = bbt->bt_crtxg;
	lrtxg = lr->lr_common.lrc_txg;

	if (zd->zd_zilog->zl_replay) {
		ASSERT(lr->lr_size != 0);
		ASSERT(lr->lr_mode != 0);

		 * Randomly change the size and increment the generation.
		lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
		lr->lr_mode = bbt->bt_gen + 1;

	 * Verify that the current bonus buffer is not newer than our txg.
	ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
	    MAX(txg, lrtxg), crtxg);

	dmu_buf_will_dirty(db, tx);

	ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
	ASSERT3U(lr->lr_size, <=, db->db_size);
	VERIFY3U(dmu_set_bonus(db, lr->lr_size, tx), ==, 0);
	bbt = ztest_bt_bonus(db);

	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);

	dmu_buf_rele(db, FTAG);

	(void) ztest_log_setattr(zd, tx, lr);

	ztest_object_unlock(zd, lr->lr_foid);
zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
	NULL,					/* 0 no such transaction type */
	(zil_replay_func_t *)ztest_replay_create,	/* TX_CREATE */
	NULL,					/* TX_MKDIR */
	NULL,					/* TX_MKXATTR */
	NULL,					/* TX_SYMLINK */
	(zil_replay_func_t *)ztest_replay_remove,	/* TX_REMOVE */
	NULL,					/* TX_RMDIR */
	NULL,					/* TX_RENAME */
	(zil_replay_func_t *)ztest_replay_write,	/* TX_WRITE */
	(zil_replay_func_t *)ztest_replay_truncate,	/* TX_TRUNCATE */
	(zil_replay_func_t *)ztest_replay_setattr,	/* TX_SETATTR */
	NULL,					/* TX_CREATE_ACL */
	NULL,					/* TX_CREATE_ATTR */
	NULL,					/* TX_CREATE_ACL_ATTR */
	NULL,					/* TX_MKDIR_ACL */
	NULL,					/* TX_MKDIR_ATTR */
	NULL,					/* TX_MKDIR_ACL_ATTR */
	NULL,					/* TX_WRITE2 */
 * ZIL get_data callbacks

ztest_get_done(zgd_t *zgd, int error)
	ztest_ds_t *zd = zgd->zgd_private;
	uint64_t object = zgd->zgd_rl->rl_object;

		dmu_buf_rele(zgd->zgd_db, zgd);

	ztest_range_unlock(zgd->zgd_rl);
	ztest_object_unlock(zd, object);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	umem_free(zgd, sizeof (*zgd));
ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
	ztest_ds_t *zd = arg;
	objset_t *os = zd->zd_os;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	uint64_t txg = lr->lr_common.lrc_txg;
	dmu_object_info_t doi;

	ztest_object_lock(zd, object, RL_READER);
	error = dmu_bonus_hold(os, object, FTAG, &db);
		ztest_object_unlock(zd, object);

	crtxg = ztest_bt_bonus(db)->bt_crtxg;

	if (crtxg == 0 || crtxg > txg) {
		dmu_buf_rele(db, FTAG);
		ztest_object_unlock(zd, object);

	dmu_object_info_from_db(db, &doi);
	dmu_buf_rele(db, FTAG);

	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
	zgd->zgd_zilog = zd->zd_zilog;
	zgd->zgd_private = zd;

	if (buf != NULL) {	/* immediate write */
		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,

		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
		size = doi.doi_data_block_size;
			offset = P2ALIGN(offset, size);
			ASSERT(offset < size);

		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,

		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    ztest_get_done, zgd);

	ztest_get_done(zgd, error);
ztest_lr_alloc(size_t lrsize, char *name)
	size_t namesize = name ? strlen(name) + 1 : 0;

	lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);

		bcopy(name, lr + lrsize, namesize);

ztest_lr_free(void *lr, size_t lrsize, char *name)
	size_t namesize = name ? strlen(name) + 1 : 0;

	umem_free(lr, lrsize + namesize);
 * Lookup a bunch of objects.  Returns the number of objects not found.

ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
	ASSERT(mutex_held(&zd->zd_dirobj_lock));

	for (i = 0; i < count; i++, od++) {
		error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
		    sizeof (uint64_t), 1, &od->od_object);
			ASSERT(error == ENOENT);
			ASSERT(od->od_object == 0);
			ztest_block_tag_t *bbt;
			dmu_object_info_t doi;

			ASSERT(od->od_object != 0);
			ASSERT(missing == 0);	/* there should be no gaps */

			ztest_object_lock(zd, od->od_object, RL_READER);
			VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
			    od->od_object, FTAG, &db));
			dmu_object_info_from_db(db, &doi);
			bbt = ztest_bt_bonus(db);
			ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
			od->od_type = doi.doi_type;
			od->od_blocksize = doi.doi_data_block_size;
			od->od_gen = bbt->bt_gen;
			dmu_buf_rele(db, FTAG);
			ztest_object_unlock(zd, od->od_object);
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
	ASSERT(mutex_held(&zd->zd_dirobj_lock));

	for (i = 0; i < count; i++, od++) {
		lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;
		lr->lr_foid = 0;	/* 0 to allocate, > 0 to claim */
		lr->lrz_type = od->od_crtype;
		lr->lrz_blocksize = od->od_crblocksize;
		lr->lrz_ibshift = ztest_random_ibshift();
		lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
		lr->lrz_bonuslen = dmu_bonus_max();
		lr->lr_gen = od->od_crgen;
		lr->lr_crtime[0] = time(NULL);

		if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
			ASSERT(missing == 0);
			od->od_object = lr->lr_foid;
			od->od_type = od->od_crtype;
			od->od_blocksize = od->od_crblocksize;
			od->od_gen = od->od_crgen;
			ASSERT(od->od_object != 0);

		ztest_lr_free(lr, sizeof (*lr), od->od_name);
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
	ASSERT(mutex_held(&zd->zd_dirobj_lock));

	for (i = count - 1; i >= 0; i--, od--) {
		if (od->od_object == 0)

		lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);

		lr->lr_doid = od->od_dir;

		if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
			ASSERT3U(error, ==, ENOSPC);

		ztest_lr_free(lr, sizeof (*lr), od->od_name);
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
	lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;

	BP_ZERO(&lr->lr_blkptr);

	bcopy(data, lr + 1, size);

	error = ztest_replay_write(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr) + size, NULL);

ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;
	lr->lr_offset = offset;
	lr->lr_length = size;

	error = ztest_replay_truncate(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);

ztest_setattr(ztest_ds_t *zd, uint64_t object)
	lr = ztest_lr_alloc(sizeof (*lr), NULL);

	lr->lr_foid = object;

	error = ztest_replay_setattr(zd, lr, B_FALSE);

	ztest_lr_free(lr, sizeof (*lr), NULL);
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
	objset_t *os = zd->zd_os;

	txg_wait_synced(dmu_objset_pool(os), 0);

	ztest_object_lock(zd, object, RL_READER);
	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, object, offset, size);

	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);

		dmu_prealloc(os, object, offset, size, tx);
		txg_wait_synced(dmu_objset_pool(os), txg);
		(void) dmu_free_long_range(os, object, offset, size);

	ztest_range_unlock(rl);
	ztest_object_unlock(zd, object);
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
	ztest_block_tag_t wbt;
	dmu_object_info_t doi;
	enum ztest_io_type io_type;

	VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
	blocksize = doi.doi_data_block_size;
	data = umem_alloc(blocksize, UMEM_NOFAIL);

	 * Pick an i/o type at random, biased toward writing block tags.
	io_type = ztest_random(ZTEST_IO_TYPES);
	if (ztest_random(2) == 0)
		io_type = ZTEST_IO_WRITE_TAG;

	case ZTEST_IO_WRITE_TAG:
		ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
		(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);

	case ZTEST_IO_WRITE_PATTERN:
		(void) memset(data, 'a' + (object + offset) % 5, blocksize);
		if (ztest_random(2) == 0) {
			 * Induce fletcher2 collisions to ensure that
			 * zio_ddt_collision() detects and resolves them
			 * when using fletcher2-verify for deduplication.
			((uint64_t *)data)[0] ^= 1ULL << 63;
			((uint64_t *)data)[4] ^= 1ULL << 63;
		(void) ztest_write(zd, object, offset, blocksize, data);

	case ZTEST_IO_WRITE_ZEROES:
		bzero(data, blocksize);
		(void) ztest_write(zd, object, offset, blocksize, data);

	case ZTEST_IO_TRUNCATE:
		(void) ztest_truncate(zd, object, offset, blocksize);

	case ZTEST_IO_SETATTR:
		(void) ztest_setattr(zd, object);

	umem_free(data, blocksize);
 * Initialize an object description template.

ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
    dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
	od->od_dir = ZTEST_DIROBJ;

	od->od_crtype = type;
	od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();

	od->od_type = DMU_OT_NONE;
	od->od_blocksize = 0;

	(void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
	    tag, (longlong_t)id, (u_longlong_t)index);
 * Lookup or create the objects for a test using the od template.
 * If the objects do not all exist, or if 'remove' is specified,
 * remove any existing objects and create new ones.  Otherwise,
 * use the existing objects.

ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
	int count = size / sizeof (*od);

	mutex_enter(&zd->zd_dirobj_lock);
	if ((ztest_lookup(zd, od, count) != 0 || remove) &&
	    (ztest_remove(zd, od, count) != 0 ||
	    ztest_create(zd, od, count) != 0))
	mutex_exit(&zd->zd_dirobj_lock);
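/*
 * A typical caller, sketched for illustration (this mirrors the pattern
 * the test functions in this file use; the test body after the init is
 * omitted):
 *
 *	ztest_od_t od;
 *
 *	ztest_od_init(&od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
 *	if (ztest_object_init(zd, &od, sizeof (od), B_FALSE) != 0)
 *		return;
 */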
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
	zilog_t *zilog = zd->zd_zilog;

	zil_commit(zilog, ztest_random(ZTEST_OBJECTS));

	 * Remember the committed values in zd, which is in parent/child
	 * shared memory.  If we die, the next iteration of ztest_run()
	 * will verify that the log really does contain this record.
	mutex_enter(&zilog->zl_lock);
	ASSERT(zd->zd_seq <= zilog->zl_commit_lr_seq);
	zd->zd_seq = zilog->zl_commit_lr_seq;
	mutex_exit(&zilog->zl_lock);
 * Verify that we can't destroy an active pool, create an existing pool,
 * or create a pool with a bad vdev spec.

ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
	ztest_shared_t *zs = ztest_shared;

	 * Attempt to create using a bad file.
	nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
	VERIFY3U(ENOENT, ==,
	    spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
	nvlist_free(nvroot);

	 * Attempt to create using a bad mirror.
	nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1);
	VERIFY3U(ENOENT, ==,
	    spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
	nvlist_free(nvroot);

	 * Attempt to create an existing pool.  It shouldn't matter
	 * what's in the nvroot; we should fail with EEXIST.
	(void) rw_enter(&zs->zs_name_lock, RW_READER);
	nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
	VERIFY3U(EEXIST, ==, spa_create(zs->zs_pool, nvroot, NULL, NULL, NULL));
	nvlist_free(nvroot);
	VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG));
	VERIFY3U(EBUSY, ==, spa_destroy(zs->zs_pool));
	spa_close(spa, FTAG);

	(void) rw_exit(&zs->zs_name_lock);
vdev_lookup_by_path(vdev_t *vd, const char *path)
	if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=

 * Find the first available hole which can be used as a top-level.

find_vdev_hole(spa_t *spa)
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);

	for (c = 0; c < rvd->vdev_children; c++) {
		vdev_t *cvd = rvd->vdev_child[c];

		if (cvd->vdev_ishole)
 * Verify that vdev_add() works as expected.

ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = zs->zs_spa;

	mutex_enter(&zs->zs_vdev_lock);
	leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * zopt_raidz;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;

	 * If we have slogs then remove them 1/4 of the time.
	if (spa_has_slogs(spa) && ztest_random(4) == 0) {
		 * Grab the guid from the head of the log class rotor.
		guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;

		spa_config_exit(spa, SCL_VDEV, FTAG);

		 * We have to grab the zs_name_lock as writer to
		 * prevent a race between removing a slog (dmu_objset_find)
		 * and destroying a dataset.  Removing the slog will
		 * grab a reference on the dataset which may cause
		 * dmu_objset_destroy() to fail with EBUSY thus
		 * leaving the dataset in an inconsistent state.
		rw_enter(&ztest_shared->zs_name_lock, RW_WRITER);
		error = spa_vdev_remove(spa, guid, B_FALSE);
		rw_exit(&ztest_shared->zs_name_lock);

		if (error && error != EEXIST)
			fatal(0, "spa_vdev_remove() = %d", error);

		spa_config_exit(spa, SCL_VDEV, FTAG);

		 * Make 1/4 of the devices be log devices.
		nvroot = make_vdev_root(NULL, NULL, zopt_vdev_size, 0,
		    ztest_random(4) == 0, zopt_raidz, zs->zs_mirrors, 1);

		error = spa_vdev_add(spa, nvroot);
		nvlist_free(nvroot);

		if (error == ENOSPC)
			ztest_record_enospc("spa_vdev_add");
		else if (error != 0)
			fatal(0, "spa_vdev_add() = %d", error);

	mutex_exit(&ztest_shared->zs_vdev_lock);
 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.

ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = zs->zs_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	spa_aux_vdev_t *sav;

	path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

	if (ztest_random(2) == 0) {
		sav = &spa->spa_spares;
		aux = ZPOOL_CONFIG_SPARES;
		sav = &spa->spa_l2cache;
		aux = ZPOOL_CONFIG_L2CACHE;

	mutex_enter(&zs->zs_vdev_lock);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	if (sav->sav_count != 0 && ztest_random(4) == 0) {
		 * Pick a random device to remove.
		guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;

		 * Find an unused device we can add.
		zs->zs_vdev_aux = 0;
			(void) sprintf(path, ztest_aux_template, zopt_dir,
			    zopt_pool, aux, zs->zs_vdev_aux);
			for (c = 0; c < sav->sav_count; c++)
				if (strcmp(sav->sav_vdevs[c]->vdev_path,
			if (c == sav->sav_count &&
			    vdev_lookup_by_path(rvd, path) == NULL)

	spa_config_exit(spa, SCL_VDEV, FTAG);

		nvlist_t *nvroot = make_vdev_root(NULL, aux,
		    (zopt_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
		error = spa_vdev_add(spa, nvroot);
			fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
		nvlist_free(nvroot);

		 * Remove an existing device.  Sometimes, dirty its
		 * vdev state first to make sure we handle removal
		 * of devices that have pending state changes.
		if (ztest_random(2) == 0)
			(void) vdev_online(spa, guid, 0, NULL);

		error = spa_vdev_remove(spa, guid, B_FALSE);
		if (error != 0 && error != EBUSY)
			fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);

	mutex_exit(&zs->zs_vdev_lock);

	umem_free(path, MAXPATHLEN);
 * split a pool if it has mirror tlvdevs

ztest_split_pool(ztest_ds_t *zd, uint64_t id)
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = zs->zs_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	nvlist_t *tree, **child, *config, *split, **schild;
	uint_t c, children, schildren = 0, lastlogid = 0;

	mutex_enter(&zs->zs_vdev_lock);

	/* ensure we have a usable config; mirrors of raidz aren't supported */
	if (zs->zs_mirrors < 3 || zopt_raidz > 1) {
		mutex_exit(&zs->zs_vdev_lock);

	/* clean up the old pool, if any */
	(void) spa_destroy("splitp");

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	/* generate a config from the existing config */
	mutex_enter(&spa->spa_props_lock);
	VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
	mutex_exit(&spa->spa_props_lock);

	VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,

	schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
	for (c = 0; c < children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];

		if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
			VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
			VERIFY(nvlist_add_string(schild[schildren],
			    ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
			VERIFY(nvlist_add_uint64(schild[schildren],
			    ZPOOL_CONFIG_IS_HOLE, 1) == 0);
				lastlogid = schildren;

		VERIFY(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
		VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);

	/* OK, create a config that can be used to split */
	VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
	    lastlogid != 0 ? lastlogid : schildren) == 0);

	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);

	for (c = 0; c < schildren; c++)
		nvlist_free(schild[c]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	(void) rw_enter(&zs->zs_name_lock, RW_WRITER);
	error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
	(void) rw_exit(&zs->zs_name_lock);

	nvlist_free(config);

		(void) printf("successful split - results:\n");
		mutex_enter(&spa_namespace_lock);
		show_pool_stats(spa);
		show_pool_stats(spa_lookup("splitp"));
		mutex_exit(&spa_namespace_lock);

	mutex_exit(&zs->zs_vdev_lock);
 * Verify that we can attach and detach devices.

ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = zs->zs_spa;
	spa_aux_vdev_t *sav = &spa->spa_spares;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *pvd;
	uint64_t ashift = ztest_get_ashift();
	uint64_t oldguid, pguid;
	size_t oldsize, newsize;
	char *oldpath, *newpath;
	int oldvd_has_siblings = B_FALSE;
	int newvd_is_spare = B_FALSE;
	int error, expected_error;

	oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
	newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

	mutex_enter(&zs->zs_vdev_lock);
	leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	 * Decide whether to do an attach or a replace.
	replacing = ztest_random(2);

	 * Pick a random top-level vdev.
	top = ztest_random_vdev_top(spa, B_TRUE);

	 * Pick a random leaf within it.
	leaf = ztest_random(leaves);

	oldvd = rvd->vdev_child[top];
	if (zs->zs_mirrors >= 1) {
		ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
		ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
		oldvd = oldvd->vdev_child[leaf / zopt_raidz];
	if (zopt_raidz > 1) {
		ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
		ASSERT(oldvd->vdev_children == zopt_raidz);
		oldvd = oldvd->vdev_child[leaf % zopt_raidz];

	 * If we're already doing an attach or replace, oldvd may be a
	 * mirror vdev -- in which case, pick a random child.
	while (oldvd->vdev_children != 0) {
		oldvd_has_siblings = B_TRUE;
		ASSERT(oldvd->vdev_children >= 2);
		oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];

	oldguid = oldvd->vdev_guid;
	oldsize = vdev_get_min_asize(oldvd);
	oldvd_is_log = oldvd->vdev_top->vdev_islog;
	(void) strcpy(oldpath, oldvd->vdev_path);
	pvd = oldvd->vdev_parent;
	pguid = pvd->vdev_guid;

	 * If oldvd has siblings, then half of the time, detach it.
	if (oldvd_has_siblings && ztest_random(2) == 0) {
		spa_config_exit(spa, SCL_VDEV, FTAG);
		error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
		if (error != 0 && error != ENODEV && error != EBUSY &&
			fatal(0, "detach (%s) returned %d", oldpath, error);
	 * For the new vdev, choose with equal probability between the two
	 * standard paths (ending in either 'a' or 'b') or a random hot spare.
	if (sav->sav_count != 0 && ztest_random(3) == 0) {
		newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
		newvd_is_spare = B_TRUE;
		(void) strcpy(newpath, newvd->vdev_path);
		/* newpath is heap-allocated; sizeof (char *) would truncate */
		(void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
		    zopt_dir, zopt_pool, top * leaves + leaf);
		if (ztest_random(2) == 0)
			newpath[strlen(newpath) - 1] = 'b';
		newvd = vdev_lookup_by_path(rvd, newpath);
		newsize = vdev_get_min_asize(newvd);

		 * Make newsize a little bigger or smaller than oldsize.
		 * If it's smaller, the attach should fail.
		 * If it's larger, and we're doing a replace,
		 * we should get dynamic LUN growth when we're done.
		newsize = 10 * oldsize / (9 + ztest_random(3));
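		/*
		 * (Added note: ztest_random(3) is 0, 1, or 2, so newsize
		 * works out to 10/9, 10/10, or 10/11 of oldsize -- i.e.
		 * roughly one attach in three tries a too-small device.)
		 */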
	 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
	 * unless it's a replace; in that case any non-replacing parent is OK.
	 *
	 * If newvd is already part of the pool, it should fail with EBUSY.
	 *
	 * If newvd is too small, it should fail with EOVERFLOW.
	if (pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_root_ops && (!replacing ||
	    pvd->vdev_ops == &vdev_replacing_ops ||
	    pvd->vdev_ops == &vdev_spare_ops))
		expected_error = ENOTSUP;
	else if (newvd_is_spare && (!replacing || oldvd_is_log))
		expected_error = ENOTSUP;
	else if (newvd == oldvd)
		expected_error = replacing ? 0 : EBUSY;
	else if (vdev_lookup_by_path(rvd, newpath) != NULL)
		expected_error = EBUSY;
	else if (newsize < oldsize)
		expected_error = EOVERFLOW;
	else if (ashift > oldvd->vdev_top->vdev_ashift)
		expected_error = EDOM;

	spa_config_exit(spa, SCL_VDEV, FTAG);

	 * Build the nvlist describing newpath.
	root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0,
	    ashift, 0, 0, 0, 1);

	error = spa_vdev_attach(spa, oldguid, root, replacing);

	 * If our parent was the replacing vdev, but the replace completed,
	 * then instead of failing with ENOTSUP we may either succeed,
	 * fail with ENODEV, or fail with EOVERFLOW.
	if (expected_error == ENOTSUP &&
	    (error == 0 || error == ENODEV || error == EOVERFLOW))
		expected_error = error;

	 * If someone grew the LUN, the replacement may be too small.
	if (error == EOVERFLOW || error == EBUSY)
		expected_error = error;

	/* XXX workaround 6690467 */
	if (error != expected_error && expected_error != EBUSY) {
		fatal(0, "attach (%s %llu, %s %llu, %d) "
		    "returned %d, expected %d",
		    oldpath, (longlong_t)oldsize, newpath,
		    (longlong_t)newsize, replacing, error, expected_error);

	mutex_exit(&zs->zs_vdev_lock);

	umem_free(oldpath, MAXPATHLEN);
	umem_free(newpath, MAXPATHLEN);
2616 * Callback function which expands the physical size of the vdev.
2619 grow_vdev(vdev_t *vd, void *arg)
2621 ASSERTV(spa_t *spa = vd->vdev_spa);
size_t *newsize = arg;
size_t fsize;
int fd;
2626 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2627 ASSERT(vd->vdev_ops->vdev_op_leaf);
if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
	return (vd);
2632 fsize = lseek(fd, 0, SEEK_END);
2633 VERIFY(ftruncate(fd, *newsize) == 0);
2635 if (zopt_verbose >= 6) {
2636 (void) printf("%s grew from %lu to %lu bytes\n",
vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
}

(void) close(fd);
return (NULL);
2644 * Callback function which expands a given vdev by calling vdev_online().
2648 online_vdev(vdev_t *vd, void *arg)
2650 spa_t *spa = vd->vdev_spa;
2651 vdev_t *tvd = vd->vdev_top;
2652 uint64_t guid = vd->vdev_guid;
2653 uint64_t generation = spa->spa_config_generation + 1;
2654 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2657 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2658 ASSERT(vd->vdev_ops->vdev_op_leaf);
2660 /* Calling vdev_online will initialize the new metaslabs */
2661 spa_config_exit(spa, SCL_STATE, spa);
2662 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2663 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2666 * If vdev_online returned an error or the underlying vdev_open
2667 * failed then we abort the expand. The only way to know that
* vdev_open failed is by checking the returned newstate.
2670 if (error || newstate != VDEV_STATE_HEALTHY) {
2671 if (zopt_verbose >= 5) {
2672 (void) printf("Unable to expand vdev, state %llu, "
2673 "error %d\n", (u_longlong_t)newstate, error);
2677 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
2680 * Since we dropped the lock we need to ensure that we're
2681 * still talking to the original vdev. It's possible this
2682 * vdev may have been detached/replaced while we were
2683 * trying to online it.
2685 if (generation != spa->spa_config_generation) {
2686 if (zopt_verbose >= 5) {
2687 (void) printf("vdev configuration has changed, "
2688 "guid %llu, state %llu, expected gen %llu, "
2691 (u_longlong_t)tvd->vdev_state,
2692 (u_longlong_t)generation,
2693 (u_longlong_t)spa->spa_config_generation);
2701 * Traverse the vdev tree calling the supplied function.
2702 * We continue to walk the tree until we either have walked all
2703 * children or we receive a non-NULL return from the callback.
* If a NULL callback is passed, then we just return the first
* leaf vdev we encounter.
2708 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
2712 if (vd->vdev_ops->vdev_op_leaf) {
if (func == NULL)
	return (vd);
else
	return (func(vd, arg));
}
2719 for (c = 0; c < vd->vdev_children; c++) {
2720 vdev_t *cvd = vd->vdev_child[c];
if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
	return (cvd);
}

return (NULL);
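/*
 * Usage sketch -- both patterns appear in ztest_vdev_LUN_growth() below:
 *
 *	vd = vdev_walk_tree(tvd, NULL, NULL);		   find first leaf
 *	(void) vdev_walk_tree(tvd, grow_vdev, &newsize);   apply a callback
 */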
2728 * Verify that dynamic LUN growth works as expected.
2732 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
2734 ztest_shared_t *zs = ztest_shared;
2735 spa_t *spa = zs->zs_spa;
2737 metaslab_class_t *mc;
2738 metaslab_group_t *mg;
2739 size_t psize, newsize;
2741 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
2743 mutex_enter(&zs->zs_vdev_lock);
2744 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2746 top = ztest_random_vdev_top(spa, B_TRUE);
2748 tvd = spa->spa_root_vdev->vdev_child[top];
mg = tvd->vdev_mg;
mc = mg->mg_class;
old_ms_count = tvd->vdev_ms_count;
2752 old_class_space = metaslab_class_get_space(mc);
2755 * Determine the size of the first leaf vdev associated with
2756 * our top-level device.
2758 vd = vdev_walk_tree(tvd, NULL, NULL);
2759 ASSERT3P(vd, !=, NULL);
2760 ASSERT(vd->vdev_ops->vdev_op_leaf);
2762 psize = vd->vdev_psize;
2765 * We only try to expand the vdev if it's healthy, less than 4x its
2766 * original size, and it has a valid psize.
2768 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
2769 psize == 0 || psize >= 4 * zopt_vdev_size) {
2770 spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&zs->zs_vdev_lock);
return;
}
2775 newsize = psize + psize / 8;
2776 ASSERT3U(newsize, >, psize);
2778 if (zopt_verbose >= 6) {
2779 (void) printf("Expanding LUN %s from %lu to %lu\n",
2780 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
2784 * Growing the vdev is a two step process:
2785 * 1). expand the physical size (i.e. relabel)
2786 * 2). online the vdev to create the new metaslabs
2788 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
2789 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
2790 tvd->vdev_state != VDEV_STATE_HEALTHY) {
2791 if (zopt_verbose >= 5) {
2792 (void) printf("Could not expand LUN because "
2793 "the vdev configuration changed.\n");
2795 spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&zs->zs_vdev_lock);
return;
}
2800 spa_config_exit(spa, SCL_STATE, spa);
2803 * Expanding the LUN will update the config asynchronously,
2804 * thus we must wait for the async thread to complete any
2805 * pending tasks before proceeding.
for (;;) {
	boolean_t done;
	mutex_enter(&spa->spa_async_lock);
	done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
	mutex_exit(&spa->spa_async_lock);
	if (done)
		break;
	txg_wait_synced(spa_get_dsl(spa), 0);
	(void) poll(NULL, 0, 100);
}
2818 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2820 tvd = spa->spa_root_vdev->vdev_child[top];
2821 new_ms_count = tvd->vdev_ms_count;
2822 new_class_space = metaslab_class_get_space(mc);
2824 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
2825 if (zopt_verbose >= 5) {
2826 (void) printf("Could not verify LUN expansion due to "
2827 "intervening vdev offline or remove.\n");
2829 spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&zs->zs_vdev_lock);
return;
}
2835 * Make sure we were able to grow the vdev.
2837 if (new_ms_count <= old_ms_count)
2838 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
2839 old_ms_count, new_ms_count);
2842 * Make sure we were able to grow the pool.
2844 if (new_class_space <= old_class_space)
fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
    new_class_space, old_class_space);
2848 if (zopt_verbose >= 5) {
2849 char oldnumbuf[6], newnumbuf[6];
2851 nicenum(old_class_space, oldnumbuf);
2852 nicenum(new_class_space, newnumbuf);
2853 (void) printf("%s grew from %s to %s\n",
2854 spa->spa_name, oldnumbuf, newnumbuf);
2857 spa_config_exit(spa, SCL_STATE, spa);
2858 mutex_exit(&zs->zs_vdev_lock);
2862 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
2866 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
2869 * Create the objects common to all ztest datasets.
2871 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
2872 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
2876 ztest_dataset_create(char *dsname)
2878 uint64_t zilset = ztest_random(100);
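/*
 * Roughly 20% of datasets (zilset >= 80) are set to sync=always below,
 * so the synchronous ZIL code paths get regular exercise.
 */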
2879 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
2880 ztest_objset_create_cb, NULL);
if (err || zilset < 80)
	return (err);

if (zopt_verbose >= 5)
	(void) printf("Setting dataset %s to sync always\n", dsname);
2886 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
2887 ZFS_SYNC_ALWAYS, B_FALSE));
2892 ztest_objset_destroy_cb(const char *name, void *arg)
2895 dmu_object_info_t doi;
2899 * Verify that the dataset contains a directory object.
2901 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os));
2902 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
2903 if (error != ENOENT) {
2904 /* We could have crashed in the middle of destroying it */
2905 ASSERT3U(error, ==, 0);
2906 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
2907 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
2909 dmu_objset_rele(os, FTAG);
2912 * Destroy the dataset.
2914 VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE));
2919 ztest_snapshot_create(char *osname, uint64_t id)
2921 char snapname[MAXNAMELEN];
(void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
    (u_longlong_t)id);
2927 error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1,
2928 NULL, NULL, B_FALSE, B_FALSE, -1);
2929 if (error == ENOSPC) {
2930 ztest_record_enospc(FTAG);
2933 if (error != 0 && error != EEXIST)
2934 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error);
2939 ztest_snapshot_destroy(char *osname, uint64_t id)
2941 char snapname[MAXNAMELEN];
(void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
    (u_longlong_t)id);
2947 error = dmu_objset_destroy(snapname, B_FALSE);
2948 if (error != 0 && error != ENOENT)
2949 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
2955 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
2957 ztest_shared_t *zs = ztest_shared;
2966 zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
2967 name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
2969 (void) rw_enter(&zs->zs_name_lock, RW_READER);
2971 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
2972 zs->zs_pool, (u_longlong_t)id);
2975 * If this dataset exists from a previous run, process its replay log
2976 * half of the time. If we don't replay it, then dmu_objset_destroy()
2977 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
2979 if (ztest_random(2) == 0 &&
2980 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
2981 ztest_zd_init(zdtmp, os);
2982 zil_replay(os, zdtmp, ztest_replay_vector);
2983 ztest_zd_fini(zdtmp);
2984 dmu_objset_disown(os, FTAG);
2988 * There may be an old instance of the dataset we're about to
2989 * create lying around from a previous run. If so, destroy it
2990 * and all of its snapshots.
2992 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
2993 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2996 * Verify that the destroyed dataset is no longer in the namespace.
2998 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os));
3001 * Verify that we can create a new dataset.
3003 error = ztest_dataset_create(name);
3005 if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_objset_create(%s) = %d", name, error);
VERIFY3U(0, ==,
    dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3015 ztest_zd_init(zdtmp, os);
3018 * Open the intent log for it.
3020 zilog = zil_open(os, ztest_get_data);
3023 * Put some objects in there, do a little I/O to them,
3024 * and randomly take a couple of snapshots along the way.
3026 iters = ztest_random(5);
3027 for (i = 0; i < iters; i++) {
3028 ztest_dmu_object_alloc_free(zdtmp, id);
3029 if (ztest_random(iters) == 0)
3030 (void) ztest_snapshot_create(name, i);
3034 * Verify that we cannot create an existing dataset.
3036 VERIFY3U(EEXIST, ==,
3037 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3040 * Verify that we can hold an objset that is also owned.
3042 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3043 dmu_objset_rele(os2, FTAG);
3046 * Verify that we cannot own an objset that is already owned.
VERIFY3U(EBUSY, ==,
    dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
zil_close(zilog);
dmu_objset_disown(os, FTAG);
3053 ztest_zd_fini(zdtmp);
out:
(void) rw_exit(&zs->zs_name_lock);
3057 umem_free(name, MAXNAMELEN);
3058 umem_free(zdtmp, sizeof (ztest_ds_t));
3062 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3065 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3067 ztest_shared_t *zs = ztest_shared;
3069 (void) rw_enter(&zs->zs_name_lock, RW_READER);
3070 (void) ztest_snapshot_destroy(zd->zd_name, id);
3071 (void) ztest_snapshot_create(zd->zd_name, id);
3072 (void) rw_exit(&zs->zs_name_lock);
3076 * Cleanup non-standard snapshots and clones.
3079 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3088 snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3089 clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3090 snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3091 clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3092 snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3094 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
3095 osname, (u_longlong_t)id);
3096 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
3097 osname, (u_longlong_t)id);
3098 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu",
3099 clone1name, (u_longlong_t)id);
3100 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu",
3101 osname, (u_longlong_t)id);
3102 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu",
3103 clone1name, (u_longlong_t)id);
3105 error = dmu_objset_destroy(clone2name, B_FALSE);
3106 if (error && error != ENOENT)
3107 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error);
3108 error = dmu_objset_destroy(snap3name, B_FALSE);
3109 if (error && error != ENOENT)
3110 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error);
3111 error = dmu_objset_destroy(snap2name, B_FALSE);
3112 if (error && error != ENOENT)
3113 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error);
3114 error = dmu_objset_destroy(clone1name, B_FALSE);
3115 if (error && error != ENOENT)
3116 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error);
3117 error = dmu_objset_destroy(snap1name, B_FALSE);
3118 if (error && error != ENOENT)
3119 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error);
3121 umem_free(snap1name, MAXNAMELEN);
3122 umem_free(clone1name, MAXNAMELEN);
3123 umem_free(snap2name, MAXNAMELEN);
3124 umem_free(clone2name, MAXNAMELEN);
3125 umem_free(snap3name, MAXNAMELEN);
3129 * Verify dsl_dataset_promote handles EBUSY
3132 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3134 ztest_shared_t *zs = ztest_shared;
3142 char *osname = zd->zd_name;
3145 snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3146 clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3147 snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3148 clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3149 snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3151 (void) rw_enter(&zs->zs_name_lock, RW_READER);
3153 ztest_dsl_dataset_cleanup(osname, id);
3155 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
3156 osname, (u_longlong_t)id);
3157 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
3158 osname, (u_longlong_t)id);
3159 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu",
3160 clone1name, (u_longlong_t)id);
3161 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu",
3162 osname, (u_longlong_t)id);
3163 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu",
3164 clone1name, (u_longlong_t)id);
3166 error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1,
3167 NULL, NULL, B_FALSE, B_FALSE, -1);
3168 if (error && error != EEXIST) {
3169 if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_objset_snapshot(%s) = %d", snap1name, error);
error = dmu_objset_hold(snap1name, FTAG, &clone);
if (error)
	fatal(0, "dmu_objset_hold(%s) = %d", snap1name, error);
3180 error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0);
3181 dmu_objset_rele(clone, FTAG);
if (error) {
	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		goto out;
	}
	fatal(0, "dmu_objset_clone(%s) = %d", clone1name, error);
}
3190 error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1,
3191 NULL, NULL, B_FALSE, B_FALSE, -1);
3192 if (error && error != EEXIST) {
3193 if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_objset_snapshot(%s) = %d", snap2name, error);
3200 error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1,
3201 NULL, NULL, B_FALSE, B_FALSE, -1);
3202 if (error && error != EEXIST) {
3203 if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_objset_snapshot(%s) = %d", snap3name, error);
3210 error = dmu_objset_hold(snap3name, FTAG, &clone);
3212 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
3214 error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0);
3215 dmu_objset_rele(clone, FTAG);
if (error) {
	if (error == ENOSPC) {
		ztest_record_enospc(FTAG);
		goto out;
	}
	fatal(0, "dmu_objset_clone(%s) = %d", clone2name, error);
}
3224 error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds);
3226 fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error);
error = dsl_dataset_promote(clone2name, NULL);
if (error != EBUSY)
	fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
	    error);
dsl_dataset_disown(ds, FTAG);
out:
ztest_dsl_dataset_cleanup(osname, id);
3236 (void) rw_exit(&zs->zs_name_lock);
3238 umem_free(snap1name, MAXNAMELEN);
3239 umem_free(clone1name, MAXNAMELEN);
3240 umem_free(snap2name, MAXNAMELEN);
3241 umem_free(clone2name, MAXNAMELEN);
3242 umem_free(snap3name, MAXNAMELEN);
3245 #undef OD_ARRAY_SIZE
3246 #define OD_ARRAY_SIZE 4
3249 * Verify that dmu_object_{alloc,free} work as expected.
3252 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3259 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3260 od = umem_alloc(size, UMEM_NOFAIL);
3261 batchsize = OD_ARRAY_SIZE;
3263 for (b = 0; b < batchsize; b++)
3264 ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3267 * Destroy the previous batch of objects, create a new batch,
3268 * and do some I/O on the new objects.
3270 if (ztest_object_init(zd, od, size, B_TRUE) != 0)
3273 while (ztest_random(4 * batchsize) != 0)
3274 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3275 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3277 umem_free(od, size);
3280 #undef OD_ARRAY_SIZE
3281 #define OD_ARRAY_SIZE 2
3284 * Verify that dmu_{read,write} work as expected.
3287 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3292 objset_t *os = zd->zd_os;
3293 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3294 od = umem_alloc(size, UMEM_NOFAIL);
3296 int i, freeit, error;
3298 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3299 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3300 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
3301 uint64_t regions = 997;
3302 uint64_t stride = 123456789ULL;
3303 uint64_t width = 40;
3304 int free_percent = 5;
3307 * This test uses two objects, packobj and bigobj, that are always
3308 * updated together (i.e. in the same tx) so that their contents are
3309 * in sync and can be compared. Their contents relate to each other
3310 * in a simple way: packobj is a dense array of 'bufwad' structures,
3311 * while bigobj is a sparse array of the same bufwads. Specifically,
3312 * for any index n, there are three bufwads that should be identical:
3314 * packobj, at offset n * sizeof (bufwad_t)
3315 * bigobj, at the head of the nth chunk
3316 * bigobj, at the tail of the nth chunk
3318 * The chunk size is arbitrary. It doesn't have to be a power of two,
3319 * and it doesn't have any relation to the object blocksize.
3320 * The only requirement is that it can hold at least two bufwads.
3322 * Normally, we write the bufwad to each of these locations.
3323 * However, free_percent of the time we instead write zeroes to
3324 * packobj and perform a dmu_free_range() on bigobj. By comparing
3325 * bigobj to packobj, we can verify that the DMU is correctly
3326 * tracking which parts of an object are allocated and free,
3327 * and that the contents of the allocated blocks are correct.
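/*
 * Layout sketch for index n (all three bufwads must be identical):
 *
 *	packobj: | bufwad 0 | bufwad 1 | ... | bufwad n | ...
 *	bigobj:  | chunk 0 | ... | bufwad n (head) .. bufwad n (tail) | ...
 *	                         |<------------ chunk n ------------>|
 */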
3331 * Read the directory info. If it's the first time, set things up.
3333 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3334 ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3336 if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
3341 bigobj = od[0].od_object;
3342 packobj = od[1].od_object;
3343 chunksize = od[0].od_gen;
3344 ASSERT(chunksize == od[1].od_gen);
3347 * Prefetch a random chunk of the big object.
3348 * Our aim here is to get some async reads in flight
3349 * for blocks that we may free below; the DMU should
3350 * handle this race correctly.
3352 n = ztest_random(regions) * stride + ztest_random(width);
3353 s = 1 + ztest_random(2 * width - 1);
3354 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3357 * Pick a random index and compute the offsets into packobj and bigobj.
3359 n = ztest_random(regions) * stride + ztest_random(width);
3360 s = 1 + ztest_random(width - 1);
3362 packoff = n * sizeof (bufwad_t);
3363 packsize = s * sizeof (bufwad_t);
3365 bigoff = n * chunksize;
3366 bigsize = s * chunksize;
3368 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3369 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3372 * free_percent of the time, free a range of bigobj rather than
3375 freeit = (ztest_random(100) < free_percent);
3378 * Read the current contents of our objects.
error = dmu_read(os, packobj, packoff, packsize, packbuf,
    DMU_READ_PREFETCH);
ASSERT3U(error, ==, 0);
error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
    DMU_READ_PREFETCH);
ASSERT3U(error, ==, 0);
3388 * Get a tx for the mods to both packobj and bigobj.
3390 tx = dmu_tx_create(os);
3392 dmu_tx_hold_write(tx, packobj, packoff, packsize);
if (freeit)
	dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
else
	dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
	umem_free(packbuf, packsize);
	umem_free(bigbuf, bigsize);
	umem_free(od, size);
	return;
}
3407 dmu_object_set_checksum(os, bigobj,
3408 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3410 dmu_object_set_compress(os, bigobj,
3411 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3414 * For each index from n to n + s, verify that the existing bufwad
3415 * in packobj matches the bufwads at the head and tail of the
3416 * corresponding chunk in bigobj. Then update all three bufwads
3417 * with the new values we want to write out.
3419 for (i = 0; i < s; i++) {
3421 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3423 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3425 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3427 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3428 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3430 if (pack->bw_txg > txg)
3431 fatal(0, "future leak: got %llx, open txg is %llx",
3434 if (pack->bw_data != 0 && pack->bw_index != n + i)
3435 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3436 pack->bw_index, n, i);
3438 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3439 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3441 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3442 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
if (freeit) {
	bzero(pack, sizeof (bufwad_t));
} else {
	pack->bw_index = n + i;
	pack->bw_txg = txg;
	pack->bw_data = 1 + ztest_random(-2ULL);
}

*bigH = *pack;
*bigT = *pack;
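/*
 * Note: ztest_random(-2ULL) yields a value in [0, 2^64 - 2), so
 * bw_data is always nonzero; bw_data == 0 is reserved to mean the
 * bufwad was zeroed by a free.
 */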
3456 * We've verified all the old bufwads, and made new ones.
3457 * Now write them out.
3459 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (freeit) {
	if (zopt_verbose >= 7) {
		(void) printf("freeing offset %llx size %llx txg %llx\n",
		    (u_longlong_t)bigoff,
		    (u_longlong_t)bigsize,
		    (u_longlong_t)txg);
	}
	VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
} else {
	if (zopt_verbose >= 7) {
		(void) printf("writing offset %llx size %llx txg %llx\n",
		    (u_longlong_t)bigoff,
		    (u_longlong_t)bigsize,
		    (u_longlong_t)txg);
	}
	dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
}

dmu_tx_commit(tx);
3484 * Sanity check the stuff we just wrote.
3487 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3488 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3490 VERIFY(0 == dmu_read(os, packobj, packoff,
3491 packsize, packcheck, DMU_READ_PREFETCH));
3492 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3493 bigsize, bigcheck, DMU_READ_PREFETCH));
3495 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3496 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3498 umem_free(packcheck, packsize);
3499 umem_free(bigcheck, bigsize);
3502 umem_free(packbuf, packsize);
3503 umem_free(bigbuf, bigsize);
3504 umem_free(od, size);
3508 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3509 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3517 * For each index from n to n + s, verify that the existing bufwad
3518 * in packobj matches the bufwads at the head and tail of the
3519 * corresponding chunk in bigobj. Then update all three bufwads
3520 * with the new values we want to write out.
3522 for (i = 0; i < s; i++) {
3524 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3526 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3528 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3530 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3531 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3533 if (pack->bw_txg > txg)
fatal(0, "future leak: got %llx, open txg is %llx",
    pack->bw_txg, txg);
3537 if (pack->bw_data != 0 && pack->bw_index != n + i)
3538 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3539 pack->bw_index, n, i);
3541 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3542 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3544 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3545 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);

*bigH = *pack;
*bigT = *pack;
3556 #undef OD_ARRAY_SIZE
3557 #define OD_ARRAY_SIZE 2
3560 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3562 objset_t *os = zd->zd_os;
3569 bufwad_t *packbuf, *bigbuf;
3570 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3571 uint64_t blocksize = ztest_random_blocksize();
3572 uint64_t chunksize = blocksize;
3573 uint64_t regions = 997;
3574 uint64_t stride = 123456789ULL;
3576 dmu_buf_t *bonus_db;
3577 arc_buf_t **bigbuf_arcbufs;
3578 dmu_object_info_t doi;
3580 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3581 od = umem_alloc(size, UMEM_NOFAIL);
3584 * This test uses two objects, packobj and bigobj, that are always
3585 * updated together (i.e. in the same tx) so that their contents are
3586 * in sync and can be compared. Their contents relate to each other
3587 * in a simple way: packobj is a dense array of 'bufwad' structures,
3588 * while bigobj is a sparse array of the same bufwads. Specifically,
3589 * for any index n, there are three bufwads that should be identical:
3591 * packobj, at offset n * sizeof (bufwad_t)
3592 * bigobj, at the head of the nth chunk
3593 * bigobj, at the tail of the nth chunk
3595 * The chunk size is set equal to bigobj block size so that
3596 * dmu_assign_arcbuf() can be tested for object updates.
3600 * Read the directory info. If it's the first time, set things up.
3602 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3603 ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
	umem_free(od, size);
	return;
}
3611 bigobj = od[0].od_object;
3612 packobj = od[1].od_object;
3613 blocksize = od[0].od_blocksize;
3614 chunksize = blocksize;
3615 ASSERT(chunksize == od[1].od_gen);
3617 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3618 VERIFY(ISP2(doi.doi_data_block_size));
3619 VERIFY(chunksize == doi.doi_data_block_size);
3620 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3623 * Pick a random index and compute the offsets into packobj and bigobj.
3625 n = ztest_random(regions) * stride + ztest_random(width);
3626 s = 1 + ztest_random(width - 1);
3628 packoff = n * sizeof (bufwad_t);
3629 packsize = s * sizeof (bufwad_t);
3631 bigoff = n * chunksize;
3632 bigsize = s * chunksize;
3634 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3635 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3637 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3639 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3642 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
3643 * Iteration 1 test zcopy to already referenced dbufs.
3644 * Iteration 2 test zcopy to dirty dbuf in the same txg.
3645 * Iteration 3 test zcopy to dbuf dirty in previous txg.
3646 * Iteration 4 test zcopy when dbuf is no longer dirty.
3647 * Iteration 5 test zcopy when it can't be done.
3648 * Iteration 6 one more zcopy write.
3650 for (i = 0; i < 7; i++) {
3655 * In iteration 5 (i == 5) use arcbufs
3656 * that don't match bigobj blksz to test
3657 * dmu_assign_arcbuf() when it can't directly
3658 * assign an arcbuf to a dbuf.
for (j = 0; j < s; j++) {
	if (i != 5) {
		bigbuf_arcbufs[j] =
		    dmu_request_arcbuf(bonus_db, chunksize);
	} else {
		bigbuf_arcbufs[2 * j] =
		    dmu_request_arcbuf(bonus_db, chunksize / 2);
		bigbuf_arcbufs[2 * j + 1] =
		    dmu_request_arcbuf(bonus_db, chunksize / 2);
	}
}
3673 * Get a tx for the mods to both packobj and bigobj.
3675 tx = dmu_tx_create(os);
3677 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3678 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
3682 umem_free(packbuf, packsize);
3683 umem_free(bigbuf, bigsize);
for (j = 0; j < s; j++) {
	if (i != 5) {
		dmu_return_arcbuf(bigbuf_arcbufs[j]);
	} else {
		dmu_return_arcbuf(bigbuf_arcbufs[2 * j]);
		dmu_return_arcbuf(bigbuf_arcbufs[2 * j + 1]);
	}
}
3694 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3695 umem_free(od, size);
dmu_buf_rele(bonus_db, FTAG);
return;
}
3701 * 50% of the time don't read objects in the 1st iteration to
* test dmu_assign_arcbuf() for the case when there are no
3703 * existing dbufs for the specified offsets.
3705 if (i != 0 || ztest_random(2) != 0) {
3706 error = dmu_read(os, packobj, packoff,
3707 packsize, packbuf, DMU_READ_PREFETCH);
3708 ASSERT3U(error, ==, 0);
3709 error = dmu_read(os, bigobj, bigoff, bigsize,
3710 bigbuf, DMU_READ_PREFETCH);
3711 ASSERT3U(error, ==, 0);
compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
    n, chunksize, txg);
3717 * We've verified all the old bufwads, and made new ones.
3718 * Now write them out.
3720 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3721 if (zopt_verbose >= 7) {
3722 (void) printf("writing offset %llx size %llx"
3724 (u_longlong_t)bigoff,
3725 (u_longlong_t)bigsize,
3728 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
if (i != 5) {
	bcopy((caddr_t)bigbuf + (off - bigoff),
	    bigbuf_arcbufs[j]->b_data, chunksize);
} else {
	bcopy((caddr_t)bigbuf + (off - bigoff),
	    bigbuf_arcbufs[2 * j]->b_data, chunksize / 2);
	bcopy((caddr_t)bigbuf + (off - bigoff) + chunksize / 2,
	    bigbuf_arcbufs[2 * j + 1]->b_data, chunksize / 2);
}
if (i == 1) {
	VERIFY(dmu_buf_hold(os, bigobj, off,
	    FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
}
if (i != 5) {
	dmu_assign_arcbuf(bonus_db, off,
	    bigbuf_arcbufs[j], tx);
} else {
	dmu_assign_arcbuf(bonus_db, off,
	    bigbuf_arcbufs[2 * j], tx);
	dmu_assign_arcbuf(bonus_db,
	    off + chunksize / 2,
	    bigbuf_arcbufs[2 * j + 1], tx);
}
if (i == 1) {
	dmu_buf_rele(dbt, FTAG);
}
}

dmu_tx_commit(tx);
3764 * Sanity check the stuff we just wrote.
3767 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3768 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3770 VERIFY(0 == dmu_read(os, packobj, packoff,
3771 packsize, packcheck, DMU_READ_PREFETCH));
3772 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3773 bigsize, bigcheck, DMU_READ_PREFETCH));
3775 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3776 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3778 umem_free(packcheck, packsize);
3779 umem_free(bigcheck, bigsize);
if (i == 2) {
	txg_wait_open(dmu_objset_pool(os), 0);
3783 } else if (i == 3) {
3784 txg_wait_synced(dmu_objset_pool(os), 0);
3788 dmu_buf_rele(bonus_db, FTAG);
3789 umem_free(packbuf, packsize);
3790 umem_free(bigbuf, bigsize);
3791 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3792 umem_free(od, size);
3797 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
3801 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
3802 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
3803 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3806 * Have multiple threads write to large offsets in an object
3807 * to verify that parallel writes to an object -- even to the
* same blocks within the object -- don't cause any trouble.
3810 ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
	umem_free(od, sizeof (ztest_od_t));
	return;
}
3815 while (ztest_random(10) != 0)
3816 ztest_io(zd, od->od_object, offset);
3818 umem_free(od, sizeof(ztest_od_t));
3822 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
3825 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
3826 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3827 uint64_t count = ztest_random(20) + 1;
3828 uint64_t blocksize = ztest_random_blocksize();
3831 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
3833 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3835 if (ztest_object_init(zd, od, sizeof (ztest_od_t), !ztest_random(2)) != 0) {
umem_free(od, sizeof(ztest_od_t));
return;
}
3840 if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
umem_free(od, sizeof(ztest_od_t));
return;
}
3845 ztest_prealloc(zd, od->od_object, offset, count * blocksize);
3847 data = umem_zalloc(blocksize, UMEM_NOFAIL);
3849 while (ztest_random(count) != 0) {
3850 uint64_t randoff = offset + (ztest_random(count) * blocksize);
3851 if (ztest_write(zd, od->od_object, randoff, blocksize,
3854 while (ztest_random(4) != 0)
3855 ztest_io(zd, od->od_object, randoff);
3858 umem_free(data, blocksize);
3859 umem_free(od, sizeof(ztest_od_t));
3863 * Verify that zap_{create,destroy,add,remove,update} work as expected.
3865 #define ZTEST_ZAP_MIN_INTS 1
3866 #define ZTEST_ZAP_MAX_INTS 4
3867 #define ZTEST_ZAP_MAX_PROPS 1000
3870 ztest_zap(ztest_ds_t *zd, uint64_t id)
3872 objset_t *os = zd->zd_os;
3875 uint64_t txg, last_txg;
3876 uint64_t value[ZTEST_ZAP_MAX_INTS];
3877 uint64_t zl_ints, zl_intsize, prop;
3880 char propname[100], txgname[100];
3882 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
3884 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
3885 ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
3887 if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
	goto out;
3891 object = od->od_object;
3894 * Generate a known hash collision, and verify that
3895 * we can lookup and remove both entries.
3897 tx = dmu_tx_create(os);
3898 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
3899 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
for (i = 0; i < 2; i++) {
	value[i] = i;
	VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
	    1, &value[i], tx));
}
3907 for (i = 0; i < 2; i++) {
3908 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
3909 sizeof (uint64_t), 1, &value[i], tx));
VERIFY3U(0, ==,
    zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
3912 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3913 ASSERT3U(zl_ints, ==, 1);
3915 for (i = 0; i < 2; i++) {
VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
}

dmu_tx_commit(tx);
* Generate a bunch of random entries.
3923 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
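/*
 * This is deterministic per object (1 to ZTEST_ZAP_MAX_INTS - 1 ints),
 * so a later pass over the same object expects the same width.
 */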
3925 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
3926 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
3927 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
3928 bzero(value, sizeof (value));
3932 * If these zap entries already exist, validate their contents.
3934 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == 0) {
	ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3937 ASSERT3U(zl_ints, ==, 1);
3939 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
3940 zl_ints, &last_txg) == 0);
VERIFY(zap_length(os, object, propname, &zl_intsize,
    &zl_ints) == 0);
3945 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3946 ASSERT3U(zl_ints, ==, ints);
3948 VERIFY(zap_lookup(os, object, propname, zl_intsize,
3949 zl_ints, value) == 0);
3951 for (i = 0; i < ints; i++) {
3952 ASSERT3U(value[i], ==, last_txg + object + i);
} else {
	ASSERT3U(error, ==, ENOENT);
}
3959 * Atomically update two entries in our zap object.
3960 * The first is named txg_%llu, and contains the txg
3961 * in which the property was last updated. The second
3962 * is named prop_%llu, and the nth element of its value
3963 * should be txg + object + n.
3965 tx = dmu_tx_create(os);
3966 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
3967 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3972 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
3974 for (i = 0; i < ints; i++)
3975 value[i] = txg + object + i;
VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
    1, &txg, tx));
VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
    ints, value, tx));

dmu_tx_commit(tx);
3985 * Remove a random pair of entries.
3987 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
3988 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
3989 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
3991 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == ENOENT)
	goto out;

ASSERT3U(error, ==, 0);
3998 tx = dmu_tx_create(os);
3999 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4000 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
dmu_tx_commit(tx);
out:
umem_free(od, sizeof(ztest_od_t));
* Test case for the upgrade of a microzap to a fatzap.
4014 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4016 objset_t *os = zd->zd_os;
4018 uint64_t object, txg;
4021 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4022 ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4024 if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
	goto out;
4027 object = od->od_object;
4030 * Add entries to this ZAP and make sure it spills over
4031 * and gets upgraded to a fatzap. Also, since we are adding
* 2050 entries we should see ptrtbl growth and leaf-block splits.
4034 for (i = 0; i < 2050; i++) {
char name[MAXNAMELEN];
uint64_t value = i;
dmu_tx_t *tx;
int error;
4040 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4041 (u_longlong_t)id, (u_longlong_t)value);
4043 tx = dmu_tx_create(os);
4044 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4045 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
error = zap_add(os, object, name, sizeof (uint64_t), 1,
    &value, tx);
ASSERT(error == 0 || error == EEXIST);
dmu_tx_commit(tx);
out:
umem_free(od, sizeof(ztest_od_t));
4059 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4061 objset_t *os = zd->zd_os;
4063 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4065 int i, namelen, error;
4066 int micro = ztest_random(2);
4067 char name[20], string_value[20];
4070 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4071 ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4073 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
4074 umem_free(od, sizeof(ztest_od_t));
4078 object = od->od_object;
4081 * Generate a random name of the form 'xxx.....' where each
4082 * x is a random printable character and the dots are dots.
4083 * There are 94 such characters, and the name length goes from
4084 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4086 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4088 for (i = 0; i < 3; i++)
4089 name[i] = '!' + ztest_random('~' - '!' + 1);
for (; i < namelen - 1; i++)
	name[i] = '.';
name[i] = '\0';
4094 if ((namelen & 1) || micro) {
wsize = sizeof (txg);
wc = 1;
data = &txg;
} else {
	wsize = 1;
	wc = namelen;
	data = string_value;
}
4105 VERIFY(zap_count(os, object, &count) == 0);
4106 ASSERT(count != -1ULL);
4109 * Select an operation: length, lookup, add, update, remove.
4111 i = ztest_random(5);
4114 tx = dmu_tx_create(os);
4115 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4116 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
	goto out;
bcopy(name, string_value, namelen);
} else {
	tx = NULL;
	txg = 0;
	bzero(string_value, namelen);
}
4129 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4131 ASSERT3U(wsize, ==, zl_wsize);
4132 ASSERT3U(wc, ==, zl_wc);
4134 ASSERT3U(error, ==, ENOENT);
4139 error = zap_lookup(os, object, name, wsize, wc, data);
4141 if (data == string_value &&
4142 bcmp(name, data, namelen) != 0)
4143 fatal(0, "name '%s' != val '%s' len %d",
4144 name, data, namelen);
4146 ASSERT3U(error, ==, ENOENT);
4151 error = zap_add(os, object, name, wsize, wc, data, tx);
4152 ASSERT(error == 0 || error == EEXIST);
4156 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4160 error = zap_remove(os, object, name, tx);
4161 ASSERT(error == 0 || error == ENOENT);
if (tx != NULL)
	dmu_tx_commit(tx);
out:
umem_free(od, sizeof(ztest_od_t));
4172 * Commit callback data.
4174 typedef struct ztest_cb_data {
4175 list_node_t zcd_node;
4177 int zcd_expected_err;
4178 boolean_t zcd_added;
4179 boolean_t zcd_called;
4183 /* This is the actual commit callback function */
4185 ztest_commit_callback(void *arg, int error)
4187 ztest_cb_data_t *data = arg;
4188 uint64_t synced_txg;
4190 VERIFY(data != NULL);
4191 VERIFY3S(data->zcd_expected_err, ==, error);
4192 VERIFY(!data->zcd_called);
4194 synced_txg = spa_last_synced_txg(data->zcd_spa);
4195 if (data->zcd_txg > synced_txg)
4196 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4197 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4200 data->zcd_called = B_TRUE;
4202 if (error == ECANCELED) {
4203 ASSERT3U(data->zcd_txg, ==, 0);
4204 ASSERT(!data->zcd_added);
4207 * The private callback data should be destroyed here, but
4208 * since we are going to check the zcd_called field after
4209 * dmu_tx_abort(), we will destroy it there.
4214 ASSERT(data->zcd_added);
4215 ASSERT3U(data->zcd_txg, !=, 0);
4217 (void) mutex_enter(&zcl.zcl_callbacks_lock);
4219 /* See if this cb was called more quickly */
4220 if ((synced_txg - data->zcd_txg) < zc_min_txg_delay)
4221 zc_min_txg_delay = synced_txg - data->zcd_txg;
4223 /* Remove our callback from the list */
4224 list_remove(&zcl.zcl_callbacks, data);
4226 (void) mutex_exit(&zcl.zcl_callbacks_lock);
4228 umem_free(data, sizeof (ztest_cb_data_t));
4231 /* Allocate and initialize callback data structure */
4232 static ztest_cb_data_t *
4233 ztest_create_cb_data(objset_t *os, uint64_t txg)
4235 ztest_cb_data_t *cb_data;
4237 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4239 cb_data->zcd_txg = txg;
4240 cb_data->zcd_spa = dmu_objset_spa(os);
4241 list_link_init(&cb_data->zcd_node);
4247 * Commit callback test.
4250 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4252 objset_t *os = zd->zd_os;
4255 ztest_cb_data_t *cb_data[3], *tmp_cb;
4256 uint64_t old_txg, txg;
4259 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4260 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4262 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
4263 umem_free(od, sizeof(ztest_od_t));
4267 tx = dmu_tx_create(os);
4269 cb_data[0] = ztest_create_cb_data(os, 0);
4270 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4272 dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
4274 /* Every once in a while, abort the transaction on purpose */
if (ztest_random(100) == 0)
	error = -1;

if (!error)
	error = dmu_tx_assign(tx, TXG_NOWAIT);
4281 txg = error ? 0 : dmu_tx_get_txg(tx);
4283 cb_data[0]->zcd_txg = txg;
4284 cb_data[1] = ztest_create_cb_data(os, txg);
4285 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
* It's not a strict requirement for the registered callbacks
* to be called from inside dmu_tx_abort(), but that's what is
* supposed to happen in the current implementation, so we
* check for that.
4294 for (i = 0; i < 2; i++) {
4295 cb_data[i]->zcd_expected_err = ECANCELED;
VERIFY(!cb_data[i]->zcd_called);
}

dmu_tx_abort(tx);

for (i = 0; i < 2; i++) {
4302 VERIFY(cb_data[i]->zcd_called);
4303 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
umem_free(od, sizeof(ztest_od_t));
return;
}
4310 cb_data[2] = ztest_create_cb_data(os, txg);
4311 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4314 * Read existing data to make sure there isn't a future leak.
4316 VERIFY(0 == dmu_read(os, od->od_object, 0, sizeof (uint64_t),
4317 &old_txg, DMU_READ_PREFETCH));
4320 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4323 dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
4325 (void) mutex_enter(&zcl.zcl_callbacks_lock);
4328 * Since commit callbacks don't have any ordering requirement and since
4329 * it is theoretically possible for a commit callback to be called
4330 * after an arbitrary amount of time has elapsed since its txg has been
4331 * synced, it is difficult to reliably determine whether a commit
* callback hasn't been called due to high load or due to a flawed
* implementation.
4335 * In practice, we will assume that if after a certain number of txgs a
4336 * commit callback hasn't been called, then most likely there's an
* implementation bug.
4339 tmp_cb = list_head(&zcl.zcl_callbacks);
4340 if (tmp_cb != NULL &&
4341 tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) {
4342 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4343 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4347 * Let's find the place to insert our callbacks.
4349 * Even though the list is ordered by txg, it is possible for the
4350 * insertion point to not be the end because our txg may already be
4351 * quiescing at this point and other callbacks in the open txg
4352 * (from other objsets) may have sneaked in.
4354 tmp_cb = list_tail(&zcl.zcl_callbacks);
4355 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4356 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4358 /* Add the 3 callbacks to the list */
4359 for (i = 0; i < 3; i++) {
if (tmp_cb == NULL)
	list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
else
	list_insert_after(&zcl.zcl_callbacks, tmp_cb,
	    cb_data[i]);
4366 cb_data[i]->zcd_added = B_TRUE;
4367 VERIFY(!cb_data[i]->zcd_called);
4369 tmp_cb = cb_data[i];
(void) mutex_exit(&zcl.zcl_callbacks_lock);

dmu_tx_commit(tx);
4378 umem_free(od, sizeof(ztest_od_t));
4383 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4385 zfs_prop_t proplist[] = {
ZFS_PROP_CHECKSUM,
ZFS_PROP_COMPRESSION,
ZFS_PROP_COPIES,
ZFS_PROP_DEDUP
};
4391 ztest_shared_t *zs = ztest_shared;
4394 (void) rw_enter(&zs->zs_name_lock, RW_READER);
4396 for (p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4397 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4398 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4400 (void) rw_exit(&zs->zs_name_lock);
4405 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4407 ztest_shared_t *zs = ztest_shared;
4408 nvlist_t *props = NULL;
4410 (void) rw_enter(&zs->zs_name_lock, RW_READER);
4412 (void) ztest_spa_prop_set_uint64(zs, ZPOOL_PROP_DEDUPDITTO,
4413 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
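/*
 * This picks a dedupditto value in the half-open range
 * [ZIO_DEDUPDITTO_MIN, 2 * ZIO_DEDUPDITTO_MIN).
 */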
4415 VERIFY3U(spa_prop_get(zs->zs_spa, &props), ==, 0);
4417 if (zopt_verbose >= 6)
dump_nvlist(props, 4);

nvlist_free(props);
4422 (void) rw_exit(&zs->zs_name_lock);
4426 * Test snapshot hold/release and deferred destroy.
4429 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4432 objset_t *os = zd->zd_os;
4436 char clonename[100];
4438 char osname[MAXNAMELEN];
4440 (void) rw_enter(&ztest_shared->zs_name_lock, RW_READER);
4442 dmu_objset_name(os, osname);
4444 (void) snprintf(snapname, 100, "sh1_%llu", (u_longlong_t)id);
4445 (void) snprintf(fullname, 100, "%s@%s", osname, snapname);
(void) snprintf(clonename, 100, "%s/ch1_%llu", osname, (u_longlong_t)id);
4447 (void) snprintf(tag, 100, "tag_%llu", (u_longlong_t)id);
4450 * Clean up from any previous run.
4452 (void) dmu_objset_destroy(clonename, B_FALSE);
4453 (void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4454 (void) dmu_objset_destroy(fullname, B_FALSE);
4457 * Create snapshot, clone it, mark snap for deferred destroy,
4458 * destroy clone, verify snap was also destroyed.
error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
    FALSE, -1);
4463 if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
error = dmu_objset_hold(fullname, FTAG, &origin);
if (error)
	fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4474 error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0);
4475 dmu_objset_rele(origin, FTAG);
4477 if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_clone");
goto out;
}
fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
error = dmu_objset_destroy(fullname, B_TRUE);
if (error)
	fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
	    fullname, error);
error = dmu_objset_destroy(clonename, B_FALSE);
if (error)
	fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error);
4494 error = dmu_objset_hold(fullname, FTAG, &origin);
4495 if (error != ENOENT)
4496 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4499 * Create snapshot, add temporary hold, verify that we can't
4500 * destroy a held snapshot, mark for deferred destroy,
4501 * release hold, verify snapshot was destroyed.
error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
    FALSE, -1);
4506 if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE,
    B_TRUE, -1);
if (error)
	fatal(0, "dsl_dataset_user_hold(%s, %s) = %d", fullname, tag,
	    error);
4518 error = dmu_objset_destroy(fullname, B_FALSE);
4519 if (error != EBUSY) {
4520 fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
error = dmu_objset_destroy(fullname, B_TRUE);
if (error)
	fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
	    fullname, error);
error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
if (error)
	fatal(0, "dsl_dataset_user_release(%s, %s) = %d", fullname,
	    tag, error);
4534 VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);
out:
(void) rw_exit(&ztest_shared->zs_name_lock);
4541 * Inject random faults into the on-disk data.
4545 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4547 ztest_shared_t *zs = ztest_shared;
4548 spa_t *spa = zs->zs_spa;
4552 uint64_t bad = 0x1990c0ffeedecadeull;
4557 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4563 boolean_t islog = B_FALSE;
4565 path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
4566 pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
4568 mutex_enter(&zs->zs_vdev_lock);
4569 maxfaults = MAXFAULTS();
4570 leaves = MAX(zs->zs_mirrors, 1) * zopt_raidz;
4571 mirror_save = zs->zs_mirrors;
4572 mutex_exit(&zs->zs_vdev_lock);
4574 ASSERT(leaves >= 1);
4577 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4579 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4581 if (ztest_random(2) == 0) {
4583 * Inject errors on a normal data device or slog device.
4585 top = ztest_random_vdev_top(spa, B_TRUE);
4586 leaf = ztest_random(leaves) + zs->zs_splits;
4589 * Generate paths to the first leaf in this top-level vdev,
4590 * and to the random leaf we selected. We'll induce transient
4591 * write failures and random online/offline activity on leaf 0,
4592 * and we'll write random garbage to the randomly chosen leaf.
4594 (void) snprintf(path0, sizeof (path0), ztest_dev_template,
4595 zopt_dir, zopt_pool, top * leaves + zs->zs_splits);
4596 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
4597 zopt_dir, zopt_pool, top * leaves + leaf);
4599 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
if (vd0 != NULL && vd0->vdev_top->vdev_islog)
	islog = B_TRUE;
4603 if (vd0 != NULL && maxfaults != 1) {
4605 * Make vd0 explicitly claim to be unreadable,
4606 * or unwriteable, or reach behind its back
4607 * and close the underlying fd. We can do this if
4608 * maxfaults == 0 because we'll fail and reexecute,
4609 * and we can do it if maxfaults >= 2 because we'll
4610 * have enough redundancy. If maxfaults == 1, the
4611 * combination of this with injection of random data
4612 * corruption below exceeds the pool's fault tolerance.
4614 vdev_file_t *vf = vd0->vdev_tsd;
4616 if (vf != NULL && ztest_random(3) == 0) {
4617 (void) close(vf->vf_vnode->v_fd);
4618 vf->vf_vnode->v_fd = -1;
4619 } else if (ztest_random(2) == 0) {
4620 vd0->vdev_cant_read = B_TRUE;
4622 vd0->vdev_cant_write = B_TRUE;
4624 guid0 = vd0->vdev_guid;
4628 * Inject errors on an l2cache device.
4630 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4632 if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
goto out;
}
4636 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4637 guid0 = vd0->vdev_guid;
4638 (void) strcpy(path0, vd0->vdev_path);
4639 (void) strcpy(pathrand, vd0->vdev_path);
leaf = 0;
leaves = 1;
maxfaults = INT_MAX;	/* no limit on cache devices */
4646 spa_config_exit(spa, SCL_STATE, FTAG);
4649 * If we can tolerate two or more faults, or we're dealing
4650 * with a slog, randomly online/offline vd0.
4652 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4653 if (ztest_random(10) < 6) {
4654 int flags = (ztest_random(2) == 0 ?
4655 ZFS_OFFLINE_TEMPORARY : 0);
4658 * We have to grab the zs_name_lock as writer to
4659 * prevent a race between offlining a slog and
4660 * destroying a dataset. Offlining the slog will
4661 * grab a reference on the dataset which may cause
4662 * dmu_objset_destroy() to fail with EBUSY thus
4663 * leaving the dataset in an inconsistent state.
if (islog)
	(void) rw_enter(&ztest_shared->zs_name_lock,
	    RW_WRITER);
4669 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
if (islog)
	(void) rw_exit(&ztest_shared->zs_name_lock);
} else {
4674 (void) vdev_online(spa, guid0, 0, NULL);
4682 * We have at least single-fault tolerance, so inject data corruption.
4684 fd = open(pathrand, O_RDWR);
if (fd == -1)	/* we hit a gap in the device namespace */
	goto out;
4689 fsize = lseek(fd, 0, SEEK_END);
4691 while (--iters != 0) {
4692 offset = ztest_random(fsize / (leaves << bshift)) *
4693 (leaves << bshift) + (leaf << bshift) +
4694 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
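/*
 * Anatomy of the offset: pick a random stripe of size
 * (leaves << bshift), step to this leaf's (leaf << bshift) slice of
 * it, then pick an 8-byte-aligned word (& -8ULL) in the first half
 * of the slice, so the bad word overwrites exactly one 64-bit word.
 */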
4696 if (offset >= fsize)
4699 mutex_enter(&zs->zs_vdev_lock);
4700 if (mirror_save != zs->zs_mirrors) {
mutex_exit(&zs->zs_vdev_lock);
(void) close(fd);
goto out;
}
4706 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
fatal(1, "can't inject bad word at 0x%llx in %s",
    offset, pathrand);
4712 if (zopt_verbose >= 7)
4713 (void) printf("injected bad word into %s,"
4714 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
(void) close(fd);
out:
umem_free(path0, MAXPATHLEN);
4720 umem_free(pathrand, MAXPATHLEN);
4724 * Verify that DDT repair works as expected.
4727 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4729 ztest_shared_t *zs = ztest_shared;
4730 spa_t *spa = zs->zs_spa;
4731 objset_t *os = zd->zd_os;
4733 uint64_t object, blocksize, txg, pattern, psize;
4734 enum zio_checksum checksum = spa_dedup_checksum(spa);
4739 int copies = 2 * ZIO_DEDUPDITTO_MIN;
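/*
 * Writing 2 * ZIO_DEDUPDITTO_MIN identical copies guarantees we cross
 * the dedupditto threshold (set elsewhere in ztest to less than
 * 2 * ZIO_DEDUPDITTO_MIN), so the DDT keeps a ditto block that the
 * damaged copy can be repaired from below.
 */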
4742 blocksize = ztest_random_blocksize();
4743 blocksize = MIN(blocksize, 2048); /* because we write so many */
4745 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4746 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4748 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
4749 umem_free(od, sizeof(ztest_od_t));
4754 * Take the name lock as writer to prevent anyone else from changing
* the pool and dataset properties we need to maintain during this test.
4757 (void) rw_enter(&zs->zs_name_lock, RW_WRITER);
if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
    B_FALSE) != 0 ||
    ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
    B_FALSE) != 0) {
	(void) rw_exit(&zs->zs_name_lock);
	umem_free(od, sizeof(ztest_od_t));
	return;
}
4768 object = od[0].od_object;
4769 blocksize = od[0].od_blocksize;
4770 pattern = spa_guid(spa) ^ dmu_objset_fsid_guid(os);
4772 ASSERT(object != 0);
4774 tx = dmu_tx_create(os);
4775 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
	(void) rw_exit(&zs->zs_name_lock);
	umem_free(od, sizeof(ztest_od_t));
	return;
}
4784 * Write all the copies of our block.
4786 for (i = 0; i < copies; i++) {
4787 uint64_t offset = i * blocksize;
4788 VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db,
4789 DMU_READ_NO_PREFETCH) == 0);
4790 ASSERT(db->db_offset == offset);
4791 ASSERT(db->db_size == blocksize);
4792 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
4793 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
4794 dmu_buf_will_fill(db, tx);
4795 ztest_pattern_set(db->db_data, db->db_size, pattern);
4796 dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);

txg_wait_synced(spa_get_dsl(spa), txg);
4803 * Find out what block we got.
4805 VERIFY(dmu_buf_hold(os, object, 0, FTAG, &db,
4806 DMU_READ_NO_PREFETCH) == 0);
4807 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
4808 dmu_buf_rele(db, FTAG);
4811 * Damage the block. Dedup-ditto will save us when we read it later.
4813 psize = BP_GET_PSIZE(&blk);
4814 buf = zio_buf_alloc(psize);
4815 ztest_pattern_set(buf, psize, ~pattern);
4817 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
4818 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
4819 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
4821 zio_buf_free(buf, psize);
4823 (void) rw_exit(&zs->zs_name_lock);
4824 umem_free(od, sizeof(ztest_od_t));
4832 ztest_scrub(ztest_ds_t *zd, uint64_t id)
4834 ztest_shared_t *zs = ztest_shared;
4835 spa_t *spa = zs->zs_spa;
4837 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4838 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
4839 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4843 * Rename the pool to a different name and then rename it back.
4847 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
4849 ztest_shared_t *zs = ztest_shared;
4850 char *oldname, *newname;
4853 (void) rw_enter(&zs->zs_name_lock, RW_WRITER);
4855 oldname = zs->zs_pool;
4856 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
4857 (void) strcpy(newname, oldname);
4858 (void) strcat(newname, "_tmp");
4863 VERIFY3U(0, ==, spa_rename(oldname, newname));
4866 * Try to open it under the old name, which shouldn't exist
4868 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
4871 * Open it under the new name and make sure it's still the same spa_t.
4873 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
4875 ASSERT(spa == zs->zs_spa);
4876 spa_close(spa, FTAG);
4879 * Rename it back to the original
4881 VERIFY3U(0, ==, spa_rename(newname, oldname));
4884 * Make sure it can still be opened
4886 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
4888 ASSERT(spa == zs->zs_spa);
4889 spa_close(spa, FTAG);
4891 umem_free(newname, strlen(newname) + 1);
4893 (void) rw_exit(&zs->zs_name_lock);
4897 * Verify pool integrity by running zdb.
4900 ztest_run_zdb(char *pool)
	char zdb[MAXPATHLEN + MAXNAMELEN + 20];
	char zbuf[1024];
	char *bin;
	char *ztest;
	char *isa;
	int isalen;
	FILE *fp;
	int status;
4911 (void) realpath(getexecname(), zdb);
4913 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
	bin = strstr(zdb, "/usr/bin/");
	ztest = strstr(bin, "/ztest");
	isa = bin + 8;		/* everything after "/usr/bin", i.e. the ISA dir */
	isalen = ztest - isa;
	isa = strdup(isa);
	/* LINTED */
	(void) sprintf(bin,
	    "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
	    isalen,
	    isa,
	    zopt_verbose >= 3 ? "s" : "",
	    zopt_verbose >= 4 ? "v" : "",
	    spa_config_path,
	    pool);
	free(isa);
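	/*
	 * For example (hypothetical paths), on an amd64 build with -VVV
	 * this assembles something like:
	 *	/usr/sbin/amd64/zdb -bccs -U /tmp/zpool.cache ztest
	 */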
4930 if (zopt_verbose >= 5)
4931 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
4933 fp = popen(zdb, "r");
4935 while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
4936 if (zopt_verbose >= 3)
4937 (void) printf("%s", zbuf);
4939 status = pclose(fp);
	if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
		return;

	ztest_dump_core = 0;
	if (WIFEXITED(status))
		fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
	else
		fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
4952 ztest_walk_pool_directory(char *header)
{
	spa_t *spa = NULL;

	if (zopt_verbose >= 6)
4957 (void) printf("%s\n", header);
4959 mutex_enter(&spa_namespace_lock);
4960 while ((spa = spa_next(spa)) != NULL)
4961 if (zopt_verbose >= 6)
4962 (void) printf("\t%s\n", spa_name(spa));
4963 mutex_exit(&spa_namespace_lock);
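/*
 * Note on the walk above: spa_next(NULL) returns the first pool in the
 * namespace and each subsequent call returns the next one, so the entire
 * iteration must happen with spa_namespace_lock held.
 */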
4967 ztest_spa_import_export(char *oldname, char *newname)
	nvlist_t *config, *newconfig;
	uint64_t pool_guid;
	spa_t *spa;
4973 if (zopt_verbose >= 4) {
		(void) printf("import/export: old = %s, new = %s\n",
		    oldname, newname);
	}
4979 * Clean up from previous runs.
4981 (void) spa_destroy(newname);
4984 * Get the pool's configuration and guid.
4986 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
4989 * Kick off a scrub to tickle scrub/export races.
4991 if (ztest_random(2) == 0)
4992 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4994 pool_guid = spa_guid(spa);
4995 spa_close(spa, FTAG);
4997 ztest_walk_pool_directory("pools before export");
5002 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5004 ztest_walk_pool_directory("pools after export");
5009 newconfig = spa_tryimport(config);
5010 ASSERT(newconfig != NULL);
5011 nvlist_free(newconfig);
5014 * Import it under the new name.
5016 VERIFY3U(0, ==, spa_import(newname, config, NULL, 0));
5018 ztest_walk_pool_directory("pools after import");
5021 * Try to import it again -- should fail with EEXIST.
5023 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5026 * Try to import it under a different name -- should fail with EEXIST.
5028 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5031 * Verify that the pool is no longer visible under the old name.
5033 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5036 * Verify that we can open and close the pool using the new name.
5038 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5039 ASSERT(pool_guid == spa_guid(spa));
5040 spa_close(spa, FTAG);
5042 nvlist_free(config);
5046 ztest_resume(spa_t *spa)
5048 if (spa_suspended(spa) && zopt_verbose >= 6)
5049 (void) printf("resuming from suspended state\n");
5050 spa_vdev_state_enter(spa, SCL_NONE);
5051 vdev_clear(spa, NULL);
5052 (void) spa_vdev_state_exit(spa, NULL, 0);
5053 (void) zio_resume(spa);
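	/*
	 * Note the order above: vdev_clear() clears the error state and
	 * reopens the faulted vdevs under the spa_vdev_state lock, and only
	 * then does zio_resume() reissue the I/O that was suspended.
	 */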
5057 ztest_resume_thread(void *arg)
{
	spa_t *spa = arg;

	while (!ztest_exiting) {
		if (spa_suspended(spa))
			ztest_resume(spa);
		(void) poll(NULL, 0, 100);
	}
5075 ztest_deadman_alarm(int sig)
5077 fatal(0, "failed to complete within %d seconds of deadline", GRACE);
5081 ztest_execute(ztest_info_t *zi, uint64_t id)
5083 ztest_shared_t *zs = ztest_shared;
5084 ztest_ds_t *zd = &zs->zs_zd[id % zopt_datasets];
	hrtime_t functime = gethrtime();
	int i;
5088 for (i = 0; i < zi->zi_iters; i++)
5089 zi->zi_func(zd, id);
5091 functime = gethrtime() - functime;
5093 atomic_add_64(&zi->zi_call_count, 1);
5094 atomic_add_64(&zi->zi_call_time, functime);
5096 if (zopt_verbose >= 4) {
		Dl_info dli;

		(void) dladdr((void *)zi->zi_func, &dli);
5099 (void) printf("%6.2f sec in %s\n",
5100 (double)functime / NANOSEC, dli.dli_sname);
5105 ztest_thread(void *arg)
5107 uint64_t id = (uintptr_t)arg;
	ztest_shared_t *zs = ztest_shared;
	ztest_info_t *zi;
	hrtime_t now;
	uint64_t call_next;
5113 while ((now = gethrtime()) < zs->zs_thread_stop) {
5115 * See if it's time to force a crash.
		if (now > zs->zs_thread_kill)
			ztest_kill(zs);
5121 * If we're getting ENOSPC with some regularity, stop.
		if (zs->zs_enospc_count > 10)
			break;
5127 * Pick a random function to execute.
5129 zi = &zs->zs_info[ztest_random(ZTEST_FUNCS)];
5130 call_next = zi->zi_call_next;
5132 if (now >= call_next &&
5133 atomic_cas_64(&zi->zi_call_next, call_next, call_next +
5134 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next)
5135 ztest_execute(zi, id);
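		/*
		 * The atomic_cas_64() above acts as a lock-free ticket: of
		 * all the threads that observe now >= call_next, only the
		 * one that wins the CAS advances zi_call_next (by a random
		 * amount averaging zi_interval[0]) and runs the function,
		 * so each test fires at roughly its configured period no
		 * matter how many threads race on it.
		 */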
5144 ztest_dataset_name(char *dsname, char *pool, int d)
5146 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5150 ztest_dataset_destroy(ztest_shared_t *zs, int d)
	char name[MAXNAMELEN];
	int t;
5155 ztest_dataset_name(name, zs->zs_pool, d);
5157 if (zopt_verbose >= 3)
5158 (void) printf("Destroying %s to free up space\n", name);
5161 * Cleanup any non-standard clones and snapshots. In general,
5162 * ztest thread t operates on dataset (t % zopt_datasets),
5163 * so there may be more than one thing to clean up.
5165 for (t = d; t < zopt_threads; t += zopt_datasets)
5166 ztest_dsl_dataset_cleanup(name, t);
5168 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5169 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5173 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5175 uint64_t usedobjs, dirobjs, scratch;
5178 * ZTEST_DIROBJ is the object directory for the entire dataset.
5179 * Therefore, the number of objects in use should equal the
5180 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5181 * If not, we have an object leak.
5183 * Note that we can only check this in ztest_dataset_open(),
5184 * when the open-context and syncing-context values agree.
5185 * That's because zap_count() returns the open-context value,
5186 * while dmu_objset_space() returns the rootbp fill count.
5188 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5189 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5190 ASSERT3U(dirobjs + 1, ==, usedobjs);
5194 ztest_dataset_open(ztest_shared_t *zs, int d)
5196 ztest_ds_t *zd = &zs->zs_zd[d];
5197 uint64_t committed_seq = zd->zd_seq;
	objset_t *os;
	zilog_t *zilog;
	char name[MAXNAMELEN];
	int error;
5203 ztest_dataset_name(name, zs->zs_pool, d);
5205 (void) rw_enter(&zs->zs_name_lock, RW_READER);
5207 error = ztest_dataset_create(name);
5208 if (error == ENOSPC) {
5209 (void) rw_exit(&zs->zs_name_lock);
		ztest_record_enospc(FTAG);
		return (error);
	}
5213 ASSERT(error == 0 || error == EEXIST);
5215 VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0);
5216 (void) rw_exit(&zs->zs_name_lock);
5218 ztest_zd_init(zd, os);
5220 zilog = zd->zd_zilog;
5222 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5223 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5224 fatal(0, "missing log records: claimed %llu < committed %llu",
5225 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5227 ztest_dataset_dirobj_verify(zd);
5229 zil_replay(os, zd, ztest_replay_vector);
5231 ztest_dataset_dirobj_verify(zd);
5233 if (zopt_verbose >= 6)
5234 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
		    zd->zd_name,
		    (u_longlong_t)zilog->zl_parse_blk_count,
5237 (u_longlong_t)zilog->zl_parse_lr_count,
5238 (u_longlong_t)zilog->zl_replaying_seq);
5240 zilog = zil_open(os, ztest_get_data);
5242 if (zilog->zl_replaying_seq != 0 &&
5243 zilog->zl_replaying_seq < committed_seq)
5244 fatal(0, "missing log records: replayed %llu < committed %llu",
5245 zilog->zl_replaying_seq, committed_seq);
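	/*
	 * Taken together, the two fatal() checks above cover both halves of
	 * log recovery: zh_claim_lr_seq is what the pool claimed at open,
	 * zl_replaying_seq is what was actually replayed, and neither may
	 * fall short of the sequence this dataset had committed.
	 */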
5251 ztest_dataset_close(ztest_shared_t *zs, int d)
5253 ztest_ds_t *zd = &zs->zs_zd[d];
5255 zil_close(zd->zd_zilog);
5256 dmu_objset_rele(zd->zd_os, zd);
5262 * Kick off threads to run tests on all datasets in parallel.
5265 ztest_run(ztest_shared_t *zs)
	kthread_t *resume_thread;
	spa_t *spa;
	kt_did_t *tid;
	uint64_t object;
	int d, t, error;
5274 ztest_exiting = B_FALSE;
5277 * Initialize parent/child shared state.
5279 mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
5280 rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL);
5282 zs->zs_thread_start = gethrtime();
5283 zs->zs_thread_stop = zs->zs_thread_start + zopt_passtime * NANOSEC;
5284 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5285 zs->zs_thread_kill = zs->zs_thread_stop;
5286 if (ztest_random(100) < zopt_killrate)
5287 zs->zs_thread_kill -= ztest_random(zopt_passtime * NANOSEC);
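	/*
	 * In other words: with probability zopt_killrate percent, the kill
	 * time is pulled back by a uniformly random amount, so the SIGKILL
	 * can land anywhere within the pass rather than only at its end.
	 */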
5289 mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);
5291 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5292 offsetof(ztest_cb_data_t, zcd_node));
5297 kernel_init(FREAD | FWRITE);
5298 VERIFY(spa_open(zs->zs_pool, &spa, FTAG) == 0);
5301 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
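	/*
	 * This matches the copies count used by ztest_ddt_repair(), so the
	 * blocks written there cross the dedup-ditto threshold and gain the
	 * extra copy that the damage/repair cycle relies on.
	 */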
5304 * We don't expect the pool to suspend unless maxfaults == 0,
5305 * in which case ztest_fault_inject() temporarily takes away
5306 * the only valid replica.
5308 if (MAXFAULTS() == 0)
5309 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5311 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5314 * Create a thread to periodically resume suspended I/O.
5316 VERIFY3P((resume_thread = thread_create(NULL, 0, ztest_resume_thread,
5317 spa, TS_RUN, NULL, 0, 0)), !=, NULL);
5320 * Set a deadman alarm to abort() if we hang.
5322 signal(SIGALRM, ztest_deadman_alarm);
5323 alarm((zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + GRACE);
	 * Verify that we can safely inquire about any object,
5327 * whether it's allocated or not. To make it interesting,
5328 * we probe a 5-wide window around each power of two.
5329 * This hits all edge cases, including zero and the max.
5331 for (t = 0; t < 64; t++) {
5332 for (d = -5; d <= 5; d++) {
5333 error = dmu_object_info(spa->spa_meta_objset,
5334 (1ULL << t) + d, NULL);
			ASSERT(error == 0 || error == ENOENT ||
			    error == EINVAL);
		}
	}
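	/*
	 * For example, t == 0 with d == -5 wraps (1ULL << 0) + d around to
	 * just below UINT64_MAX, which is how the "max" edge case mentioned
	 * above is reached; ENOENT covers unallocated holes and EINVAL
	 * covers object numbers outside the objset entirely.
	 */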
5341 * If we got any ENOSPC errors on the previous run, destroy something.
5343 if (zs->zs_enospc_count != 0) {
5344 int d = ztest_random(zopt_datasets);
5345 ztest_dataset_destroy(zs, d);
5347 zs->zs_enospc_count = 0;
5349 tid = umem_zalloc(zopt_threads * sizeof (kt_did_t), UMEM_NOFAIL);
5351 if (zopt_verbose >= 4)
5352 (void) printf("starting main threads...\n");
5355 * Kick off all the tests that run in parallel.
5357 for (t = 0; t < zopt_threads; t++) {
		kthread_t *thread;

		if (t < zopt_datasets && ztest_dataset_open(zs, t) != 0)
			return;
5363 VERIFY3P(thread = thread_create(NULL, 0, ztest_thread,
5364 (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0), !=, NULL);
5365 tid[t] = thread->t_tid;
5369 * Wait for all of the tests to complete. We go in reverse order
5370 * so we don't close datasets while threads are still using them.
5372 for (t = zopt_threads - 1; t >= 0; t--) {
5373 thread_join(tid[t]);
5374 if (t < zopt_datasets)
5375 ztest_dataset_close(zs, t);
5378 txg_wait_synced(spa_get_dsl(spa), 0);
5380 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5381 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5383 umem_free(tid, zopt_threads * sizeof (kt_did_t));
5385 /* Kill the resume thread */
5386 ztest_exiting = B_TRUE;
5387 thread_join(resume_thread->t_tid);
5391 * Right before closing the pool, kick off a bunch of async I/O;
5392 * spa_close() should wait for it to complete.
5394 for (object = 1; object < 50; object++)
5395 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5397 /* Verify that at least one commit cb was called in a timely fashion */
5398 if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
5399 VERIFY3U(zc_min_txg_delay, ==, 0);
5401 spa_close(spa, FTAG);
5404 * Verify that we can loop over all pools.
5406 mutex_enter(&spa_namespace_lock);
5407 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5408 if (zopt_verbose > 3)
5409 (void) printf("spa_next: found %s\n", spa_name(spa));
5410 mutex_exit(&spa_namespace_lock);
	 * Verify that we can export the pool and reimport it under a
	 * different name.
	 */
5416 if (ztest_random(2) == 0) {
5417 char name[MAXNAMELEN];
5418 (void) snprintf(name, MAXNAMELEN, "%s_import", zs->zs_pool);
5419 ztest_spa_import_export(zs->zs_pool, name);
5420 ztest_spa_import_export(name, zs->zs_pool);
5425 list_destroy(&zcl.zcl_callbacks);
5427 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5429 (void) rwlock_destroy(&zs->zs_name_lock);
5430 (void) _mutex_destroy(&zs->zs_vdev_lock);
5434 ztest_freeze(ztest_shared_t *zs)
	ztest_ds_t *zd = &zs->zs_zd[0];
	spa_t *spa;
	uint64_t numloops = 0;
5440 if (zopt_verbose >= 3)
5441 (void) printf("testing spa_freeze()...\n");
5443 kernel_init(FREAD | FWRITE);
5444 VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG));
5445 VERIFY3U(0, ==, ztest_dataset_open(zs, 0));
5448 * Force the first log block to be transactionally allocated.
5449 * We have to do this before we freeze the pool -- otherwise
5450 * the log chain won't be anchored.
5452 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5453 ztest_dmu_object_alloc_free(zd, 0);
5454 zil_commit(zd->zd_zilog, 0);
5457 txg_wait_synced(spa_get_dsl(spa), 0);
5460 * Freeze the pool. This stops spa_sync() from doing anything,
	 * so that the only way to record changes from now on is the ZIL.
	 */
	spa_freeze(spa);
5466 * Run tests that generate log records but don't alter the pool config
5467 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5468 * We do a txg_wait_synced() after each iteration to force the txg
5469 * to increase well beyond the last synced value in the uberblock.
5470 * The ZIL should be OK with that.
5472 while (ztest_random(10) != 0 && numloops++ < zopt_maxloops) {
5473 ztest_dmu_write_parallel(zd, 0);
5474 ztest_dmu_object_alloc_free(zd, 0);
5475 txg_wait_synced(spa_get_dsl(spa), 0);
5479 * Commit all of the changes we just generated.
5481 zil_commit(zd->zd_zilog, 0);
5482 txg_wait_synced(spa_get_dsl(spa), 0);
5485 * Close our dataset and close the pool.
5487 ztest_dataset_close(zs, 0);
5488 spa_close(spa, FTAG);
5492 * Open and close the pool and dataset to induce log replay.
5494 kernel_init(FREAD | FWRITE);
5495 VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG));
5496 VERIFY3U(0, ==, ztest_dataset_open(zs, 0));
5497 ztest_dataset_close(zs, 0);
5498 spa_close(spa, FTAG);
5503 print_time(hrtime_t t, char *timebuf)
5505 hrtime_t s = t / NANOSEC;
5506 hrtime_t m = s / 60;
5507 hrtime_t h = m / 60;
	hrtime_t d = h / 24;

	s -= m * 60;
	m -= h * 60;
	h -= d * 24;

	timebuf[0] = '\0';

	if (d)
		(void) sprintf(timebuf,
		    "%llud%02lluh%02llum%02llus", d, h, m, s);
	else if (h)
		(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
	else if (m)
		(void) sprintf(timebuf, "%llum%02llus", m, s);
	else
		(void) sprintf(timebuf, "%llus", s);
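/*
 * For example, print_time(90061 * NANOSEC, buf) yields "1d01h01m01s",
 * since 90,061 seconds is one day, one hour, one minute, and one second.
 */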
5528 make_random_props(void)
{
	nvlist_t *props;

	if (ztest_random(2) == 0)
		return (NULL);
5535 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5536 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5538 (void) printf("props:\n");
	dump_nvlist(props, 4);

	return (props);
5545 * Create a storage pool with the given name and initial vdev size.
5546 * Then test spa_freeze() functionality.
5549 ztest_init(ztest_shared_t *zs)
	spa_t *spa;
	nvlist_t *nvroot, *props;
5554 mutex_init(&zs->zs_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
5555 rw_init(&zs->zs_name_lock, NULL, RW_DEFAULT, NULL);
5557 kernel_init(FREAD | FWRITE);
5560 * Create the storage pool.
5562 (void) spa_destroy(zs->zs_pool);
5563 ztest_shared->zs_vdev_next_leaf = 0;
5565 zs->zs_mirrors = zopt_mirrors;
5566 nvroot = make_vdev_root(NULL, NULL, zopt_vdev_size, 0,
5567 0, zopt_raidz, zs->zs_mirrors, 1);
5568 props = make_random_props();
5569 VERIFY3U(0, ==, spa_create(zs->zs_pool, nvroot, props, NULL, NULL));
5570 nvlist_free(nvroot);
5572 VERIFY3U(0, ==, spa_open(zs->zs_pool, &spa, FTAG));
5573 metaslab_sz = 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5574 spa_close(spa, FTAG);
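	/*
	 * metaslab_sz is saved here so that each pass of the main loop can
	 * choose a random metaslab_df_alloc_threshold bounded by a fraction
	 * of one metaslab (see the ztest_random(metaslab_sz / 4) call in
	 * main()).
	 */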
5578 ztest_run_zdb(zs->zs_pool);
	ztest_freeze(zs);

	ztest_run_zdb(zs->zs_pool);
5584 (void) rw_destroy(&zs->zs_name_lock);
5585 (void) mutex_destroy(&zs->zs_vdev_lock);
5589 main(int argc, char **argv)
5601 (void) setvbuf(stdout, NULL, _IOLBF, 0);
5603 ztest_random_fd = open("/dev/urandom", O_RDONLY);
5605 process_options(argc, argv);
5607 /* Override location of zpool.cache */
5608 VERIFY(asprintf((char **)&spa_config_path, "%s/zpool.cache",
5612 * Blow away any existing copy of zpool.cache
5615 (void) remove(spa_config_path);
5617 shared_size = sizeof (*zs) + zopt_datasets * sizeof (ztest_ds_t);
5619 zs = ztest_shared = (void *)mmap(0,
5620 P2ROUNDUP(shared_size, getpagesize()),
5621 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
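	/*
	 * MAP_SHARED | MAP_ANON keeps this mapping visible across the
	 * fork() below, so the parent can read the child's counters
	 * (ENOSPC count, space usage, per-function call stats) even after
	 * the child has been SIGKILLed.
	 */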
5623 if (zopt_verbose >= 1) {
5624 (void) printf("%llu vdevs, %d datasets, %d threads,"
5625 " %llu seconds...\n",
5626 (u_longlong_t)zopt_vdevs, zopt_datasets, zopt_threads,
5627 (u_longlong_t)zopt_time);
5631 * Create and initialize our storage pool.
5633 for (i = 1; i <= zopt_init; i++) {
5634 bzero(zs, sizeof (ztest_shared_t));
5635 if (zopt_verbose >= 3 && zopt_init != 1)
5636 (void) printf("ztest_init(), pass %d\n", i);
		zs->zs_pool = zopt_pool;
		ztest_init(zs);
	}
5641 zs->zs_pool = zopt_pool;
5642 zs->zs_proc_start = gethrtime();
5643 zs->zs_proc_stop = zs->zs_proc_start + zopt_time * NANOSEC;
5645 for (f = 0; f < ZTEST_FUNCS; f++) {
5646 zi = &zs->zs_info[f];
5647 *zi = ztest_info[f];
5648 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
5649 zi->zi_call_next = UINT64_MAX;
		else
			zi->zi_call_next = zs->zs_proc_start +
			    ztest_random(2 * zi->zi_interval[0] + 1);
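		/*
		 * Drawing the first call time uniformly from
		 * [0, 2 * zi_interval[0]] gives each function a mean period
		 * of zi_interval[0] while decorrelating the functions from
		 * one another at startup.
		 */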
5656 * Run the tests in a loop. These tests include fault injection
5657 * to verify that self-healing data works, and forced crashes
5658 * to verify that we never lose on-disk consistency.
5660 while (gethrtime() < zs->zs_proc_stop) {
5665 * Initialize the workload counters for each function.
5667 for (f = 0; f < ZTEST_FUNCS; f++) {
5668 zi = &zs->zs_info[f];
5669 zi->zi_call_count = 0;
5670 zi->zi_call_time = 0;
5673 /* Set the allocation switch size */
5674 metaslab_df_alloc_threshold = ztest_random(metaslab_sz / 4) + 1;
		pid = fork();

		if (pid == -1)
			fatal(1, "fork failed");
5681 if (pid == 0) { /* child */
5682 struct rlimit rl = { 1024, 1024 };
5683 (void) setrlimit(RLIMIT_NOFILE, &rl);
5684 (void) enable_extended_FILE_stdio(-1, -1);
			ztest_run(zs);
			exit(0);
		}

		while (waitpid(pid, &status, 0) != pid)
			continue;
5692 if (WIFEXITED(status)) {
			if (WEXITSTATUS(status) != 0) {
				(void) fprintf(stderr,
				    "child exited with code %d\n",
				    WEXITSTATUS(status));
				exit(2);
			}
		} else if (WIFSIGNALED(status)) {
			if (WTERMSIG(status) != SIGKILL) {
				(void) fprintf(stderr,
				    "child died with signal %d\n",
				    WTERMSIG(status));
				exit(3);
			}
			kills++;
		} else {
			(void) fprintf(stderr, "something strange happened "
			    "to child\n");
			exit(4);
		}

		iters++;
5715 if (zopt_verbose >= 1) {
5716 hrtime_t now = gethrtime();
5718 now = MIN(now, zs->zs_proc_stop);
5719 print_time(zs->zs_proc_stop - now, timebuf);
5720 nicenum(zs->zs_space, numbuf);
5722 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
5723 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
			    iters,
			    WIFEXITED(status) ? "Complete" : "SIGKILL",
			    (u_longlong_t)zs->zs_enospc_count,
			    100.0 * zs->zs_alloc / zs->zs_space,
			    numbuf,
			    100.0 * (now - zs->zs_proc_start) /
			    (zopt_time * NANOSEC), timebuf);
		}
5733 if (zopt_verbose >= 2) {
5734 (void) printf("\nWorkload summary:\n\n");
5735 (void) printf("%7s %9s %s\n",
5736 "Calls", "Time", "Function");
5737 (void) printf("%7s %9s %s\n",
5738 "-----", "----", "--------");
5739 for (f = 0; f < ZTEST_FUNCS; f++) {
				Dl_info dli;

				zi = &zs->zs_info[f];
5743 print_time(zi->zi_call_time, timebuf);
5744 (void) dladdr((void *)zi->zi_func, &dli);
5745 (void) printf("%7llu %9s %s\n",
				    (u_longlong_t)zi->zi_call_count, timebuf,
				    dli.dli_sname);
			}
			(void) printf("\n");
5753 * It's possible that we killed a child during a rename test,
5754 * in which case we'll have a 'ztest_tmp' pool lying around
5755 * instead of 'ztest'. Do a blind rename in case this happened.
		kernel_init(FREAD);
		if (spa_open(zopt_pool, &spa, FTAG) == 0) {
			spa_close(spa, FTAG);
		} else {
			char tmpname[MAXNAMELEN];
			kernel_fini();
			kernel_init(FREAD | FWRITE);
			(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
			    zopt_pool);
			(void) spa_rename(tmpname, zopt_pool);
		}
		kernel_fini();
5770 ztest_run_zdb(zopt_pool);
5773 if (zopt_verbose >= 1) {
5774 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
5775 kills, iters - kills, (100.0 * kills) / MAX(1, iters));