4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
26 * Copyright (c) 2013 Steven Hartland. All rights reserved.
30 * The objective of this program is to provide a DMU/ZAP/SPA stress test
31 * that runs entirely in userland, is easy to use, and easy to extend.
33 * The overall design of the ztest program is as follows:
35 * (1) For each major functional area (e.g. adding vdevs to a pool,
36 * creating and destroying datasets, reading and writing objects, etc)
37 * we have a simple routine to test that functionality. These
38 * individual routines do not have to do anything "stressful".
40 * (2) We turn these simple functionality tests into a stress test by
41 * running them all in parallel, with as many threads as desired,
42 * and spread across as many datasets, objects, and vdevs as desired.
44 * (3) While all this is happening, we inject faults into the pool to
45 * verify that self-healing data really works.
47 * (4) Every time we open a dataset, we change its checksum and compression
48 * functions. Thus even individual objects vary from block to block
49 * in which checksum they use and whether they're compressed.
51 * (5) To verify that we never lose on-disk consistency after a crash,
52 * we run the entire test in a child of the main process.
53 * At random times, the child self-immolates with a SIGKILL.
54 * This is the software equivalent of pulling the power cord.
55 * The parent then runs the test again, using the existing
56 * storage pool, as many times as desired. If backwards compatibility
57 * testing is enabled, ztest will sometimes run the "older" version
58 * of ztest after a SIGKILL.
60 * (6) To verify that we don't have future leaks or temporal incursions,
61 * many of the functional tests record the transaction group number
62 * as part of their data. When reading old data, they verify that
63 * the transaction group number is less than the current, open txg.
64 * If you add a new test, please do this if applicable.
66 * When run with no arguments, ztest runs for about five minutes and
67 * produces no output if successful. To get a little bit of information,
68 * specify -V. To get more information, specify -VV, and so on.
70 * To turn this into an overnight stress test, use -T to specify run time.
72 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
73 * to increase the pool capacity, fanout, and overall stress level.
75 * Use the -k option to set the desired frequency of kills.
77 * When ztest invokes itself, it passes all relevant information through a
78 * temporary file which is mmap-ed in the child process. This allows shared
79 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
80 * stored at offset 0 of this file and contains information on the size and
81 * number of shared structures in the file. The information stored in this file
82 * must remain backwards compatible with older versions of ztest so that
83 * ztest can invoke them during backwards compatibility testing (-B).
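/*
 * Illustrative sketch (not part of the build): per item (6) above, a new
 * test should stamp its writes with a block tag so that later reads can be
 * verified.  The name ztest_example_write and its parameters are
 * hypothetical; the calls mirror the ZTEST_IO_WRITE_TAG case of ztest_io()
 * below.
 *
 *	static void
 *	ztest_example_write(ztest_ds_t *zd, uint64_t object, uint64_t offset)
 *	{
 *		ztest_block_tag_t wbt;
 *
 *		ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
 *		(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
 *	}
 */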
86 #include <sys/zfs_context.h>
92 #include <sys/dmu_objset.h>
98 #include <sys/resource.h>
101 #include <sys/zil_impl.h>
102 #include <sys/vdev_impl.h>
103 #include <sys/vdev_file.h>
104 #include <sys/spa_impl.h>
105 #include <sys/metaslab_impl.h>
106 #include <sys/dsl_prop.h>
107 #include <sys/dsl_dataset.h>
108 #include <sys/dsl_destroy.h>
109 #include <sys/dsl_scan.h>
110 #include <sys/zio_checksum.h>
111 #include <sys/refcount.h>
112 #include <sys/zfeature.h>
113 #include <sys/dsl_userhold.h>
115 #include <stdio_ext.h>
124 #include <sys/fs/zfs.h>
125 #include <libnvpair.h>
127 static int ztest_fd_data = -1;
128 static int ztest_fd_rand = -1;
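/*
 * Header stored at offset 0 of the mmap-ed shared file; it records the
 * size and count of each shared structure that follows (options,
 * per-function call state, per-dataset state).
 */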
130 typedef struct ztest_shared_hdr {
131 uint64_t zh_hdr_size;
132 uint64_t zh_opts_size;
134 uint64_t zh_stats_size;
135 uint64_t zh_stats_count;
137 uint64_t zh_ds_count;
138 } ztest_shared_hdr_t;
140 static ztest_shared_hdr_t *ztest_shared_hdr;
142 typedef struct ztest_shared_opts {
143 char zo_pool[MAXNAMELEN];
144 char zo_dir[MAXNAMELEN];
145 char zo_alt_ztest[MAXNAMELEN];
146 char zo_alt_libpath[MAXNAMELEN];
148 uint64_t zo_vdevtime;
156 uint64_t zo_passtime;
157 uint64_t zo_killrate;
161 uint64_t zo_maxloops;
162 uint64_t zo_metaslab_gang_bang;
163 } ztest_shared_opts_t;
165 static const ztest_shared_opts_t ztest_opts_defaults = {
166 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
167 .zo_dir = { '/', 't', 'm', 'p', '\0' },
168 .zo_alt_ztest = { '\0' },
169 .zo_alt_libpath = { '\0' },
171 .zo_ashift = SPA_MINBLOCKSHIFT,
174 .zo_raidz_parity = 1,
175 .zo_vdev_size = SPA_MINDEVSIZE,
178 .zo_passtime = 60, /* 60 seconds */
179 .zo_killrate = 70, /* 70% kill rate */
182 .zo_time = 300, /* 5 minutes */
183 .zo_maxloops = 50, /* max loops during spa_freeze() */
184 .zo_metaslab_gang_bang = 32 << 10
187 extern uint64_t metaslab_gang_bang;
188 extern uint64_t metaslab_df_alloc_threshold;
190 static ztest_shared_opts_t *ztest_shared_opts;
191 static ztest_shared_opts_t ztest_opts;
193 typedef struct ztest_shared_ds {
197 static ztest_shared_ds_t *ztest_shared_ds;
198 #define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
200 #define BT_MAGIC 0x123456789abcdefULL
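/*
 * Maximum number of simultaneous injected faults the pool can tolerate:
 * (mirror copies) * (raidz parity + 1) - 1.
 */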
201 #define MAXFAULTS() \
202 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
206 ZTEST_IO_WRITE_PATTERN,
207 ZTEST_IO_WRITE_ZEROES,
214 typedef struct ztest_block_tag {
224 typedef struct bufwad {
231 * XXX -- fix zfs range locks to be generic so we can use them here.
253 #define ZTEST_RANGE_LOCKS 64
254 #define ZTEST_OBJECT_LOCKS 64
257 * Object descriptor. Used as a template for object lookup/create/remove.
259 typedef struct ztest_od {
262 dmu_object_type_t od_type;
263 dmu_object_type_t od_crtype;
264 uint64_t od_blocksize;
265 uint64_t od_crblocksize;
268 char od_name[MAXNAMELEN];
274 typedef struct ztest_ds {
275 ztest_shared_ds_t *zd_shared;
277 rwlock_t zd_zilog_lock;
279 ztest_od_t *zd_od; /* debugging aid */
280 char zd_name[MAXNAMELEN];
281 mutex_t zd_dirobj_lock;
282 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
283 rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
287 * Per-iteration state.
289 typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
291 typedef struct ztest_info {
292 ztest_func_t *zi_func; /* test function */
293 uint64_t zi_iters; /* iterations per execution */
294 uint64_t *zi_interval; /* execute every <interval> seconds */
297 typedef struct ztest_shared_callstate {
298 uint64_t zc_count; /* per-pass count */
299 uint64_t zc_time; /* per-pass time */
300 uint64_t zc_next; /* next time to call this function */
301 } ztest_shared_callstate_t;
303 static ztest_shared_callstate_t *ztest_shared_callstate;
304 #define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
307 * Note: these aren't static because we want dladdr() to work.
309 ztest_func_t ztest_dmu_read_write;
310 ztest_func_t ztest_dmu_write_parallel;
311 ztest_func_t ztest_dmu_object_alloc_free;
312 ztest_func_t ztest_dmu_commit_callbacks;
313 ztest_func_t ztest_zap;
314 ztest_func_t ztest_zap_parallel;
315 ztest_func_t ztest_zil_commit;
316 ztest_func_t ztest_zil_remount;
317 ztest_func_t ztest_dmu_read_write_zcopy;
318 ztest_func_t ztest_dmu_objset_create_destroy;
319 ztest_func_t ztest_dmu_prealloc;
320 ztest_func_t ztest_fzap;
321 ztest_func_t ztest_dmu_snapshot_create_destroy;
322 ztest_func_t ztest_dsl_prop_get_set;
323 ztest_func_t ztest_spa_prop_get_set;
324 ztest_func_t ztest_spa_create_destroy;
325 ztest_func_t ztest_fault_inject;
326 ztest_func_t ztest_ddt_repair;
327 ztest_func_t ztest_dmu_snapshot_hold;
328 ztest_func_t ztest_spa_rename;
329 ztest_func_t ztest_scrub;
330 ztest_func_t ztest_dsl_dataset_promote_busy;
331 ztest_func_t ztest_vdev_attach_detach;
332 ztest_func_t ztest_vdev_LUN_growth;
333 ztest_func_t ztest_vdev_add_remove;
334 ztest_func_t ztest_vdev_aux_add_remove;
335 ztest_func_t ztest_split_pool;
336 ztest_func_t ztest_reguid;
337 ztest_func_t ztest_spa_upgrade;
339 uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
340 uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
341 uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
342 uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
343 uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
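/*
 * Each entry pairs a test function with the number of iterations to run
 * per call and a pointer to the interval (one of the zopt_* values above,
 * or zo_vdevtime) at which it should be invoked.
 */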
345 ztest_info_t ztest_info[] = {
346 { ztest_dmu_read_write, 1, &zopt_always },
347 { ztest_dmu_write_parallel, 10, &zopt_always },
348 { ztest_dmu_object_alloc_free, 1, &zopt_always },
349 { ztest_dmu_commit_callbacks, 1, &zopt_always },
350 { ztest_zap, 30, &zopt_always },
351 { ztest_zap_parallel, 100, &zopt_always },
352 { ztest_split_pool, 1, &zopt_always },
353 { ztest_zil_commit, 1, &zopt_incessant },
354 { ztest_zil_remount, 1, &zopt_sometimes },
355 { ztest_dmu_read_write_zcopy, 1, &zopt_often },
356 { ztest_dmu_objset_create_destroy, 1, &zopt_often },
357 { ztest_dsl_prop_get_set, 1, &zopt_often },
358 { ztest_spa_prop_get_set, 1, &zopt_sometimes },
360 { ztest_dmu_prealloc, 1, &zopt_sometimes },
362 { ztest_fzap, 1, &zopt_sometimes },
363 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes },
364 { ztest_spa_create_destroy, 1, &zopt_sometimes },
365 { ztest_fault_inject, 1, &zopt_sometimes },
366 { ztest_ddt_repair, 1, &zopt_sometimes },
367 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes },
368 { ztest_reguid, 1, &zopt_sometimes },
369 { ztest_spa_rename, 1, &zopt_rarely },
370 { ztest_scrub, 1, &zopt_rarely },
371 { ztest_spa_upgrade, 1, &zopt_rarely },
372 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely },
373 { ztest_vdev_attach_detach, 1, &zopt_sometimes },
374 { ztest_vdev_LUN_growth, 1, &zopt_rarely },
375 { ztest_vdev_add_remove, 1,
376 &ztest_opts.zo_vdevtime },
377 { ztest_vdev_aux_add_remove, 1,
378 &ztest_opts.zo_vdevtime },
381 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
384 * The following struct is used to hold a list of uncalled commit callbacks.
385 * The callbacks are ordered by txg number.
387 typedef struct ztest_cb_list {
388 mutex_t zcl_callbacks_lock;
389 list_t zcl_callbacks;
393 * Stuff we need to share writably between parent and child.
395 typedef struct ztest_shared {
396 boolean_t zs_do_init;
397 hrtime_t zs_proc_start;
398 hrtime_t zs_proc_stop;
399 hrtime_t zs_thread_start;
400 hrtime_t zs_thread_stop;
401 hrtime_t zs_thread_kill;
402 uint64_t zs_enospc_count;
403 uint64_t zs_vdev_next_leaf;
404 uint64_t zs_vdev_aux;
409 uint64_t zs_metaslab_sz;
410 uint64_t zs_metaslab_df_alloc_threshold;
414 #define ID_PARALLEL -1ULL
416 static char ztest_dev_template[] = "%s/%s.%llua";
417 static char ztest_aux_template[] = "%s/%s.%s.%llu";
418 ztest_shared_t *ztest_shared;
420 static spa_t *ztest_spa = NULL;
421 static ztest_ds_t *ztest_ds;
423 static mutex_t ztest_vdev_lock;
426 * The ztest_name_lock protects the pool and dataset namespace used by
427 * the individual tests. To modify the namespace, consumers must grab
428 * this lock as writer. Grabbing the lock as reader will ensure that the
429 * namespace does not change while the lock is held.
431 static rwlock_t ztest_name_lock;
433 static boolean_t ztest_dump_core = B_TRUE;
434 static boolean_t ztest_exiting;
436 /* Global commit callback list */
437 static ztest_cb_list_t zcl;
440 ZTEST_META_DNODE = 0,
445 static void usage(boolean_t) __NORETURN;
448 * These libumem hooks provide a reasonable set of defaults for the allocator's
449 * debugging facilities.
454 return ("default,verbose"); /* $UMEM_DEBUG setting */
458 _umem_logging_init(void)
460 return ("fail,contents"); /* $UMEM_LOGGING setting */
463 #define FATAL_MSG_SZ 1024
468 fatal(int do_perror, char *message, ...)
471 int save_errno = errno;
472 char buf[FATAL_MSG_SZ];
474 (void) fflush(stdout);
476 va_start(args, message);
477 (void) sprintf(buf, "ztest: ");
479 (void) vsprintf(buf + strlen(buf), message, args);
482 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
483 ": %s", strerror(save_errno));
485 (void) fprintf(stderr, "%s\n", buf);
486 fatal_msg = buf; /* to ease debugging */
493 str2shift(const char *buf)
495 const char *ends = "BKMGTPEZ";
500 for (i = 0; i < strlen(ends); i++) {
501 if (toupper(buf[0]) == ends[i])
504 if (i == strlen(ends)) {
505 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
509 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
512 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
518 nicenumtoull(const char *buf)
523 val = strtoull(buf, &end, 0);
525 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
527 } else if (end[0] == '.') {
528 double fval = strtod(buf, &end);
529 fval *= pow(2, str2shift(end));
530 if (fval > UINT64_MAX) {
531 (void) fprintf(stderr, "ztest: value too large: %s\n",
535 val = (uint64_t)fval;
537 int shift = str2shift(end);
538 if (shift >= 64 || (val << shift) >> shift != val) {
539 (void) fprintf(stderr, "ztest: value too large: %s\n",
549 usage(boolean_t requested)
551 const ztest_shared_opts_t *zo = &ztest_opts_defaults;
553 char nice_vdev_size[10];
554 char nice_gang_bang[10];
555 FILE *fp = requested ? stdout : stderr;
557 nicenum(zo->zo_vdev_size, nice_vdev_size);
558 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);
560 (void) fprintf(fp, "Usage: %s\n"
561 "\t[-v vdevs (default: %llu)]\n"
562 "\t[-s size_of_each_vdev (default: %s)]\n"
563 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
564 "\t[-m mirror_copies (default: %d)]\n"
565 "\t[-r raidz_disks (default: %d)]\n"
566 "\t[-R raidz_parity (default: %d)]\n"
567 "\t[-d datasets (default: %d)]\n"
568 "\t[-t threads (default: %d)]\n"
569 "\t[-g gang_block_threshold (default: %s)]\n"
570 "\t[-i init_count (default: %d)] initialize pool i times\n"
571 "\t[-k kill_percentage (default: %llu%%)]\n"
572 "\t[-p pool_name (default: %s)]\n"
573 "\t[-f dir (default: %s)] file directory for vdev files\n"
574 "\t[-V] verbose (use multiple times for ever more blather)\n"
575 "\t[-E] use existing pool instead of creating new one\n"
576 "\t[-T time (default: %llu sec)] total run time\n"
577 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
578 "\t[-P passtime (default: %llu sec)] time per pass\n"
579 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
580 "\t[-h] (print help)\n"
583 (u_longlong_t)zo->zo_vdevs, /* -v */
584 nice_vdev_size, /* -s */
585 zo->zo_ashift, /* -a */
586 zo->zo_mirrors, /* -m */
587 zo->zo_raidz, /* -r */
588 zo->zo_raidz_parity, /* -R */
589 zo->zo_datasets, /* -d */
590 zo->zo_threads, /* -t */
591 nice_gang_bang, /* -g */
592 zo->zo_init, /* -i */
593 (u_longlong_t)zo->zo_killrate, /* -k */
594 zo->zo_pool, /* -p */
596 (u_longlong_t)zo->zo_time, /* -T */
597 (u_longlong_t)zo->zo_maxloops, /* -F */
598 (u_longlong_t)zo->zo_passtime);
599 exit(requested ? 0 : 1);
603 process_options(int argc, char **argv)
606 ztest_shared_opts_t *zo = &ztest_opts;
610 char altdir[MAXNAMELEN] = { 0 };
612 bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
614 while ((opt = getopt(argc, argv,
615 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
632 value = nicenumtoull(optarg);
636 zo->zo_vdevs = value;
639 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
642 zo->zo_ashift = value;
645 zo->zo_mirrors = value;
648 zo->zo_raidz = MAX(1, value);
651 zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
654 zo->zo_datasets = MAX(1, value);
657 zo->zo_threads = MAX(1, value);
660 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
667 zo->zo_killrate = value;
670 (void) strlcpy(zo->zo_pool, optarg,
671 sizeof (zo->zo_pool));
674 path = realpath(optarg, NULL);
676 (void) fprintf(stderr, "error: %s: %s\n",
677 optarg, strerror(errno));
680 (void) strlcpy(zo->zo_dir, path,
681 sizeof (zo->zo_dir));
694 zo->zo_passtime = MAX(1, value);
697 zo->zo_maxloops = MAX(1, value);
700 (void) strlcpy(altdir, optarg, sizeof (altdir));
712 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
715 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
718 if (strlen(altdir) > 0) {
726 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
727 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
729 VERIFY(NULL != realpath(getexecname(), cmd));
730 if (0 != access(altdir, F_OK)) {
731 ztest_dump_core = B_FALSE;
732 fatal(B_TRUE, "invalid alternate ztest path: %s",
735 VERIFY(NULL != realpath(altdir, realaltdir));
738 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
739 * We want to extract <isa> to determine if we should use
740 * 32 or 64 bit binaries.
742 bin = strstr(cmd, "/usr/bin/");
743 ztest = strstr(bin, "/ztest");
745 isalen = ztest - isa;
746 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
747 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
748 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
749 "%s/usr/lib/%.*s", realaltdir, isalen, isa);
751 if (0 != access(zo->zo_alt_ztest, X_OK)) {
752 ztest_dump_core = B_FALSE;
753 fatal(B_TRUE, "invalid alternate ztest: %s",
755 } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
756 ztest_dump_core = B_FALSE;
757 fatal(B_TRUE, "invalid alternate lib directory %s",
761 umem_free(cmd, MAXPATHLEN);
762 umem_free(realaltdir, MAXPATHLEN);
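/*
 * Simulate a power failure: record the current allocation statistics in
 * shared memory, then SIGKILL ourselves so the parent can verify the pool
 * after the unclean shutdown.
 */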
767 ztest_kill(ztest_shared_t *zs)
769 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
770 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
771 (void) kill(getpid(), SIGKILL);
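/*
 * Return a random value in [0, range), drawn from ztest_fd_rand
 * (/dev/urandom).
 */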
775 ztest_random(uint64_t range)
779 ASSERT3S(ztest_fd_rand, >=, 0);
784 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
785 fatal(1, "short read from /dev/urandom");
792 ztest_record_enospc(const char *s)
794 ztest_shared->zs_enospc_count++;
798 ztest_get_ashift(void)
800 if (ztest_opts.zo_ashift == 0)
801 return (SPA_MINBLOCKSHIFT + ztest_random(3));
802 return (ztest_opts.zo_ashift);
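/*
 * Build the nvlist describing a single file-backed leaf vdev, creating
 * and sizing the backing file as needed.
 */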
806 make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
808 char pathbuf[MAXPATHLEN];
813 ashift = ztest_get_ashift();
819 vdev = ztest_shared->zs_vdev_aux;
820 (void) snprintf(path, sizeof (pathbuf),
821 ztest_aux_template, ztest_opts.zo_dir,
822 pool == NULL ? ztest_opts.zo_pool : pool,
825 vdev = ztest_shared->zs_vdev_next_leaf++;
826 (void) snprintf(path, sizeof (pathbuf),
827 ztest_dev_template, ztest_opts.zo_dir,
828 pool == NULL ? ztest_opts.zo_pool : pool, vdev);
833 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
835 fatal(1, "can't open %s", path);
836 if (ftruncate(fd, size) != 0)
837 fatal(1, "can't ftruncate %s", path);
841 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
842 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
843 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
844 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
850 make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
851 uint64_t ashift, int r)
853 nvlist_t *raidz, **child;
857 return (make_vdev_file(path, aux, pool, size, ashift));
858 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
860 for (c = 0; c < r; c++)
861 child[c] = make_vdev_file(path, aux, pool, size, ashift);
863 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
864 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
865 VDEV_TYPE_RAIDZ) == 0);
866 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
867 ztest_opts.zo_raidz_parity) == 0);
868 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
871 for (c = 0; c < r; c++)
872 nvlist_free(child[c]);
874 umem_free(child, r * sizeof (nvlist_t *));
880 make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
881 uint64_t ashift, int r, int m)
883 nvlist_t *mirror, **child;
887 return (make_vdev_raidz(path, aux, pool, size, ashift, r));
889 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
891 for (c = 0; c < m; c++)
892 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);
894 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
895 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
896 VDEV_TYPE_MIRROR) == 0);
897 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
900 for (c = 0; c < m; c++)
901 nvlist_free(child[c]);
903 umem_free(child, m * sizeof (nvlist_t *));
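/*
 * Assemble the complete vdev tree: t top-level (mirror/raidz/file) vdevs
 * under a root vdev, marked as log devices when requested, or attached
 * under the given aux (spare/l2cache) array instead of as regular children.
 */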
909 make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
910 int log, int r, int m, int t)
912 nvlist_t *root, **child;
917 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
919 for (c = 0; c < t; c++) {
920 child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
922 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
926 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
927 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
928 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
931 for (c = 0; c < t; c++)
932 nvlist_free(child[c]);
934 umem_free(child, t * sizeof (nvlist_t *));
940 * Return a random spa version in the
941 * range [initial_version, SPA_VERSION_FEATURES].
944 ztest_random_spa_version(uint64_t initial_version)
946 uint64_t version = initial_version;
948 if (version <= SPA_VERSION_BEFORE_FEATURES) {
950 ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
953 if (version > SPA_VERSION_BEFORE_FEATURES)
954 version = SPA_VERSION_FEATURES;
956 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
961 ztest_random_blocksize(void)
963 return (1 << (SPA_MINBLOCKSHIFT +
964 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
968 ztest_random_ibshift(void)
970 return (DN_MIN_INDBLKSHIFT +
971 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
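/*
 * Pick a random top-level vdev, skipping holes and (unless log_ok) log
 * devices.
 */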
975 ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
978 vdev_t *rvd = spa->spa_root_vdev;
981 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
984 top = ztest_random(rvd->vdev_children);
985 tvd = rvd->vdev_child[top];
986 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
987 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
993 ztest_random_dsl_prop(zfs_prop_t prop)
998 value = zfs_prop_random_value(prop, ztest_random(-1ULL));
999 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
1005 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
1008 const char *propname = zfs_prop_to_name(prop);
1009 const char *valname;
1010 char setpoint[MAXPATHLEN];
1014 error = dsl_prop_set_int(osname, propname,
1015 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);
1017 if (error == ENOSPC) {
1018 ztest_record_enospc(FTAG);
1023 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));
1025 if (ztest_opts.zo_verbose >= 6) {
1026 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
1027 (void) printf("%s %s = %s at '%s'\n",
1028 osname, propname, valname, setpoint);
1035 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
1037 spa_t *spa = ztest_spa;
1038 nvlist_t *props = NULL;
1041 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
1042 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
1044 error = spa_prop_set(spa, props);
1048 if (error == ENOSPC) {
1049 ztest_record_enospc(FTAG);
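/*
 * The rll_t routines below implement a simple reader/writer lock built
 * from a mutex and condition variable; ztest uses arrays of these to
 * stand in for per-object and per-range ZFS locks (see the XXX comment
 * near the top of this file).
 */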
1058 ztest_rll_init(rll_t *rll)
1060 rll->rll_writer = NULL;
1061 rll->rll_readers = 0;
1062 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1063 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
1067 ztest_rll_destroy(rll_t *rll)
1069 ASSERT(rll->rll_writer == NULL);
1070 ASSERT(rll->rll_readers == 0);
1071 VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1072 VERIFY(cond_destroy(&rll->rll_cv) == 0);
1076 ztest_rll_lock(rll_t *rll, rl_type_t type)
1078 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1080 if (type == RL_READER) {
1081 while (rll->rll_writer != NULL)
1082 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1085 while (rll->rll_writer != NULL || rll->rll_readers)
1086 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1087 rll->rll_writer = curthread;
1090 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1094 ztest_rll_unlock(rll_t *rll)
1096 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1098 if (rll->rll_writer) {
1099 ASSERT(rll->rll_readers == 0);
1100 rll->rll_writer = NULL;
1102 ASSERT(rll->rll_readers != 0);
1103 ASSERT(rll->rll_writer == NULL);
1107 if (rll->rll_writer == NULL && rll->rll_readers == 0)
1108 VERIFY(cond_broadcast(&rll->rll_cv) == 0);
1110 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1114 ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
1116 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1118 ztest_rll_lock(rll, type);
1122 ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
1124 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1126 ztest_rll_unlock(rll);
1130 ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
1131 uint64_t size, rl_type_t type)
1133 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
1134 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
1137 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
1138 rl->rl_object = object;
1139 rl->rl_offset = offset;
1143 ztest_rll_lock(rll, type);
1149 ztest_range_unlock(rl_t *rl)
1151 rll_t *rll = rl->rl_lock;
1153 ztest_rll_unlock(rll);
1155 umem_free(rl, sizeof (*rl));
1159 ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
1162 zd->zd_zilog = dmu_objset_zil(os);
1163 zd->zd_shared = szd;
1164 dmu_objset_name(os, zd->zd_name);
1166 if (zd->zd_shared != NULL)
1167 zd->zd_shared->zd_seq = 0;
1169 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
1170 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
1172 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1173 ztest_rll_init(&zd->zd_object_lock[l]);
1175 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1176 ztest_rll_init(&zd->zd_range_lock[l]);
1180 ztest_zd_fini(ztest_ds_t *zd)
1182 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
1184 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1185 ztest_rll_destroy(&zd->zd_object_lock[l]);
1187 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1188 ztest_rll_destroy(&zd->zd_range_lock[l]);
1191 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1194 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
1200 * Attempt to assign tx to some transaction group.
1202 error = dmu_tx_assign(tx, txg_how);
1204 if (error == ERESTART) {
1205 ASSERT(txg_how == TXG_NOWAIT);
1208 ASSERT3U(error, ==, ENOSPC);
1209 ztest_record_enospc(tag);
1214 txg = dmu_tx_get_txg(tx);
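/*
 * Fill a buffer with a repeating 64-bit pattern, or check that it still
 * contains only that pattern; used to verify rewritten and deduplicated
 * data.
 */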
1220 ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
1223 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1230 ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
1233 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1237 diff |= (value - *ip++);
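/*
 * Block tags record the objset, object, offset, generation, and txg of a
 * write so that later reads can verify they see data that is both old
 * enough and in the right place.
 */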
1243 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1244 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1246 bt->bt_magic = BT_MAGIC;
1247 bt->bt_objset = dmu_objset_id(os);
1248 bt->bt_object = object;
1249 bt->bt_offset = offset;
1252 bt->bt_crtxg = crtxg;
1256 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1257 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1259 ASSERT(bt->bt_magic == BT_MAGIC);
1260 ASSERT(bt->bt_objset == dmu_objset_id(os));
1261 ASSERT(bt->bt_object == object);
1262 ASSERT(bt->bt_offset == offset);
1263 ASSERT(bt->bt_gen <= gen);
1264 ASSERT(bt->bt_txg <= txg);
1265 ASSERT(bt->bt_crtxg == crtxg);
1268 static ztest_block_tag_t *
1269 ztest_bt_bonus(dmu_buf_t *db)
1271 dmu_object_info_t doi;
1272 ztest_block_tag_t *bt;
1274 dmu_object_info_from_db(db, &doi);
1275 ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
1276 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
1277 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
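/*
 * ztest overlays its own object-creation parameters onto otherwise unused
 * lr_create_t fields via the lrz_* macros below.
 */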
1286 #define lrz_type lr_mode
1287 #define lrz_blocksize lr_uid
1288 #define lrz_ibshift lr_gid
1289 #define lrz_bonustype lr_rdev
1290 #define lrz_bonuslen lr_crtime[1]
1293 ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
1295 char *name = (void *)(lr + 1); /* name follows lr */
1296 size_t namesize = strlen(name) + 1;
1299 if (zil_replaying(zd->zd_zilog, tx))
1302 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
1303 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1304 sizeof (*lr) + namesize - sizeof (lr_t));
1306 zil_itx_assign(zd->zd_zilog, itx, tx);
1310 ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
1312 char *name = (void *)(lr + 1); /* name follows lr */
1313 size_t namesize = strlen(name) + 1;
1316 if (zil_replaying(zd->zd_zilog, tx))
1319 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
1320 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1321 sizeof (*lr) + namesize - sizeof (lr_t));
1323 itx->itx_oid = object;
1324 zil_itx_assign(zd->zd_zilog, itx, tx);
1328 ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
1331 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
1333 if (zil_replaying(zd->zd_zilog, tx))
1336 if (lr->lr_length > ZIL_MAX_LOG_DATA)
1337 write_state = WR_INDIRECT;
1339 itx = zil_itx_create(TX_WRITE,
1340 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
1342 if (write_state == WR_COPIED &&
1343 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
1344 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
1345 zil_itx_destroy(itx);
1346 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1347 write_state = WR_NEED_COPY;
1349 itx->itx_private = zd;
1350 itx->itx_wr_state = write_state;
1351 itx->itx_sync = (ztest_random(8) == 0);
1352 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);
1354 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1355 sizeof (*lr) - sizeof (lr_t));
1357 zil_itx_assign(zd->zd_zilog, itx, tx);
1361 ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
1365 if (zil_replaying(zd->zd_zilog, tx))
1368 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1369 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1370 sizeof (*lr) - sizeof (lr_t));
1372 itx->itx_sync = B_FALSE;
1373 zil_itx_assign(zd->zd_zilog, itx, tx);
1377 ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
1381 if (zil_replaying(zd->zd_zilog, tx))
1384 itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
1385 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1386 sizeof (*lr) - sizeof (lr_t));
1388 itx->itx_sync = B_FALSE;
1389 zil_itx_assign(zd->zd_zilog, itx, tx);
1396 ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
1398 char *name = (void *)(lr + 1); /* name follows lr */
1399 objset_t *os = zd->zd_os;
1400 ztest_block_tag_t *bbt;
1407 byteswap_uint64_array(lr, sizeof (*lr));
1409 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1410 ASSERT(name[0] != '\0');
1412 tx = dmu_tx_create(os);
1414 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
1416 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1417 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1419 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1422 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1426 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
1428 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1429 if (lr->lr_foid == 0) {
1430 lr->lr_foid = zap_create(os,
1431 lr->lrz_type, lr->lrz_bonustype,
1432 lr->lrz_bonuslen, tx);
1434 error = zap_create_claim(os, lr->lr_foid,
1435 lr->lrz_type, lr->lrz_bonustype,
1436 lr->lrz_bonuslen, tx);
1439 if (lr->lr_foid == 0) {
1440 lr->lr_foid = dmu_object_alloc(os,
1441 lr->lrz_type, 0, lr->lrz_bonustype,
1442 lr->lrz_bonuslen, tx);
1444 error = dmu_object_claim(os, lr->lr_foid,
1445 lr->lrz_type, 0, lr->lrz_bonustype,
1446 lr->lrz_bonuslen, tx);
1451 ASSERT3U(error, ==, EEXIST);
1452 ASSERT(zd->zd_zilog->zl_replay);
1457 ASSERT(lr->lr_foid != 0);
1459 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1460 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
1461 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1463 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1464 bbt = ztest_bt_bonus(db);
1465 dmu_buf_will_dirty(db, tx);
1466 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1467 dmu_buf_rele(db, FTAG);
1469 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1472 (void) ztest_log_create(zd, tx, lr);
1480 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1482 char *name = (void *)(lr + 1); /* name follows lr */
1483 objset_t *os = zd->zd_os;
1484 dmu_object_info_t doi;
1486 uint64_t object, txg;
1489 byteswap_uint64_array(lr, sizeof (*lr));
1491 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1492 ASSERT(name[0] != '\0');
1495 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1496 ASSERT(object != 0);
1498 ztest_object_lock(zd, object, RL_WRITER);
1500 VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
1502 tx = dmu_tx_create(os);
1504 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1505 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1507 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1509 ztest_object_unlock(zd, object);
1513 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1514 VERIFY3U(0, ==, zap_destroy(os, object, tx));
1516 VERIFY3U(0, ==, dmu_object_free(os, object, tx));
1519 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
1521 (void) ztest_log_remove(zd, tx, lr, object);
1525 ztest_object_unlock(zd, object);
1531 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1533 objset_t *os = zd->zd_os;
1534 void *data = lr + 1; /* data follows lr */
1535 uint64_t offset, length;
1536 ztest_block_tag_t *bt = data;
1537 ztest_block_tag_t *bbt;
1538 uint64_t gen, txg, lrtxg, crtxg;
1539 dmu_object_info_t doi;
1542 arc_buf_t *abuf = NULL;
1546 byteswap_uint64_array(lr, sizeof (*lr));
1548 offset = lr->lr_offset;
1549 length = lr->lr_length;
1551 /* If it's a dmu_sync() block, write the whole block */
1552 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1553 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1554 if (length < blocksize) {
1555 offset -= offset % blocksize;
1560 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1561 byteswap_uint64_array(bt, sizeof (*bt));
1563 if (bt->bt_magic != BT_MAGIC)
1566 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1567 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1569 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1571 dmu_object_info_from_db(db, &doi);
1573 bbt = ztest_bt_bonus(db);
1574 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1576 crtxg = bbt->bt_crtxg;
1577 lrtxg = lr->lr_common.lrc_txg;
1579 tx = dmu_tx_create(os);
1581 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1583 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1584 P2PHASE(offset, length) == 0)
1585 abuf = dmu_request_arcbuf(db, length);
1587 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1590 dmu_return_arcbuf(abuf);
1591 dmu_buf_rele(db, FTAG);
1592 ztest_range_unlock(rl);
1593 ztest_object_unlock(zd, lr->lr_foid);
1599 * Usually, verify the old data before writing new data --
1600 * but not always, because we also want to verify correct
1601 * behavior when the data was not recently read into cache.
1603 ASSERT(offset % doi.doi_data_block_size == 0);
1604 if (ztest_random(4) != 0) {
1605 int prefetch = ztest_random(2) ?
1606 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
1607 ztest_block_tag_t rbt;
1609 VERIFY(dmu_read(os, lr->lr_foid, offset,
1610 sizeof (rbt), &rbt, prefetch) == 0);
1611 if (rbt.bt_magic == BT_MAGIC) {
1612 ztest_bt_verify(&rbt, os, lr->lr_foid,
1613 offset, gen, txg, crtxg);
1618 * Writes can appear to be newer than the bonus buffer because
1619 * the ztest_get_data() callback does a dmu_read() of the
1620 * open-context data, which may be different than the data
1621 * as it was when the write was generated.
1623 if (zd->zd_zilog->zl_replay) {
1624 ztest_bt_verify(bt, os, lr->lr_foid, offset,
1625 MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
1630 * Set the bt's gen/txg to the bonus buffer's gen/txg
1631 * so that all of the usual ASSERTs will work.
1633 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
1637 dmu_write(os, lr->lr_foid, offset, length, data, tx);
1639 bcopy(data, abuf->b_data, length);
1640 dmu_assign_arcbuf(db, offset, abuf, tx);
1643 (void) ztest_log_write(zd, tx, lr);
1645 dmu_buf_rele(db, FTAG);
1649 ztest_range_unlock(rl);
1650 ztest_object_unlock(zd, lr->lr_foid);
1656 ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
1658 objset_t *os = zd->zd_os;
1664 byteswap_uint64_array(lr, sizeof (*lr));
1666 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1667 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
1670 tx = dmu_tx_create(os);
1672 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
1674 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1676 ztest_range_unlock(rl);
1677 ztest_object_unlock(zd, lr->lr_foid);
1681 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
1682 lr->lr_length, tx) == 0);
1684 (void) ztest_log_truncate(zd, tx, lr);
1688 ztest_range_unlock(rl);
1689 ztest_object_unlock(zd, lr->lr_foid);
1695 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1697 objset_t *os = zd->zd_os;
1700 ztest_block_tag_t *bbt;
1701 uint64_t txg, lrtxg, crtxg;
1704 byteswap_uint64_array(lr, sizeof (*lr));
1706 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1708 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1710 tx = dmu_tx_create(os);
1711 dmu_tx_hold_bonus(tx, lr->lr_foid);
1713 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1715 dmu_buf_rele(db, FTAG);
1716 ztest_object_unlock(zd, lr->lr_foid);
1720 bbt = ztest_bt_bonus(db);
1721 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1722 crtxg = bbt->bt_crtxg;
1723 lrtxg = lr->lr_common.lrc_txg;
1725 if (zd->zd_zilog->zl_replay) {
1726 ASSERT(lr->lr_size != 0);
1727 ASSERT(lr->lr_mode != 0);
1731 * Randomly change the size and increment the generation.
1733 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1735 lr->lr_mode = bbt->bt_gen + 1;
1740 * Verify that the current bonus buffer is not newer than our txg.
1742 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1743 MAX(txg, lrtxg), crtxg);
1745 dmu_buf_will_dirty(db, tx);
1747 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1748 ASSERT3U(lr->lr_size, <=, db->db_size);
1749 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
1750 bbt = ztest_bt_bonus(db);
1752 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1754 dmu_buf_rele(db, FTAG);
1756 (void) ztest_log_setattr(zd, tx, lr);
1760 ztest_object_unlock(zd, lr->lr_foid);
1765 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
1766 NULL, /* 0 no such transaction type */
1767 ztest_replay_create, /* TX_CREATE */
1768 NULL, /* TX_MKDIR */
1769 NULL, /* TX_MKXATTR */
1770 NULL, /* TX_SYMLINK */
1771 ztest_replay_remove, /* TX_REMOVE */
1772 NULL, /* TX_RMDIR */
1774 NULL, /* TX_RENAME */
1775 ztest_replay_write, /* TX_WRITE */
1776 ztest_replay_truncate, /* TX_TRUNCATE */
1777 ztest_replay_setattr, /* TX_SETATTR */
1779 NULL, /* TX_CREATE_ACL */
1780 NULL, /* TX_CREATE_ATTR */
1781 NULL, /* TX_CREATE_ACL_ATTR */
1782 NULL, /* TX_MKDIR_ACL */
1783 NULL, /* TX_MKDIR_ATTR */
1784 NULL, /* TX_MKDIR_ACL_ATTR */
1785 NULL, /* TX_WRITE2 */
1789 * ZIL get_data callbacks
1793 ztest_get_done(zgd_t *zgd, int error)
1795 ztest_ds_t *zd = zgd->zgd_private;
1796 uint64_t object = zgd->zgd_rl->rl_object;
1799 dmu_buf_rele(zgd->zgd_db, zgd);
1801 ztest_range_unlock(zgd->zgd_rl);
1802 ztest_object_unlock(zd, object);
1804 if (error == 0 && zgd->zgd_bp)
1805 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1807 umem_free(zgd, sizeof (*zgd));
1811 ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1813 ztest_ds_t *zd = arg;
1814 objset_t *os = zd->zd_os;
1815 uint64_t object = lr->lr_foid;
1816 uint64_t offset = lr->lr_offset;
1817 uint64_t size = lr->lr_length;
1818 blkptr_t *bp = &lr->lr_blkptr;
1819 uint64_t txg = lr->lr_common.lrc_txg;
1821 dmu_object_info_t doi;
1826 ztest_object_lock(zd, object, RL_READER);
1827 error = dmu_bonus_hold(os, object, FTAG, &db);
1829 ztest_object_unlock(zd, object);
1833 crtxg = ztest_bt_bonus(db)->bt_crtxg;
1835 if (crtxg == 0 || crtxg > txg) {
1836 dmu_buf_rele(db, FTAG);
1837 ztest_object_unlock(zd, object);
1841 dmu_object_info_from_db(db, &doi);
1842 dmu_buf_rele(db, FTAG);
1845 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
1846 zgd->zgd_zilog = zd->zd_zilog;
1847 zgd->zgd_private = zd;
1849 if (buf != NULL) { /* immediate write */
1850 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1853 error = dmu_read(os, object, offset, size, buf,
1854 DMU_READ_NO_PREFETCH);
1857 size = doi.doi_data_block_size;
1859 offset = P2ALIGN(offset, size);
1861 ASSERT(offset < size);
1865 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1868 error = dmu_buf_hold(os, object, offset, zgd, &db,
1869 DMU_READ_NO_PREFETCH);
1872 blkptr_t *obp = dmu_buf_get_blkptr(db);
1874 ASSERT(BP_IS_HOLE(bp));
1881 ASSERT(db->db_offset == offset);
1882 ASSERT(db->db_size == size);
1884 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1885 ztest_get_done, zgd);
1892 ztest_get_done(zgd, error);
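/*
 * Allocate/free a log record with an optional name appended after the
 * fixed-size portion.
 */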
1898 ztest_lr_alloc(size_t lrsize, char *name)
1901 size_t namesize = name ? strlen(name) + 1 : 0;
1903 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
1906 bcopy(name, lr + lrsize, namesize);
1912 ztest_lr_free(void *lr, size_t lrsize, char *name)
1914 size_t namesize = name ? strlen(name) + 1 : 0;
1916 umem_free(lr, lrsize + namesize);
1920 * Look up a bunch of objects. Returns the number of objects not found.
1923 ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
1928 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1930 for (int i = 0; i < count; i++, od++) {
1932 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1933 sizeof (uint64_t), 1, &od->od_object);
1935 ASSERT(error == ENOENT);
1936 ASSERT(od->od_object == 0);
1940 ztest_block_tag_t *bbt;
1941 dmu_object_info_t doi;
1943 ASSERT(od->od_object != 0);
1944 ASSERT(missing == 0); /* there should be no gaps */
1946 ztest_object_lock(zd, od->od_object, RL_READER);
1947 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
1948 od->od_object, FTAG, &db));
1949 dmu_object_info_from_db(db, &doi);
1950 bbt = ztest_bt_bonus(db);
1951 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1952 od->od_type = doi.doi_type;
1953 od->od_blocksize = doi.doi_data_block_size;
1954 od->od_gen = bbt->bt_gen;
1955 dmu_buf_rele(db, FTAG);
1956 ztest_object_unlock(zd, od->od_object);
1964 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1968 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1970 for (int i = 0; i < count; i++, od++) {
1977 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1979 lr->lr_doid = od->od_dir;
1980 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
1981 lr->lrz_type = od->od_crtype;
1982 lr->lrz_blocksize = od->od_crblocksize;
1983 lr->lrz_ibshift = ztest_random_ibshift();
1984 lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
1985 lr->lrz_bonuslen = dmu_bonus_max();
1986 lr->lr_gen = od->od_crgen;
1987 lr->lr_crtime[0] = time(NULL);
1989 if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
1990 ASSERT(missing == 0);
1994 od->od_object = lr->lr_foid;
1995 od->od_type = od->od_crtype;
1996 od->od_blocksize = od->od_crblocksize;
1997 od->od_gen = od->od_crgen;
1998 ASSERT(od->od_object != 0);
2001 ztest_lr_free(lr, sizeof (*lr), od->od_name);
2008 ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
2013 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
2017 for (int i = count - 1; i >= 0; i--, od--) {
2024 * No object was found.
2026 if (od->od_object == 0)
2029 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
2031 lr->lr_doid = od->od_dir;
2033 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
2034 ASSERT3U(error, ==, ENOSPC);
2039 ztest_lr_free(lr, sizeof (*lr), od->od_name);
2046 ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
2052 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
2054 lr->lr_foid = object;
2055 lr->lr_offset = offset;
2056 lr->lr_length = size;
2058 BP_ZERO(&lr->lr_blkptr);
2060 bcopy(data, lr + 1, size);
2062 error = ztest_replay_write(zd, lr, B_FALSE);
2064 ztest_lr_free(lr, sizeof (*lr) + size, NULL);
2070 ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2075 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2077 lr->lr_foid = object;
2078 lr->lr_offset = offset;
2079 lr->lr_length = size;
2081 error = ztest_replay_truncate(zd, lr, B_FALSE);
2083 ztest_lr_free(lr, sizeof (*lr), NULL);
2089 ztest_setattr(ztest_ds_t *zd, uint64_t object)
2094 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2096 lr->lr_foid = object;
2100 error = ztest_replay_setattr(zd, lr, B_FALSE);
2102 ztest_lr_free(lr, sizeof (*lr), NULL);
2108 ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2110 objset_t *os = zd->zd_os;
2115 txg_wait_synced(dmu_objset_pool(os), 0);
2117 ztest_object_lock(zd, object, RL_READER);
2118 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
2120 tx = dmu_tx_create(os);
2122 dmu_tx_hold_write(tx, object, offset, size);
2124 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
2127 dmu_prealloc(os, object, offset, size, tx);
2129 txg_wait_synced(dmu_objset_pool(os), txg);
2131 (void) dmu_free_long_range(os, object, offset, size);
2134 ztest_range_unlock(rl);
2135 ztest_object_unlock(zd, object);
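/*
 * Perform a random I/O (block-tag write, pattern write, zero write,
 * truncate, setattr, or property-changing rewrite) against the given
 * object and offset.
 */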
2139 ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
2142 ztest_block_tag_t wbt;
2143 dmu_object_info_t doi;
2144 enum ztest_io_type io_type;
2148 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
2149 blocksize = doi.doi_data_block_size;
2150 data = umem_alloc(blocksize, UMEM_NOFAIL);
2153 * Pick an i/o type at random, biased toward writing block tags.
2155 io_type = ztest_random(ZTEST_IO_TYPES);
2156 if (ztest_random(2) == 0)
2157 io_type = ZTEST_IO_WRITE_TAG;
2159 (void) rw_rdlock(&zd->zd_zilog_lock);
2163 case ZTEST_IO_WRITE_TAG:
2164 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
2165 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
2168 case ZTEST_IO_WRITE_PATTERN:
2169 (void) memset(data, 'a' + (object + offset) % 5, blocksize);
2170 if (ztest_random(2) == 0) {
2172 * Induce fletcher2 collisions to ensure that
2173 * zio_ddt_collision() detects and resolves them
2174 * when using fletcher2-verify for deduplication.
2176 ((uint64_t *)data)[0] ^= 1ULL << 63;
2177 ((uint64_t *)data)[4] ^= 1ULL << 63;
2179 (void) ztest_write(zd, object, offset, blocksize, data);
2182 case ZTEST_IO_WRITE_ZEROES:
2183 bzero(data, blocksize);
2184 (void) ztest_write(zd, object, offset, blocksize, data);
2187 case ZTEST_IO_TRUNCATE:
2188 (void) ztest_truncate(zd, object, offset, blocksize);
2191 case ZTEST_IO_SETATTR:
2192 (void) ztest_setattr(zd, object);
2195 case ZTEST_IO_REWRITE:
2196 (void) rw_rdlock(&ztest_name_lock);
2197 err = ztest_dsl_prop_set_uint64(zd->zd_name,
2198 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
2200 VERIFY(err == 0 || err == ENOSPC);
2201 err = ztest_dsl_prop_set_uint64(zd->zd_name,
2202 ZFS_PROP_COMPRESSION,
2203 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
2205 VERIFY(err == 0 || err == ENOSPC);
2206 (void) rw_unlock(&ztest_name_lock);
2208 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
2209 DMU_READ_NO_PREFETCH));
2211 (void) ztest_write(zd, object, offset, blocksize, data);
2215 (void) rw_unlock(&zd->zd_zilog_lock);
2217 umem_free(data, blocksize);
2221 * Initialize an object description template.
2224 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
2225 dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
2227 od->od_dir = ZTEST_DIROBJ;
2230 od->od_crtype = type;
2231 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
2234 od->od_type = DMU_OT_NONE;
2235 od->od_blocksize = 0;
2238 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
2239 tag, (int64_t)id, index);
2243 * Look up or create the objects for a test using the od template.
2244 * If the objects do not all exist, or if 'remove' is specified,
2245 * remove any existing objects and create new ones. Otherwise,
2246 * use the existing objects.
2249 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
2251 int count = size / sizeof (*od);
2254 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
2255 if ((ztest_lookup(zd, od, count) != 0 || remove) &&
2256 (ztest_remove(zd, od, count) != 0 ||
2257 ztest_create(zd, od, count) != 0))
2260 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2267 ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
2269 zilog_t *zilog = zd->zd_zilog;
2271 (void) rw_rdlock(&zd->zd_zilog_lock);
2273 zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
2276 * Remember the committed values in zd, which is in parent/child
2277 * shared memory. If we die, the next iteration of ztest_run()
2278 * will verify that the log really does contain this record.
2280 mutex_enter(&zilog->zl_lock);
2281 ASSERT(zd->zd_shared != NULL);
2282 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
2283 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
2284 mutex_exit(&zilog->zl_lock);
2286 (void) rw_unlock(&zd->zd_zilog_lock);
2290 * This function simulates the operations that occur during a
2291 * mount/unmount. We hold the dataset across these operations in an
2292 * attempt to expose any implicit assumptions about ZIL management.
2296 ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
2298 objset_t *os = zd->zd_os;
2301 * We grab the zd_dirobj_lock to ensure that no other thread is
2302 * updating the zil (i.e. adding in-memory log records) and the
2303 * zd_zilog_lock to block any I/O.
2305 VERIFY0(mutex_lock(&zd->zd_dirobj_lock));
2306 (void) rw_wrlock(&zd->zd_zilog_lock);
2308 /* zfsvfs_teardown() */
2309 zil_close(zd->zd_zilog);
2311 /* zfsvfs_setup() */
2312 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
2313 zil_replay(os, zd, ztest_replay_vector);
2315 (void) rw_unlock(&zd->zd_zilog_lock);
2316 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2320 * Verify that we can't destroy an active pool, create an existing pool,
2321 * or create a pool with a bad vdev spec.
2325 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
2327 ztest_shared_opts_t *zo = &ztest_opts;
2332 * Attempt to create using a bad file.
2334 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2335 VERIFY3U(ENOENT, ==,
2336 spa_create("ztest_bad_file", nvroot, NULL, NULL));
2337 nvlist_free(nvroot);
2340 * Attempt to create using a bad mirror.
2342 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1);
2343 VERIFY3U(ENOENT, ==,
2344 spa_create("ztest_bad_mirror", nvroot, NULL, NULL));
2345 nvlist_free(nvroot);
2348 * Attempt to create an existing pool. It shouldn't matter
2349 * what's in the nvroot; we should fail with EEXIST.
2351 (void) rw_rdlock(&ztest_name_lock);
2352 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2353 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
2354 nvlist_free(nvroot);
2355 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
2356 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2357 spa_close(spa, FTAG);
2359 (void) rw_unlock(&ztest_name_lock);
2364 ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
2367 uint64_t initial_version = SPA_VERSION_INITIAL;
2368 uint64_t version, newversion;
2369 nvlist_t *nvroot, *props;
2372 VERIFY0(mutex_lock(&ztest_vdev_lock));
2373 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
2376 * Clean up from previous runs.
2378 (void) spa_destroy(name);
2380 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
2381 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1);
2384 * If we're configuring a RAIDZ device then make sure that the
2385 * initial version is capable of supporting that feature.
2387 switch (ztest_opts.zo_raidz_parity) {
2390 initial_version = SPA_VERSION_INITIAL;
2393 initial_version = SPA_VERSION_RAIDZ2;
2396 initial_version = SPA_VERSION_RAIDZ3;
2401 * Create a pool with a spa version that can be upgraded. Pick
2402 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
2405 version = ztest_random_spa_version(initial_version);
2406 } while (version > SPA_VERSION_BEFORE_FEATURES);
2408 props = fnvlist_alloc();
2409 fnvlist_add_uint64(props,
2410 zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
2411 VERIFY0(spa_create(name, nvroot, props, NULL));
2412 fnvlist_free(nvroot);
2413 fnvlist_free(props);
2415 VERIFY0(spa_open(name, &spa, FTAG));
2416 VERIFY3U(spa_version(spa), ==, version);
2417 newversion = ztest_random_spa_version(version + 1);
2419 if (ztest_opts.zo_verbose >= 4) {
2420 (void) printf("upgrading spa version from %llu to %llu\n",
2421 (u_longlong_t)version, (u_longlong_t)newversion);
2424 spa_upgrade(spa, newversion);
2425 VERIFY3U(spa_version(spa), >, version);
2426 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
2427 zpool_prop_to_name(ZPOOL_PROP_VERSION)));
2428 spa_close(spa, FTAG);
2431 VERIFY0(mutex_unlock(&ztest_vdev_lock));
2435 vdev_lookup_by_path(vdev_t *vd, const char *path)
2439 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2442 for (int c = 0; c < vd->vdev_children; c++)
2443 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2451 * Find the first available hole which can be used as a top-level vdev.
2454 find_vdev_hole(spa_t *spa)
2456 vdev_t *rvd = spa->spa_root_vdev;
2459 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
2461 for (c = 0; c < rvd->vdev_children; c++) {
2462 vdev_t *cvd = rvd->vdev_child[c];
2464 if (cvd->vdev_ishole)
2471 * Verify that vdev_add() works as expected.
2475 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
2477 ztest_shared_t *zs = ztest_shared;
2478 spa_t *spa = ztest_spa;
2484 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2485 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
2487 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2489 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
2492 * If we have slogs then remove them 1/4 of the time.
2494 if (spa_has_slogs(spa) && ztest_random(4) == 0) {
2496 * Grab the guid from the head of the log class rotor.
2498 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
2500 spa_config_exit(spa, SCL_VDEV, FTAG);
2503 * We have to grab the ztest_name_lock as writer to
2504 * prevent a race between removing a slog (dmu_objset_find)
2505 * and destroying a dataset. Removing the slog will
2506 * grab a reference on the dataset which may cause
2507 * dmu_objset_destroy() to fail with EBUSY thus
2508 * leaving the dataset in an inconsistent state.
2510 VERIFY(rw_wrlock(&ztest_name_lock) == 0);
2511 error = spa_vdev_remove(spa, guid, B_FALSE);
2512 VERIFY(rw_unlock(&ztest_name_lock) == 0);
2514 if (error && error != EEXIST)
2515 fatal(0, "spa_vdev_remove() = %d", error);
2517 spa_config_exit(spa, SCL_VDEV, FTAG);
2520 * Make 1/4 of the devices be log devices.
2522 nvroot = make_vdev_root(NULL, NULL, NULL,
2523 ztest_opts.zo_vdev_size, 0,
2524 ztest_random(4) == 0, ztest_opts.zo_raidz,
2527 error = spa_vdev_add(spa, nvroot);
2528 nvlist_free(nvroot);
2530 if (error == ENOSPC)
2531 ztest_record_enospc("spa_vdev_add");
2532 else if (error != 0)
2533 fatal(0, "spa_vdev_add() = %d", error);
2536 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2540 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2544 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
2546 ztest_shared_t *zs = ztest_shared;
2547 spa_t *spa = ztest_spa;
2548 vdev_t *rvd = spa->spa_root_vdev;
2549 spa_aux_vdev_t *sav;
2554 if (ztest_random(2) == 0) {
2555 sav = &spa->spa_spares;
2556 aux = ZPOOL_CONFIG_SPARES;
2558 sav = &spa->spa_l2cache;
2559 aux = ZPOOL_CONFIG_L2CACHE;
2562 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2564 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2566 if (sav->sav_count != 0 && ztest_random(4) == 0) {
2568 * Pick a random device to remove.
2570 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
2573 * Find an unused device we can add.
2575 zs->zs_vdev_aux = 0;
2577 char path[MAXPATHLEN];
2579 (void) snprintf(path, sizeof (path), ztest_aux_template,
2580 ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
2582 for (c = 0; c < sav->sav_count; c++)
2583 if (strcmp(sav->sav_vdevs[c]->vdev_path,
2586 if (c == sav->sav_count &&
2587 vdev_lookup_by_path(rvd, path) == NULL)
2593 spa_config_exit(spa, SCL_VDEV, FTAG);
2599 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
2600 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
2601 error = spa_vdev_add(spa, nvroot);
2603 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
2604 nvlist_free(nvroot);
2607 * Remove an existing device. Sometimes, dirty its
2608 * vdev state first to make sure we handle removal
2609 * of devices that have pending state changes.
2611 if (ztest_random(2) == 0)
2612 (void) vdev_online(spa, guid, 0, NULL);
2614 error = spa_vdev_remove(spa, guid, B_FALSE);
2615 if (error != 0 && error != EBUSY)
2616 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
2619 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2623 * Split a pool if it has mirror top-level vdevs.
2627 ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2629 ztest_shared_t *zs = ztest_shared;
2630 spa_t *spa = ztest_spa;
2631 vdev_t *rvd = spa->spa_root_vdev;
2632 nvlist_t *tree, **child, *config, *split, **schild;
2633 uint_t c, children, schildren = 0, lastlogid = 0;
2636 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2638 /* ensure we have a usable config; mirrors of raidz aren't supported */
2639 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2640 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2644 /* clean up the old pool, if any */
2645 (void) spa_destroy("splitp");
2647 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2649 /* generate a config from the existing config */
2650 mutex_enter(&spa->spa_props_lock);
2651 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
2653 mutex_exit(&spa->spa_props_lock);
2655 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2658 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
2659 for (c = 0; c < children; c++) {
2660 vdev_t *tvd = rvd->vdev_child[c];
2664 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
2665 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
2667 VERIFY(nvlist_add_string(schild[schildren],
2668 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
2669 VERIFY(nvlist_add_uint64(schild[schildren],
2670 ZPOOL_CONFIG_IS_HOLE, 1) == 0);
2672 lastlogid = schildren;
2677 VERIFY(nvlist_lookup_nvlist_array(child[c],
2678 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2679 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
2682 /* OK, create a config that can be used to split */
2683 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
2684 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
2685 VDEV_TYPE_ROOT) == 0);
2686 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
2687 lastlogid != 0 ? lastlogid : schildren) == 0);
2689 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2690 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2692 for (c = 0; c < schildren; c++)
2693 nvlist_free(schild[c]);
2697 spa_config_exit(spa, SCL_VDEV, FTAG);
2699 (void) rw_wrlock(&ztest_name_lock);
2700 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2701 (void) rw_unlock(&ztest_name_lock);
2703 nvlist_free(config);
2706 (void) printf("successful split - results:\n");
2707 mutex_enter(&spa_namespace_lock);
2708 show_pool_stats(spa);
2709 show_pool_stats(spa_lookup("splitp"));
2710 mutex_exit(&spa_namespace_lock);
2714 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2719 * Verify that we can attach and detach devices.
2723 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2725 ztest_shared_t *zs = ztest_shared;
2726 spa_t *spa = ztest_spa;
2727 spa_aux_vdev_t *sav = &spa->spa_spares;
2728 vdev_t *rvd = spa->spa_root_vdev;
2729 vdev_t *oldvd, *newvd, *pvd;
2733 uint64_t ashift = ztest_get_ashift();
2734 uint64_t oldguid, pguid;
2735 size_t oldsize, newsize;
2736 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
2738 int oldvd_has_siblings = B_FALSE;
2739 int newvd_is_spare = B_FALSE;
2741 int error, expected_error;
2743 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2744 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
2746 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2749 * Decide whether to do an attach or a replace.
2751 replacing = ztest_random(2);
2754 * Pick a random top-level vdev.
2756 top = ztest_random_vdev_top(spa, B_TRUE);
2759 * Pick a random leaf within it.
2761 leaf = ztest_random(leaves);
2766 oldvd = rvd->vdev_child[top];
2767 if (zs->zs_mirrors >= 1) {
2768 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
2769 ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
2770 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
2772 if (ztest_opts.zo_raidz > 1) {
2773 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
2774 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
2775 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
2779 * If we're already doing an attach or replace, oldvd may be a
2780 * mirror vdev -- in which case, pick a random child.
2782 while (oldvd->vdev_children != 0) {
2783 oldvd_has_siblings = B_TRUE;
2784 ASSERT(oldvd->vdev_children >= 2);
2785 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
2788 oldguid = oldvd->vdev_guid;
2789 oldsize = vdev_get_min_asize(oldvd);
2790 oldvd_is_log = oldvd->vdev_top->vdev_islog;
2791 (void) strcpy(oldpath, oldvd->vdev_path);
2792 pvd = oldvd->vdev_parent;
2793 pguid = pvd->vdev_guid;
2796 * If oldvd has siblings, then half of the time, detach it.
2798 if (oldvd_has_siblings && ztest_random(2) == 0) {
2799 spa_config_exit(spa, SCL_VDEV, FTAG);
2800 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2801 if (error != 0 && error != ENODEV && error != EBUSY &&
2803 fatal(0, "detach (%s) returned %d", oldpath, error);
2804 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2809 * For the new vdev, choose with equal probability between the two
2810 * standard paths (ending in either 'a' or 'b') or a random hot spare.
2812 if (sav->sav_count != 0 && ztest_random(3) == 0) {
2813 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2814 newvd_is_spare = B_TRUE;
2815 (void) strcpy(newpath, newvd->vdev_path);
2817 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template,
2818 ztest_opts.zo_dir, ztest_opts.zo_pool,
2819 top * leaves + leaf);
2820 if (ztest_random(2) == 0)
2821 newpath[strlen(newpath) - 1] = 'b';
2822 newvd = vdev_lookup_by_path(rvd, newpath);
2826 newsize = vdev_get_min_asize(newvd);
2829 * Make newsize a little bigger or smaller than oldsize.
2830 * If it's smaller, the attach should fail.
2831 * If it's larger, and we're doing a replace,
2832 * we should get dynamic LUN growth when we're done.
2834 newsize = 10 * oldsize / (9 + ztest_random(3));
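/*
 * For the record, ztest_random(3) yields 0, 1, or 2, so newsize ends
 * up at 10/9, 10/10, or 10/11 of oldsize -- slightly larger, equal,
 * or slightly smaller, matching the comment above.
 */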
2838 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2839 * unless it's a replace; in that case any non-replacing parent is OK.
2841 * If newvd is already part of the pool, it should fail with EBUSY.
2843 * If newvd is too small, it should fail with EOVERFLOW.
2845 if (pvd->vdev_ops != &vdev_mirror_ops &&
2846 pvd->vdev_ops != &vdev_root_ops && (!replacing ||
2847 pvd->vdev_ops == &vdev_replacing_ops ||
2848 pvd->vdev_ops == &vdev_spare_ops))
2849 expected_error = ENOTSUP;
2850 else if (newvd_is_spare && (!replacing || oldvd_is_log))
2851 expected_error = ENOTSUP;
2852 else if (newvd == oldvd)
2853 expected_error = replacing ? 0 : EBUSY;
2854 else if (vdev_lookup_by_path(rvd, newpath) != NULL)
2855 expected_error = EBUSY;
2856 else if (newsize < oldsize)
2857 expected_error = EOVERFLOW;
2858 else if (ashift > oldvd->vdev_top->vdev_ashift)
2859 expected_error = EDOM;
2863 spa_config_exit(spa, SCL_VDEV, FTAG);
2866 * Build the nvlist describing newpath.
2868 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
2869 ashift, 0, 0, 0, 1);
2871 error = spa_vdev_attach(spa, oldguid, root, replacing);
2876 * If our parent was the replacing vdev, but the replace completed,
2877 * then instead of failing with ENOTSUP we may either succeed,
2878 * fail with ENODEV, or fail with EOVERFLOW.
2880 if (expected_error == ENOTSUP &&
2881 (error == 0 || error == ENODEV || error == EOVERFLOW))
2882 expected_error = error;
2885 * If someone grew the LUN, the replacement may be too small.
2887 if (error == EOVERFLOW || error == EBUSY)
2888 expected_error = error;
2890 /* XXX workaround 6690467 */
2891 if (error != expected_error && expected_error != EBUSY) {
2892 fatal(0, "attach (%s %llu, %s %llu, %d) "
2893 "returned %d, expected %d",
2894 oldpath, (longlong_t)oldsize, newpath,
2895 (longlong_t)newsize, replacing, error, expected_error);
2898 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2902 * Callback function which expands the physical size of the vdev.
2905 grow_vdev(vdev_t *vd, void *arg)
2907 spa_t *spa = vd->vdev_spa;
2908 size_t *newsize = arg;
2912 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2913 ASSERT(vd->vdev_ops->vdev_op_leaf);
2915 if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
2918 fsize = lseek(fd, 0, SEEK_END);
2919 (void) ftruncate(fd, *newsize);
2921 if (ztest_opts.zo_verbose >= 6) {
2922 (void) printf("%s grew from %lu to %lu bytes\n",
2923 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
2930 * Callback function which expands a given vdev by calling vdev_online().
2934 online_vdev(vdev_t *vd, void *arg)
2936 spa_t *spa = vd->vdev_spa;
2937 vdev_t *tvd = vd->vdev_top;
2938 uint64_t guid = vd->vdev_guid;
2939 uint64_t generation = spa->spa_config_generation + 1;
2940 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2943 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2944 ASSERT(vd->vdev_ops->vdev_op_leaf);
2946 /* Calling vdev_online will initialize the new metaslabs */
2947 spa_config_exit(spa, SCL_STATE, spa);
2948 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2949 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2952 * If vdev_online returned an error or the underlying vdev_open
2953 * failed, then we abort the expand. The only way to know that
2954 * vdev_open failed is by checking the returned newstate.
2956 if (error || newstate != VDEV_STATE_HEALTHY) {
2957 if (ztest_opts.zo_verbose >= 5) {
2958 (void) printf("Unable to expand vdev, state %llu, "
2959 "error %d\n", (u_longlong_t)newstate, error);
2963 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
2966 * Since we dropped the lock we need to ensure that we're
2967 * still talking to the original vdev. It's possible this
2968 * vdev may have been detached/replaced while we were
2969 * trying to online it.
2971 if (generation != spa->spa_config_generation) {
2972 if (ztest_opts.zo_verbose >= 5) {
2973 (void) printf("vdev configuration has changed, "
2974 "guid %llu, state %llu, expected gen %llu, "
2977 (u_longlong_t)tvd->vdev_state,
2978 (u_longlong_t)generation,
2979 (u_longlong_t)spa->spa_config_generation);
2987 * Traverse the vdev tree calling the supplied function.
2988 * We continue to walk the tree until we either have walked all
2989 * children or we receive a non-NULL return from the callback.
2990 * If a NULL callback is passed, then we just return back the first
2991 * leaf vdev we encounter.
2994 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
2996 if (vd->vdev_ops->vdev_op_leaf) {
3000 return (func(vd, arg));
3003 for (uint_t c = 0; c < vd->vdev_children; c++) {
3004 vdev_t *cvd = vd->vdev_child[c];
3005 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
3012 * Verify that dynamic LUN growth works as expected.
3016 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
3018 spa_t *spa = ztest_spa;
3020 metaslab_class_t *mc;
3021 metaslab_group_t *mg;
3022 size_t psize, newsize;
3024 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
3026 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
3027 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3029 top = ztest_random_vdev_top(spa, B_TRUE);
3031 tvd = spa->spa_root_vdev->vdev_child[top];
3034 old_ms_count = tvd->vdev_ms_count;
3035 old_class_space = metaslab_class_get_space(mc);
3038 * Determine the size of the first leaf vdev associated with
3039 * our top-level device.
3041 vd = vdev_walk_tree(tvd, NULL, NULL);
3042 ASSERT3P(vd, !=, NULL);
3043 ASSERT(vd->vdev_ops->vdev_op_leaf);
3045 psize = vd->vdev_psize;
3048 * We only try to expand the vdev if it's healthy, less than 4x its
3049 * original size, and has a valid psize.
3051 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
3052 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
3053 spa_config_exit(spa, SCL_STATE, spa);
3054 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3058 newsize = psize + psize / 8;
3059 ASSERT3U(newsize, >, psize);
3061 if (ztest_opts.zo_verbose >= 6) {
3062 (void) printf("Expanding LUN %s from %lu to %lu\n",
3063 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
3067 * Growing the vdev is a two-step process:
3068 * 1) expand the physical size (i.e. relabel), and
3069 * 2) online the vdev to create the new metaslabs.
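/*
 * Concretely, grow_vdev() above ftruncate()s the backing file to the
 * new size, and online_vdev() calls vdev_online(ZFS_ONLINE_EXPAND),
 * which (per the comment above it) initializes the new metaslabs for
 * the added space.
 */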
3071 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
3072 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
3073 tvd->vdev_state != VDEV_STATE_HEALTHY) {
3074 if (ztest_opts.zo_verbose >= 5) {
3075 (void) printf("Could not expand LUN because "
3076 "the vdev configuration changed.\n");
3078 spa_config_exit(spa, SCL_STATE, spa);
3079 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3083 spa_config_exit(spa, SCL_STATE, spa);
3086 * Expanding the LUN will update the config asynchronously,
3087 * thus we must wait for the async thread to complete any
3088 * pending tasks before proceeding.
3092 mutex_enter(&spa->spa_async_lock);
3093 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
3094 mutex_exit(&spa->spa_async_lock);
3097 txg_wait_synced(spa_get_dsl(spa), 0);
3098 (void) poll(NULL, 0, 100);
3101 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3103 tvd = spa->spa_root_vdev->vdev_child[top];
3104 new_ms_count = tvd->vdev_ms_count;
3105 new_class_space = metaslab_class_get_space(mc);
3107 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
3108 if (ztest_opts.zo_verbose >= 5) {
3109 (void) printf("Could not verify LUN expansion due to "
3110 "intervening vdev offline or remove.\n");
3112 spa_config_exit(spa, SCL_STATE, spa);
3113 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3118 * Make sure we were able to grow the vdev.
3120 if (new_ms_count <= old_ms_count)
3121 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
3122 new_ms_count, old_ms_count);
3125 * Make sure we were able to grow the pool.
3127 if (new_class_space <= old_class_space)
3128 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
3129 new_class_space, old_class_space);
3131 if (ztest_opts.zo_verbose >= 5) {
3132 char oldnumbuf[6], newnumbuf[6];
3134 nicenum(old_class_space, oldnumbuf);
3135 nicenum(new_class_space, newnumbuf);
3136 (void) printf("%s grew from %s to %s\n",
3137 spa->spa_name, oldnumbuf, newnumbuf);
3140 spa_config_exit(spa, SCL_STATE, spa);
3141 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3145 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
3149 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
3152 * Create the objects common to all ztest datasets.
3154 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
3155 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
3159 ztest_dataset_create(char *dsname)
3161 uint64_t zilset = ztest_random(100);
3162 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
3163 ztest_objset_create_cb, NULL);
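/*
 * Roughly one dataset in five (zilset >= 80) is switched to
 * sync=always below; with sync=always every transaction commits
 * through the intent log, so those datasets exercise the ZIL far
 * more heavily than the default would.
 */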
3165 if (err || zilset < 80)
3168 if (ztest_opts.zo_verbose >= 6)
3169 (void) printf("Setting dataset %s to sync always\n", dsname);
3170 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3171 ZFS_SYNC_ALWAYS, B_FALSE));
3176 ztest_objset_destroy_cb(const char *name, void *arg)
3179 dmu_object_info_t doi;
3183 * Verify that the dataset contains a directory object.
3185 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os));
3186 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3187 if (error != ENOENT) {
3188 /* We could have crashed in the middle of destroying it */
3190 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3191 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3193 dmu_objset_disown(os, FTAG);
3196 * Destroy the dataset.
3198 if (strchr(name, '@') != NULL) {
3199 VERIFY0(dsl_destroy_snapshot(name, B_FALSE));
3201 VERIFY0(dsl_destroy_head(name));
3207 ztest_snapshot_create(char *osname, uint64_t id)
3209 char snapname[MAXNAMELEN];
3212 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id);
3214 error = dmu_objset_snapshot_one(osname, snapname);
3215 if (error == ENOSPC) {
3216 ztest_record_enospc(FTAG);
3219 if (error != 0 && error != EEXIST) {
3220 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname,
3227 ztest_snapshot_destroy(char *osname, uint64_t id)
3229 char snapname[MAXNAMELEN];
3232 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3235 error = dsl_destroy_snapshot(snapname, B_FALSE);
3236 if (error != 0 && error != ENOENT)
3237 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
3243 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3249 char name[MAXNAMELEN];
3252 (void) rw_rdlock(&ztest_name_lock);
3254 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
3255 ztest_opts.zo_pool, (u_longlong_t)id);
3258 * If this dataset exists from a previous run, process its replay log
3259 * half of the time. If we don't replay it, then dmu_objset_destroy()
3260 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3262 if (ztest_random(2) == 0 &&
3263 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
3264 ztest_zd_init(&zdtmp, NULL, os);
3265 zil_replay(os, &zdtmp, ztest_replay_vector);
3266 ztest_zd_fini(&zdtmp);
3267 dmu_objset_disown(os, FTAG);
3271 * There may be an old instance of the dataset we're about to
3272 * create lying around from a previous run. If so, destroy it
3273 * and all of its snapshots.
3275 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
3276 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3279 * Verify that the destroyed dataset is no longer in the namespace.
3281 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
3285 * Verify that we can create a new dataset.
3287 error = ztest_dataset_create(name);
3289 if (error == ENOSPC) {
3290 ztest_record_enospc(FTAG);
3291 (void) rw_unlock(&ztest_name_lock);
3294 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3297 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3299 ztest_zd_init(&zdtmp, NULL, os);
3302 * Open the intent log for it.
3304 zilog = zil_open(os, ztest_get_data);
3307 * Put some objects in there, do a little I/O to them,
3308 * and randomly take a couple of snapshots along the way.
3310 iters = ztest_random(5);
3311 for (int i = 0; i < iters; i++) {
3312 ztest_dmu_object_alloc_free(&zdtmp, id);
3313 if (ztest_random(iters) == 0)
3314 (void) ztest_snapshot_create(name, i);
3318 * Verify that we cannot create an existing dataset.
3320 VERIFY3U(EEXIST, ==,
3321 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3324 * Verify that we can hold an objset that is also owned.
3326 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3327 dmu_objset_rele(os2, FTAG);
3330 * Verify that we cannot own an objset that is already owned.
3333 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3336 dmu_objset_disown(os, FTAG);
3337 ztest_zd_fini(&zdtmp);
3339 (void) rw_unlock(&ztest_name_lock);
3343 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3346 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3348 (void) rw_rdlock(&ztest_name_lock);
3349 (void) ztest_snapshot_destroy(zd->zd_name, id);
3350 (void) ztest_snapshot_create(zd->zd_name, id);
3351 (void) rw_unlock(&ztest_name_lock);
3355 * Cleanup non-standard snapshots and clones.
3358 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3360 char snap1name[MAXNAMELEN];
3361 char clone1name[MAXNAMELEN];
3362 char snap2name[MAXNAMELEN];
3363 char clone2name[MAXNAMELEN];
3364 char snap3name[MAXNAMELEN];
3367 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3368 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3369 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3370 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3371 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3373 error = dsl_destroy_head(clone2name);
3374 if (error && error != ENOENT)
3375 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error);
3376 error = dsl_destroy_snapshot(snap3name, B_FALSE);
3377 if (error && error != ENOENT)
3378 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error);
3379 error = dsl_destroy_snapshot(snap2name, B_FALSE);
3380 if (error && error != ENOENT)
3381 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error);
3382 error = dsl_destroy_head(clone1name);
3383 if (error && error != ENOENT)
3384 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error);
3385 error = dsl_destroy_snapshot(snap1name, B_FALSE);
3386 if (error && error != ENOENT)
3387 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error);
3391 * Verify dsl_dataset_promote handles EBUSY
3394 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3397 char snap1name[MAXNAMELEN];
3398 char clone1name[MAXNAMELEN];
3399 char snap2name[MAXNAMELEN];
3400 char clone2name[MAXNAMELEN];
3401 char snap3name[MAXNAMELEN];
3402 char *osname = zd->zd_name;
3405 (void) rw_rdlock(&ztest_name_lock);
3407 ztest_dsl_dataset_cleanup(osname, id);
3409 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3410 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3411 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3412 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3413 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
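/*
 * The names above describe the dataset tree built below:
 *
 *	<osname>@s1_<id>		snapshot of the original dataset
 *	<osname>/c1_<id>		clone of s1
 *	<osname>/c1_<id>@s2_<id>	snapshot of the clone
 *	<osname>/c1_<id>@s3_<id>	later snapshot of the clone
 *	<osname>/c2_<id>		clone of s3
 *
 * Owning s2 and then promoting c2 is expected to fail with EBUSY,
 * presumably because the promotion would have to migrate c1's earlier
 * snapshots (including the owned s2) over to c2.
 */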
3415 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1);
3416 if (error && error != EEXIST) {
3417 if (error == ENOSPC) {
3418 ztest_record_enospc(FTAG);
3421 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error);
3424 error = dmu_objset_clone(clone1name, snap1name);
3426 if (error == ENOSPC) {
3427 ztest_record_enospc(FTAG);
3430 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error);
3433 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1);
3434 if (error && error != EEXIST) {
3435 if (error == ENOSPC) {
3436 ztest_record_enospc(FTAG);
3439 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error);
3442 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1);
3443 if (error && error != EEXIST) {
3444 if (error == ENOSPC) {
3445 ztest_record_enospc(FTAG);
3448 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
3451 error = dmu_objset_clone(clone2name, snap3name);
3453 if (error == ENOSPC) {
3454 ztest_record_enospc(FTAG);
3457 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error);
3460 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os);
3462 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error);
3463 error = dsl_dataset_promote(clone2name, NULL);
3465 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3467 dmu_objset_disown(os, FTAG);
3470 ztest_dsl_dataset_cleanup(osname, id);
3472 (void) rw_unlock(&ztest_name_lock);
3476 * Verify that dmu_object_{alloc,free} work as expected.
3479 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3482 int batchsize = sizeof (od) / sizeof (od[0]);
3484 for (int b = 0; b < batchsize; b++)
3485 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3488 * Destroy the previous batch of objects, create a new batch,
3489 * and do some I/O on the new objects.
3491 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
3494 while (ztest_random(4 * batchsize) != 0)
3495 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3496 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3500 * Verify that dmu_{read,write} work as expected.
3503 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3505 objset_t *os = zd->zd_os;
3508 int i, freeit, error;
3510 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3511 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3512 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
3513 uint64_t regions = 997;
3514 uint64_t stride = 123456789ULL;
3515 uint64_t width = 40;
3516 int free_percent = 5;
3519 * This test uses two objects, packobj and bigobj, that are always
3520 * updated together (i.e. in the same tx) so that their contents are
3521 * in sync and can be compared. Their contents relate to each other
3522 * in a simple way: packobj is a dense array of 'bufwad' structures,
3523 * while bigobj is a sparse array of the same bufwads. Specifically,
3524 * for any index n, there are three bufwads that should be identical:
3526 * packobj, at offset n * sizeof (bufwad_t)
3527 * bigobj, at the head of the nth chunk
3528 * bigobj, at the tail of the nth chunk
3530 * The chunk size is arbitrary. It doesn't have to be a power of two,
3531 * and it doesn't have any relation to the object blocksize.
3532 * The only requirement is that it can hold at least two bufwads.
3534 * Normally, we write the bufwad to each of these locations.
3535 * However, free_percent of the time we instead write zeroes to
3536 * packobj and perform a dmu_free_range() on bigobj. By comparing
3537 * bigobj to packobj, we can verify that the DMU is correctly
3538 * tracking which parts of an object are allocated and free,
3539 * and that the contents of the allocated blocks are correct.
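/*
 * Put differently: for any index n (with chunk size C), the three
 * copies that must agree live at
 *
 *	packobj offset n * sizeof (bufwad_t)
 *	bigobj  offset n * C				(head of the nth chunk)
 *	bigobj  offset (n + 1) * C - sizeof (bufwad_t)	(tail of the nth chunk)
 */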
3543 * Read the directory info. If it's the first time, set things up.
3545 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3546 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3548 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3551 bigobj = od[0].od_object;
3552 packobj = od[1].od_object;
3553 chunksize = od[0].od_gen;
3554 ASSERT(chunksize == od[1].od_gen);
3557 * Prefetch a random chunk of the big object.
3558 * Our aim here is to get some async reads in flight
3559 * for blocks that we may free below; the DMU should
3560 * handle this race correctly.
3562 n = ztest_random(regions) * stride + ztest_random(width);
3563 s = 1 + ztest_random(2 * width - 1);
3564 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3567 * Pick a random index and compute the offsets into packobj and bigobj.
3569 n = ztest_random(regions) * stride + ztest_random(width);
3570 s = 1 + ztest_random(width - 1);
3572 packoff = n * sizeof (bufwad_t);
3573 packsize = s * sizeof (bufwad_t);
3575 bigoff = n * chunksize;
3576 bigsize = s * chunksize;
3578 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3579 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3582 * free_percent of the time, free a range of bigobj rather than
3585 freeit = (ztest_random(100) < free_percent);
3588 * Read the current contents of our objects.
3590 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3593 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3598 * Get a tx for the mods to both packobj and bigobj.
3600 tx = dmu_tx_create(os);
3602 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3605 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3607 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3609 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3611 umem_free(packbuf, packsize);
3612 umem_free(bigbuf, bigsize);
3616 dmu_object_set_checksum(os, bigobj,
3617 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3619 dmu_object_set_compress(os, bigobj,
3620 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3623 * For each index from n to n + s, verify that the existing bufwad
3624 * in packobj matches the bufwads at the head and tail of the
3625 * corresponding chunk in bigobj. Then update all three bufwads
3626 * with the new values we want to write out.
3628 for (i = 0; i < s; i++) {
3630 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3632 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3634 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3636 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3637 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3639 if (pack->bw_txg > txg)
3640 fatal(0, "future leak: got %llx, open txg is %llx",
3643 if (pack->bw_data != 0 && pack->bw_index != n + i)
3644 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3645 pack->bw_index, n, i);
3647 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3648 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3650 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3651 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3654 bzero(pack, sizeof (bufwad_t));
3656 pack->bw_index = n + i;
3658 pack->bw_data = 1 + ztest_random(-2ULL);
3665 * We've verified all the old bufwads, and made new ones.
3666 * Now write them out.
3668 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3671 if (ztest_opts.zo_verbose >= 7) {
3672 (void) printf("freeing offset %llx size %llx"
3674 (u_longlong_t)bigoff,
3675 (u_longlong_t)bigsize,
3678 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
3680 if (ztest_opts.zo_verbose >= 7) {
3681 (void) printf("writing offset %llx size %llx"
3683 (u_longlong_t)bigoff,
3684 (u_longlong_t)bigsize,
3687 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
3693 * Sanity check the stuff we just wrote.
3696 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3697 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3699 VERIFY(0 == dmu_read(os, packobj, packoff,
3700 packsize, packcheck, DMU_READ_PREFETCH));
3701 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3702 bigsize, bigcheck, DMU_READ_PREFETCH));
3704 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3705 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3707 umem_free(packcheck, packsize);
3708 umem_free(bigcheck, bigsize);
3711 umem_free(packbuf, packsize);
3712 umem_free(bigbuf, bigsize);
3716 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3717 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3725 * For each index from n to n + s, verify that the existing bufwad
3726 * in packobj matches the bufwads at the head and tail of the
3727 * corresponding chunk in bigobj. Then update all three bufwads
3728 * with the new values we want to write out.
3730 for (i = 0; i < s; i++) {
3732 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3734 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3736 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3738 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3739 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3741 if (pack->bw_txg > txg)
3742 fatal(0, "future leak: got %llx, open txg is %llx",
3745 if (pack->bw_data != 0 && pack->bw_index != n + i)
3746 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3747 pack->bw_index, n, i);
3749 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3750 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3752 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3753 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3755 pack->bw_index = n + i;
3757 pack->bw_data = 1 + ztest_random(-2ULL);
3765 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3767 objset_t *os = zd->zd_os;
3773 bufwad_t *packbuf, *bigbuf;
3774 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3775 uint64_t blocksize = ztest_random_blocksize();
3776 uint64_t chunksize = blocksize;
3777 uint64_t regions = 997;
3778 uint64_t stride = 123456789ULL;
3780 dmu_buf_t *bonus_db;
3781 arc_buf_t **bigbuf_arcbufs;
3782 dmu_object_info_t doi;
3785 * This test uses two objects, packobj and bigobj, that are always
3786 * updated together (i.e. in the same tx) so that their contents are
3787 * in sync and can be compared. Their contents relate to each other
3788 * in a simple way: packobj is a dense array of 'bufwad' structures,
3789 * while bigobj is a sparse array of the same bufwads. Specifically,
3790 * for any index n, there are three bufwads that should be identical:
3792 * packobj, at offset n * sizeof (bufwad_t)
3793 * bigobj, at the head of the nth chunk
3794 * bigobj, at the tail of the nth chunk
3796 * The chunk size is set equal to bigobj block size so that
3797 * dmu_assign_arcbuf() can be tested for object updates.
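/*
 * Because chunksize matches the bigobj block size, each chunk maps to
 * exactly one block (and hence one dbuf), which is what lets a loaned
 * arc buffer be handed directly to dmu_assign_arcbuf() below.
 */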
3801 * Read the directory info. If it's the first time, set things up.
3803 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3804 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3806 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3809 bigobj = od[0].od_object;
3810 packobj = od[1].od_object;
3811 blocksize = od[0].od_blocksize;
3812 chunksize = blocksize;
3813 ASSERT(chunksize == od[1].od_gen);
3815 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3816 VERIFY(ISP2(doi.doi_data_block_size));
3817 VERIFY(chunksize == doi.doi_data_block_size);
3818 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3821 * Pick a random index and compute the offsets into packobj and bigobj.
3823 n = ztest_random(regions) * stride + ztest_random(width);
3824 s = 1 + ztest_random(width - 1);
3826 packoff = n * sizeof (bufwad_t);
3827 packsize = s * sizeof (bufwad_t);
3829 bigoff = n * chunksize;
3830 bigsize = s * chunksize;
3832 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3833 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3835 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3837 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3840 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
3841 * Iteration 1 test zcopy to already referenced dbufs.
3842 * Iteration 2 test zcopy to dirty dbuf in the same txg.
3843 * Iteration 3 test zcopy to dbuf dirty in previous txg.
3844 * Iteration 4 test zcopy when dbuf is no longer dirty.
3845 * Iteration 5 test zcopy when it can't be done.
3846 * Iteration 6 one more zcopy write.
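/*
 * The dirty-state cases above are set up by the loop itself: between
 * selected iterations it calls txg_wait_open() or txg_wait_synced()
 * (see the bottom of the loop) so that the dbufs being assigned are
 * dirty in the current txg, dirty in a previous txg, or clean again.
 */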
3848 for (i = 0; i < 7; i++) {
3853 * In iteration 5 (i == 5) use arcbufs
3854 * that don't match bigobj blksz to test
3855 * dmu_assign_arcbuf() when it can't directly
3856 * assign an arcbuf to a dbuf.
3858 for (j = 0; j < s; j++) {
3861 dmu_request_arcbuf(bonus_db, chunksize);
3863 bigbuf_arcbufs[2 * j] =
3864 dmu_request_arcbuf(bonus_db, chunksize / 2);
3865 bigbuf_arcbufs[2 * j + 1] =
3866 dmu_request_arcbuf(bonus_db, chunksize / 2);
3871 * Get a tx for the mods to both packobj and bigobj.
3873 tx = dmu_tx_create(os);
3875 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3876 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3878 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3880 umem_free(packbuf, packsize);
3881 umem_free(bigbuf, bigsize);
3882 for (j = 0; j < s; j++) {
3884 dmu_return_arcbuf(bigbuf_arcbufs[j]);
3887 bigbuf_arcbufs[2 * j]);
3889 bigbuf_arcbufs[2 * j + 1]);
3892 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3893 dmu_buf_rele(bonus_db, FTAG);
3898 * 50% of the time don't read objects in the 1st iteration to
3899 * test dmu_assign_arcbuf() for the case when there are no
3900 * existing dbufs for the specified offsets.
3902 if (i != 0 || ztest_random(2) != 0) {
3903 error = dmu_read(os, packobj, packoff,
3904 packsize, packbuf, DMU_READ_PREFETCH);
3906 error = dmu_read(os, bigobj, bigoff, bigsize,
3907 bigbuf, DMU_READ_PREFETCH);
3910 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3914 * We've verified all the old bufwads, and made new ones.
3915 * Now write them out.
3917 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3918 if (ztest_opts.zo_verbose >= 7) {
3919 (void) printf("writing offset %llx size %llx"
3921 (u_longlong_t)bigoff,
3922 (u_longlong_t)bigsize,
3925 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3928 bcopy((caddr_t)bigbuf + (off - bigoff),
3929 bigbuf_arcbufs[j]->b_data, chunksize);
3931 bcopy((caddr_t)bigbuf + (off - bigoff),
3932 bigbuf_arcbufs[2 * j]->b_data,
3934 bcopy((caddr_t)bigbuf + (off - bigoff) +
3936 bigbuf_arcbufs[2 * j + 1]->b_data,
3941 VERIFY(dmu_buf_hold(os, bigobj, off,
3942 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
3945 dmu_assign_arcbuf(bonus_db, off,
3946 bigbuf_arcbufs[j], tx);
3948 dmu_assign_arcbuf(bonus_db, off,
3949 bigbuf_arcbufs[2 * j], tx);
3950 dmu_assign_arcbuf(bonus_db,
3951 off + chunksize / 2,
3952 bigbuf_arcbufs[2 * j + 1], tx);
3955 dmu_buf_rele(dbt, FTAG);
3961 * Sanity check the stuff we just wrote.
3964 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3965 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3967 VERIFY(0 == dmu_read(os, packobj, packoff,
3968 packsize, packcheck, DMU_READ_PREFETCH));
3969 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3970 bigsize, bigcheck, DMU_READ_PREFETCH));
3972 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3973 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3975 umem_free(packcheck, packsize);
3976 umem_free(bigcheck, bigsize);
3979 txg_wait_open(dmu_objset_pool(os), 0);
3980 } else if (i == 3) {
3981 txg_wait_synced(dmu_objset_pool(os), 0);
3985 dmu_buf_rele(bonus_db, FTAG);
3986 umem_free(packbuf, packsize);
3987 umem_free(bigbuf, bigsize);
3988 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3993 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
3996 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
3997 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
4000 * Have multiple threads write to large offsets in an object
4001 * to verify that parallel writes to an object -- even to the
4002 * same blocks within the object -- don't cause any trouble.
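/*
 * For scale: the base offset above is 1 << (43 .. 62), i.e. at least
 * 8 TiB into the object, so the object stays sparse, while the
 * ZTEST_RANGE_LOCKS term confines writes to a small set of
 * block-aligned slots so that different threads really do collide on
 * the same blocks.
 */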
4004 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4006 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4009 while (ztest_random(10) != 0)
4010 ztest_io(zd, od[0].od_object, offset);
4014 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
4017 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
4018 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
4019 uint64_t count = ztest_random(20) + 1;
4020 uint64_t blocksize = ztest_random_blocksize();
4023 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4025 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4028 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
4031 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
4033 data = umem_zalloc(blocksize, UMEM_NOFAIL);
4035 while (ztest_random(count) != 0) {
4036 uint64_t randoff = offset + (ztest_random(count) * blocksize);
4037 if (ztest_write(zd, od[0].od_object, randoff, blocksize,
4040 while (ztest_random(4) != 0)
4041 ztest_io(zd, od[0].od_object, randoff);
4044 umem_free(data, blocksize);
4048 * Verify that zap_{create,destroy,add,remove,update} work as expected.
4050 #define ZTEST_ZAP_MIN_INTS 1
4051 #define ZTEST_ZAP_MAX_INTS 4
4052 #define ZTEST_ZAP_MAX_PROPS 1000
4055 ztest_zap(ztest_ds_t *zd, uint64_t id)
4057 objset_t *os = zd->zd_os;
4060 uint64_t txg, last_txg;
4061 uint64_t value[ZTEST_ZAP_MAX_INTS];
4062 uint64_t zl_ints, zl_intsize, prop;
4065 char propname[100], txgname[100];
4067 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
4069 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4071 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4074 object = od[0].od_object;
4077 * Generate a known hash collision, and verify that
4078 * we can look up and remove both entries.
4080 tx = dmu_tx_create(os);
4081 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4082 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4085 for (i = 0; i < 2; i++) {
4087 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
4090 for (i = 0; i < 2; i++) {
4091 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
4092 sizeof (uint64_t), 1, &value[i], tx));
4094 zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
4095 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4096 ASSERT3U(zl_ints, ==, 1);
4098 for (i = 0; i < 2; i++) {
4099 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
4104 * Generate a bunch of random entries.
4106 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
4108 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4109 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4110 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4111 bzero(value, sizeof (value));
4115 * If these zap entries already exist, validate their contents.
4117 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4119 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4120 ASSERT3U(zl_ints, ==, 1);
4122 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
4123 zl_ints, &last_txg) == 0);
4125 VERIFY(zap_length(os, object, propname, &zl_intsize,
4128 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4129 ASSERT3U(zl_ints, ==, ints);
4131 VERIFY(zap_lookup(os, object, propname, zl_intsize,
4132 zl_ints, value) == 0);
4134 for (i = 0; i < ints; i++) {
4135 ASSERT3U(value[i], ==, last_txg + object + i);
4138 ASSERT3U(error, ==, ENOENT);
4142 * Atomically update two entries in our zap object.
4143 * The first is named txg_%llu, and contains the txg
4144 * in which the property was last updated. The second
4145 * is named prop_%llu, and the nth element of its value
4146 * should be txg + object + n.
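/*
 * Illustrative example: if prop is 17, object is 5, and the assigned
 * txg is 100, then "txg_17" is updated to hold 100 and "prop_17"
 * holds { 105, 106, ... }, i.e. txg + object + n for each element n.
 */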
4148 tx = dmu_tx_create(os);
4149 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4150 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4155 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4157 for (i = 0; i < ints; i++)
4158 value[i] = txg + object + i;
4160 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
4162 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
4168 * Remove a random pair of entries.
4170 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4171 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4172 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4174 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4176 if (error == ENOENT)
4181 tx = dmu_tx_create(os);
4182 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4183 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4186 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
4187 VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
4192 * Testcase to test the upgrading of a microzap to fatzap.
4195 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4197 objset_t *os = zd->zd_os;
4199 uint64_t object, txg;
4201 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4203 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4206 object = od[0].od_object;
4209 * Add entries to this ZAP and make sure it spills over
4210 * and gets upgraded to a fatzap. Also, since we are adding
4211 * 2050 entries we should see ptrtbl growth and leaf-block split.
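/*
 * 2050 is not an arbitrary count: a microzap lives in a single block,
 * and assuming the usual 64-byte microzap chunks and a 128K maximum
 * block size it can hold only on the order of 2K entries, so 2050
 * additions guarantee the spill into a fatzap.
 */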
4213 for (int i = 0; i < 2050; i++) {
4214 char name[MAXNAMELEN];
4219 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4222 tx = dmu_tx_create(os);
4223 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4224 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4227 error = zap_add(os, object, name, sizeof (uint64_t), 1,
4229 ASSERT(error == 0 || error == EEXIST);
4236 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4238 objset_t *os = zd->zd_os;
4240 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4242 int i, namelen, error;
4243 int micro = ztest_random(2);
4244 char name[20], string_value[20];
4247 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4249 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4252 object = od[0].od_object;
4255 * Generate a random name of the form 'xxx.....' where each
4256 * x is a random printable character and the dots are dots.
4257 * There are 94 such characters, and the name length goes from
4258 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4260 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4262 for (i = 0; i < 3; i++)
4263 name[i] = '!' + ztest_random('~' - '!' + 1);
4264 for (; i < namelen - 1; i++)
4268 if ((namelen & 1) || micro) {
4269 wsize = sizeof (txg);
4275 data = string_value;
4279 VERIFY0(zap_count(os, object, &count));
4280 ASSERT(count != -1ULL);
4283 * Select an operation: length, lookup, add, update, remove.
4285 i = ztest_random(5);
4288 tx = dmu_tx_create(os);
4289 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4290 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4293 bcopy(name, string_value, namelen);
4297 bzero(string_value, namelen);
4303 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4305 ASSERT3U(wsize, ==, zl_wsize);
4306 ASSERT3U(wc, ==, zl_wc);
4308 ASSERT3U(error, ==, ENOENT);
4313 error = zap_lookup(os, object, name, wsize, wc, data);
4315 if (data == string_value &&
4316 bcmp(name, data, namelen) != 0)
4317 fatal(0, "name '%s' != val '%s' len %d",
4318 name, data, namelen);
4320 ASSERT3U(error, ==, ENOENT);
4325 error = zap_add(os, object, name, wsize, wc, data, tx);
4326 ASSERT(error == 0 || error == EEXIST);
4330 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4334 error = zap_remove(os, object, name, tx);
4335 ASSERT(error == 0 || error == ENOENT);
4344 * Commit callback data.
4346 typedef struct ztest_cb_data {
4347 list_node_t zcd_node;
4349 int zcd_expected_err;
4350 boolean_t zcd_added;
4351 boolean_t zcd_called;
4355 /* This is the actual commit callback function */
4357 ztest_commit_callback(void *arg, int error)
4359 ztest_cb_data_t *data = arg;
4360 uint64_t synced_txg;
4362 VERIFY(data != NULL);
4363 VERIFY3S(data->zcd_expected_err, ==, error);
4364 VERIFY(!data->zcd_called);
4366 synced_txg = spa_last_synced_txg(data->zcd_spa);
4367 if (data->zcd_txg > synced_txg)
4368 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4369 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4372 data->zcd_called = B_TRUE;
4374 if (error == ECANCELED) {
4375 ASSERT0(data->zcd_txg);
4376 ASSERT(!data->zcd_added);
4379 * The private callback data should be destroyed here, but
4380 * since we are going to check the zcd_called field after
4381 * dmu_tx_abort(), we will destroy it there.
4386 /* Was this callback added to the global callback list? */
4387 if (!data->zcd_added)
4390 ASSERT3U(data->zcd_txg, !=, 0);
4392 /* Remove our callback from the list */
4393 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4394 list_remove(&zcl.zcl_callbacks, data);
4395 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4398 umem_free(data, sizeof (ztest_cb_data_t));
4401 /* Allocate and initialize callback data structure */
4402 static ztest_cb_data_t *
4403 ztest_create_cb_data(objset_t *os, uint64_t txg)
4405 ztest_cb_data_t *cb_data;
4407 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4409 cb_data->zcd_txg = txg;
4410 cb_data->zcd_spa = dmu_objset_spa(os);
4416 * If a number of txgs equal to this threshold have been created after a commit
4417 * callback has been registered but not called, then we assume there is an
4418 * implementation bug.
4420 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2)
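/*
 * Assuming the usual TXG_CONCURRENT_STATES of 3 (open, quiescing,
 * syncing), this threshold works out to 5 txgs.
 */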
4423 * Commit callback test.
4426 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4428 objset_t *os = zd->zd_os;
4431 ztest_cb_data_t *cb_data[3], *tmp_cb;
4432 uint64_t old_txg, txg;
4435 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4437 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4440 tx = dmu_tx_create(os);
4442 cb_data[0] = ztest_create_cb_data(os, 0);
4443 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4445 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
4447 /* Every once in a while, abort the transaction on purpose */
4448 if (ztest_random(100) == 0)
4452 error = dmu_tx_assign(tx, TXG_NOWAIT);
4454 txg = error ? 0 : dmu_tx_get_txg(tx);
4456 cb_data[0]->zcd_txg = txg;
4457 cb_data[1] = ztest_create_cb_data(os, txg);
4458 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4462 * It's not a strict requirement to call the registered
4463 * callbacks from inside dmu_tx_abort(), but that is what
4464 * is supposed to happen in the current implementation,
4465 * so we will check for that.
4467 for (i = 0; i < 2; i++) {
4468 cb_data[i]->zcd_expected_err = ECANCELED;
4469 VERIFY(!cb_data[i]->zcd_called);
4474 for (i = 0; i < 2; i++) {
4475 VERIFY(cb_data[i]->zcd_called);
4476 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4482 cb_data[2] = ztest_create_cb_data(os, txg);
4483 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4486 * Read existing data to make sure there isn't a future leak.
4488 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
4489 &old_txg, DMU_READ_PREFETCH));
4492 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4495 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
4497 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4500 * Since commit callbacks don't have any ordering requirement and since
4501 * it is theoretically possible for a commit callback to be called
4502 * after an arbitrary amount of time has elapsed since its txg has been
4503 * synced, it is difficult to reliably determine whether a commit
4504 * callback hasn't been called due to high load or due to a flawed
4507 * In practice, we will assume that if after a certain number of txgs a
4508 * commit callback hasn't been called, then most likely there's an
4509 * implementation bug.
4511 tmp_cb = list_head(&zcl.zcl_callbacks);
4512 if (tmp_cb != NULL &&
4513 (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) {
4514 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4515 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4519 * Let's find the place to insert our callbacks.
4521 * Even though the list is ordered by txg, it is possible for the
4522 * insertion point to not be the end because our txg may already be
4523 * quiescing at this point and other callbacks in the open txg
4524 * (from other objsets) may have sneaked in.
4526 tmp_cb = list_tail(&zcl.zcl_callbacks);
4527 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4528 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4530 /* Add the 3 callbacks to the list */
4531 for (i = 0; i < 3; i++) {
4533 list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
4535 list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4538 cb_data[i]->zcd_added = B_TRUE;
4539 VERIFY(!cb_data[i]->zcd_called);
4541 tmp_cb = cb_data[i];
4544 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4551 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4553 zfs_prop_t proplist[] = {
4555 ZFS_PROP_COMPRESSION,
4560 (void) rw_rdlock(&ztest_name_lock);
4562 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4563 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4564 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4566 (void) rw_unlock(&ztest_name_lock);
4571 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4573 nvlist_t *props = NULL;
4575 (void) rw_rdlock(&ztest_name_lock);
4577 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4578 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4580 VERIFY0(spa_prop_get(ztest_spa, &props));
4582 if (ztest_opts.zo_verbose >= 6)
4583 dump_nvlist(props, 4);
4587 (void) rw_unlock(&ztest_name_lock);
4591 user_release_one(const char *snapname, const char *holdname)
4593 nvlist_t *snaps, *holds;
4596 snaps = fnvlist_alloc();
4597 holds = fnvlist_alloc();
4598 fnvlist_add_boolean(holds, holdname);
4599 fnvlist_add_nvlist(snaps, snapname, holds);
4600 fnvlist_free(holds);
4601 error = dsl_dataset_user_release(snaps, NULL);
4602 fnvlist_free(snaps);
4607 * Test snapshot hold/release and deferred destroy.
4610 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4613 objset_t *os = zd->zd_os;
4617 char clonename[100];
4619 char osname[MAXNAMELEN];
4622 (void) rw_rdlock(&ztest_name_lock);
4624 dmu_objset_name(os, osname);
4626 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id);
4627 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
4628 (void) snprintf(clonename, sizeof (clonename),
4629 "%s/ch1_%llu", osname, id);
4630 (void) snprintf(tag, sizeof (tag), "tag_%llu", id);
4633 * Clean up from any previous run.
4635 error = dsl_destroy_head(clonename);
4636 if (error != ENOENT)
4638 error = user_release_one(fullname, tag);
4639 if (error != ESRCH && error != ENOENT)
4641 error = dsl_destroy_snapshot(fullname, B_FALSE);
4642 if (error != ENOENT)
4646 * Create snapshot, clone it, mark snap for deferred destroy,
4647 * destroy clone, verify snap was also destroyed.
4649 error = dmu_objset_snapshot_one(osname, snapname);
4651 if (error == ENOSPC) {
4652 ztest_record_enospc("dmu_objset_snapshot");
4655 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4658 error = dmu_objset_clone(clonename, fullname);
4660 if (error == ENOSPC) {
4661 ztest_record_enospc("dmu_objset_clone");
4664 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
4667 error = dsl_destroy_snapshot(fullname, B_TRUE);
4669 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
4673 error = dsl_destroy_head(clonename);
4675 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error);
4677 error = dmu_objset_hold(fullname, FTAG, &origin);
4678 if (error != ENOENT)
4679 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4682 * Create snapshot, add temporary hold, verify that we can't
4683 * destroy a held snapshot, mark for deferred destroy,
4684 * release hold, verify snapshot was destroyed.
4686 error = dmu_objset_snapshot_one(osname, snapname);
4688 if (error == ENOSPC) {
4689 ztest_record_enospc("dmu_objset_snapshot");
4692 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4695 holds = fnvlist_alloc();
4696 fnvlist_add_string(holds, fullname, tag);
4697 error = dsl_dataset_user_hold(holds, 0, NULL);
4698 fnvlist_free(holds);
4701 fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag);
4703 error = dsl_destroy_snapshot(fullname, B_FALSE);
4704 if (error != EBUSY) {
4705 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
4709 error = dsl_destroy_snapshot(fullname, B_TRUE);
4711 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
4715 error = user_release_one(fullname, tag);
4717 fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
4719 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
4722 (void) rw_unlock(&ztest_name_lock);
4726 * Inject random faults into the on-disk data.
4730 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4732 ztest_shared_t *zs = ztest_shared;
4733 spa_t *spa = ztest_spa;
4737 uint64_t bad = 0x1990c0ffeedecadeULL;
4739 char path0[MAXPATHLEN];
4740 char pathrand[MAXPATHLEN];
4742 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4748 boolean_t islog = B_FALSE;
4750 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4751 maxfaults = MAXFAULTS();
4752 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4753 mirror_save = zs->zs_mirrors;
4754 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4756 ASSERT(leaves >= 1);
4759 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4761 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4763 if (ztest_random(2) == 0) {
4765 * Inject errors on a normal data device or slog device.
4767 top = ztest_random_vdev_top(spa, B_TRUE);
4768 leaf = ztest_random(leaves) + zs->zs_splits;
4771 * Generate paths to the first leaf in this top-level vdev,
4772 * and to the random leaf we selected. We'll induce transient
4773 * write failures and random online/offline activity on leaf 0,
4774 * and we'll write random garbage to the randomly chosen leaf.
4776 (void) snprintf(path0, sizeof (path0), ztest_dev_template,
4777 ztest_opts.zo_dir, ztest_opts.zo_pool,
4778 top * leaves + zs->zs_splits);
4779 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
4780 ztest_opts.zo_dir, ztest_opts.zo_pool,
4781 top * leaves + leaf);
4783 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
4784 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
4787 if (vd0 != NULL && maxfaults != 1) {
4789 * Make vd0 explicitly claim to be unreadable,
4790 * or unwritable, or reach behind its back
4791 * and close the underlying fd. We can do this if
4792 * maxfaults == 0 because we'll fail and reexecute,
4793 * and we can do it if maxfaults >= 2 because we'll
4794 * have enough redundancy. If maxfaults == 1, the
4795 * combination of this with injection of random data
4796 * corruption below exceeds the pool's fault tolerance.
4798 vdev_file_t *vf = vd0->vdev_tsd;
4800 if (vf != NULL && ztest_random(3) == 0) {
4801 (void) close(vf->vf_vnode->v_fd);
4802 vf->vf_vnode->v_fd = -1;
4803 } else if (ztest_random(2) == 0) {
4804 vd0->vdev_cant_read = B_TRUE;
4806 vd0->vdev_cant_write = B_TRUE;
4808 guid0 = vd0->vdev_guid;
4812 * Inject errors on an l2cache device.
4814 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4816 if (sav->sav_count == 0) {
4817 spa_config_exit(spa, SCL_STATE, FTAG);
4820 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4821 guid0 = vd0->vdev_guid;
4822 (void) strcpy(path0, vd0->vdev_path);
4823 (void) strcpy(pathrand, vd0->vdev_path);
4827 maxfaults = INT_MAX; /* no limit on cache devices */
4830 spa_config_exit(spa, SCL_STATE, FTAG);
4833 * If we can tolerate two or more faults, or we're dealing
4834 * with a slog, randomly online/offline vd0.
4836 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4837 if (ztest_random(10) < 6) {
4838 int flags = (ztest_random(2) == 0 ?
4839 ZFS_OFFLINE_TEMPORARY : 0);
4842 * We have to grab the zs_name_lock as writer to
4843 * prevent a race between offlining a slog and
4844 * destroying a dataset. Offlining the slog will
4845 * grab a reference on the dataset which may cause
4846 * dmu_objset_destroy() to fail with EBUSY thus
4847 * leaving the dataset in an inconsistent state.
4850 (void) rw_wrlock(&ztest_name_lock);
4852 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4855 (void) rw_unlock(&ztest_name_lock);
4858 * Ideally we would like to be able to randomly
4859 * call vdev_[on|off]line without holding locks
4860 * to force unpredictable failures but the side
4861 * effects of vdev_[on|off]line prevent us from
4862 * doing so. We grab the ztest_vdev_lock here to
4863 * prevent a race between injection testing and
4866 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4867 (void) vdev_online(spa, guid0, 0, NULL);
4868 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4876 * We have at least single-fault tolerance, so inject data corruption.
4878 fd = open(pathrand, O_RDWR);
4880 if (fd == -1) /* we hit a gap in the device namespace */
4883 fsize = lseek(fd, 0, SEEK_END);
4885 while (--iters != 0) {
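/*
 * Reading the expression below: each leaf file is treated as a series
 * of (leaves << bshift)-byte chunks; we pick a random chunk, step to
 * this leaf's 2^bshift-byte slice within it, and then choose an
 * 8-byte-aligned offset in the first half of that slice.  Keying the
 * slice on the leaf number presumably keeps damage injected on
 * different leaves from ever overlapping the same logical range,
 * which matters when the pool has only single-fault tolerance.
 */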
4886 offset = ztest_random(fsize / (leaves << bshift)) *
4887 (leaves << bshift) + (leaf << bshift) +
4888 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
4890 if (offset >= fsize)
4893 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4894 if (mirror_save != zs->zs_mirrors) {
4895 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4900 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
4901 fatal(1, "can't inject bad word at 0x%llx in %s",
4904 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4906 if (ztest_opts.zo_verbose >= 7)
4907 (void) printf("injected bad word into %s,"
4908 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
4915 * Verify that DDT repair works as expected.
4918 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4920 ztest_shared_t *zs = ztest_shared;
4921 spa_t *spa = ztest_spa;
4922 objset_t *os = zd->zd_os;
4924 uint64_t object, blocksize, txg, pattern, psize;
4925 enum zio_checksum checksum = spa_dedup_checksum(spa);
4930 int copies = 2 * ZIO_DEDUPDITTO_MIN;
4932 blocksize = ztest_random_blocksize();
4933 blocksize = MIN(blocksize, 2048); /* because we write so many */
4935 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4937 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4941 * Take the name lock as writer to prevent anyone else from changing
4942 * the pool and dataset properties we need to maintain during this test.
4944 (void) rw_wrlock(&ztest_name_lock);
4946 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
4948 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
4950 (void) rw_unlock(&ztest_name_lock);
4954 object = od[0].od_object;
4955 blocksize = od[0].od_blocksize;
4956 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
4958 ASSERT(object != 0);
4960 tx = dmu_tx_create(os);
4961 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
4962 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
4964 (void) rw_unlock(&ztest_name_lock);
4969 * Write all the copies of our block.
4971 for (int i = 0; i < copies; i++) {
4972 uint64_t offset = i * blocksize;
4973 int error = dmu_buf_hold(os, object, offset, FTAG, &db,
4974 DMU_READ_NO_PREFETCH);
4976 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u",
4977 os, (u_longlong_t)object, (u_longlong_t)offset, error);
4979 ASSERT(db->db_offset == offset);
4980 ASSERT(db->db_size == blocksize);
4981 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
4982 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
4983 dmu_buf_will_fill(db, tx);
4984 ztest_pattern_set(db->db_data, db->db_size, pattern);
4985 dmu_buf_rele(db, FTAG);
4989 txg_wait_synced(spa_get_dsl(spa), txg);
4992 * Find out what block we got.
4994 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db,
4995 DMU_READ_NO_PREFETCH));
4996 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
4997 dmu_buf_rele(db, FTAG);
5000 * Damage the block. Dedup-ditto will save us when we read it later.
5002 psize = BP_GET_PSIZE(&blk);
5003 buf = zio_buf_alloc(psize);
5004 ztest_pattern_set(buf, psize, ~pattern);
5006 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
5007 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
5008 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
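/*
 * ZIO_FLAG_INDUCE_DAMAGE appears to be what allows this rewrite to
 * scribble the inverted pattern over the already-allocated block in
 * place, leaving a checksum mismatch for a later read to detect and
 * heal from the dedup-ditto copy.
 */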
5010 zio_buf_free(buf, psize);
5012 (void) rw_unlock(&ztest_name_lock);
5020 ztest_scrub(ztest_ds_t *zd, uint64_t id)
5022 spa_t *spa = ztest_spa;
5024 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5025 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
5026 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5030 * Change the guid for the pool.
5034 ztest_reguid(ztest_ds_t *zd, uint64_t id)
5036 spa_t *spa = ztest_spa;
5037 uint64_t orig, load;
5040 orig = spa_guid(spa);
5041 load = spa_load_guid(spa);
5043 (void) rw_wrlock(&ztest_name_lock);
5044 error = spa_change_guid(spa);
5045 (void) rw_unlock(&ztest_name_lock);
5050 if (ztest_opts.zo_verbose >= 4) {
5051 (void) printf("Changed guid old %llu -> %llu\n",
5052 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
5055 VERIFY3U(orig, !=, spa_guid(spa));
5056 VERIFY3U(load, ==, spa_load_guid(spa));
5060 * Rename the pool to a different name and then rename it back.
5064 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
5066 char *oldname, *newname;
5069 (void) rw_wrlock(&ztest_name_lock);
5071 oldname = ztest_opts.zo_pool;
5072 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
5073 (void) strcpy(newname, oldname);
5074 (void) strcat(newname, "_tmp");
5079 VERIFY3U(0, ==, spa_rename(oldname, newname));
5082 * Try to open it under the old name, which shouldn't exist
5084 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5087 * Open it under the new name and make sure it's still the same spa_t.
5089 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5091 ASSERT(spa == ztest_spa);
5092 spa_close(spa, FTAG);
5095 * Rename it back to the original
5097 VERIFY3U(0, ==, spa_rename(newname, oldname));
5100 * Make sure it can still be opened
5102 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5104 ASSERT(spa == ztest_spa);
5105 spa_close(spa, FTAG);
5107 umem_free(newname, strlen(newname) + 1);
5109 (void) rw_unlock(&ztest_name_lock);
5113 * Verify pool integrity by running zdb.
5116 ztest_run_zdb(char *pool)
5119 char zdb[MAXPATHLEN + MAXNAMELEN + 20];
5127 strlcpy(zdb, "/usr/bin/ztest", sizeof (zdb));
5129 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
5130 bin = strstr(zdb, "/usr/bin/");
5131 ztest = strstr(bin, "/ztest");
5133 isalen = ztest - isa;
5137 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
5140 ztest_opts.zo_verbose >= 3 ? "s" : "",
5141 ztest_opts.zo_verbose >= 4 ? "v" : "",
5146 if (ztest_opts.zo_verbose >= 5)
5147 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
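/*
 * The command built above runs zdb with -b (block accounting) and a
 * doubled -c for checksum verification -- at least, that is the usual
 * reading of those flags -- plus "s"/"v" from the verbosity checks and
 * -U to point zdb at this run's private zpool.cache instead of the
 * system default.
 */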
5149 fp = popen(zdb, "r");
5152 while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
5153 if (ztest_opts.zo_verbose >= 3)
5154 (void) printf("%s", zbuf);
5156 status = pclose(fp);
5161 ztest_dump_core = 0;
5162 if (WIFEXITED(status))
5163 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
5165 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
5169 ztest_walk_pool_directory(char *header)
5173 if (ztest_opts.zo_verbose >= 6)
5174 (void) printf("%s\n", header);
5176 mutex_enter(&spa_namespace_lock);
5177 while ((spa = spa_next(spa)) != NULL)
5178 if (ztest_opts.zo_verbose >= 6)
5179 (void) printf("\t%s\n", spa_name(spa));
5180 mutex_exit(&spa_namespace_lock);
5184 ztest_spa_import_export(char *oldname, char *newname)
5186 nvlist_t *config, *newconfig;
5191 if (ztest_opts.zo_verbose >= 4) {
5192 (void) printf("import/export: old = %s, new = %s\n",
5197 * Clean up from previous runs.
5199 (void) spa_destroy(newname);
5202 * Get the pool's configuration and guid.
5204 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5207 * Kick off a scrub to tickle scrub/export races.
5209 if (ztest_random(2) == 0)
5210 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5212 pool_guid = spa_guid(spa);
5213 spa_close(spa, FTAG);
5215 ztest_walk_pool_directory("pools before export");
5220 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5222 ztest_walk_pool_directory("pools after export");
5227 newconfig = spa_tryimport(config);
5228 ASSERT(newconfig != NULL);
5229 nvlist_free(newconfig);
5232 * Import it under the new name.
5234 error = spa_import(newname, config, NULL, 0);
5236 dump_nvlist(config, 0);
5237 fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
5238 oldname, newname, error);
5241 ztest_walk_pool_directory("pools after import");
5244 * Try to import it again -- should fail with EEXIST.
5246 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5249 * Try to import it under a different name -- should fail with EEXIST.
5251 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5254 * Verify that the pool is no longer visible under the old name.
5256 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5259 * Verify that we can open and close the pool using the new name.
5261 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5262 ASSERT(pool_guid == spa_guid(spa));
5263 spa_close(spa, FTAG);
5265 nvlist_free(config);
5269 ztest_resume(spa_t *spa)
5271 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5272 (void) printf("resuming from suspended state\n");
5273 spa_vdev_state_enter(spa, SCL_NONE);
5274 vdev_clear(spa, NULL);
5275 (void) spa_vdev_state_exit(spa, NULL, 0);
5276 (void) zio_resume(spa);
5280 ztest_resume_thread(void *arg)
5284 while (!ztest_exiting) {
5285 if (spa_suspended(spa))
5287 (void) poll(NULL, 0, 100);
5293 ztest_deadman_thread(void *arg)
5295 ztest_shared_t *zs = arg;
5299 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace;
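/*
 * If the sleep below ever runs to completion, the run has outlived
 * its allotted time plus the grace period, i.e. it is hung.
 */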
5301 (void) poll(NULL, 0, (int)(1000 * delta));
5303 fatal(0, "failed to complete within %d seconds of deadline", grace);
5309 ztest_execute(int test, ztest_info_t *zi, uint64_t id)
5311 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
5312 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
5313 hrtime_t functime = gethrtime();
5315 for (int i = 0; i < zi->zi_iters; i++)
5316 zi->zi_func(zd, id);
5318 functime = gethrtime() - functime;
5320 atomic_add_64(&zc->zc_count, 1);
5321 atomic_add_64(&zc->zc_time, functime);
5323 if (ztest_opts.zo_verbose >= 4) {
5325 (void) dladdr((void *)zi->zi_func, &dli);
5326 (void) printf("%6.2f sec in %s\n",
5327 (double)functime / NANOSEC, dli.dli_sname);
5332 ztest_thread(void *arg)
5335 uint64_t id = (uintptr_t)arg;
5336 ztest_shared_t *zs = ztest_shared;
5340 ztest_shared_callstate_t *zc;
5342 while ((now = gethrtime()) < zs->zs_thread_stop) {
5344 * See if it's time to force a crash.
5346 if (now > zs->zs_thread_kill)
5350 * If we're getting ENOSPC with some regularity, stop.
5352 if (zs->zs_enospc_count > 10)
5356 * Pick a random function to execute.
5358 rand = ztest_random(ZTEST_FUNCS);
5359 zi = &ztest_info[rand];
5360 zc = ZTEST_GET_SHARED_CALLSTATE(rand);
5361 call_next = zc->zc_next;
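/*
 * zc_next is shared by every thread: only the thread that wins the
 * compare-and-swap below advances it (by a random amount averaging
 * zi_interval[0]) and gets to run the function, so each test fires at
 * roughly its configured interval regardless of thread count.
 */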
5363 if (now >= call_next &&
5364 atomic_cas_64(&zc->zc_next, call_next, call_next +
5365 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
5366 ztest_execute(rand, zi, id);
5374 ztest_dataset_name(char *dsname, char *pool, int d)
5376 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5380 ztest_dataset_destroy(int d)
5382 char name[MAXNAMELEN];
5384 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5386 if (ztest_opts.zo_verbose >= 3)
5387 (void) printf("Destroying %s to free up space\n", name);
5390 * Cleanup any non-standard clones and snapshots. In general,
5391 * ztest thread t operates on dataset (t % ztest_opts.zo_datasets),
5392 * so there may be more than one thing to clean up.
5394 for (int t = d; t < ztest_opts.zo_threads;
5395 t += ztest_opts.zo_datasets) {
5396 ztest_dsl_dataset_cleanup(name, t);
5399 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5400 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5404 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5406 uint64_t usedobjs, dirobjs, scratch;
5409 * ZTEST_DIROBJ is the object directory for the entire dataset.
5410 * Therefore, the number of objects in use should equal the
5411 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5412 * If not, we have an object leak.
5414 * Note that we can only check this in ztest_dataset_open(),
5415 * when the open-context and syncing-context values agree.
5416 * That's because zap_count() returns the open-context value,
5417 * while dmu_objset_space() returns the rootbp fill count.
5419 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5420 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5421 ASSERT3U(dirobjs + 1, ==, usedobjs);
5425 ztest_dataset_open(int d)
5427 ztest_ds_t *zd = &ztest_ds[d];
5428 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5431 char name[MAXNAMELEN];
5434 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5436 (void) rw_rdlock(&ztest_name_lock);
5438 error = ztest_dataset_create(name);
5439 if (error == ENOSPC) {
5440 (void) rw_unlock(&ztest_name_lock);
5441 ztest_record_enospc(FTAG);
5444 ASSERT(error == 0 || error == EEXIST);
5446 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os));
5447 (void) rw_unlock(&ztest_name_lock);
5449 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5451 zilog = zd->zd_zilog;
5453 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5454 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5455 fatal(0, "missing log records: claimed %llu < committed %llu",
5456 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5458 ztest_dataset_dirobj_verify(zd);
5460 zil_replay(os, zd, ztest_replay_vector);
5462 ztest_dataset_dirobj_verify(zd);
5464 if (ztest_opts.zo_verbose >= 6)
5465 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5467 (u_longlong_t)zilog->zl_parse_blk_count,
5468 (u_longlong_t)zilog->zl_parse_lr_count,
5469 (u_longlong_t)zilog->zl_replaying_seq);
5471 zilog = zil_open(os, ztest_get_data);
5473 if (zilog->zl_replaying_seq != 0 &&
5474 zilog->zl_replaying_seq < committed_seq)
5475 fatal(0, "missing log records: replayed %llu < committed %llu",
5476 zilog->zl_replaying_seq, committed_seq);
5482 ztest_dataset_close(int d)
5484 ztest_ds_t *zd = &ztest_ds[d];
5486 zil_close(zd->zd_zilog);
5487 dmu_objset_disown(zd->zd_os, zd);
5493 * Kick off threads to run tests on all datasets in parallel.
5496 ztest_run(ztest_shared_t *zs)
5501 thread_t resume_tid;
5504 ztest_exiting = B_FALSE;
5507 * Initialize parent/child shared state.
5509 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5510 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5512 zs->zs_thread_start = gethrtime();
5513 zs->zs_thread_stop =
5514 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5515 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5516 zs->zs_thread_kill = zs->zs_thread_stop;
5517 if (ztest_random(100) < ztest_opts.zo_killrate) {
5518 zs->zs_thread_kill -=
5519 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5522 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
5524 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5525 offsetof(ztest_cb_data_t, zcd_node));
5530 kernel_init(FREAD | FWRITE);
5531 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5532 spa->spa_debug = B_TRUE;
5535 VERIFY0(dmu_objset_own(ztest_opts.zo_pool,
5536 DMU_OST_ANY, B_TRUE, FTAG, &os));
5537 zs->zs_guid = dmu_objset_fsid_guid(os);
5538 dmu_objset_disown(os, FTAG);
5540 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
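/*
 * The idea, presumably, is that with the ditto threshold this low the
 * 2 * ZIO_DEDUPDITTO_MIN identical copies written by ztest_ddt_repair()
 * are enough to make the DDT keep an extra copy of the block, giving
 * the repair path something to heal from.
 */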
5543 * We don't expect the pool to suspend unless maxfaults == 0,
5544 * in which case ztest_fault_inject() temporarily takes away
5545 * the only valid replica.
5547 if (MAXFAULTS() == 0)
5548 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5550 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5553 * Create a thread to periodically resume suspended I/O.
5555 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5559 * Create a deadman thread to abort() if we hang.
5561 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
5565 * Verify that we can safely inquire about any object,
5566 * whether it's allocated or not. To make it interesting,
5567 * we probe a 5-wide window around each power of two.
5568 * This hits all edge cases, including zero and the max.
5570 for (int t = 0; t < 64; t++) {
5571 for (int d = -5; d <= 5; d++) {
5572 error = dmu_object_info(spa->spa_meta_objset,
5573 (1ULL << t) + d, NULL);
5574 ASSERT(error == 0 || error == ENOENT ||
5580 * If we got any ENOSPC errors on the previous run, destroy something.
5582 if (zs->zs_enospc_count != 0) {
5583 int d = ztest_random(ztest_opts.zo_datasets);
5584 ztest_dataset_destroy(d);
5586 zs->zs_enospc_count = 0;
5588 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
5591 if (ztest_opts.zo_verbose >= 4)
5592 (void) printf("starting main threads...\n");
5595 * Kick off all the tests that run in parallel.
5597 for (int t = 0; t < ztest_opts.zo_threads; t++) {
5598 if (t < ztest_opts.zo_datasets &&
5599 ztest_dataset_open(t) != 0)
5601 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
5602 THR_BOUND, &tid[t]) == 0);
5606 * Wait for all of the tests to complete. We go in reverse order
5607 * so we don't close datasets while threads are still using them.
5609 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5610 VERIFY(thr_join(tid[t], NULL, NULL) == 0);
5611 if (t < ztest_opts.zo_datasets)
5612 ztest_dataset_close(t);
5615 txg_wait_synced(spa_get_dsl(spa), 0);
5617 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5618 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5620 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
5622 /* Kill the resume thread */
5623 ztest_exiting = B_TRUE;
5624 VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
5628 * Right before closing the pool, kick off a bunch of async I/O;
5629 * spa_close() should wait for it to complete.
5631 for (uint64_t object = 1; object < 50; object++)
5632 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5634 spa_close(spa, FTAG);
5637 * Verify that we can loop over all pools.
5639 mutex_enter(&spa_namespace_lock);
5640 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5641 if (ztest_opts.zo_verbose > 3)
5642 (void) printf("spa_next: found %s\n", spa_name(spa));
5643 mutex_exit(&spa_namespace_lock);
5646 * Verify that we can export the pool and reimport it under a
5649 if (ztest_random(2) == 0) {
5650 char name[MAXNAMELEN];
5651 (void) snprintf(name, MAXNAMELEN, "%s_import",
5652 ztest_opts.zo_pool);
5653 ztest_spa_import_export(ztest_opts.zo_pool, name);
5654 ztest_spa_import_export(name, ztest_opts.zo_pool);
5659 list_destroy(&zcl.zcl_callbacks);
5661 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5663 (void) rwlock_destroy(&ztest_name_lock);
5664 (void) _mutex_destroy(&ztest_vdev_lock);
5670 ztest_ds_t *zd = &ztest_ds[0];
5674 if (ztest_opts.zo_verbose >= 3)
5675 (void) printf("testing spa_freeze()...\n");
5677 kernel_init(FREAD | FWRITE);
5678 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5679 VERIFY3U(0, ==, ztest_dataset_open(0));
5680 spa->spa_debug = B_TRUE;
5684 * Force the first log block to be transactionally allocated.
5685 * We have to do this before we freeze the pool -- otherwise
5686 * the log chain won't be anchored.
5688 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5689 ztest_dmu_object_alloc_free(zd, 0);
5690 zil_commit(zd->zd_zilog, 0);
5693 txg_wait_synced(spa_get_dsl(spa), 0);
5696 * Freeze the pool. This stops spa_sync() from doing anything,
5697 * so that the only way to record changes from now on is the ZIL.
5702 * Run tests that generate log records but don't alter the pool config
5703 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5704 * We do a txg_wait_synced() after each iteration to force the txg
5705 * to increase well beyond the last synced value in the uberblock.
5706 * The ZIL should be OK with that.
5708 while (ztest_random(10) != 0 &&
5709 numloops++ < ztest_opts.zo_maxloops) {
5710 ztest_dmu_write_parallel(zd, 0);
5711 ztest_dmu_object_alloc_free(zd, 0);
5712 txg_wait_synced(spa_get_dsl(spa), 0);
5716 * Commit all of the changes we just generated.
5718 zil_commit(zd->zd_zilog, 0);
5719 txg_wait_synced(spa_get_dsl(spa), 0);
5722 * Close our dataset and close the pool.
5724 ztest_dataset_close(0);
5725 spa_close(spa, FTAG);
5729 * Open and close the pool and dataset to induce log replay.
5731 kernel_init(FREAD | FWRITE);
5732 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5733 ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
5734 VERIFY3U(0, ==, ztest_dataset_open(0));
5735 ztest_dataset_close(0);
5737 spa->spa_debug = B_TRUE;
5739 txg_wait_synced(spa_get_dsl(spa), 0);
5740 ztest_reguid(NULL, 0);
5742 spa_close(spa, FTAG);
5747 print_time(hrtime_t t, char *timebuf)
5749 hrtime_t s = t / NANOSEC;
5750 hrtime_t m = s / 60;
5751 hrtime_t h = m / 60;
5752 hrtime_t d = h / 24;
5761 (void) sprintf(timebuf,
5762 "%llud%02lluh%02llum%02llus", d, h, m, s);
5764 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
5766 (void) sprintf(timebuf, "%llum%02llus", m, s);
5768 (void) sprintf(timebuf, "%llus", s);
5776 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5777 if (ztest_random(2) == 0)
5779 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5785 * Create a storage pool with the given name and initial vdev size.
5786 * Then test spa_freeze() functionality.
5789 ztest_init(ztest_shared_t *zs)
5792 nvlist_t *nvroot, *props;
5794 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5795 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5797 kernel_init(FREAD | FWRITE);
5800 * Create the storage pool.
5802 (void) spa_destroy(ztest_opts.zo_pool);
5803 ztest_shared->zs_vdev_next_leaf = 0;
5805 zs->zs_mirrors = ztest_opts.zo_mirrors;
5806 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
5807 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5808 props = make_random_props();
5809 for (int i = 0; i < SPA_FEATURES; i++) {
5811 (void) snprintf(buf, sizeof (buf), "feature@%s",
5812 spa_feature_table[i].fi_uname);
5813 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
5815 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
5816 nvlist_free(nvroot);
5818 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5819 zs->zs_metaslab_sz =
5820 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5822 spa_close(spa, FTAG);
5826 ztest_run_zdb(ztest_opts.zo_pool);
5830 ztest_run_zdb(ztest_opts.zo_pool);
5832 (void) rwlock_destroy(&ztest_name_lock);
5833 (void) _mutex_destroy(&ztest_vdev_lock);
5839 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
5841 ztest_fd_data = mkstemp(ztest_name_data);
5842 ASSERT3S(ztest_fd_data, >=, 0);
5843 (void) unlink(ztest_name_data);
5848 shared_data_size(ztest_shared_hdr_t *hdr)
5852 size = hdr->zh_hdr_size;
5853 size += hdr->zh_opts_size;
5854 size += hdr->zh_size;
5855 size += hdr->zh_stats_size * hdr->zh_stats_count;
5856 size += hdr->zh_ds_size * hdr->zh_ds_count;
5865 ztest_shared_hdr_t *hdr;
5867 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5868 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
5869 ASSERT(hdr != MAP_FAILED);
5871 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
5873 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5874 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5875 hdr->zh_size = sizeof (ztest_shared_t);
5876 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5877 hdr->zh_stats_count = ZTEST_FUNCS;
5878 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5879 hdr->zh_ds_count = ztest_opts.zo_datasets;
5881 size = shared_data_size(hdr);
5882 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));
5884 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5891 ztest_shared_hdr_t *hdr;
5894 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5895 PROT_READ, MAP_SHARED, ztest_fd_data, 0);
5896 ASSERT(hdr != MAP_FAILED);
5898 size = shared_data_size(hdr);
5900 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5901 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5902 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
5903 ASSERT(hdr != MAP_FAILED);
5904 buf = (uint8_t *)hdr;
5906 offset = hdr->zh_hdr_size;
5907 ztest_shared_opts = (void *)&buf[offset];
5908 offset += hdr->zh_opts_size;
5909 ztest_shared = (void *)&buf[offset];
5910 offset += hdr->zh_size;
5911 ztest_shared_callstate = (void *)&buf[offset];
5912 offset += hdr->zh_stats_size * hdr->zh_stats_count;
5913 ztest_shared_ds = (void *)&buf[offset];
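/*
 * The shared file is thus laid out back to back as header, options,
 * shared state, one callstate per test function, and one
 * ztest_shared_ds_t per dataset; the sizes and counts recorded in the
 * header are what let a re-exec'd child rebuild these pointers.
 */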
5917 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
5921 char *cmdbuf = NULL;
5926 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
5927 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
5932 fatal(1, "fork failed");
5934 if (pid == 0) { /* child */
5935 char *emptyargv[2] = { cmd, NULL };
5936 char fd_data_str[12];
5938 struct rlimit rl = { 1024, 1024 };
5939 (void) setrlimit(RLIMIT_NOFILE, &rl);
5941 (void) close(ztest_fd_rand);
5943 snprintf(fd_data_str, 12, "%d", ztest_fd_data));
5944 VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));
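/*
 * The re-exec'd child locates the shared mmap file through this
 * environment variable; see the getenv("ZTEST_FD_DATA") handling
 * in main().
 */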
5946 (void) enable_extended_FILE_stdio(-1, -1);
5947 if (libpath != NULL)
5948 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
5950 (void) execv(cmd, emptyargv);
5952 (void) execvp(cmd, emptyargv);
5954 ztest_dump_core = B_FALSE;
5955 fatal(B_TRUE, "exec failed: %s", cmd);
5958 if (cmdbuf != NULL) {
5959 umem_free(cmdbuf, MAXPATHLEN);
5963 while (waitpid(pid, &status, 0) != pid)
5965 if (statusp != NULL)
5968 if (WIFEXITED(status)) {
5969 if (WEXITSTATUS(status) != 0) {
5970 (void) fprintf(stderr, "child exited with code %d\n",
5971 WEXITSTATUS(status));
5975 } else if (WIFSIGNALED(status)) {
5976 if (!ignorekill || WTERMSIG(status) != SIGKILL) {
5977 (void) fprintf(stderr, "child died with signal %d\n",
5983 (void) fprintf(stderr, "something strange happened to child\n");
5990 ztest_run_init(void)
5992 ztest_shared_t *zs = ztest_shared;
5994 ASSERT(ztest_opts.zo_init != 0);
5997 * Blow away any existing copy of zpool.cache
5999 (void) remove(spa_config_path);
6002 * Create and initialize our storage pool.
6004 for (int i = 1; i <= ztest_opts.zo_init; i++) {
6005 bzero(zs, sizeof (ztest_shared_t));
6006 if (ztest_opts.zo_verbose >= 3 &&
6007 ztest_opts.zo_init != 1) {
6008 (void) printf("ztest_init(), pass %d\n", i);
6015 main(int argc, char **argv)
6023 ztest_shared_callstate_t *zc;
6029 char *fd_data_str = getenv("ZTEST_FD_DATA");
6031 (void) setvbuf(stdout, NULL, _IOLBF, 0);
6033 dprintf_setup(&argc, argv);
6035 ztest_fd_rand = open("/dev/urandom", O_RDONLY);
6036 ASSERT3S(ztest_fd_rand, >=, 0);
6039 process_options(argc, argv);
6044 bcopy(&ztest_opts, ztest_shared_opts,
6045 sizeof (*ztest_shared_opts));
6047 ztest_fd_data = atoi(fd_data_str);
6049 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
6051 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
6053 /* Override location of zpool.cache */
6054 VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
6055 ztest_opts.zo_dir), !=, -1);
6057 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
6062 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
6063 metaslab_df_alloc_threshold =
6064 zs->zs_metaslab_df_alloc_threshold;
6073 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
6075 if (ztest_opts.zo_verbose >= 1) {
6076 (void) printf("%llu vdevs, %d datasets, %d threads,"
6077 " %llu seconds...\n",
6078 (u_longlong_t)ztest_opts.zo_vdevs,
6079 ztest_opts.zo_datasets,
6080 ztest_opts.zo_threads,
6081 (u_longlong_t)ztest_opts.zo_time);
6084 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
6085 (void) strlcpy(cmd, getexecname(), MAXNAMELEN);
6087 zs->zs_do_init = B_TRUE;
6088 if (strlen(ztest_opts.zo_alt_ztest) != 0) {
6089 if (ztest_opts.zo_verbose >= 1) {
6090 (void) printf("Executing older ztest for "
6091 "initialization: %s\n", ztest_opts.zo_alt_ztest);
6093 VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
6094 ztest_opts.zo_alt_libpath, B_FALSE, NULL));
6096 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
6098 zs->zs_do_init = B_FALSE;
6100 zs->zs_proc_start = gethrtime();
6101 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
6103 for (int f = 0; f < ZTEST_FUNCS; f++) {
6104 zi = &ztest_info[f];
6105 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6106 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
6107 zc->zc_next = UINT64_MAX;
6109 zc->zc_next = zs->zs_proc_start +
6110 ztest_random(2 * zi->zi_interval[0] + 1);
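/*
 * Seeding zc_next with a random offset in [0, 2 * zi_interval[0]]
 * staggers each function's first call; functions whose interval
 * exceeds the total run time are never scheduled at all (UINT64_MAX).
 */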
6114 * Run the tests in a loop. These tests include fault injection
6115 * to verify that self-healing data works, and forced crashes
6116 * to verify that we never lose on-disk consistency.
6118 while (gethrtime() < zs->zs_proc_stop) {
6123 * Initialize the workload counters for each function.
6125 for (int f = 0; f < ZTEST_FUNCS; f++) {
6126 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6131 /* Set the allocation switch size */
6132 zs->zs_metaslab_df_alloc_threshold =
6133 ztest_random(zs->zs_metaslab_sz / 4) + 1;
6135 if (!hasalt || ztest_random(2) == 0) {
6136 if (hasalt && ztest_opts.zo_verbose >= 1) {
6137 (void) printf("Executing newer ztest: %s\n",
6141 killed = exec_child(cmd, NULL, B_TRUE, &status);
6143 if (hasalt && ztest_opts.zo_verbose >= 1) {
6144 (void) printf("Executing older ztest: %s\n",
6145 ztest_opts.zo_alt_ztest);
6148 killed = exec_child(ztest_opts.zo_alt_ztest,
6149 ztest_opts.zo_alt_libpath, B_TRUE, &status);
6156 if (ztest_opts.zo_verbose >= 1) {
6157 hrtime_t now = gethrtime();
6159 now = MIN(now, zs->zs_proc_stop);
6160 print_time(zs->zs_proc_stop - now, timebuf);
6161 nicenum(zs->zs_space, numbuf);
6163 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
6164 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
6166 WIFEXITED(status) ? "Complete" : "SIGKILL",
6167 (u_longlong_t)zs->zs_enospc_count,
6168 100.0 * zs->zs_alloc / zs->zs_space,
6170 100.0 * (now - zs->zs_proc_start) /
6171 (ztest_opts.zo_time * NANOSEC), timebuf);
6174 if (ztest_opts.zo_verbose >= 2) {
6175 (void) printf("\nWorkload summary:\n\n");
6176 (void) printf("%7s %9s %s\n",
6177 "Calls", "Time", "Function");
6178 (void) printf("%7s %9s %s\n",
6179 "-----", "----", "--------");
6180 for (int f = 0; f < ZTEST_FUNCS; f++) {
6183 zi = &ztest_info[f];
6184 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6185 print_time(zc->zc_time, timebuf);
6186 (void) dladdr((void *)zi->zi_func, &dli);
6187 (void) printf("%7llu %9s %s\n",
6188 (u_longlong_t)zc->zc_count, timebuf,
6191 (void) printf("\n");
6195 * It's possible that we killed a child during a rename test,
6196 * in which case we'll have a 'ztest_tmp' pool lying around
6197 * instead of 'ztest'. Do a blind rename in case this happened.
6200 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
6201 spa_close(spa, FTAG);
6203 char tmpname[MAXNAMELEN];
6205 kernel_init(FREAD | FWRITE);
6206 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
6207 ztest_opts.zo_pool);
6208 (void) spa_rename(tmpname, ztest_opts.zo_pool);
6212 ztest_run_zdb(ztest_opts.zo_pool);
6215 if (ztest_opts.zo_verbose >= 1) {
6217 (void) printf("%d runs of older ztest: %s\n", older,
6218 ztest_opts.zo_alt_ztest);
6219 (void) printf("%d runs of newer ztest: %s\n", newer,
6222 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
6223 kills, iters - kills, (100.0 * kills) / MAX(1, iters));
6226 umem_free(cmd, MAXNAMELEN);