4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
29 * The objective of this program is to provide a DMU/ZAP/SPA stress test
30 * that runs entirely in userland, is easy to use, and easy to extend.
32 * The overall design of the ztest program is as follows:
34 * (1) For each major functional area (e.g. adding vdevs to a pool,
35 * creating and destroying datasets, reading and writing objects, etc)
36 * we have a simple routine to test that functionality. These
37 * individual routines do not have to do anything "stressful".
39 * (2) We turn these simple functionality tests into a stress test by
40 * running them all in parallel, with as many threads as desired,
41 * and spread across as many datasets, objects, and vdevs as desired.
43 * (3) While all this is happening, we inject faults into the pool to
44 * verify that self-healing data really works.
46 * (4) Every time we open a dataset, we change its checksum and compression
47 * functions. Thus even individual objects vary from block to block
48 * in which checksum they use and whether they're compressed.
50 * (5) To verify that we never lose on-disk consistency after a crash,
51 * we run the entire test in a child of the main process.
52 * At random times, the child self-immolates with a SIGKILL.
53 * This is the software equivalent of pulling the power cord.
54 * The parent then runs the test again, using the existing
55 * storage pool, as many times as desired. If backwards compatibility
56 * testing is enabled, ztest will sometimes run the "older" version
57 * of ztest after a SIGKILL.
59 * (6) To verify that we don't have future leaks or temporal incursions,
60 * many of the functional tests record the transaction group number
61 * as part of their data. When reading old data, they verify that
62 * the transaction group number is less than the current, open txg.
63 * If you add a new test, please do this if applicable.
65 * When run with no arguments, ztest runs for about five minutes and
66 * produces no output if successful. To get a little bit of information,
67 * specify -V. To get more information, specify -VV, and so on.
69 * To turn this into an overnight stress test, use -T to specify run time.
71 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
72 * to increase the pool capacity, fanout, and overall stress level.
74 * Use the -k option to set the desired frequency of kills.
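 *
 * For example, an overnight run with extra verbosity might look like the
 * following (the path and values below are purely illustrative):
 *
 *	ztest -VV -T 36000 -P 120 -f /var/tmp
 *
 * which runs for 36000 seconds in 120-second passes, keeping its vdev
 * files under /var/tmp.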
76 * When ztest invokes itself it passes all relevant information through a
77 * temporary file which is mmap-ed in the child process. This allows shared
78 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
79 * stored at offset 0 of this file and contains information on the size and
80 * number of shared structures in the file. The information stored in this file
81 * must remain backwards compatible with older versions of ztest so that
82 * ztest can invoke them during backwards compatibility testing (-B).
85 #include <sys/zfs_context.h>
91 #include <sys/dmu_objset.h>
97 #include <sys/resource.h>
100 #include <sys/zil_impl.h>
101 #include <sys/vdev_impl.h>
102 #include <sys/vdev_file.h>
103 #include <sys/spa_impl.h>
104 #include <sys/metaslab_impl.h>
105 #include <sys/dsl_prop.h>
106 #include <sys/dsl_dataset.h>
107 #include <sys/dsl_scan.h>
108 #include <sys/zio_checksum.h>
109 #include <sys/refcount.h>
111 #include <stdio_ext.h>
120 #include <sys/fs/zfs.h>
121 #include <libnvpair.h>
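/*
 * Fixed file descriptor numbers inherited across the re-exec of ztest:
 * the mmap-ed shared-state temp file described above is expected at fd 3,
 * and /dev/urandom (used by ztest_random()) at fd 4.
 */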
123 #define ZTEST_FD_DATA 3
124 #define ZTEST_FD_RAND 4
126 typedef struct ztest_shared_hdr {
127 uint64_t zh_hdr_size;
128 uint64_t zh_opts_size;
130 uint64_t zh_stats_size;
131 uint64_t zh_stats_count;
133 uint64_t zh_ds_count;
134 } ztest_shared_hdr_t;
136 static ztest_shared_hdr_t *ztest_shared_hdr;
138 typedef struct ztest_shared_opts {
139 char zo_pool[MAXNAMELEN];
140 char zo_dir[MAXNAMELEN];
141 char zo_alt_ztest[MAXNAMELEN];
142 char zo_alt_libpath[MAXNAMELEN];
144 uint64_t zo_vdevtime;
152 uint64_t zo_passtime;
153 uint64_t zo_killrate;
157 uint64_t zo_maxloops;
158 uint64_t zo_metaslab_gang_bang;
159 } ztest_shared_opts_t;
161 static const ztest_shared_opts_t ztest_opts_defaults = {
162 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
163 .zo_dir = { '/', 't', 'm', 'p', '\0' },
164 .zo_alt_ztest = { '\0' },
165 .zo_alt_libpath = { '\0' },
167 .zo_ashift = SPA_MINBLOCKSHIFT,
170 .zo_raidz_parity = 1,
171 .zo_vdev_size = SPA_MINDEVSIZE,
174 .zo_passtime = 60, /* 60 seconds */
175 .zo_killrate = 70, /* 70% kill rate */
178 .zo_time = 300, /* 5 minutes */
179 .zo_maxloops = 50, /* max loops during spa_freeze() */
180 .zo_metaslab_gang_bang = 32 << 10
183 extern uint64_t metaslab_gang_bang;
184 extern uint64_t metaslab_df_alloc_threshold;
186 static ztest_shared_opts_t *ztest_shared_opts;
187 static ztest_shared_opts_t ztest_opts;
189 typedef struct ztest_shared_ds {
193 static ztest_shared_ds_t *ztest_shared_ds;
194 #define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
196 #define BT_MAGIC 0x123456789abcdefULL
197 #define MAXFAULTS() \
198 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
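/*
 * MAXFAULTS() is the largest number of leaf-device faults we can inject
 * while still guaranteeing that every block has at least one intact copy:
 * mirror copies times raidz redundancy (parity + 1), minus one.
 */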
202 ZTEST_IO_WRITE_PATTERN,
203 ZTEST_IO_WRITE_ZEROES,
209 typedef struct ztest_block_tag {
219 typedef struct bufwad {
226 * XXX -- fix zfs range locks to be generic so we can use them here.
248 #define ZTEST_RANGE_LOCKS 64
249 #define ZTEST_OBJECT_LOCKS 64
252 * Object descriptor. Used as a template for object lookup/create/remove.
254 typedef struct ztest_od {
257 dmu_object_type_t od_type;
258 dmu_object_type_t od_crtype;
259 uint64_t od_blocksize;
260 uint64_t od_crblocksize;
263 char od_name[MAXNAMELEN];
269 typedef struct ztest_ds {
270 ztest_shared_ds_t *zd_shared;
272 rwlock_t zd_zilog_lock;
274 ztest_od_t *zd_od; /* debugging aid */
275 char zd_name[MAXNAMELEN];
276 mutex_t zd_dirobj_lock;
277 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
278 rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
282 * Per-iteration state.
284 typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
286 typedef struct ztest_info {
287 ztest_func_t *zi_func; /* test function */
288 uint64_t zi_iters; /* iterations per execution */
289 uint64_t *zi_interval; /* execute every <interval> seconds */
292 typedef struct ztest_shared_callstate {
293 uint64_t zc_count; /* per-pass count */
294 uint64_t zc_time; /* per-pass time */
295 uint64_t zc_next; /* next time to call this function */
296 } ztest_shared_callstate_t;
298 static ztest_shared_callstate_t *ztest_shared_callstate;
299 #define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
302 * Note: these aren't static because we want dladdr() to work.
304 ztest_func_t ztest_dmu_read_write;
305 ztest_func_t ztest_dmu_write_parallel;
306 ztest_func_t ztest_dmu_object_alloc_free;
307 ztest_func_t ztest_dmu_commit_callbacks;
308 ztest_func_t ztest_zap;
309 ztest_func_t ztest_zap_parallel;
310 ztest_func_t ztest_zil_commit;
311 ztest_func_t ztest_zil_remount;
312 ztest_func_t ztest_dmu_read_write_zcopy;
313 ztest_func_t ztest_dmu_objset_create_destroy;
314 ztest_func_t ztest_dmu_prealloc;
315 ztest_func_t ztest_fzap;
316 ztest_func_t ztest_dmu_snapshot_create_destroy;
317 ztest_func_t ztest_dsl_prop_get_set;
318 ztest_func_t ztest_spa_prop_get_set;
319 ztest_func_t ztest_spa_create_destroy;
320 ztest_func_t ztest_fault_inject;
321 ztest_func_t ztest_ddt_repair;
322 ztest_func_t ztest_dmu_snapshot_hold;
323 ztest_func_t ztest_spa_rename;
324 ztest_func_t ztest_scrub;
325 ztest_func_t ztest_dsl_dataset_promote_busy;
326 ztest_func_t ztest_vdev_attach_detach;
327 ztest_func_t ztest_vdev_LUN_growth;
328 ztest_func_t ztest_vdev_add_remove;
329 ztest_func_t ztest_vdev_aux_add_remove;
330 ztest_func_t ztest_split_pool;
331 ztest_func_t ztest_reguid;
333 uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
334 uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
335 uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
336 uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
337 uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
339 ztest_info_t ztest_info[] = {
340 { ztest_dmu_read_write, 1, &zopt_always },
341 { ztest_dmu_write_parallel, 10, &zopt_always },
342 { ztest_dmu_object_alloc_free, 1, &zopt_always },
343 { ztest_dmu_commit_callbacks, 1, &zopt_always },
344 { ztest_zap, 30, &zopt_always },
345 { ztest_zap_parallel, 100, &zopt_always },
346 { ztest_split_pool, 1, &zopt_always },
347 { ztest_zil_commit, 1, &zopt_incessant },
348 { ztest_zil_remount, 1, &zopt_sometimes },
349 { ztest_dmu_read_write_zcopy, 1, &zopt_often },
350 { ztest_dmu_objset_create_destroy, 1, &zopt_often },
351 { ztest_dsl_prop_get_set, 1, &zopt_often },
352 { ztest_spa_prop_get_set, 1, &zopt_sometimes },
354 { ztest_dmu_prealloc, 1, &zopt_sometimes },
356 { ztest_fzap, 1, &zopt_sometimes },
357 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes },
358 { ztest_spa_create_destroy, 1, &zopt_sometimes },
359 { ztest_fault_inject, 1, &zopt_sometimes },
360 { ztest_ddt_repair, 1, &zopt_sometimes },
361 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes },
362 { ztest_reguid, 1, &zopt_sometimes },
363 { ztest_spa_rename, 1, &zopt_rarely },
364 { ztest_scrub, 1, &zopt_rarely },
365 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely },
366 { ztest_vdev_attach_detach, 1, &zopt_rarely },
367 { ztest_vdev_LUN_growth, 1, &zopt_rarely },
368 { ztest_vdev_add_remove, 1,
369 &ztest_opts.zo_vdevtime },
370 { ztest_vdev_aux_add_remove, 1,
371 &ztest_opts.zo_vdevtime },
374 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
377 * The following struct is used to hold a list of uncalled commit callbacks.
378 * The callbacks are ordered by txg number.
380 typedef struct ztest_cb_list {
381 mutex_t zcl_callbacks_lock;
382 list_t zcl_callbacks;
386 * Stuff we need to share writably between parent and child.
388 typedef struct ztest_shared {
389 boolean_t zs_do_init;
390 hrtime_t zs_proc_start;
391 hrtime_t zs_proc_stop;
392 hrtime_t zs_thread_start;
393 hrtime_t zs_thread_stop;
394 hrtime_t zs_thread_kill;
395 uint64_t zs_enospc_count;
396 uint64_t zs_vdev_next_leaf;
397 uint64_t zs_vdev_aux;
402 uint64_t zs_metaslab_sz;
403 uint64_t zs_metaslab_df_alloc_threshold;
407 #define ID_PARALLEL -1ULL
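/*
 * Name templates for the file-backed vdevs, filled in with the test
 * directory and pool name.  The data template ends in a literal 'a' so
 * that ztest_vdev_attach_detach() can derive a sibling path ending in
 * 'b'; the aux template takes an extra class name (e.g. "spares" or
 * "l2cache") and expands to something like /tmp/ztest.spares.0.
 */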
409 static char ztest_dev_template[] = "%s/%s.%llua";
410 static char ztest_aux_template[] = "%s/%s.%s.%llu";
411 ztest_shared_t *ztest_shared;
413 static spa_t *ztest_spa = NULL;
414 static ztest_ds_t *ztest_ds;
416 static mutex_t ztest_vdev_lock;
417 static rwlock_t ztest_name_lock;
419 static boolean_t ztest_dump_core = B_TRUE;
420 static boolean_t ztest_exiting;
422 /* Global commit callback list */
423 static ztest_cb_list_t zcl;
426 ZTEST_META_DNODE = 0,
431 static void usage(boolean_t) __NORETURN;
434 * These libumem hooks provide a reasonable set of defaults for the allocator's
435 * debugging facilities.
440 return ("default,verbose"); /* $UMEM_DEBUG setting */
444 _umem_logging_init(void)
446 return ("fail,contents"); /* $UMEM_LOGGING setting */
449 #define FATAL_MSG_SZ 1024
454 fatal(int do_perror, char *message, ...)
457 int save_errno = errno;
458 char buf[FATAL_MSG_SZ];
460 (void) fflush(stdout);
462 va_start(args, message);
463 (void) sprintf(buf, "ztest: ");
465 (void) vsprintf(buf + strlen(buf), message, args);
468 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
469 ": %s", strerror(save_errno));
471 (void) fprintf(stderr, "%s\n", buf);
472 fatal_msg = buf; /* to ease debugging */
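/*
 * Convert a size suffix ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z') into the
 * corresponding power-of-two shift (B = 0, K = 10, M = 20, ...); used by
 * nicenumtoull() below to parse human-friendly values such as "128k".
 */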
479 str2shift(const char *buf)
481 const char *ends = "BKMGTPEZ";
486 for (i = 0; i < strlen(ends); i++) {
487 if (toupper(buf[0]) == ends[i])
490 if (i == strlen(ends)) {
491 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
495 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
498 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
504 nicenumtoull(const char *buf)
509 val = strtoull(buf, &end, 0);
511 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
513 } else if (end[0] == '.') {
514 double fval = strtod(buf, &end);
515 fval *= pow(2, str2shift(end));
516 if (fval > UINT64_MAX) {
517 (void) fprintf(stderr, "ztest: value too large: %s\n",
521 val = (uint64_t)fval;
523 int shift = str2shift(end);
524 if (shift >= 64 || (val << shift) >> shift != val) {
525 (void) fprintf(stderr, "ztest: value too large: %s\n",
535 usage(boolean_t requested)
537 const ztest_shared_opts_t *zo = &ztest_opts_defaults;
539 char nice_vdev_size[10];
540 char nice_gang_bang[10];
541 FILE *fp = requested ? stdout : stderr;
543 nicenum(zo->zo_vdev_size, nice_vdev_size);
544 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);
546 (void) fprintf(fp, "Usage: %s\n"
547 "\t[-v vdevs (default: %llu)]\n"
548 "\t[-s size_of_each_vdev (default: %s)]\n"
549 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
550 "\t[-m mirror_copies (default: %d)]\n"
551 "\t[-r raidz_disks (default: %d)]\n"
552 "\t[-R raidz_parity (default: %d)]\n"
553 "\t[-d datasets (default: %d)]\n"
554 "\t[-t threads (default: %d)]\n"
555 "\t[-g gang_block_threshold (default: %s)]\n"
556 "\t[-i init_count (default: %d)] initialize pool i times\n"
557 "\t[-k kill_percentage (default: %llu%%)]\n"
558 "\t[-p pool_name (default: %s)]\n"
559 "\t[-f dir (default: %s)] file directory for vdev files\n"
560 "\t[-V] verbose (use multiple times for ever more blather)\n"
561 "\t[-E] use existing pool instead of creating new one\n"
562 "\t[-T time (default: %llu sec)] total run time\n"
563 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
564 "\t[-P passtime (default: %llu sec)] time per pass\n"
565 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
566 "\t[-h] (print help)\n"
569 (u_longlong_t)zo->zo_vdevs, /* -v */
570 nice_vdev_size, /* -s */
571 zo->zo_ashift, /* -a */
572 zo->zo_mirrors, /* -m */
573 zo->zo_raidz, /* -r */
574 zo->zo_raidz_parity, /* -R */
575 zo->zo_datasets, /* -d */
576 zo->zo_threads, /* -t */
577 nice_gang_bang, /* -g */
578 zo->zo_init, /* -i */
579 (u_longlong_t)zo->zo_killrate, /* -k */
580 zo->zo_pool, /* -p */
582 (u_longlong_t)zo->zo_time, /* -T */
583 (u_longlong_t)zo->zo_maxloops, /* -F */
584 (u_longlong_t)zo->zo_passtime);
585 exit(requested ? 0 : 1);
589 process_options(int argc, char **argv)
592 ztest_shared_opts_t *zo = &ztest_opts;
596 char altdir[MAXNAMELEN] = { 0 };
598 bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
600 while ((opt = getopt(argc, argv,
601 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
618 value = nicenumtoull(optarg);
622 zo->zo_vdevs = value;
625 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
628 zo->zo_ashift = value;
631 zo->zo_mirrors = value;
634 zo->zo_raidz = MAX(1, value);
637 zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
640 zo->zo_datasets = MAX(1, value);
643 zo->zo_threads = MAX(1, value);
646 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
653 zo->zo_killrate = value;
656 (void) strlcpy(zo->zo_pool, optarg,
657 sizeof (zo->zo_pool));
660 path = realpath(optarg, NULL);
662 (void) fprintf(stderr, "error: %s: %s\n",
663 optarg, strerror(errno));
666 (void) strlcpy(zo->zo_dir, path,
667 sizeof (zo->zo_dir));
680 zo->zo_passtime = MAX(1, value);
683 zo->zo_maxloops = MAX(1, value);
686 (void) strlcpy(altdir, optarg, sizeof (altdir));
698 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
701 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
704 if (strlen(altdir) > 0) {
705 char cmd[MAXNAMELEN];
706 char realaltdir[MAXNAMELEN];
712 (void) realpath(getexecname(), cmd);
713 if (0 != access(altdir, F_OK)) {
714 ztest_dump_core = B_FALSE;
715 fatal(B_TRUE, "invalid alternate ztest path: %s",
718 VERIFY(NULL != realpath(altdir, realaltdir));
721 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
722 * We want to extract <isa> to determine if we should use
723 * 32 or 64 bit binaries.
725 bin = strstr(cmd, "/usr/bin/");
726 ztest = strstr(bin, "/ztest");
728 isalen = ztest - isa;
729 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
730 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
731 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
732 "%s/usr/lib/%.*s", realaltdir, isalen, isa);
734 if (0 != access(zo->zo_alt_ztest, X_OK)) {
735 ztest_dump_core = B_FALSE;
736 fatal(B_TRUE, "invalid alternate ztest: %s",
738 } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
739 ztest_dump_core = B_FALSE;
740 fatal(B_TRUE, "invalid alternate lib directory %s",
747 ztest_kill(ztest_shared_t *zs)
749 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
750 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
751 (void) kill(getpid(), SIGKILL);
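/*
 * Return a random 64-bit value in [0, range), drawn from the /dev/urandom
 * descriptor pre-opened at ZTEST_FD_RAND.
 */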
755 ztest_random(uint64_t range)
762 if (read(ZTEST_FD_RAND, &r, sizeof (r)) != sizeof (r))
763 fatal(1, "short read from /dev/urandom");
770 ztest_record_enospc(const char *s)
772 ztest_shared->zs_enospc_count++;
776 ztest_get_ashift(void)
778 if (ztest_opts.zo_ashift == 0)
779 return (SPA_MINBLOCKSHIFT + ztest_random(3));
780 return (ztest_opts.zo_ashift);
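/*
 * The make_vdev_*() functions below build an nvlist vdev specification
 * from the bottom up: a file-backed leaf (creating and truncating the
 * backing file as needed), a raidz group of r leaves, a mirror of m raidz
 * groups, and finally a root with t top-level children, optionally marked
 * as log devices.  The result is suitable for spa_create()/spa_vdev_add().
 */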
784 make_vdev_file(char *path, char *aux, size_t size, uint64_t ashift)
786 char pathbuf[MAXPATHLEN];
791 ashift = ztest_get_ashift();
797 vdev = ztest_shared->zs_vdev_aux;
798 (void) snprintf(path, sizeof (pathbuf),
799 ztest_aux_template, ztest_opts.zo_dir,
800 ztest_opts.zo_pool, aux, vdev);
802 vdev = ztest_shared->zs_vdev_next_leaf++;
803 (void) snprintf(path, sizeof (pathbuf),
804 ztest_dev_template, ztest_opts.zo_dir,
805 ztest_opts.zo_pool, vdev);
810 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
812 fatal(1, "can't open %s", path);
813 if (ftruncate(fd, size) != 0)
814 fatal(1, "can't ftruncate %s", path);
818 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
819 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
820 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
821 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
827 make_vdev_raidz(char *path, char *aux, size_t size, uint64_t ashift, int r)
829 nvlist_t *raidz, **child;
833 return (make_vdev_file(path, aux, size, ashift));
834 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
836 for (c = 0; c < r; c++)
837 child[c] = make_vdev_file(path, aux, size, ashift);
839 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
840 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
841 VDEV_TYPE_RAIDZ) == 0);
842 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
843 ztest_opts.zo_raidz_parity) == 0);
844 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
847 for (c = 0; c < r; c++)
848 nvlist_free(child[c]);
850 umem_free(child, r * sizeof (nvlist_t *));
856 make_vdev_mirror(char *path, char *aux, size_t size, uint64_t ashift,
859 nvlist_t *mirror, **child;
863 return (make_vdev_raidz(path, aux, size, ashift, r));
865 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
867 for (c = 0; c < m; c++)
868 child[c] = make_vdev_raidz(path, aux, size, ashift, r);
870 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
871 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
872 VDEV_TYPE_MIRROR) == 0);
873 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
876 for (c = 0; c < m; c++)
877 nvlist_free(child[c]);
879 umem_free(child, m * sizeof (nvlist_t *));
885 make_vdev_root(char *path, char *aux, size_t size, uint64_t ashift,
886 int log, int r, int m, int t)
888 nvlist_t *root, **child;
893 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
895 for (c = 0; c < t; c++) {
896 child[c] = make_vdev_mirror(path, aux, size, ashift, r, m);
897 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
901 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
902 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
903 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
906 for (c = 0; c < t; c++)
907 nvlist_free(child[c]);
909 umem_free(child, t * sizeof (nvlist_t *));
915 ztest_random_blocksize(void)
917 return (1 << (SPA_MINBLOCKSHIFT +
918 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
922 ztest_random_ibshift(void)
924 return (DN_MIN_INDBLKSHIFT +
925 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
929 ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
932 vdev_t *rvd = spa->spa_root_vdev;
935 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
938 top = ztest_random(rvd->vdev_children);
939 tvd = rvd->vdev_child[top];
940 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
941 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
947 ztest_random_dsl_prop(zfs_prop_t prop)
952 value = zfs_prop_random_value(prop, ztest_random(-1ULL));
953 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
959 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
962 const char *propname = zfs_prop_to_name(prop);
964 char setpoint[MAXPATHLEN];
968 error = dsl_prop_set(osname, propname,
969 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
970 sizeof (value), 1, &value);
972 if (error == ENOSPC) {
973 ztest_record_enospc(FTAG);
976 ASSERT3U(error, ==, 0);
978 VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
979 1, &curval, setpoint), ==, 0);
981 if (ztest_opts.zo_verbose >= 6) {
982 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
983 (void) printf("%s %s = %s at '%s'\n",
984 osname, propname, valname, setpoint);
991 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
993 spa_t *spa = ztest_spa;
994 nvlist_t *props = NULL;
997 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
998 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
1000 error = spa_prop_set(spa, props);
1004 if (error == ENOSPC) {
1005 ztest_record_enospc(FTAG);
1008 ASSERT3U(error, ==, 0);
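/*
 * ztest's own miniature reader/writer lock (rll_t), built from a mutex
 * and a condition variable: readers wait only for an active writer,
 * writers wait for both readers and writers.  These stand in for the ZFS
 * range locks noted in the XXX comment above.
 */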
1014 ztest_rll_init(rll_t *rll)
1016 rll->rll_writer = NULL;
1017 rll->rll_readers = 0;
1018 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1019 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
1023 ztest_rll_destroy(rll_t *rll)
1025 ASSERT(rll->rll_writer == NULL);
1026 ASSERT(rll->rll_readers == 0);
1027 VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1028 VERIFY(cond_destroy(&rll->rll_cv) == 0);
1032 ztest_rll_lock(rll_t *rll, rl_type_t type)
1034 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1036 if (type == RL_READER) {
1037 while (rll->rll_writer != NULL)
1038 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1041 while (rll->rll_writer != NULL || rll->rll_readers)
1042 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1043 rll->rll_writer = curthread;
1046 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1050 ztest_rll_unlock(rll_t *rll)
1052 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1054 if (rll->rll_writer) {
1055 ASSERT(rll->rll_readers == 0);
1056 rll->rll_writer = NULL;
1058 ASSERT(rll->rll_readers != 0);
1059 ASSERT(rll->rll_writer == NULL);
1063 if (rll->rll_writer == NULL && rll->rll_readers == 0)
1064 VERIFY(cond_broadcast(&rll->rll_cv) == 0);
1066 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
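/*
 * Object and range locks are taken by hashing into fixed-size arrays of
 * rll_t: objects hash by object number, ranges by a mix of object number
 * and offset.  Distinct objects or ranges may therefore share a lock,
 * which is overly conservative but always safe.
 */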
1070 ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
1072 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1074 ztest_rll_lock(rll, type);
1078 ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
1080 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1082 ztest_rll_unlock(rll);
1086 ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
1087 uint64_t size, rl_type_t type)
1089 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
1090 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
1093 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
1094 rl->rl_object = object;
1095 rl->rl_offset = offset;
1099 ztest_rll_lock(rll, type);
1105 ztest_range_unlock(rl_t *rl)
1107 rll_t *rll = rl->rl_lock;
1109 ztest_rll_unlock(rll);
1111 umem_free(rl, sizeof (*rl));
1115 ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
1118 zd->zd_zilog = dmu_objset_zil(os);
1119 zd->zd_shared = szd;
1120 dmu_objset_name(os, zd->zd_name);
1122 if (zd->zd_shared != NULL)
1123 zd->zd_shared->zd_seq = 0;
1125 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
1126 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
1128 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1129 ztest_rll_init(&zd->zd_object_lock[l]);
1131 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1132 ztest_rll_init(&zd->zd_range_lock[l]);
1136 ztest_zd_fini(ztest_ds_t *zd)
1138 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
1140 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1141 ztest_rll_destroy(&zd->zd_object_lock[l]);
1143 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1144 ztest_rll_destroy(&zd->zd_range_lock[l]);
1147 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
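/*
 * Wrapper around dmu_tx_assign().  On ERESTART (possible only with
 * TXG_NOWAIT) or ENOSPC the tx is abandoned and 0 is returned so the
 * caller can simply bail out; otherwise the assigned txg is returned.
 */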
1150 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
1156 * Attempt to assign tx to some transaction group.
1158 error = dmu_tx_assign(tx, txg_how);
1160 if (error == ERESTART) {
1161 ASSERT(txg_how == TXG_NOWAIT);
1164 ASSERT3U(error, ==, ENOSPC);
1165 ztest_record_enospc(tag);
1170 txg = dmu_tx_get_txg(tx);
1176 ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
1179 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1186 ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
1189 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1193 diff |= (value - *ip++);
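/*
 * Block tags (ztest_block_tag_t) are stamped into data blocks and bonus
 * buffers.  They record which objset/object/offset the data belongs to,
 * along with its generation and creation/birth txgs, so later reads can
 * verify both identity and the "no data from the future" invariant
 * described in item (6) of the big comment at the top of this file.
 */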
1199 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1200 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1202 bt->bt_magic = BT_MAGIC;
1203 bt->bt_objset = dmu_objset_id(os);
1204 bt->bt_object = object;
1205 bt->bt_offset = offset;
1208 bt->bt_crtxg = crtxg;
1212 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1213 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1215 ASSERT(bt->bt_magic == BT_MAGIC);
1216 ASSERT(bt->bt_objset == dmu_objset_id(os));
1217 ASSERT(bt->bt_object == object);
1218 ASSERT(bt->bt_offset == offset);
1219 ASSERT(bt->bt_gen <= gen);
1220 ASSERT(bt->bt_txg <= txg);
1221 ASSERT(bt->bt_crtxg == crtxg);
1224 static ztest_block_tag_t *
1225 ztest_bt_bonus(dmu_buf_t *db)
1227 dmu_object_info_t doi;
1228 ztest_block_tag_t *bt;
1230 dmu_object_info_from_db(db, &doi);
1231 ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
1232 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
1233 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
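/*
 * ZIL logging ops.  ztest smuggles its object-creation parameters (type,
 * blocksize, indirect block shift, bonus type and length) through fields
 * of lr_create_t that it does not otherwise use, via the lrz_* aliases
 * below.
 */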
1242 #define lrz_type lr_mode
1243 #define lrz_blocksize lr_uid
1244 #define lrz_ibshift lr_gid
1245 #define lrz_bonustype lr_rdev
1246 #define lrz_bonuslen lr_crtime[1]
1249 ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
1251 char *name = (void *)(lr + 1); /* name follows lr */
1252 size_t namesize = strlen(name) + 1;
1255 if (zil_replaying(zd->zd_zilog, tx))
1258 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
1259 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1260 sizeof (*lr) + namesize - sizeof (lr_t));
1262 zil_itx_assign(zd->zd_zilog, itx, tx);
1266 ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
1268 char *name = (void *)(lr + 1); /* name follows lr */
1269 size_t namesize = strlen(name) + 1;
1272 if (zil_replaying(zd->zd_zilog, tx))
1275 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
1276 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1277 sizeof (*lr) + namesize - sizeof (lr_t));
1279 itx->itx_oid = object;
1280 zil_itx_assign(zd->zd_zilog, itx, tx);
1284 ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
1287 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
1289 if (zil_replaying(zd->zd_zilog, tx))
1292 if (lr->lr_length > ZIL_MAX_LOG_DATA)
1293 write_state = WR_INDIRECT;
1295 itx = zil_itx_create(TX_WRITE,
1296 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
1298 if (write_state == WR_COPIED &&
1299 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
1300 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
1301 zil_itx_destroy(itx);
1302 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1303 write_state = WR_NEED_COPY;
1305 itx->itx_private = zd;
1306 itx->itx_wr_state = write_state;
1307 itx->itx_sync = (ztest_random(8) == 0);
1308 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);
1310 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1311 sizeof (*lr) - sizeof (lr_t));
1313 zil_itx_assign(zd->zd_zilog, itx, tx);
1317 ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
1321 if (zil_replaying(zd->zd_zilog, tx))
1324 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1325 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1326 sizeof (*lr) - sizeof (lr_t));
1328 itx->itx_sync = B_FALSE;
1329 zil_itx_assign(zd->zd_zilog, itx, tx);
1333 ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
1337 if (zil_replaying(zd->zd_zilog, tx))
1340 itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
1341 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1342 sizeof (*lr) - sizeof (lr_t));
1344 itx->itx_sync = B_FALSE;
1345 zil_itx_assign(zd->zd_zilog, itx, tx);
1352 ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
1354 char *name = (void *)(lr + 1); /* name follows lr */
1355 objset_t *os = zd->zd_os;
1356 ztest_block_tag_t *bbt;
1363 byteswap_uint64_array(lr, sizeof (*lr));
1365 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1366 ASSERT(name[0] != '\0');
1368 tx = dmu_tx_create(os);
1370 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
1372 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1373 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1375 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1378 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1382 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
1384 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1385 if (lr->lr_foid == 0) {
1386 lr->lr_foid = zap_create(os,
1387 lr->lrz_type, lr->lrz_bonustype,
1388 lr->lrz_bonuslen, tx);
1390 error = zap_create_claim(os, lr->lr_foid,
1391 lr->lrz_type, lr->lrz_bonustype,
1392 lr->lrz_bonuslen, tx);
1395 if (lr->lr_foid == 0) {
1396 lr->lr_foid = dmu_object_alloc(os,
1397 lr->lrz_type, 0, lr->lrz_bonustype,
1398 lr->lrz_bonuslen, tx);
1400 error = dmu_object_claim(os, lr->lr_foid,
1401 lr->lrz_type, 0, lr->lrz_bonustype,
1402 lr->lrz_bonuslen, tx);
1407 ASSERT3U(error, ==, EEXIST);
1408 ASSERT(zd->zd_zilog->zl_replay);
1413 ASSERT(lr->lr_foid != 0);
1415 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1416 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
1417 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1419 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1420 bbt = ztest_bt_bonus(db);
1421 dmu_buf_will_dirty(db, tx);
1422 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1423 dmu_buf_rele(db, FTAG);
1425 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1428 (void) ztest_log_create(zd, tx, lr);
1436 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1438 char *name = (void *)(lr + 1); /* name follows lr */
1439 objset_t *os = zd->zd_os;
1440 dmu_object_info_t doi;
1442 uint64_t object, txg;
1445 byteswap_uint64_array(lr, sizeof (*lr));
1447 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1448 ASSERT(name[0] != '\0');
1451 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1452 ASSERT(object != 0);
1454 ztest_object_lock(zd, object, RL_WRITER);
1456 VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
1458 tx = dmu_tx_create(os);
1460 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1461 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1463 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1465 ztest_object_unlock(zd, object);
1469 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1470 VERIFY3U(0, ==, zap_destroy(os, object, tx));
1472 VERIFY3U(0, ==, dmu_object_free(os, object, tx));
1475 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
1477 (void) ztest_log_remove(zd, tx, lr, object);
1481 ztest_object_unlock(zd, object);
1487 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1489 objset_t *os = zd->zd_os;
1490 void *data = lr + 1; /* data follows lr */
1491 uint64_t offset, length;
1492 ztest_block_tag_t *bt = data;
1493 ztest_block_tag_t *bbt;
1494 uint64_t gen, txg, lrtxg, crtxg;
1495 dmu_object_info_t doi;
1498 arc_buf_t *abuf = NULL;
1502 byteswap_uint64_array(lr, sizeof (*lr));
1504 offset = lr->lr_offset;
1505 length = lr->lr_length;
1507 /* If it's a dmu_sync() block, write the whole block */
1508 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1509 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1510 if (length < blocksize) {
1511 offset -= offset % blocksize;
1516 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1517 byteswap_uint64_array(bt, sizeof (*bt));
1519 if (bt->bt_magic != BT_MAGIC)
1522 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1523 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1525 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1527 dmu_object_info_from_db(db, &doi);
1529 bbt = ztest_bt_bonus(db);
1530 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1532 crtxg = bbt->bt_crtxg;
1533 lrtxg = lr->lr_common.lrc_txg;
1535 tx = dmu_tx_create(os);
1537 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1539 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1540 P2PHASE(offset, length) == 0)
1541 abuf = dmu_request_arcbuf(db, length);
1543 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1546 dmu_return_arcbuf(abuf);
1547 dmu_buf_rele(db, FTAG);
1548 ztest_range_unlock(rl);
1549 ztest_object_unlock(zd, lr->lr_foid);
1555 * Usually, verify the old data before writing new data --
1556 * but not always, because we also want to verify correct
1557 * behavior when the data was not recently read into cache.
1559 ASSERT(offset % doi.doi_data_block_size == 0);
1560 if (ztest_random(4) != 0) {
1561 int prefetch = ztest_random(2) ?
1562 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
1563 ztest_block_tag_t rbt;
1565 VERIFY(dmu_read(os, lr->lr_foid, offset,
1566 sizeof (rbt), &rbt, prefetch) == 0);
1567 if (rbt.bt_magic == BT_MAGIC) {
1568 ztest_bt_verify(&rbt, os, lr->lr_foid,
1569 offset, gen, txg, crtxg);
1574 * Writes can appear to be newer than the bonus buffer because
1575 * the ztest_get_data() callback does a dmu_read() of the
1576 * open-context data, which may be different than the data
1577 * as it was when the write was generated.
1579 if (zd->zd_zilog->zl_replay) {
1580 ztest_bt_verify(bt, os, lr->lr_foid, offset,
1581 MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
1586 * Set the bt's gen/txg to the bonus buffer's gen/txg
1587 * so that all of the usual ASSERTs will work.
1589 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
1593 dmu_write(os, lr->lr_foid, offset, length, data, tx);
1595 bcopy(data, abuf->b_data, length);
1596 dmu_assign_arcbuf(db, offset, abuf, tx);
1599 (void) ztest_log_write(zd, tx, lr);
1601 dmu_buf_rele(db, FTAG);
1605 ztest_range_unlock(rl);
1606 ztest_object_unlock(zd, lr->lr_foid);
1612 ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
1614 objset_t *os = zd->zd_os;
1620 byteswap_uint64_array(lr, sizeof (*lr));
1622 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1623 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
1626 tx = dmu_tx_create(os);
1628 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
1630 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1632 ztest_range_unlock(rl);
1633 ztest_object_unlock(zd, lr->lr_foid);
1637 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
1638 lr->lr_length, tx) == 0);
1640 (void) ztest_log_truncate(zd, tx, lr);
1644 ztest_range_unlock(rl);
1645 ztest_object_unlock(zd, lr->lr_foid);
1651 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1653 objset_t *os = zd->zd_os;
1656 ztest_block_tag_t *bbt;
1657 uint64_t txg, lrtxg, crtxg;
1660 byteswap_uint64_array(lr, sizeof (*lr));
1662 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1664 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1666 tx = dmu_tx_create(os);
1667 dmu_tx_hold_bonus(tx, lr->lr_foid);
1669 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1671 dmu_buf_rele(db, FTAG);
1672 ztest_object_unlock(zd, lr->lr_foid);
1676 bbt = ztest_bt_bonus(db);
1677 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1678 crtxg = bbt->bt_crtxg;
1679 lrtxg = lr->lr_common.lrc_txg;
1681 if (zd->zd_zilog->zl_replay) {
1682 ASSERT(lr->lr_size != 0);
1683 ASSERT(lr->lr_mode != 0);
1687 * Randomly change the size and increment the generation.
1689 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1691 lr->lr_mode = bbt->bt_gen + 1;
1696 * Verify that the current bonus buffer is not newer than our txg.
1698 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1699 MAX(txg, lrtxg), crtxg);
1701 dmu_buf_will_dirty(db, tx);
1703 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1704 ASSERT3U(lr->lr_size, <=, db->db_size);
1705 VERIFY3U(dmu_set_bonus(db, lr->lr_size, tx), ==, 0);
1706 bbt = ztest_bt_bonus(db);
1708 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1710 dmu_buf_rele(db, FTAG);
1712 (void) ztest_log_setattr(zd, tx, lr);
1716 ztest_object_unlock(zd, lr->lr_foid);
1721 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
1722 NULL, /* 0 no such transaction type */
1723 ztest_replay_create, /* TX_CREATE */
1724 NULL, /* TX_MKDIR */
1725 NULL, /* TX_MKXATTR */
1726 NULL, /* TX_SYMLINK */
1727 ztest_replay_remove, /* TX_REMOVE */
1728 NULL, /* TX_RMDIR */
1730 NULL, /* TX_RENAME */
1731 ztest_replay_write, /* TX_WRITE */
1732 ztest_replay_truncate, /* TX_TRUNCATE */
1733 ztest_replay_setattr, /* TX_SETATTR */
1735 NULL, /* TX_CREATE_ACL */
1736 NULL, /* TX_CREATE_ATTR */
1737 NULL, /* TX_CREATE_ACL_ATTR */
1738 NULL, /* TX_MKDIR_ACL */
1739 NULL, /* TX_MKDIR_ATTR */
1740 NULL, /* TX_MKDIR_ACL_ATTR */
1741 NULL, /* TX_WRITE2 */
1745 * ZIL get_data callbacks
1749 ztest_get_done(zgd_t *zgd, int error)
1751 ztest_ds_t *zd = zgd->zgd_private;
1752 uint64_t object = zgd->zgd_rl->rl_object;
1755 dmu_buf_rele(zgd->zgd_db, zgd);
1757 ztest_range_unlock(zgd->zgd_rl);
1758 ztest_object_unlock(zd, object);
1760 if (error == 0 && zgd->zgd_bp)
1761 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1763 umem_free(zgd, sizeof (*zgd));
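/*
 * ztest_get_data() is the get-data callback passed to zil_open() (see
 * ztest_zil_remount()).  When the ZIL needs the payload of a logged
 * write, it either copies the data directly into the supplied buffer or,
 * for indirect writes, issues a dmu_sync() and lets ztest_get_done()
 * release the buffer and locks.
 */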
1767 ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1769 ztest_ds_t *zd = arg;
1770 objset_t *os = zd->zd_os;
1771 uint64_t object = lr->lr_foid;
1772 uint64_t offset = lr->lr_offset;
1773 uint64_t size = lr->lr_length;
1774 blkptr_t *bp = &lr->lr_blkptr;
1775 uint64_t txg = lr->lr_common.lrc_txg;
1777 dmu_object_info_t doi;
1782 ztest_object_lock(zd, object, RL_READER);
1783 error = dmu_bonus_hold(os, object, FTAG, &db);
1785 ztest_object_unlock(zd, object);
1789 crtxg = ztest_bt_bonus(db)->bt_crtxg;
1791 if (crtxg == 0 || crtxg > txg) {
1792 dmu_buf_rele(db, FTAG);
1793 ztest_object_unlock(zd, object);
1797 dmu_object_info_from_db(db, &doi);
1798 dmu_buf_rele(db, FTAG);
1801 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
1802 zgd->zgd_zilog = zd->zd_zilog;
1803 zgd->zgd_private = zd;
1805 if (buf != NULL) { /* immediate write */
1806 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1809 error = dmu_read(os, object, offset, size, buf,
1810 DMU_READ_NO_PREFETCH);
1813 size = doi.doi_data_block_size;
1815 offset = P2ALIGN(offset, size);
1817 ASSERT(offset < size);
1821 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1824 error = dmu_buf_hold(os, object, offset, zgd, &db,
1825 DMU_READ_NO_PREFETCH);
1831 ASSERT(db->db_offset == offset);
1832 ASSERT(db->db_size == size);
1834 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1835 ztest_get_done, zgd);
1842 ztest_get_done(zgd, error);
1848 ztest_lr_alloc(size_t lrsize, char *name)
1851 size_t namesize = name ? strlen(name) + 1 : 0;
1853 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
1856 bcopy(name, lr + lrsize, namesize);
1862 ztest_lr_free(void *lr, size_t lrsize, char *name)
1864 size_t namesize = name ? strlen(name) + 1 : 0;
1866 umem_free(lr, lrsize + namesize);
1870 * Look up a bunch of objects; return the number of objects not found.
1873 ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
1878 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1880 for (int i = 0; i < count; i++, od++) {
1882 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1883 sizeof (uint64_t), 1, &od->od_object);
1885 ASSERT(error == ENOENT);
1886 ASSERT(od->od_object == 0);
1890 ztest_block_tag_t *bbt;
1891 dmu_object_info_t doi;
1893 ASSERT(od->od_object != 0);
1894 ASSERT(missing == 0); /* there should be no gaps */
1896 ztest_object_lock(zd, od->od_object, RL_READER);
1897 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
1898 od->od_object, FTAG, &db));
1899 dmu_object_info_from_db(db, &doi);
1900 bbt = ztest_bt_bonus(db);
1901 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1902 od->od_type = doi.doi_type;
1903 od->od_blocksize = doi.doi_data_block_size;
1904 od->od_gen = bbt->bt_gen;
1905 dmu_buf_rele(db, FTAG);
1906 ztest_object_unlock(zd, od->od_object);
1914 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1918 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1920 for (int i = 0; i < count; i++, od++) {
1927 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1929 lr->lr_doid = od->od_dir;
1930 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
1931 lr->lrz_type = od->od_crtype;
1932 lr->lrz_blocksize = od->od_crblocksize;
1933 lr->lrz_ibshift = ztest_random_ibshift();
1934 lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
1935 lr->lrz_bonuslen = dmu_bonus_max();
1936 lr->lr_gen = od->od_crgen;
1937 lr->lr_crtime[0] = time(NULL);
1939 if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
1940 ASSERT(missing == 0);
1944 od->od_object = lr->lr_foid;
1945 od->od_type = od->od_crtype;
1946 od->od_blocksize = od->od_crblocksize;
1947 od->od_gen = od->od_crgen;
1948 ASSERT(od->od_object != 0);
1951 ztest_lr_free(lr, sizeof (*lr), od->od_name);
1958 ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
1963 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1967 for (int i = count - 1; i >= 0; i--, od--) {
1973 if (od->od_object == 0)
1976 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1978 lr->lr_doid = od->od_dir;
1980 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
1981 ASSERT3U(error, ==, ENOSPC);
1986 ztest_lr_free(lr, sizeof (*lr), od->od_name);
1993 ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
1999 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
2001 lr->lr_foid = object;
2002 lr->lr_offset = offset;
2003 lr->lr_length = size;
2005 BP_ZERO(&lr->lr_blkptr);
2007 bcopy(data, lr + 1, size);
2009 error = ztest_replay_write(zd, lr, B_FALSE);
2011 ztest_lr_free(lr, sizeof (*lr) + size, NULL);
2017 ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2022 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2024 lr->lr_foid = object;
2025 lr->lr_offset = offset;
2026 lr->lr_length = size;
2028 error = ztest_replay_truncate(zd, lr, B_FALSE);
2030 ztest_lr_free(lr, sizeof (*lr), NULL);
2036 ztest_setattr(ztest_ds_t *zd, uint64_t object)
2041 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2043 lr->lr_foid = object;
2047 error = ztest_replay_setattr(zd, lr, B_FALSE);
2049 ztest_lr_free(lr, sizeof (*lr), NULL);
2055 ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2057 objset_t *os = zd->zd_os;
2062 txg_wait_synced(dmu_objset_pool(os), 0);
2064 ztest_object_lock(zd, object, RL_READER);
2065 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
2067 tx = dmu_tx_create(os);
2069 dmu_tx_hold_write(tx, object, offset, size);
2071 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
2074 dmu_prealloc(os, object, offset, size, tx);
2076 txg_wait_synced(dmu_objset_pool(os), txg);
2078 (void) dmu_free_long_range(os, object, offset, size);
2081 ztest_range_unlock(rl);
2082 ztest_object_unlock(zd, object);
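/*
 * Perform one randomly chosen i/o operation (block-tag write, pattern
 * write, zero write, truncate, or setattr) against the given object and
 * offset, holding zd_zilog_lock as reader for the duration.
 */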
2086 ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
2088 ztest_block_tag_t wbt;
2089 dmu_object_info_t doi;
2090 enum ztest_io_type io_type;
2094 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
2095 blocksize = doi.doi_data_block_size;
2096 data = umem_alloc(blocksize, UMEM_NOFAIL);
2099 * Pick an i/o type at random, biased toward writing block tags.
2101 io_type = ztest_random(ZTEST_IO_TYPES);
2102 if (ztest_random(2) == 0)
2103 io_type = ZTEST_IO_WRITE_TAG;
2105 (void) rw_rdlock(&zd->zd_zilog_lock);
2109 case ZTEST_IO_WRITE_TAG:
2110 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
2111 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
2114 case ZTEST_IO_WRITE_PATTERN:
2115 (void) memset(data, 'a' + (object + offset) % 5, blocksize);
2116 if (ztest_random(2) == 0) {
2118 * Induce fletcher2 collisions to ensure that
2119 * zio_ddt_collision() detects and resolves them
2120 * when using fletcher2-verify for deduplication.
2122 ((uint64_t *)data)[0] ^= 1ULL << 63;
2123 ((uint64_t *)data)[4] ^= 1ULL << 63;
2125 (void) ztest_write(zd, object, offset, blocksize, data);
2128 case ZTEST_IO_WRITE_ZEROES:
2129 bzero(data, blocksize);
2130 (void) ztest_write(zd, object, offset, blocksize, data);
2133 case ZTEST_IO_TRUNCATE:
2134 (void) ztest_truncate(zd, object, offset, blocksize);
2137 case ZTEST_IO_SETATTR:
2138 (void) ztest_setattr(zd, object);
2142 (void) rw_unlock(&zd->zd_zilog_lock);
2144 umem_free(data, blocksize);
2148 * Initialize an object description template.
2151 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
2152 dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
2154 od->od_dir = ZTEST_DIROBJ;
2157 od->od_crtype = type;
2158 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
2161 od->od_type = DMU_OT_NONE;
2162 od->od_blocksize = 0;
2165 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
2166 tag, (int64_t)id, index);
2170 * Look up or create the objects for a test using the od template.
2171 * If the objects do not all exist, or if 'remove' is specified,
2172 * remove any existing objects and create new ones. Otherwise,
2173 * use the existing objects.
2176 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
2178 int count = size / sizeof (*od);
2181 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
2182 if ((ztest_lookup(zd, od, count) != 0 || remove) &&
2183 (ztest_remove(zd, od, count) != 0 ||
2184 ztest_create(zd, od, count) != 0))
2187 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2194 ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
2196 zilog_t *zilog = zd->zd_zilog;
2198 (void) rw_rdlock(&zd->zd_zilog_lock);
2200 zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
2203 * Remember the committed values in zd, which is in parent/child
2204 * shared memory. If we die, the next iteration of ztest_run()
2205 * will verify that the log really does contain this record.
2207 mutex_enter(&zilog->zl_lock);
2208 ASSERT(zd->zd_shared != NULL);
2209 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
2210 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
2211 mutex_exit(&zilog->zl_lock);
2213 (void) rw_unlock(&zd->zd_zilog_lock);
2217 * This function is designed to simulate the operations that occur during a
2218 * mount/unmount operation. We hold the dataset across these operations in an
2219 * attempt to expose any implicit assumptions about ZIL management.
2223 ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
2225 objset_t *os = zd->zd_os;
2227 (void) rw_wrlock(&zd->zd_zilog_lock);
2229 /* zfsvfs_teardown() */
2230 zil_close(zd->zd_zilog);
2232 /* zfsvfs_setup() */
2233 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
2234 zil_replay(os, zd, ztest_replay_vector);
2236 (void) rw_unlock(&zd->zd_zilog_lock);
2240 * Verify that we can't destroy an active pool, create an existing pool,
2241 * or create a pool with a bad vdev spec.
2245 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
2247 ztest_shared_opts_t *zo = &ztest_opts;
2252 * Attempt to create using a bad file.
2254 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
2255 VERIFY3U(ENOENT, ==,
2256 spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
2257 nvlist_free(nvroot);
2260 * Attempt to create using a bad mirror.
2262 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1);
2263 VERIFY3U(ENOENT, ==,
2264 spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
2265 nvlist_free(nvroot);
2268 * Attempt to create an existing pool. It shouldn't matter
2269 * what's in the nvroot; we should fail with EEXIST.
2271 (void) rw_rdlock(&ztest_name_lock);
2272 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
2273 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
2274 nvlist_free(nvroot);
2275 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
2276 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2277 spa_close(spa, FTAG);
2279 (void) rw_unlock(&ztest_name_lock);
2283 vdev_lookup_by_path(vdev_t *vd, const char *path)
2287 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2290 for (int c = 0; c < vd->vdev_children; c++)
2291 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2299 * Find the first available hole that can be used as a top-level vdev.
2302 find_vdev_hole(spa_t *spa)
2304 vdev_t *rvd = spa->spa_root_vdev;
2307 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
2309 for (c = 0; c < rvd->vdev_children; c++) {
2310 vdev_t *cvd = rvd->vdev_child[c];
2312 if (cvd->vdev_ishole)
2319 * Verify that vdev_add() works as expected.
2323 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
2325 ztest_shared_t *zs = ztest_shared;
2326 spa_t *spa = ztest_spa;
2332 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2334 MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
2336 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2338 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
2341 * If we have slogs then remove them 1/4 of the time.
2343 if (spa_has_slogs(spa) && ztest_random(4) == 0) {
2345 * Grab the guid from the head of the log class rotor.
2347 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
2349 spa_config_exit(spa, SCL_VDEV, FTAG);
2352 * We have to grab ztest_name_lock as writer to
2353 * prevent a race between removing a slog (dmu_objset_find)
2354 * and destroying a dataset. Removing the slog will
2355 * grab a reference on the dataset which may cause
2356 * dmu_objset_destroy() to fail with EBUSY thus
2357 * leaving the dataset in an inconsistent state.
2359 VERIFY(rw_wrlock(&ztest_name_lock) == 0);
2360 error = spa_vdev_remove(spa, guid, B_FALSE);
2361 VERIFY(rw_unlock(&ztest_name_lock) == 0);
2363 if (error && error != EEXIST)
2364 fatal(0, "spa_vdev_remove() = %d", error);
2366 spa_config_exit(spa, SCL_VDEV, FTAG);
2369 * Make 1/4 of the devices be log devices.
2371 nvroot = make_vdev_root(NULL, NULL,
2372 ztest_opts.zo_vdev_size, 0,
2373 ztest_random(4) == 0, ztest_opts.zo_raidz,
2376 error = spa_vdev_add(spa, nvroot);
2377 nvlist_free(nvroot);
2379 if (error == ENOSPC)
2380 ztest_record_enospc("spa_vdev_add");
2381 else if (error != 0)
2382 fatal(0, "spa_vdev_add() = %d", error);
2385 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2389 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2393 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
2395 ztest_shared_t *zs = ztest_shared;
2396 spa_t *spa = ztest_spa;
2397 vdev_t *rvd = spa->spa_root_vdev;
2398 spa_aux_vdev_t *sav;
2403 if (ztest_random(2) == 0) {
2404 sav = &spa->spa_spares;
2405 aux = ZPOOL_CONFIG_SPARES;
2407 sav = &spa->spa_l2cache;
2408 aux = ZPOOL_CONFIG_L2CACHE;
2411 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2413 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2415 if (sav->sav_count != 0 && ztest_random(4) == 0) {
2417 * Pick a random device to remove.
2419 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
2422 * Find an unused device we can add.
2424 zs->zs_vdev_aux = 0;
2426 char path[MAXPATHLEN];
2428 (void) snprintf(path, sizeof (path), ztest_aux_template,
2429 ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
2431 for (c = 0; c < sav->sav_count; c++)
2432 if (strcmp(sav->sav_vdevs[c]->vdev_path,
2435 if (c == sav->sav_count &&
2436 vdev_lookup_by_path(rvd, path) == NULL)
2442 spa_config_exit(spa, SCL_VDEV, FTAG);
2448 nvlist_t *nvroot = make_vdev_root(NULL, aux,
2449 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
2450 error = spa_vdev_add(spa, nvroot);
2452 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
2453 nvlist_free(nvroot);
2456 * Remove an existing device. Sometimes, dirty its
2457 * vdev state first to make sure we handle removal
2458 * of devices that have pending state changes.
2460 if (ztest_random(2) == 0)
2461 (void) vdev_online(spa, guid, 0, NULL);
2463 error = spa_vdev_remove(spa, guid, B_FALSE);
2464 if (error != 0 && error != EBUSY)
2465 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
2468 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2472 * Split a pool if it has mirror top-level vdevs.
2476 ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2478 ztest_shared_t *zs = ztest_shared;
2479 spa_t *spa = ztest_spa;
2480 vdev_t *rvd = spa->spa_root_vdev;
2481 nvlist_t *tree, **child, *config, *split, **schild;
2482 uint_t c, children, schildren = 0, lastlogid = 0;
2485 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2487 /* ensure we have a usable config; mirrors of raidz aren't supported */
2488 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2489 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2493 /* clean up the old pool, if any */
2494 (void) spa_destroy("splitp");
2496 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2498 /* generate a config from the existing config */
2499 mutex_enter(&spa->spa_props_lock);
2500 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
2502 mutex_exit(&spa->spa_props_lock);
2504 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2507 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
2508 for (c = 0; c < children; c++) {
2509 vdev_t *tvd = rvd->vdev_child[c];
2513 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
2514 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
2516 VERIFY(nvlist_add_string(schild[schildren],
2517 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
2518 VERIFY(nvlist_add_uint64(schild[schildren],
2519 ZPOOL_CONFIG_IS_HOLE, 1) == 0);
2521 lastlogid = schildren;
2526 VERIFY(nvlist_lookup_nvlist_array(child[c],
2527 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2528 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
2531 /* OK, create a config that can be used to split */
2532 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
2533 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
2534 VDEV_TYPE_ROOT) == 0);
2535 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
2536 lastlogid != 0 ? lastlogid : schildren) == 0);
2538 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2539 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2541 for (c = 0; c < schildren; c++)
2542 nvlist_free(schild[c]);
2546 spa_config_exit(spa, SCL_VDEV, FTAG);
2548 (void) rw_wrlock(&ztest_name_lock);
2549 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2550 (void) rw_unlock(&ztest_name_lock);
2552 nvlist_free(config);
2555 (void) printf("successful split - results:\n");
2556 mutex_enter(&spa_namespace_lock);
2557 show_pool_stats(spa);
2558 show_pool_stats(spa_lookup("splitp"));
2559 mutex_exit(&spa_namespace_lock);
2563 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2568 * Verify that we can attach and detach devices.
2572 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2574 ztest_shared_t *zs = ztest_shared;
2575 spa_t *spa = ztest_spa;
2576 spa_aux_vdev_t *sav = &spa->spa_spares;
2577 vdev_t *rvd = spa->spa_root_vdev;
2578 vdev_t *oldvd, *newvd, *pvd;
2582 uint64_t ashift = ztest_get_ashift();
2583 uint64_t oldguid, pguid;
2584 size_t oldsize, newsize;
2585 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
2587 int oldvd_has_siblings = B_FALSE;
2588 int newvd_is_spare = B_FALSE;
2590 int error, expected_error;
2592 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2593 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
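	/*
	 * leaves is the number of leaf vdevs under each top-level vdev
	 * (mirror ways times raidz width).
	 */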
2595 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2598 * Decide whether to do an attach or a replace.
2600 replacing = ztest_random(2);
2603 * Pick a random top-level vdev.
2605 top = ztest_random_vdev_top(spa, B_TRUE);
2608 * Pick a random leaf within it.
2610 leaf = ztest_random(leaves);
2615 oldvd = rvd->vdev_child[top];
2616 if (zs->zs_mirrors >= 1) {
2617 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
2618 ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
2619 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
2621 if (ztest_opts.zo_raidz > 1) {
2622 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
2623 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
2624 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
2628 * If we're already doing an attach or replace, oldvd may be a
2629 * mirror vdev -- in which case, pick a random child.
2631 while (oldvd->vdev_children != 0) {
2632 oldvd_has_siblings = B_TRUE;
2633 ASSERT(oldvd->vdev_children >= 2);
2634 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
2637 oldguid = oldvd->vdev_guid;
2638 oldsize = vdev_get_min_asize(oldvd);
2639 oldvd_is_log = oldvd->vdev_top->vdev_islog;
2640 (void) strcpy(oldpath, oldvd->vdev_path);
2641 pvd = oldvd->vdev_parent;
2642 pguid = pvd->vdev_guid;
2645 * If oldvd has siblings, then half of the time, detach it.
2647 if (oldvd_has_siblings && ztest_random(2) == 0) {
2648 spa_config_exit(spa, SCL_VDEV, FTAG);
2649 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2650 if (error != 0 && error != ENODEV && error != EBUSY &&
2652 fatal(0, "detach (%s) returned %d", oldpath, error);
2653 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2658 * For the new vdev, choose with equal probability between the two
2659  * standard paths (ending in either 'a' or 'b') and a random hot spare.
2661 if (sav->sav_count != 0 && ztest_random(3) == 0) {
2662 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2663 newvd_is_spare = B_TRUE;
2664 (void) strcpy(newpath, newvd->vdev_path);
2666 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template,
2667 ztest_opts.zo_dir, ztest_opts.zo_pool,
2668 top * leaves + leaf);
2669 if (ztest_random(2) == 0)
2670 newpath[strlen(newpath) - 1] = 'b';
2671 newvd = vdev_lookup_by_path(rvd, newpath);
2675 newsize = vdev_get_min_asize(newvd);
2678 * Make newsize a little bigger or smaller than oldsize.
2679 * If it's smaller, the attach should fail.
2680 * If it's larger, and we're doing a replace,
2681 * we should get dynamic LUN growth when we're done.
2683 newsize = 10 * oldsize / (9 + ztest_random(3));
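	/*
	 * The divisor is 9, 10, or 11, so newsize is roughly 111%, 100%,
	 * or 91% of oldsize.
	 */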
2687 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2688 * unless it's a replace; in that case any non-replacing parent is OK.
2690 * If newvd is already part of the pool, it should fail with EBUSY.
2692 * If newvd is too small, it should fail with EOVERFLOW.
2694 if (pvd->vdev_ops != &vdev_mirror_ops &&
2695 pvd->vdev_ops != &vdev_root_ops && (!replacing ||
2696 pvd->vdev_ops == &vdev_replacing_ops ||
2697 pvd->vdev_ops == &vdev_spare_ops))
2698 expected_error = ENOTSUP;
2699 else if (newvd_is_spare && (!replacing || oldvd_is_log))
2700 expected_error = ENOTSUP;
2701 else if (newvd == oldvd)
2702 expected_error = replacing ? 0 : EBUSY;
2703 else if (vdev_lookup_by_path(rvd, newpath) != NULL)
2704 expected_error = EBUSY;
2705 else if (newsize < oldsize)
2706 expected_error = EOVERFLOW;
2707 else if (ashift > oldvd->vdev_top->vdev_ashift)
2708 expected_error = EDOM;
2712 spa_config_exit(spa, SCL_VDEV, FTAG);
2715 * Build the nvlist describing newpath.
2717 root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0,
2718 ashift, 0, 0, 0, 1);
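	/*
	 * A size of 0 is passed when newvd already exists (a spare or an
	 * in-pool device), which presumably tells make_vdev_root() not to
	 * create a backing file; otherwise a file of newsize bytes is
	 * created for newpath.
	 */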
2720 error = spa_vdev_attach(spa, oldguid, root, replacing);
2725 * If our parent was the replacing vdev, but the replace completed,
2726 * then instead of failing with ENOTSUP we may either succeed,
2727 * fail with ENODEV, or fail with EOVERFLOW.
2729 if (expected_error == ENOTSUP &&
2730 (error == 0 || error == ENODEV || error == EOVERFLOW))
2731 expected_error = error;
2734 * If someone grew the LUN, the replacement may be too small.
2736 if (error == EOVERFLOW || error == EBUSY)
2737 expected_error = error;
2739 /* XXX workaround 6690467 */
2740 if (error != expected_error && expected_error != EBUSY) {
2741 fatal(0, "attach (%s %llu, %s %llu, %d) "
2742 "returned %d, expected %d",
2743 oldpath, (longlong_t)oldsize, newpath,
2744 (longlong_t)newsize, replacing, error, expected_error);
2747 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2751 * Callback function which expands the physical size of the vdev.
2754 grow_vdev(vdev_t *vd, void *arg)
2756 spa_t *spa = vd->vdev_spa;
2757 size_t *newsize = arg;
2761 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2762 ASSERT(vd->vdev_ops->vdev_op_leaf);
2764 if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
2767 fsize = lseek(fd, 0, SEEK_END);
2768 (void) ftruncate(fd, *newsize);
2770 if (ztest_opts.zo_verbose >= 6) {
2771 (void) printf("%s grew from %lu to %lu bytes\n",
2772 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
2779 * Callback function which expands a given vdev by calling vdev_online().
2783 online_vdev(vdev_t *vd, void *arg)
2785 spa_t *spa = vd->vdev_spa;
2786 vdev_t *tvd = vd->vdev_top;
2787 uint64_t guid = vd->vdev_guid;
2788 uint64_t generation = spa->spa_config_generation + 1;
2789 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2792 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2793 ASSERT(vd->vdev_ops->vdev_op_leaf);
2795 /* Calling vdev_online will initialize the new metaslabs */
2796 spa_config_exit(spa, SCL_STATE, spa);
2797 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2798 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2801 * If vdev_online returned an error or the underlying vdev_open
2802 * failed then we abort the expand. The only way to know that
2803 * vdev_open fails is by checking the returned newstate.
2805 if (error || newstate != VDEV_STATE_HEALTHY) {
2806 if (ztest_opts.zo_verbose >= 5) {
2807 (void) printf("Unable to expand vdev, state %llu, "
2808 "error %d\n", (u_longlong_t)newstate, error);
2812 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
2815 * Since we dropped the lock we need to ensure that we're
2816 * still talking to the original vdev. It's possible this
2817 * vdev may have been detached/replaced while we were
2818 * trying to online it.
2820 if (generation != spa->spa_config_generation) {
2821 if (ztest_opts.zo_verbose >= 5) {
2822 (void) printf("vdev configuration has changed, "
2823 "guid %llu, state %llu, expected gen %llu, "
2826 (u_longlong_t)tvd->vdev_state,
2827 (u_longlong_t)generation,
2828 (u_longlong_t)spa->spa_config_generation);
2836 * Traverse the vdev tree calling the supplied function.
2837 * We continue to walk the tree until we either have walked all
2838 * children or we receive a non-NULL return from the callback.
2839  * If a NULL callback is passed, then we just return the first
2840 * leaf vdev we encounter.
2843 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
2845 if (vd->vdev_ops->vdev_op_leaf) {
2849 return (func(vd, arg));
2852 for (uint_t c = 0; c < vd->vdev_children; c++) {
2853 vdev_t *cvd = vd->vdev_child[c];
2854 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
2861 * Verify that dynamic LUN growth works as expected.
2865 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
2867 spa_t *spa = ztest_spa;
2869 metaslab_class_t *mc;
2870 metaslab_group_t *mg;
2871 size_t psize, newsize;
2873 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
2875 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2876 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2878 top = ztest_random_vdev_top(spa, B_TRUE);
2880 tvd = spa->spa_root_vdev->vdev_child[top];
2883 old_ms_count = tvd->vdev_ms_count;
2884 old_class_space = metaslab_class_get_space(mc);
2887 * Determine the size of the first leaf vdev associated with
2888 * our top-level device.
2890 vd = vdev_walk_tree(tvd, NULL, NULL);
2891 ASSERT3P(vd, !=, NULL);
2892 ASSERT(vd->vdev_ops->vdev_op_leaf);
2894 psize = vd->vdev_psize;
2897 * We only try to expand the vdev if it's healthy, less than 4x its
2898 * original size, and it has a valid psize.
2900 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
2901 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
2902 spa_config_exit(spa, SCL_STATE, spa);
2903 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2907 	newsize = psize + psize / 8;
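	/* i.e. grow the device by 1/8 (12.5%) of its current physical size */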
2908 ASSERT3U(newsize, >, psize);
2910 if (ztest_opts.zo_verbose >= 6) {
2911 (void) printf("Expanding LUN %s from %lu to %lu\n",
2912 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
2916 	 * Growing the vdev is a two-step process:
2917 	 *	1) expand the physical size (i.e. relabel)
2918 	 *	2) online the vdev to create the new metaslabs
2920 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
2921 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
2922 tvd->vdev_state != VDEV_STATE_HEALTHY) {
2923 if (ztest_opts.zo_verbose >= 5) {
2924 (void) printf("Could not expand LUN because "
2925 "the vdev configuration changed.\n");
2927 spa_config_exit(spa, SCL_STATE, spa);
2928 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2932 spa_config_exit(spa, SCL_STATE, spa);
2935 * Expanding the LUN will update the config asynchronously,
2936 * thus we must wait for the async thread to complete any
2937 * pending tasks before proceeding.
2941 mutex_enter(&spa->spa_async_lock);
2942 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
2943 mutex_exit(&spa->spa_async_lock);
2946 txg_wait_synced(spa_get_dsl(spa), 0);
2947 (void) poll(NULL, 0, 100);
2950 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2952 tvd = spa->spa_root_vdev->vdev_child[top];
2953 new_ms_count = tvd->vdev_ms_count;
2954 new_class_space = metaslab_class_get_space(mc);
2956 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
2957 if (ztest_opts.zo_verbose >= 5) {
2958 (void) printf("Could not verify LUN expansion due to "
2959 "intervening vdev offline or remove.\n");
2961 spa_config_exit(spa, SCL_STATE, spa);
2962 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2967 * Make sure we were able to grow the vdev.
2969 if (new_ms_count <= old_ms_count)
2970 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
2971 old_ms_count, new_ms_count);
2974 * Make sure we were able to grow the pool.
2976 if (new_class_space <= old_class_space)
2977 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
2978 old_class_space, new_class_space);
2980 if (ztest_opts.zo_verbose >= 5) {
2981 char oldnumbuf[6], newnumbuf[6];
2983 nicenum(old_class_space, oldnumbuf);
2984 nicenum(new_class_space, newnumbuf);
2985 (void) printf("%s grew from %s to %s\n",
2986 spa->spa_name, oldnumbuf, newnumbuf);
2989 spa_config_exit(spa, SCL_STATE, spa);
2990 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2994 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
2998 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
3001 * Create the objects common to all ztest datasets.
3003 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
3004 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
3008 ztest_dataset_create(char *dsname)
3010 uint64_t zilset = ztest_random(100);
3011 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
3012 ztest_objset_create_cb, NULL);
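	/*
	 * Roughly 20% of the time (zilset 80..99), force sync=always on the
	 * newly created dataset.
	 */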
3014 if (err || zilset < 80)
3017 if (ztest_opts.zo_verbose >= 6)
3018 (void) printf("Setting dataset %s to sync always\n", dsname);
3019 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3020 ZFS_SYNC_ALWAYS, B_FALSE));
3025 ztest_objset_destroy_cb(const char *name, void *arg)
3028 dmu_object_info_t doi;
3032 * Verify that the dataset contains a directory object.
3034 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os));
3035 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3036 if (error != ENOENT) {
3037 /* We could have crashed in the middle of destroying it */
3038 ASSERT3U(error, ==, 0);
3039 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3040 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3042 dmu_objset_rele(os, FTAG);
3045 * Destroy the dataset.
3047 VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE));
3052 ztest_snapshot_create(char *osname, uint64_t id)
3054 char snapname[MAXNAMELEN];
3057 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3060 error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1,
3061 NULL, NULL, B_FALSE, B_FALSE, -1);
3062 if (error == ENOSPC) {
3063 ztest_record_enospc(FTAG);
3066 if (error != 0 && error != EEXIST)
3067 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error);
3072 ztest_snapshot_destroy(char *osname, uint64_t id)
3074 char snapname[MAXNAMELEN];
3077 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3080 error = dmu_objset_destroy(snapname, B_FALSE);
3081 if (error != 0 && error != ENOENT)
3082 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
3088 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3094 char name[MAXNAMELEN];
3097 (void) rw_rdlock(&ztest_name_lock);
3099 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
3100 ztest_opts.zo_pool, (u_longlong_t)id);
3103 * If this dataset exists from a previous run, process its replay log
3104 * half of the time. If we don't replay it, then dmu_objset_destroy()
3105 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3107 if (ztest_random(2) == 0 &&
3108 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
3109 ztest_zd_init(&zdtmp, NULL, os);
3110 zil_replay(os, &zdtmp, ztest_replay_vector);
3111 ztest_zd_fini(&zdtmp);
3112 dmu_objset_disown(os, FTAG);
3116 * There may be an old instance of the dataset we're about to
3117 * create lying around from a previous run. If so, destroy it
3118 * and all of its snapshots.
3120 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
3121 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3124 * Verify that the destroyed dataset is no longer in the namespace.
3126 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os));
3129 * Verify that we can create a new dataset.
3131 error = ztest_dataset_create(name);
3133 if (error == ENOSPC) {
3134 ztest_record_enospc(FTAG);
3135 (void) rw_unlock(&ztest_name_lock);
3138 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3142 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3144 ztest_zd_init(&zdtmp, NULL, os);
3147 * Open the intent log for it.
3149 zilog = zil_open(os, ztest_get_data);
3152 * Put some objects in there, do a little I/O to them,
3153 * and randomly take a couple of snapshots along the way.
3155 iters = ztest_random(5);
3156 for (int i = 0; i < iters; i++) {
3157 ztest_dmu_object_alloc_free(&zdtmp, id);
3158 if (ztest_random(iters) == 0)
3159 (void) ztest_snapshot_create(name, i);
3163 * Verify that we cannot create an existing dataset.
3165 VERIFY3U(EEXIST, ==,
3166 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3169 * Verify that we can hold an objset that is also owned.
3171 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3172 dmu_objset_rele(os2, FTAG);
3175 * Verify that we cannot own an objset that is already owned.
3178 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3181 dmu_objset_disown(os, FTAG);
3182 ztest_zd_fini(&zdtmp);
3184 (void) rw_unlock(&ztest_name_lock);
3188 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3191 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3193 (void) rw_rdlock(&ztest_name_lock);
3194 (void) ztest_snapshot_destroy(zd->zd_name, id);
3195 (void) ztest_snapshot_create(zd->zd_name, id);
3196 (void) rw_unlock(&ztest_name_lock);
3200  * Clean up non-standard snapshots and clones.
3203 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3205 char snap1name[MAXNAMELEN];
3206 char clone1name[MAXNAMELEN];
3207 char snap2name[MAXNAMELEN];
3208 char clone2name[MAXNAMELEN];
3209 char snap3name[MAXNAMELEN];
3212 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3213 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3214 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3215 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3216 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3218 error = dmu_objset_destroy(clone2name, B_FALSE);
3219 if (error && error != ENOENT)
3220 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error);
3221 error = dmu_objset_destroy(snap3name, B_FALSE);
3222 if (error && error != ENOENT)
3223 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error);
3224 error = dmu_objset_destroy(snap2name, B_FALSE);
3225 if (error && error != ENOENT)
3226 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error);
3227 error = dmu_objset_destroy(clone1name, B_FALSE);
3228 if (error && error != ENOENT)
3229 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error);
3230 error = dmu_objset_destroy(snap1name, B_FALSE);
3231 if (error && error != ENOENT)
3232 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error);
3236  * Verify that dsl_dataset_promote handles EBUSY.
3239 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3243 char snap1name[MAXNAMELEN];
3244 char clone1name[MAXNAMELEN];
3245 char snap2name[MAXNAMELEN];
3246 char clone2name[MAXNAMELEN];
3247 char snap3name[MAXNAMELEN];
3248 char *osname = zd->zd_name;
3251 (void) rw_rdlock(&ztest_name_lock);
3253 ztest_dsl_dataset_cleanup(osname, id);
3255 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3256 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3257 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3258 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3259 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3261 error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1,
3262 NULL, NULL, B_FALSE, B_FALSE, -1);
3263 if (error && error != EEXIST) {
3264 if (error == ENOSPC) {
3265 ztest_record_enospc(FTAG);
3268 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error);
3271 error = dmu_objset_hold(snap1name, FTAG, &clone);
3273 fatal(0, "dmu_open_snapshot(%s) = %d", snap1name, error);
3275 error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0);
3276 dmu_objset_rele(clone, FTAG);
3278 if (error == ENOSPC) {
3279 ztest_record_enospc(FTAG);
3282 fatal(0, "dmu_objset_create(%s) = %d", clone1name, error);
3285 error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1,
3286 NULL, NULL, B_FALSE, B_FALSE, -1);
3287 if (error && error != EEXIST) {
3288 if (error == ENOSPC) {
3289 ztest_record_enospc(FTAG);
3292 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error);
3295 error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1,
3296 NULL, NULL, B_FALSE, B_FALSE, -1);
3297 if (error && error != EEXIST) {
3298 if (error == ENOSPC) {
3299 ztest_record_enospc(FTAG);
3302 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
3305 error = dmu_objset_hold(snap3name, FTAG, &clone);
3307 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
3309 error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0);
3310 dmu_objset_rele(clone, FTAG);
3312 if (error == ENOSPC) {
3313 ztest_record_enospc(FTAG);
3316 fatal(0, "dmu_objset_create(%s) = %d", clone2name, error);
3319 error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds);
3321 fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error);
3322 error = dsl_dataset_promote(clone2name, NULL);
3324 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3326 dsl_dataset_disown(ds, FTAG);
3329 ztest_dsl_dataset_cleanup(osname, id);
3331 (void) rw_unlock(&ztest_name_lock);
3335 * Verify that dmu_object_{alloc,free} work as expected.
3338 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3341 int batchsize = sizeof (od) / sizeof (od[0]);
3343 for (int b = 0; b < batchsize; b++)
3344 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3347 * Destroy the previous batch of objects, create a new batch,
3348 * and do some I/O on the new objects.
3350 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
3353 while (ztest_random(4 * batchsize) != 0)
3354 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3355 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3359 * Verify that dmu_{read,write} work as expected.
3362 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3364 objset_t *os = zd->zd_os;
3367 int i, freeit, error;
3369 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3370 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3371 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
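	/* chunksize is 1000..1999 uint64_t's, i.e. 8,000..15,992 bytes */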
3372 uint64_t regions = 997;
3373 uint64_t stride = 123456789ULL;
3374 uint64_t width = 40;
3375 int free_percent = 5;
3378 * This test uses two objects, packobj and bigobj, that are always
3379 * updated together (i.e. in the same tx) so that their contents are
3380 * in sync and can be compared. Their contents relate to each other
3381 * in a simple way: packobj is a dense array of 'bufwad' structures,
3382 * while bigobj is a sparse array of the same bufwads. Specifically,
3383 * for any index n, there are three bufwads that should be identical:
3385 * packobj, at offset n * sizeof (bufwad_t)
3386 * bigobj, at the head of the nth chunk
3387 * bigobj, at the tail of the nth chunk
3389 * The chunk size is arbitrary. It doesn't have to be a power of two,
3390 * and it doesn't have any relation to the object blocksize.
3391 * The only requirement is that it can hold at least two bufwads.
3393 * Normally, we write the bufwad to each of these locations.
3394 * However, free_percent of the time we instead write zeroes to
3395 * packobj and perform a dmu_free_range() on bigobj. By comparing
3396 * bigobj to packobj, we can verify that the DMU is correctly
3397 * tracking which parts of an object are allocated and free,
3398 * and that the contents of the allocated blocks are correct.
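	 *
	 * Concretely, for chunk size C and index n, the three copies live at
	 * offset n * sizeof (bufwad_t) in packobj, offset n * C in bigobj
	 * (chunk head), and offset (n + 1) * C - sizeof (bufwad_t) in bigobj
	 * (chunk tail).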
3402 * Read the directory info. If it's the first time, set things up.
3404 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3405 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3407 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3410 bigobj = od[0].od_object;
3411 packobj = od[1].od_object;
3412 chunksize = od[0].od_gen;
3413 ASSERT(chunksize == od[1].od_gen);
3416 * Prefetch a random chunk of the big object.
3417 * Our aim here is to get some async reads in flight
3418 * for blocks that we may free below; the DMU should
3419 * handle this race correctly.
3421 n = ztest_random(regions) * stride + ztest_random(width);
3422 s = 1 + ztest_random(2 * width - 1);
3423 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3426 * Pick a random index and compute the offsets into packobj and bigobj.
3428 n = ztest_random(regions) * stride + ztest_random(width);
3429 s = 1 + ztest_random(width - 1);
3431 packoff = n * sizeof (bufwad_t);
3432 packsize = s * sizeof (bufwad_t);
3434 bigoff = n * chunksize;
3435 bigsize = s * chunksize;
3437 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3438 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3441 	 * free_percent of the time, free a range of bigobj rather than overwriting it.
3444 freeit = (ztest_random(100) < free_percent);
3447 * Read the current contents of our objects.
3449 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3451 ASSERT3U(error, ==, 0);
3452 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3454 ASSERT3U(error, ==, 0);
3457 * Get a tx for the mods to both packobj and bigobj.
3459 tx = dmu_tx_create(os);
3461 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3464 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3466 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3468 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3470 umem_free(packbuf, packsize);
3471 umem_free(bigbuf, bigsize);
3475 dmu_object_set_checksum(os, bigobj,
3476 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3478 dmu_object_set_compress(os, bigobj,
3479 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3482 * For each index from n to n + s, verify that the existing bufwad
3483 * in packobj matches the bufwads at the head and tail of the
3484 * corresponding chunk in bigobj. Then update all three bufwads
3485 * with the new values we want to write out.
3487 for (i = 0; i < s; i++) {
3489 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3491 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3493 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3495 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3496 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3498 if (pack->bw_txg > txg)
3499 fatal(0, "future leak: got %llx, open txg is %llx",
3502 if (pack->bw_data != 0 && pack->bw_index != n + i)
3503 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3504 pack->bw_index, n, i);
3506 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3507 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3509 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3510 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3513 bzero(pack, sizeof (bufwad_t));
3515 pack->bw_index = n + i;
3517 pack->bw_data = 1 + ztest_random(-2ULL);
3524 * We've verified all the old bufwads, and made new ones.
3525 * Now write them out.
3527 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3530 if (ztest_opts.zo_verbose >= 7) {
3531 (void) printf("freeing offset %llx size %llx"
3533 (u_longlong_t)bigoff,
3534 (u_longlong_t)bigsize,
3537 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
3539 if (ztest_opts.zo_verbose >= 7) {
3540 (void) printf("writing offset %llx size %llx"
3542 (u_longlong_t)bigoff,
3543 (u_longlong_t)bigsize,
3546 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
3552 * Sanity check the stuff we just wrote.
3555 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3556 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3558 VERIFY(0 == dmu_read(os, packobj, packoff,
3559 packsize, packcheck, DMU_READ_PREFETCH));
3560 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3561 bigsize, bigcheck, DMU_READ_PREFETCH));
3563 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3564 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3566 umem_free(packcheck, packsize);
3567 umem_free(bigcheck, bigsize);
3570 umem_free(packbuf, packsize);
3571 umem_free(bigbuf, bigsize);
3575 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3576 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3584 * For each index from n to n + s, verify that the existing bufwad
3585 * in packobj matches the bufwads at the head and tail of the
3586 * corresponding chunk in bigobj. Then update all three bufwads
3587 * with the new values we want to write out.
3589 for (i = 0; i < s; i++) {
3591 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3593 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3595 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3597 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3598 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3600 if (pack->bw_txg > txg)
3601 fatal(0, "future leak: got %llx, open txg is %llx",
3604 if (pack->bw_data != 0 && pack->bw_index != n + i)
3605 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3606 pack->bw_index, n, i);
3608 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3609 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3611 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3612 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3614 pack->bw_index = n + i;
3616 pack->bw_data = 1 + ztest_random(-2ULL);
3624 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3626 objset_t *os = zd->zd_os;
3632 bufwad_t *packbuf, *bigbuf;
3633 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3634 uint64_t blocksize = ztest_random_blocksize();
3635 uint64_t chunksize = blocksize;
3636 uint64_t regions = 997;
3637 uint64_t stride = 123456789ULL;
3639 dmu_buf_t *bonus_db;
3640 arc_buf_t **bigbuf_arcbufs;
3641 dmu_object_info_t doi;
3644 * This test uses two objects, packobj and bigobj, that are always
3645 * updated together (i.e. in the same tx) so that their contents are
3646 * in sync and can be compared. Their contents relate to each other
3647 * in a simple way: packobj is a dense array of 'bufwad' structures,
3648 * while bigobj is a sparse array of the same bufwads. Specifically,
3649 * for any index n, there are three bufwads that should be identical:
3651 * packobj, at offset n * sizeof (bufwad_t)
3652 * bigobj, at the head of the nth chunk
3653 * bigobj, at the tail of the nth chunk
3655 * The chunk size is set equal to bigobj block size so that
3656 * dmu_assign_arcbuf() can be tested for object updates.
3660 * Read the directory info. If it's the first time, set things up.
3662 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3663 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3665 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3668 bigobj = od[0].od_object;
3669 packobj = od[1].od_object;
3670 blocksize = od[0].od_blocksize;
3671 chunksize = blocksize;
3672 ASSERT(chunksize == od[1].od_gen);
3674 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3675 VERIFY(ISP2(doi.doi_data_block_size));
3676 VERIFY(chunksize == doi.doi_data_block_size);
3677 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3680 * Pick a random index and compute the offsets into packobj and bigobj.
3682 n = ztest_random(regions) * stride + ztest_random(width);
3683 s = 1 + ztest_random(width - 1);
3685 packoff = n * sizeof (bufwad_t);
3686 packsize = s * sizeof (bufwad_t);
3688 bigoff = n * chunksize;
3689 bigsize = s * chunksize;
3691 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3692 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3694 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3696 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3699 	 * Iteration 0 tests zcopy for DB_UNCACHED dbufs.
3700 	 * Iteration 1 tests zcopy to already referenced dbufs.
3701 	 * Iteration 2 tests zcopy to a dirty dbuf in the same txg.
3702 	 * Iteration 3 tests zcopy to a dbuf dirtied in a previous txg.
3703 	 * Iteration 4 tests zcopy when the dbuf is no longer dirty.
3704 	 * Iteration 5 tests zcopy when it can't be done.
3705 * Iteration 6 one more zcopy write.
3707 for (i = 0; i < 7; i++) {
3712 * In iteration 5 (i == 5) use arcbufs
3713 * that don't match bigobj blksz to test
3714 * dmu_assign_arcbuf() when it can't directly
3715 * assign an arcbuf to a dbuf.
3717 for (j = 0; j < s; j++) {
3720 dmu_request_arcbuf(bonus_db, chunksize);
3722 bigbuf_arcbufs[2 * j] =
3723 dmu_request_arcbuf(bonus_db, chunksize / 2);
3724 bigbuf_arcbufs[2 * j + 1] =
3725 dmu_request_arcbuf(bonus_db, chunksize / 2);
3730 * Get a tx for the mods to both packobj and bigobj.
3732 tx = dmu_tx_create(os);
3734 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3735 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3737 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3739 umem_free(packbuf, packsize);
3740 umem_free(bigbuf, bigsize);
3741 for (j = 0; j < s; j++) {
3743 dmu_return_arcbuf(bigbuf_arcbufs[j]);
3746 bigbuf_arcbufs[2 * j]);
3748 bigbuf_arcbufs[2 * j + 1]);
3751 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3752 dmu_buf_rele(bonus_db, FTAG);
3757 		 * 50% of the time, don't read objects in the first iteration, to
3758 		 * test dmu_assign_arcbuf() for the case when there are no
3759 * existing dbufs for the specified offsets.
3761 if (i != 0 || ztest_random(2) != 0) {
3762 error = dmu_read(os, packobj, packoff,
3763 packsize, packbuf, DMU_READ_PREFETCH);
3764 ASSERT3U(error, ==, 0);
3765 error = dmu_read(os, bigobj, bigoff, bigsize,
3766 bigbuf, DMU_READ_PREFETCH);
3767 ASSERT3U(error, ==, 0);
3769 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3773 * We've verified all the old bufwads, and made new ones.
3774 * Now write them out.
3776 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3777 if (ztest_opts.zo_verbose >= 7) {
3778 (void) printf("writing offset %llx size %llx"
3780 (u_longlong_t)bigoff,
3781 (u_longlong_t)bigsize,
3784 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3787 bcopy((caddr_t)bigbuf + (off - bigoff),
3788 bigbuf_arcbufs[j]->b_data, chunksize);
3790 bcopy((caddr_t)bigbuf + (off - bigoff),
3791 bigbuf_arcbufs[2 * j]->b_data,
3793 bcopy((caddr_t)bigbuf + (off - bigoff) +
3795 bigbuf_arcbufs[2 * j + 1]->b_data,
3800 VERIFY(dmu_buf_hold(os, bigobj, off,
3801 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
3804 dmu_assign_arcbuf(bonus_db, off,
3805 bigbuf_arcbufs[j], tx);
3807 dmu_assign_arcbuf(bonus_db, off,
3808 bigbuf_arcbufs[2 * j], tx);
3809 dmu_assign_arcbuf(bonus_db,
3810 off + chunksize / 2,
3811 bigbuf_arcbufs[2 * j + 1], tx);
3814 dmu_buf_rele(dbt, FTAG);
3820 * Sanity check the stuff we just wrote.
3823 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3824 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3826 VERIFY(0 == dmu_read(os, packobj, packoff,
3827 packsize, packcheck, DMU_READ_PREFETCH));
3828 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3829 bigsize, bigcheck, DMU_READ_PREFETCH));
3831 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3832 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3834 umem_free(packcheck, packsize);
3835 umem_free(bigcheck, bigsize);
3838 txg_wait_open(dmu_objset_pool(os), 0);
3839 } else if (i == 3) {
3840 txg_wait_synced(dmu_objset_pool(os), 0);
3844 dmu_buf_rele(bonus_db, FTAG);
3845 umem_free(packbuf, packsize);
3846 umem_free(bigbuf, bigsize);
3847 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3852 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
3855 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
3856 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
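	/*
	 * offset is a random power of two between 2^43 and 2^62, plus a
	 * random range-lock bucket shifted to SPA_MAXBLOCKSIZE alignment.
	 */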
3859 * Have multiple threads write to large offsets in an object
3860 * to verify that parallel writes to an object -- even to the
3861 	 * same blocks within the object -- don't cause any trouble.
3863 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
3865 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3868 while (ztest_random(10) != 0)
3869 ztest_io(zd, od[0].od_object, offset);
3873 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
3876 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
3877 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3878 uint64_t count = ztest_random(20) + 1;
3879 uint64_t blocksize = ztest_random_blocksize();
3882 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3884 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
3887 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
3890 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
3892 data = umem_zalloc(blocksize, UMEM_NOFAIL);
3894 while (ztest_random(count) != 0) {
3895 uint64_t randoff = offset + (ztest_random(count) * blocksize);
3896 if (ztest_write(zd, od[0].od_object, randoff, blocksize,
3899 while (ztest_random(4) != 0)
3900 ztest_io(zd, od[0].od_object, randoff);
3903 umem_free(data, blocksize);
3907 * Verify that zap_{create,destroy,add,remove,update} work as expected.
3909 #define ZTEST_ZAP_MIN_INTS 1
3910 #define ZTEST_ZAP_MAX_INTS 4
3911 #define ZTEST_ZAP_MAX_PROPS 1000
3914 ztest_zap(ztest_ds_t *zd, uint64_t id)
3916 objset_t *os = zd->zd_os;
3919 uint64_t txg, last_txg;
3920 uint64_t value[ZTEST_ZAP_MAX_INTS];
3921 uint64_t zl_ints, zl_intsize, prop;
3924 char propname[100], txgname[100];
3926 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
3928 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
3930 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
3933 object = od[0].od_object;
3936 * Generate a known hash collision, and verify that
3937 	 * we can look up and remove both entries.
3939 tx = dmu_tx_create(os);
3940 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
3941 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3944 for (i = 0; i < 2; i++) {
3946 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
3949 for (i = 0; i < 2; i++) {
3950 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
3951 sizeof (uint64_t), 1, &value[i], tx));
3953 zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
3954 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3955 ASSERT3U(zl_ints, ==, 1);
3957 for (i = 0; i < 2; i++) {
3958 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
3963 	 * Generate a bunch of random entries.
3965 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
3967 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
3968 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
3969 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
3970 bzero(value, sizeof (value));
3974 * If these zap entries already exist, validate their contents.
3976 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
3978 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3979 ASSERT3U(zl_ints, ==, 1);
3981 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
3982 zl_ints, &last_txg) == 0);
3984 VERIFY(zap_length(os, object, propname, &zl_intsize,
3987 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3988 ASSERT3U(zl_ints, ==, ints);
3990 VERIFY(zap_lookup(os, object, propname, zl_intsize,
3991 zl_ints, value) == 0);
3993 for (i = 0; i < ints; i++) {
3994 ASSERT3U(value[i], ==, last_txg + object + i);
3997 ASSERT3U(error, ==, ENOENT);
4001 * Atomically update two entries in our zap object.
4002 * The first is named txg_%llu, and contains the txg
4003 * in which the property was last updated. The second
4004 * is named prop_%llu, and the nth element of its value
4005 * should be txg + object + n.
4007 tx = dmu_tx_create(os);
4008 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4009 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4014 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4016 for (i = 0; i < ints; i++)
4017 value[i] = txg + object + i;
4019 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
4021 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
4027 * Remove a random pair of entries.
4029 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4030 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4031 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4033 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4035 if (error == ENOENT)
4038 ASSERT3U(error, ==, 0);
4040 tx = dmu_tx_create(os);
4041 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4042 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4045 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
4046 VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
4051  * Test case exercising the upgrade of a microzap to a fatzap.
4054 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4056 objset_t *os = zd->zd_os;
4058 uint64_t object, txg;
4060 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4062 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4065 object = od[0].od_object;
4068 * Add entries to this ZAP and make sure it spills over
4069 * and gets upgraded to a fatzap. Also, since we are adding
4070 	 * 2050 entries, we should see ptrtbl growth and leaf-block splits.
4072 for (int i = 0; i < 2050; i++) {
4073 char name[MAXNAMELEN];
4078 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4081 tx = dmu_tx_create(os);
4082 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4083 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4086 error = zap_add(os, object, name, sizeof (uint64_t), 1,
4088 ASSERT(error == 0 || error == EEXIST);
4095 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4097 objset_t *os = zd->zd_os;
4099 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4101 int i, namelen, error;
4102 int micro = ztest_random(2);
4103 char name[20], string_value[20];
4106 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4108 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4111 object = od[0].od_object;
4114 * Generate a random name of the form 'xxx.....' where each
4115 * x is a random printable character and the dots are dots.
4116 * There are 94 such characters, and the name length goes from
4117 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4119 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4121 for (i = 0; i < 3; i++)
4122 name[i] = '!' + ztest_random('~' - '!' + 1);
4123 for (; i < namelen - 1; i++)
4127 if ((namelen & 1) || micro) {
4128 wsize = sizeof (txg);
4134 data = string_value;
4138 VERIFY(zap_count(os, object, &count) == 0);
4139 ASSERT(count != -1ULL);
4142 * Select an operation: length, lookup, add, update, remove.
4144 i = ztest_random(5);
4147 tx = dmu_tx_create(os);
4148 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4149 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4152 bcopy(name, string_value, namelen);
4156 bzero(string_value, namelen);
4162 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4164 ASSERT3U(wsize, ==, zl_wsize);
4165 ASSERT3U(wc, ==, zl_wc);
4167 ASSERT3U(error, ==, ENOENT);
4172 error = zap_lookup(os, object, name, wsize, wc, data);
4174 if (data == string_value &&
4175 bcmp(name, data, namelen) != 0)
4176 fatal(0, "name '%s' != val '%s' len %d",
4177 name, data, namelen);
4179 ASSERT3U(error, ==, ENOENT);
4184 error = zap_add(os, object, name, wsize, wc, data, tx);
4185 ASSERT(error == 0 || error == EEXIST);
4189 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4193 error = zap_remove(os, object, name, tx);
4194 ASSERT(error == 0 || error == ENOENT);
4203 * Commit callback data.
4205 typedef struct ztest_cb_data {
4206 list_node_t zcd_node;
4208 int zcd_expected_err;
4209 boolean_t zcd_added;
4210 boolean_t zcd_called;
4214 /* This is the actual commit callback function */
4216 ztest_commit_callback(void *arg, int error)
4218 ztest_cb_data_t *data = arg;
4219 uint64_t synced_txg;
4221 VERIFY(data != NULL);
4222 VERIFY3S(data->zcd_expected_err, ==, error);
4223 VERIFY(!data->zcd_called);
4225 synced_txg = spa_last_synced_txg(data->zcd_spa);
4226 if (data->zcd_txg > synced_txg)
4227 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4228 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4231 data->zcd_called = B_TRUE;
4233 if (error == ECANCELED) {
4234 ASSERT3U(data->zcd_txg, ==, 0);
4235 ASSERT(!data->zcd_added);
4238 * The private callback data should be destroyed here, but
4239 * since we are going to check the zcd_called field after
4240 * dmu_tx_abort(), we will destroy it there.
4245 /* Was this callback added to the global callback list? */
4246 if (!data->zcd_added)
4249 ASSERT3U(data->zcd_txg, !=, 0);
4251 /* Remove our callback from the list */
4252 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4253 list_remove(&zcl.zcl_callbacks, data);
4254 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4257 umem_free(data, sizeof (ztest_cb_data_t));
4260 /* Allocate and initialize callback data structure */
4261 static ztest_cb_data_t *
4262 ztest_create_cb_data(objset_t *os, uint64_t txg)
4264 ztest_cb_data_t *cb_data;
4266 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4268 cb_data->zcd_txg = txg;
4269 cb_data->zcd_spa = dmu_objset_spa(os);
4275 * If a number of txgs equal to this threshold have been created after a commit
4276 * callback has been registered but not called, then we assume there is an
4277 * implementation bug.
4279 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2)
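/*
 * TXG_CONCURRENT_STATES is normally 3 (open, quiescing, syncing), which
 * makes this threshold 5 txgs.
 */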
4282 * Commit callback test.
4285 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4287 objset_t *os = zd->zd_os;
4290 ztest_cb_data_t *cb_data[3], *tmp_cb;
4291 uint64_t old_txg, txg;
4294 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4296 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4299 tx = dmu_tx_create(os);
4301 cb_data[0] = ztest_create_cb_data(os, 0);
4302 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4304 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
4306 /* Every once in a while, abort the transaction on purpose */
4307 if (ztest_random(100) == 0)
4311 error = dmu_tx_assign(tx, TXG_NOWAIT);
4313 txg = error ? 0 : dmu_tx_get_txg(tx);
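	/*
	 * A txg of 0 means the assignment failed; callbacks registered on
	 * this tx will then be invoked with ECANCELED when the tx is aborted.
	 */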
4315 cb_data[0]->zcd_txg = txg;
4316 cb_data[1] = ztest_create_cb_data(os, txg);
4317 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4321 * It's not a strict requirement to call the registered
4322 		 * callbacks from inside dmu_tx_abort(), but that is what
4323 		 * happens in the current implementation,
4324 		 * so we will check for that.
4326 for (i = 0; i < 2; i++) {
4327 cb_data[i]->zcd_expected_err = ECANCELED;
4328 VERIFY(!cb_data[i]->zcd_called);
4333 for (i = 0; i < 2; i++) {
4334 VERIFY(cb_data[i]->zcd_called);
4335 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4341 cb_data[2] = ztest_create_cb_data(os, txg);
4342 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4345 * Read existing data to make sure there isn't a future leak.
4347 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
4348 &old_txg, DMU_READ_PREFETCH));
4351 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4354 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
4356 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4359 * Since commit callbacks don't have any ordering requirement and since
4360 * it is theoretically possible for a commit callback to be called
4361 * after an arbitrary amount of time has elapsed since its txg has been
4362 * synced, it is difficult to reliably determine whether a commit
4363 	 * callback hasn't been called due to high load or due to a flawed implementation.
4366 * In practice, we will assume that if after a certain number of txgs a
4367 * commit callback hasn't been called, then most likely there's an
4368 	 * implementation bug.
4370 tmp_cb = list_head(&zcl.zcl_callbacks);
4371 if (tmp_cb != NULL &&
4372 tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) {
4373 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4374 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4378 * Let's find the place to insert our callbacks.
4380 * Even though the list is ordered by txg, it is possible for the
4381 * insertion point to not be the end because our txg may already be
4382 * quiescing at this point and other callbacks in the open txg
4383 * (from other objsets) may have sneaked in.
4385 tmp_cb = list_tail(&zcl.zcl_callbacks);
4386 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4387 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4389 /* Add the 3 callbacks to the list */
4390 for (i = 0; i < 3; i++) {
4392 list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
4394 list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4397 cb_data[i]->zcd_added = B_TRUE;
4398 VERIFY(!cb_data[i]->zcd_called);
4400 tmp_cb = cb_data[i];
4403 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4410 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4412 zfs_prop_t proplist[] = {
4414 ZFS_PROP_COMPRESSION,
4419 (void) rw_rdlock(&ztest_name_lock);
4421 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4422 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4423 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4425 (void) rw_unlock(&ztest_name_lock);
4430 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4432 nvlist_t *props = NULL;
4434 (void) rw_rdlock(&ztest_name_lock);
4436 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4437 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4439 VERIFY3U(spa_prop_get(ztest_spa, &props), ==, 0);
4441 if (ztest_opts.zo_verbose >= 6)
4442 dump_nvlist(props, 4);
4446 (void) rw_unlock(&ztest_name_lock);
4450 * Test snapshot hold/release and deferred destroy.
4453 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4456 objset_t *os = zd->zd_os;
4460 char clonename[100];
4462 char osname[MAXNAMELEN];
4464 (void) rw_rdlock(&ztest_name_lock);
4466 dmu_objset_name(os, osname);
4468 (void) snprintf(snapname, 100, "sh1_%llu", id);
4469 (void) snprintf(fullname, 100, "%s@%s", osname, snapname);
4470 (void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id);
4471 	(void) snprintf(tag, 100, "tag_%llu", id);
4474 * Clean up from any previous run.
4476 (void) dmu_objset_destroy(clonename, B_FALSE);
4477 (void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4478 (void) dmu_objset_destroy(fullname, B_FALSE);
4481 * Create snapshot, clone it, mark snap for deferred destroy,
4482 * destroy clone, verify snap was also destroyed.
4484 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
4487 if (error == ENOSPC) {
4488 ztest_record_enospc("dmu_objset_snapshot");
4491 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4494 error = dmu_objset_hold(fullname, FTAG, &origin);
4496 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4498 error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0);
4499 dmu_objset_rele(origin, FTAG);
4501 if (error == ENOSPC) {
4502 ztest_record_enospc("dmu_objset_clone");
4505 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
4508 error = dmu_objset_destroy(fullname, B_TRUE);
4510 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4514 error = dmu_objset_destroy(clonename, B_FALSE);
4516 fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error);
4518 error = dmu_objset_hold(fullname, FTAG, &origin);
4519 if (error != ENOENT)
4520 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4523 * Create snapshot, add temporary hold, verify that we can't
4524 * destroy a held snapshot, mark for deferred destroy,
4525 * release hold, verify snapshot was destroyed.
4527 error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
4530 if (error == ENOSPC) {
4531 ztest_record_enospc("dmu_objset_snapshot");
4534 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4537 error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE,
4540 fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag);
4542 error = dmu_objset_destroy(fullname, B_FALSE);
4543 if (error != EBUSY) {
4544 fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
4548 error = dmu_objset_destroy(fullname, B_TRUE);
4550 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4554 error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
4556 fatal(0, "dsl_dataset_user_release(%s)", fullname, tag);
4558 VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);
4561 (void) rw_unlock(&ztest_name_lock);
4565 * Inject random faults into the on-disk data.
4569 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4571 ztest_shared_t *zs = ztest_shared;
4572 spa_t *spa = ztest_spa;
4576 uint64_t bad = 0x1990c0ffeedecadeULL;
4578 char path0[MAXPATHLEN];
4579 char pathrand[MAXPATHLEN];
4581 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4587 boolean_t islog = B_FALSE;
4589 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4590 maxfaults = MAXFAULTS();
4591 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4592 mirror_save = zs->zs_mirrors;
4593 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4595 ASSERT(leaves >= 1);
4598 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4600 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4602 if (ztest_random(2) == 0) {
4604 * Inject errors on a normal data device or slog device.
4606 top = ztest_random_vdev_top(spa, B_TRUE);
4607 leaf = ztest_random(leaves) + zs->zs_splits;
4610 * Generate paths to the first leaf in this top-level vdev,
4611 * and to the random leaf we selected. We'll induce transient
4612 * write failures and random online/offline activity on leaf 0,
4613 * and we'll write random garbage to the randomly chosen leaf.
4615 (void) snprintf(path0, sizeof (path0), ztest_dev_template,
4616 ztest_opts.zo_dir, ztest_opts.zo_pool,
4617 top * leaves + zs->zs_splits);
4618 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
4619 ztest_opts.zo_dir, ztest_opts.zo_pool,
4620 top * leaves + leaf);
4622 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
4623 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
4626 if (vd0 != NULL && maxfaults != 1) {
4628 * Make vd0 explicitly claim to be unreadable,
4629 * or unwriteable, or reach behind its back
4630 * and close the underlying fd. We can do this if
4631 * maxfaults == 0 because we'll fail and reexecute,
4632 * and we can do it if maxfaults >= 2 because we'll
4633 * have enough redundancy. If maxfaults == 1, the
4634 * combination of this with injection of random data
4635 * corruption below exceeds the pool's fault tolerance.
4637 vdev_file_t *vf = vd0->vdev_tsd;
4639 if (vf != NULL && ztest_random(3) == 0) {
4640 (void) close(vf->vf_vnode->v_fd);
4641 vf->vf_vnode->v_fd = -1;
4642 } else if (ztest_random(2) == 0) {
4643 vd0->vdev_cant_read = B_TRUE;
4645 vd0->vdev_cant_write = B_TRUE;
4647 guid0 = vd0->vdev_guid;
4651 * Inject errors on an l2cache device.
4653 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4655 if (sav->sav_count == 0) {
4656 spa_config_exit(spa, SCL_STATE, FTAG);
4659 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4660 guid0 = vd0->vdev_guid;
4661 (void) strcpy(path0, vd0->vdev_path);
4662 (void) strcpy(pathrand, vd0->vdev_path);
4666 maxfaults = INT_MAX; /* no limit on cache devices */
4669 spa_config_exit(spa, SCL_STATE, FTAG);
4672 * If we can tolerate two or more faults, or we're dealing
4673 * with a slog, randomly online/offline vd0.
4675 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4676 if (ztest_random(10) < 6) {
4677 int flags = (ztest_random(2) == 0 ?
4678 ZFS_OFFLINE_TEMPORARY : 0);
4681 * We have to grab the zs_name_lock as writer to
4682 * prevent a race between offlining a slog and
4683 * destroying a dataset. Offlining the slog will
4684 			 * grab a reference on the dataset, which may cause
4685 			 * dmu_objset_destroy() to fail with EBUSY, thus
4686 * leaving the dataset in an inconsistent state.
4689 (void) rw_wrlock(&ztest_name_lock);
4691 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4694 (void) rw_unlock(&ztest_name_lock);
4696 (void) vdev_online(spa, guid0, 0, NULL);
4704 * We have at least single-fault tolerance, so inject data corruption.
4706 fd = open(pathrand, O_RDWR);
4708 if (fd == -1) /* we hit a gap in the device namespace */
4711 fsize = lseek(fd, 0, SEEK_END);
4713 while (--iters != 0) {
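		/*
		 * Pick a random multiple of (leaves << bshift), add this
		 * leaf's (leaf << bshift) slot, then add a random
		 * 8-byte-aligned offset within the first half of that slot;
		 * different leaves therefore get corrupted in disjoint
		 * regions of their backing files.
		 */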
4714 offset = ztest_random(fsize / (leaves << bshift)) *
4715 (leaves << bshift) + (leaf << bshift) +
4716 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
4718 if (offset >= fsize)
4721 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4722 if (mirror_save != zs->zs_mirrors) {
4723 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4728 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
4729 fatal(1, "can't inject bad word at 0x%llx in %s",
4732 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4734 if (ztest_opts.zo_verbose >= 7)
4735 (void) printf("injected bad word into %s,"
4736 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
4743 * Verify that DDT repair works as expected.
4746 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4748 ztest_shared_t *zs = ztest_shared;
4749 spa_t *spa = ztest_spa;
4750 objset_t *os = zd->zd_os;
4752 uint64_t object, blocksize, txg, pattern, psize;
4753 enum zio_checksum checksum = spa_dedup_checksum(spa);
4758 int copies = 2 * ZIO_DEDUPDITTO_MIN;
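	/*
	 * ZIO_DEDUPDITTO_MIN is normally 100, so about 200 copies of the
	 * same block are written below.
	 */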
4760 blocksize = ztest_random_blocksize();
4761 blocksize = MIN(blocksize, 2048); /* because we write so many */
4763 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4765 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4769 * Take the name lock as writer to prevent anyone else from changing
4770 	 * the pool and dataset properties we need to maintain during this test.
4772 (void) rw_wrlock(&ztest_name_lock);
4774 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
4776 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
4778 (void) rw_unlock(&ztest_name_lock);
4782 object = od[0].od_object;
4783 blocksize = od[0].od_blocksize;
4784 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
4786 ASSERT(object != 0);
4788 tx = dmu_tx_create(os);
4789 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
4790 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
4792 (void) rw_unlock(&ztest_name_lock);
4797 * Write all the copies of our block.
4799 for (int i = 0; i < copies; i++) {
4800 uint64_t offset = i * blocksize;
4801 VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db,
4802 DMU_READ_NO_PREFETCH) == 0);
4803 ASSERT(db->db_offset == offset);
4804 ASSERT(db->db_size == blocksize);
4805 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
4806 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
4807 dmu_buf_will_fill(db, tx);
4808 ztest_pattern_set(db->db_data, db->db_size, pattern);
4809 dmu_buf_rele(db, FTAG);
4813 txg_wait_synced(spa_get_dsl(spa), txg);
4816 * Find out what block we got.
4818 VERIFY(dmu_buf_hold(os, object, 0, FTAG, &db,
4819 DMU_READ_NO_PREFETCH) == 0);
4820 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
4821 dmu_buf_rele(db, FTAG);
4824 * Damage the block. Dedup-ditto will save us when we read it later.
4826 psize = BP_GET_PSIZE(&blk);
4827 buf = zio_buf_alloc(psize);
4828 ztest_pattern_set(buf, psize, ~pattern);
4830 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
4831 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
4832 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
4834 zio_buf_free(buf, psize);
4836 (void) rw_unlock(&ztest_name_lock);
4844 ztest_scrub(ztest_ds_t *zd, uint64_t id)
4846 spa_t *spa = ztest_spa;
4848 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4849 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
4850 (void) spa_scan(spa, POOL_SCAN_SCRUB);
4854 * Change the guid for the pool.
4858 ztest_reguid(ztest_ds_t *zd, uint64_t id)
4860 spa_t *spa = ztest_spa;
4861 uint64_t orig, load;
4863 orig = spa_guid(spa);
4864 load = spa_load_guid(spa);
4865 if (spa_change_guid(spa) != 0)
4868 if (ztest_opts.zo_verbose >= 3) {
4869 (void) printf("Changed guid old %llu -> %llu\n",
4870 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
4873 VERIFY3U(orig, !=, spa_guid(spa));
4874 VERIFY3U(load, ==, spa_load_guid(spa));
4878 * Rename the pool to a different name and then rename it back.
4882 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
4884 char *oldname, *newname;
4887 (void) rw_wrlock(&ztest_name_lock);
4889 oldname = ztest_opts.zo_pool;
4890 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
4891 (void) strcpy(newname, oldname);
4892 (void) strcat(newname, "_tmp");
4897 VERIFY3U(0, ==, spa_rename(oldname, newname));
4900 * Try to open it under the old name, which shouldn't exist
4902 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
4905 * Open it under the new name and make sure it's still the same spa_t.
4907 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
4909 ASSERT(spa == ztest_spa);
4910 spa_close(spa, FTAG);
4913 * Rename it back to the original
4915 VERIFY3U(0, ==, spa_rename(newname, oldname));
4918 * Make sure it can still be opened
4920 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
4922 ASSERT(spa == ztest_spa);
4923 spa_close(spa, FTAG);
4925 umem_free(newname, strlen(newname) + 1);
4927 (void) rw_unlock(&ztest_name_lock);
4931 * Verify pool integrity by running zdb.
4934 ztest_run_zdb(char *pool)
4937 char zdb[MAXPATHLEN + MAXNAMELEN + 20];
4945 strlcpy(zdb, "/usr/bin/ztest", sizeof (zdb));
4947 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
4948 bin = strstr(zdb, "/usr/bin/");
4949 ztest = strstr(bin, "/ztest");
4951 isalen = ztest - isa;
4955 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
4958 ztest_opts.zo_verbose >= 3 ? "s" : "",
4959 ztest_opts.zo_verbose >= 4 ? "v" : "",
4964 if (ztest_opts.zo_verbose >= 5)
4965 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
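/*
 * Illustrative example (hypothetical paths): with the hard-coded
 * "/usr/bin/ztest" above, the ISA component (%.*s) is empty, so at
 * verbosity >= 4 the command built here looks roughly like
 *
 *   /usr/sbin/zdb -bccsv -U /tmp/zpool.cache ztest
 *
 * where the -U argument is the zpool.cache override set up in main().
 * If the binary path carried an ISA subdirectory (e.g. /usr/bin/amd64),
 * zdb would be taken from the matching /usr/sbin subdirectory.
 */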
4967 fp = popen(zdb, "r");
4970 while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
4971 if (ztest_opts.zo_verbose >= 3)
4972 (void) printf("%s", zbuf);
4974 status = pclose(fp);
4979 ztest_dump_core = 0;
4980 if (WIFEXITED(status))
4981 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
4983 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
4987 ztest_walk_pool_directory(char *header)
4991 if (ztest_opts.zo_verbose >= 6)
4992 (void) printf("%s\n", header);
4994 mutex_enter(&spa_namespace_lock);
4995 while ((spa = spa_next(spa)) != NULL)
4996 if (ztest_opts.zo_verbose >= 6)
4997 (void) printf("\t%s\n", spa_name(spa));
4998 mutex_exit(&spa_namespace_lock);
5002 ztest_spa_import_export(char *oldname, char *newname)
5004 nvlist_t *config, *newconfig;
5008 if (ztest_opts.zo_verbose >= 4) {
5009 (void) printf("import/export: old = %s, new = %s\n",
5014 * Clean up from previous runs.
5016 (void) spa_destroy(newname);
5019 * Get the pool's configuration and guid.
5021 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5024 * Kick off a scrub to tickle scrub/export races.
5026 if (ztest_random(2) == 0)
5027 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5029 pool_guid = spa_guid(spa);
5030 spa_close(spa, FTAG);
5032 ztest_walk_pool_directory("pools before export");
5037 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5039 ztest_walk_pool_directory("pools after export");
5044 newconfig = spa_tryimport(config);
5045 ASSERT(newconfig != NULL);
5046 nvlist_free(newconfig);
5049 * Import it under the new name.
5051 VERIFY3U(0, ==, spa_import(newname, config, NULL, 0));
5053 ztest_walk_pool_directory("pools after import");
5056 * Try to import it again -- should fail with EEXIST.
5058 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5061 * Try to import it under a different name -- should fail with EEXIST.
5063 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5066 * Verify that the pool is no longer visible under the old name.
5068 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5071 * Verify that we can open and close the pool using the new name.
5073 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5074 ASSERT(pool_guid == spa_guid(spa));
5075 spa_close(spa, FTAG);
5077 nvlist_free(config);
5081 ztest_resume(spa_t *spa)
5083 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5084 (void) printf("resuming from suspended state\n");
5085 spa_vdev_state_enter(spa, SCL_NONE);
5086 vdev_clear(spa, NULL);
5087 (void) spa_vdev_state_exit(spa, NULL, 0);
5088 (void) zio_resume(spa);
5092 ztest_resume_thread(void *arg)
5096 while (!ztest_exiting) {
5097 if (spa_suspended(spa))
5099 (void) poll(NULL, 0, 100);
5105 ztest_deadman_thread(void *arg)
5107 ztest_shared_t *zs = arg;
5111 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace;
5113 (void) poll(NULL, 0, (int)(1000 * delta));
5115 fatal(0, "failed to complete within %d seconds of deadline", grace);
5121 ztest_execute(int test, ztest_info_t *zi, uint64_t id)
5123 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
5124 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
5125 hrtime_t functime = gethrtime();
5127 for (int i = 0; i < zi->zi_iters; i++)
5128 zi->zi_func(zd, id);
5130 functime = gethrtime() - functime;
5132 atomic_add_64(&zc->zc_count, 1);
5133 atomic_add_64(&zc->zc_time, functime);
5135 if (ztest_opts.zo_verbose >= 4) {
5137 (void) dladdr((void *)zi->zi_func, &dli);
5138 (void) printf("%6.2f sec in %s\n",
5139 (double)functime / NANOSEC, dli.dli_sname);
5144 ztest_thread(void *arg)
5147 uint64_t id = (uintptr_t)arg;
5148 ztest_shared_t *zs = ztest_shared;
5152 ztest_shared_callstate_t *zc;
5154 while ((now = gethrtime()) < zs->zs_thread_stop) {
5156 * See if it's time to force a crash.
5158 if (now > zs->zs_thread_kill)
5162 * If we're getting ENOSPC with some regularity, stop.
5164 if (zs->zs_enospc_count > 10)
5168 * Pick a random function to execute.
5170 rand = ztest_random(ZTEST_FUNCS);
5171 zi = &ztest_info[rand];
5172 zc = ZTEST_GET_SHARED_CALLSTATE(rand);
5173 call_next = zc->zc_next;
5175 if (now >= call_next &&
5176 atomic_cas_64(&zc->zc_next, call_next, call_next +
5177 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
5178 ztest_execute(rand, zi, id);
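/*
 * The compare-and-swap above is what throttles each function to
 * roughly one call per zi_interval[0] across all threads: zc_next
 * holds the time the function is next due, and advancing it by a
 * random amount in [0, 2 * interval] lets exactly one of the racing
 * threads claim this slot; the losers simply pick another function.
 */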
5186 ztest_dataset_name(char *dsname, char *pool, int d)
5188 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5192 ztest_dataset_destroy(int d)
5194 char name[MAXNAMELEN];
5196 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5198 if (ztest_opts.zo_verbose >= 3)
5199 (void) printf("Destroying %s to free up space\n", name);
5202 * Clean up any non-standard clones and snapshots. In general,
5203 * ztest thread t operates on dataset (t % zopt_datasets),
5204 * so there may be more than one thing to clean up.
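* For example, with zo_datasets = 7 and zo_threads = 23, dataset 3 is
* used by threads 3, 10 and 17, which are exactly the values the loop
* below visits for d = 3.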
5206 for (int t = d; t < ztest_opts.zo_threads;
5207 t += ztest_opts.zo_datasets) {
5208 ztest_dsl_dataset_cleanup(name, t);
5211 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5212 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5216 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5218 uint64_t usedobjs, dirobjs, scratch;
5221 * ZTEST_DIROBJ is the object directory for the entire dataset.
5222 * Therefore, the number of objects in use should equal the
5223 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5224 * If not, we have an object leak.
5226 * Note that we can only check this in ztest_dataset_open(),
5227 * when the open-context and syncing-context values agree.
5228 * That's because zap_count() returns the open-context value,
5229 * while dmu_objset_space() returns the rootbp fill count.
5231 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5232 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5233 ASSERT3U(dirobjs + 1, ==, usedobjs);
5237 ztest_dataset_open(int d)
5239 ztest_ds_t *zd = &ztest_ds[d];
5240 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5243 char name[MAXNAMELEN];
5246 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5248 (void) rw_rdlock(&ztest_name_lock);
5250 error = ztest_dataset_create(name);
5251 if (error == ENOSPC) {
5252 (void) rw_unlock(&ztest_name_lock);
5253 ztest_record_enospc(FTAG);
5256 ASSERT(error == 0 || error == EEXIST);
5258 VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0);
5259 (void) rw_unlock(&ztest_name_lock);
5261 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5263 zilog = zd->zd_zilog;
5265 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5266 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5267 fatal(0, "missing log records: claimed %llu < committed %llu",
5268 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5270 ztest_dataset_dirobj_verify(zd);
5272 zil_replay(os, zd, ztest_replay_vector);
5274 ztest_dataset_dirobj_verify(zd);
5276 if (ztest_opts.zo_verbose >= 6)
5277 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5279 (u_longlong_t)zilog->zl_parse_blk_count,
5280 (u_longlong_t)zilog->zl_parse_lr_count,
5281 (u_longlong_t)zilog->zl_replaying_seq);
5283 zilog = zil_open(os, ztest_get_data);
5285 if (zilog->zl_replaying_seq != 0 &&
5286 zilog->zl_replaying_seq < committed_seq)
5287 fatal(0, "missing log records: replayed %llu < committed %llu",
5288 zilog->zl_replaying_seq, committed_seq);
5294 ztest_dataset_close(int d)
5296 ztest_ds_t *zd = &ztest_ds[d];
5298 zil_close(zd->zd_zilog);
5299 dmu_objset_rele(zd->zd_os, zd);
5305 * Kick off threads to run tests on all datasets in parallel.
5308 ztest_run(ztest_shared_t *zs)
5313 thread_t resume_tid;
5316 ztest_exiting = B_FALSE;
5319 * Initialize parent/child shared state.
5321 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5322 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5324 zs->zs_thread_start = gethrtime();
5325 zs->zs_thread_stop =
5326 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5327 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5328 zs->zs_thread_kill = zs->zs_thread_stop;
5329 if (ztest_random(100) < ztest_opts.zo_killrate) {
5330 zs->zs_thread_kill -=
5331 ztest_random(ztest_opts.zo_passtime * NANOSEC);
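/*
 * In other words: a pass normally runs for zo_passtime seconds, and
 * with probability zo_killrate percent the kill time is pulled back by
 * a random amount, so the kill check in ztest_thread() fires at a
 * random point within the pass.
 */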
5334 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
5336 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5337 offsetof(ztest_cb_data_t, zcd_node));
5342 kernel_init(FREAD | FWRITE);
5343 VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0);
5344 spa->spa_debug = B_TRUE;
5347 VERIFY3U(0, ==, dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os));
5348 zs->zs_guid = dmu_objset_fsid_guid(os);
5349 dmu_objset_rele(os, FTAG);
5351 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5354 * We don't expect the pool to suspend unless maxfaults == 0,
5355 * in which case ztest_fault_inject() temporarily takes away
5356 * the only valid replica.
5358 if (MAXFAULTS() == 0)
5359 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5361 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5364 * Create a thread to periodically resume suspended I/O.
5366 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5370 * Create a deadman thread to abort() if we hang.
5372 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
5376 * Verify that we can safely inquire about any object,
5377 * whether it's allocated or not. To make it interesting,
5378 * we probe a 5-wide window around each power of two.
5379 * This hits all edge cases, including zero and the max.
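* For example, for t = 10 the probes cover object ids 1019 through
* 1029, i.e. (1 << 10) - 5 through (1 << 10) + 5.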
5381 for (int t = 0; t < 64; t++) {
5382 for (int d = -5; d <= 5; d++) {
5383 error = dmu_object_info(spa->spa_meta_objset,
5384 (1ULL << t) + d, NULL);
5385 ASSERT(error == 0 || error == ENOENT ||
5391 * If we got any ENOSPC errors on the previous run, destroy something.
5393 if (zs->zs_enospc_count != 0) {
5394 int d = ztest_random(ztest_opts.zo_datasets);
5395 ztest_dataset_destroy(d);
5397 zs->zs_enospc_count = 0;
5399 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
5402 if (ztest_opts.zo_verbose >= 4)
5403 (void) printf("starting main threads...\n");
5406 * Kick off all the tests that run in parallel.
5408 for (int t = 0; t < ztest_opts.zo_threads; t++) {
5409 if (t < ztest_opts.zo_datasets &&
5410 ztest_dataset_open(t) != 0)
5412 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
5413 THR_BOUND, &tid[t]) == 0);
5417 * Wait for all of the tests to complete. We go in reverse order
5418 * so we don't close datasets while threads are still using them.
5420 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5421 VERIFY(thr_join(tid[t], NULL, NULL) == 0);
5422 if (t < ztest_opts.zo_datasets)
5423 ztest_dataset_close(t);
5426 txg_wait_synced(spa_get_dsl(spa), 0);
5428 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5429 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5431 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
5433 /* Kill the resume thread */
5434 ztest_exiting = B_TRUE;
5435 VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
5439 * Right before closing the pool, kick off a bunch of async I/O;
5440 * spa_close() should wait for it to complete.
5442 for (uint64_t object = 1; object < 50; object++)
5443 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5445 spa_close(spa, FTAG);
5448 * Verify that we can loop over all pools.
5450 mutex_enter(&spa_namespace_lock);
5451 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5452 if (ztest_opts.zo_verbose > 3)
5453 (void) printf("spa_next: found %s\n", spa_name(spa));
5454 mutex_exit(&spa_namespace_lock);
5457 * Verify that we can export the pool and reimport it under a
5460 if (ztest_random(2) == 0) {
5461 char name[MAXNAMELEN];
5462 (void) snprintf(name, MAXNAMELEN, "%s_import",
5463 ztest_opts.zo_pool);
5464 ztest_spa_import_export(ztest_opts.zo_pool, name);
5465 ztest_spa_import_export(name, ztest_opts.zo_pool);
5470 list_destroy(&zcl.zcl_callbacks);
5472 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5474 (void) rwlock_destroy(&ztest_name_lock);
5475 (void) _mutex_destroy(&ztest_vdev_lock);
5481 ztest_ds_t *zd = &ztest_ds[0];
5485 if (ztest_opts.zo_verbose >= 3)
5486 (void) printf("testing spa_freeze()...\n");
5488 kernel_init(FREAD | FWRITE);
5489 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5490 VERIFY3U(0, ==, ztest_dataset_open(0));
5493 * Force the first log block to be transactionally allocated.
5494 * We have to do this before we freeze the pool -- otherwise
5495 * the log chain won't be anchored.
5497 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5498 ztest_dmu_object_alloc_free(zd, 0);
5499 zil_commit(zd->zd_zilog, 0);
5502 txg_wait_synced(spa_get_dsl(spa), 0);
5505 * Freeze the pool. This stops spa_sync() from doing anything,
5506 * so that the only way to record changes from now on is the ZIL.
5511 * Run tests that generate log records but don't alter the pool config
5512 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5513 * We do a txg_wait_synced() after each iteration to force the txg
5514 * to increase well beyond the last synced value in the uberblock.
5515 * The ZIL should be OK with that.
5517 while (ztest_random(10) != 0 &&
5518 numloops++ < ztest_opts.zo_maxloops) {
5519 ztest_dmu_write_parallel(zd, 0);
5520 ztest_dmu_object_alloc_free(zd, 0);
5521 txg_wait_synced(spa_get_dsl(spa), 0);
5525 * Commit all of the changes we just generated.
5527 zil_commit(zd->zd_zilog, 0);
5528 txg_wait_synced(spa_get_dsl(spa), 0);
5531 * Close our dataset and close the pool.
5533 ztest_dataset_close(0);
5534 spa_close(spa, FTAG);
5538 * Open and close the pool and dataset to induce log replay.
5540 kernel_init(FREAD | FWRITE);
5541 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5542 VERIFY3U(0, ==, ztest_dataset_open(0));
5543 ztest_dataset_close(0);
5544 spa_close(spa, FTAG);
5549 print_time(hrtime_t t, char *timebuf)
5551 hrtime_t s = t / NANOSEC;
5552 hrtime_t m = s / 60;
5553 hrtime_t h = m / 60;
5554 hrtime_t d = h / 24;
5563 (void) sprintf(timebuf,
5564 "%llud%02lluh%02llum%02llus", d, h, m, s);
5566 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
5568 (void) sprintf(timebuf, "%llum%02llus", m, s);
5570 (void) sprintf(timebuf, "%llus", s);
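/*
 * Worked example (assuming the usual modulo reductions of h, m and s
 * elided above): a t corresponding to 90061 seconds prints
 * "1d01h01m01s", and 3700 seconds prints "1h01m40s".
 */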
5578 if (ztest_random(2) == 0)
5581 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5582 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5588 * Create a storage pool with the given name and initial vdev size.
5589 * Then test spa_freeze() functionality.
5592 ztest_init(ztest_shared_t *zs)
5595 nvlist_t *nvroot, *props;
5597 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5598 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5600 kernel_init(FREAD | FWRITE);
5603 * Create the storage pool.
5605 (void) spa_destroy(ztest_opts.zo_pool);
5606 ztest_shared->zs_vdev_next_leaf = 0;
5608 zs->zs_mirrors = ztest_opts.zo_mirrors;
5609 nvroot = make_vdev_root(NULL, NULL, ztest_opts.zo_vdev_size, 0,
5610 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5611 props = make_random_props();
5612 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props,
5614 nvlist_free(nvroot);
5616 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5617 zs->zs_metaslab_sz =
5618 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5619 spa_close(spa, FTAG);
5623 ztest_run_zdb(ztest_opts.zo_pool);
5627 ztest_run_zdb(ztest_opts.zo_pool);
5629 (void) rwlock_destroy(&ztest_name_lock);
5630 (void) _mutex_destroy(&ztest_vdev_lock);
5639 char *tmp = tempnam(NULL, NULL);
5640 fd = open(tmp, O_RDWR | O_CREAT, 0700);
5641 ASSERT3U(fd, ==, ZTEST_FD_DATA);
5645 char tmp[MAXPATHLEN];
5647 strlcpy(tmp, ztest_opts.zo_dir, MAXPATHLEN);
5648 strlcat(tmp, "/ztest.XXXXXX", MAXPATHLEN);
5650 ASSERT3U(fd, ==, ZTEST_FD_DATA);
5653 fd = open("/dev/urandom", O_RDONLY);
5654 ASSERT3U(fd, ==, ZTEST_FD_RAND);
5660 ztest_shared_hdr_t *hdr;
5663 pwrite(ZTEST_FD_DATA, "", 1, 0);
5666 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5667 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
5668 ASSERT(hdr != MAP_FAILED);
5670 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5671 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5672 hdr->zh_size = sizeof (ztest_shared_t);
5673 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5674 hdr->zh_stats_count = ZTEST_FUNCS;
5675 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5676 hdr->zh_ds_count = ztest_opts.zo_datasets;
5678 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5685 ztest_shared_hdr_t *hdr;
5688 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5689 PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0);
5690 ASSERT(hdr != MAP_FAILED);
5692 size = hdr->zh_hdr_size;
5693 size += hdr->zh_opts_size;
5694 size += hdr->zh_size;
5695 size += hdr->zh_stats_size * hdr->zh_stats_count;
5696 size += hdr->zh_ds_size * hdr->zh_ds_count;
5698 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5699 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5700 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
5701 ASSERT(hdr != MAP_FAILED);
5702 buf = (uint8_t *)hdr;
5704 offset = hdr->zh_hdr_size;
5705 ztest_shared_opts = (void *)&buf[offset];
5706 offset += hdr->zh_opts_size;
5707 ztest_shared = (void *)&buf[offset];
5708 offset += hdr->zh_size;
5709 ztest_shared_callstate = (void *)&buf[offset];
5710 offset += hdr->zh_stats_size * hdr->zh_stats_count;
5711 ztest_shared_ds = (void *)&buf[offset];
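/*
 * A rough sketch of the shared mmap-ed file laid out above (all sizes
 * come from the header written by setup_hdr()):
 *
 *   [ ztest_shared_hdr_t          ]  zh_hdr_size bytes
 *   [ ztest_shared_opts_t         ]  zh_opts_size bytes
 *   [ ztest_shared_t              ]  zh_size bytes
 *   [ call state x zh_stats_count ]  zh_stats_size bytes each
 *   [ dataset state x zh_ds_count ]  zh_ds_size bytes each
 */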
5715 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
5719 char cmdbuf[MAXPATHLEN];
5724 (void) strlcpy(cmdbuf, getexecname(), sizeof (cmdbuf));
5729 fatal(1, "fork failed");
5731 if (pid == 0) { /* child */
5732 char *emptyargv[2] = { cmd, NULL };
5734 struct rlimit rl = { 1024, 1024 };
5735 (void) setrlimit(RLIMIT_NOFILE, &rl);
5736 (void) enable_extended_FILE_stdio(-1, -1);
5737 if (libpath != NULL)
5738 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
5740 (void) execv(cmd, emptyargv);
5742 (void) execvp(cmd, emptyargv);
5744 ztest_dump_core = B_FALSE;
5745 fatal(B_TRUE, "exec failed: %s", cmd);
5748 while (waitpid(pid, &status, 0) != pid)
5750 if (statusp != NULL)
5753 if (WIFEXITED(status)) {
5754 if (WEXITSTATUS(status) != 0) {
5755 (void) fprintf(stderr, "child exited with code %d\n",
5756 WEXITSTATUS(status));
5760 } else if (WIFSIGNALED(status)) {
5761 if (!ignorekill || WTERMSIG(status) != SIGKILL) {
5762 (void) fprintf(stderr, "child died with signal %d\n",
5768 (void) fprintf(stderr, "something strange happened to child\n");
5775 ztest_run_init(void)
5777 ztest_shared_t *zs = ztest_shared;
5779 ASSERT(ztest_opts.zo_init != 0);
5782 * Blow away any existing copy of zpool.cache
5784 (void) remove(spa_config_path);
5787 * Create and initialize our storage pool.
5789 for (int i = 1; i <= ztest_opts.zo_init; i++) {
5790 bzero(zs, sizeof (ztest_shared_t));
5791 if (ztest_opts.zo_verbose >= 3 &&
5792 ztest_opts.zo_init != 1) {
5793 (void) printf("ztest_init(), pass %d\n", i);
5800 main(int argc, char **argv)
5808 ztest_shared_callstate_t *zc;
5812 char cmd[MAXNAMELEN];
5815 boolean_t ischild = (0 == lseek(ZTEST_FD_DATA, 0, SEEK_CUR));
5816 ASSERT(ischild || errno == EBADF);
5818 (void) setvbuf(stdout, NULL, _IOLBF, 0);
5821 process_options(argc, argv);
5826 bcopy(&ztest_opts, ztest_shared_opts,
5827 sizeof (*ztest_shared_opts));
5830 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
5832 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
5834 /* Override location of zpool.cache */
5835 (void) asprintf((char **)&spa_config_path, "%s/zpool.cache",
5838 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
5843 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
5844 metaslab_df_alloc_threshold =
5845 zs->zs_metaslab_df_alloc_threshold;
5854 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
5856 if (ztest_opts.zo_verbose >= 1) {
5857 (void) printf("%llu vdevs, %d datasets, %d threads,"
5858 " %llu seconds...\n",
5859 (u_longlong_t)ztest_opts.zo_vdevs,
5860 ztest_opts.zo_datasets,
5861 ztest_opts.zo_threads,
5862 (u_longlong_t)ztest_opts.zo_time);
5865 (void) strlcpy(cmd, getexecname(), sizeof (cmd));
5867 zs->zs_do_init = B_TRUE;
5868 if (strlen(ztest_opts.zo_alt_ztest) != 0) {
5869 if (ztest_opts.zo_verbose >= 1) {
5870 (void) printf("Executing older ztest for "
5871 "initialization: %s\n", ztest_opts.zo_alt_ztest);
5873 VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
5874 ztest_opts.zo_alt_libpath, B_FALSE, NULL));
5876 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
5878 zs->zs_do_init = B_FALSE;
5880 zs->zs_proc_start = gethrtime();
5881 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
5883 for (int f = 0; f < ZTEST_FUNCS; f++) {
5884 zi = &ztest_info[f];
5885 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5886 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
5887 zc->zc_next = UINT64_MAX;
5889 zc->zc_next = zs->zs_proc_start +
5890 ztest_random(2 * zi->zi_interval[0] + 1);
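/*
 * Put differently: a function whose minimum interval does not even fit
 * once in the total run time is disabled (UINT64_MAX), while every
 * other function gets a random first-call time of up to twice its
 * interval, staggering the calls across the run.
 */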
5894 * Run the tests in a loop. These tests include fault injection
5895 * to verify that self-healing data works, and forced crashes
5896 * to verify that we never lose on-disk consistency.
5898 while (gethrtime() < zs->zs_proc_stop) {
5903 * Initialize the workload counters for each function.
5905 for (int f = 0; f < ZTEST_FUNCS; f++) {
5906 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5911 /* Set the allocation switch size */
5912 zs->zs_metaslab_df_alloc_threshold =
5913 ztest_random(zs->zs_metaslab_sz / 4) + 1;
5915 if (!hasalt || ztest_random(2) == 0) {
5916 if (hasalt && ztest_opts.zo_verbose >= 1) {
5917 (void) printf("Executing newer ztest: %s\n",
5921 killed = exec_child(cmd, NULL, B_TRUE, &status);
5923 if (hasalt && ztest_opts.zo_verbose >= 1) {
5924 (void) printf("Executing older ztest: %s\n",
5925 ztest_opts.zo_alt_ztest);
5928 killed = exec_child(ztest_opts.zo_alt_ztest,
5929 ztest_opts.zo_alt_libpath, B_TRUE, &status);
5936 if (ztest_opts.zo_verbose >= 1) {
5937 hrtime_t now = gethrtime();
5939 now = MIN(now, zs->zs_proc_stop);
5940 print_time(zs->zs_proc_stop - now, timebuf);
5941 nicenum(zs->zs_space, numbuf);
5943 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
5944 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
5946 WIFEXITED(status) ? "Complete" : "SIGKILL",
5947 (u_longlong_t)zs->zs_enospc_count,
5948 100.0 * zs->zs_alloc / zs->zs_space,
5950 100.0 * (now - zs->zs_proc_start) /
5951 (ztest_opts.zo_time * NANOSEC), timebuf);
5954 if (ztest_opts.zo_verbose >= 2) {
5955 (void) printf("\nWorkload summary:\n\n");
5956 (void) printf("%7s %9s %s\n",
5957 "Calls", "Time", "Function");
5958 (void) printf("%7s %9s %s\n",
5959 "-----", "----", "--------");
5960 for (int f = 0; f < ZTEST_FUNCS; f++) {
5963 zi = &ztest_info[f];
5964 zc = ZTEST_GET_SHARED_CALLSTATE(f);
5965 print_time(zc->zc_time, timebuf);
5966 (void) dladdr((void *)zi->zi_func, &dli);
5967 (void) printf("%7llu %9s %s\n",
5968 (u_longlong_t)zc->zc_count, timebuf,
5971 (void) printf("\n");
5975 * It's possible that we killed a child during a rename test,
5976 * in which case we'll have a 'ztest_tmp' pool lying around
5977 * instead of 'ztest'. Do a blind rename in case this happened.
5980 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
5981 spa_close(spa, FTAG);
5983 char tmpname[MAXNAMELEN];
5985 kernel_init(FREAD | FWRITE);
5986 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
5987 ztest_opts.zo_pool);
5988 (void) spa_rename(tmpname, ztest_opts.zo_pool);
5992 ztest_run_zdb(ztest_opts.zo_pool);
5995 if (ztest_opts.zo_verbose >= 1) {
5997 (void) printf("%d runs of older ztest: %s\n", older,
5998 ztest_opts.zo_alt_ztest);
5999 (void) printf("%d runs of newer ztest: %s\n", newer,
6002 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
6003 kills, iters - kills, (100.0 * kills) / MAX(1, iters));