/*-
 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/**
 * @file
 *
 * fail(9) Facility.
 *
 * @ingroup failpoint_private
 */
/**
 * @defgroup failpoint fail(9) Facility
 *
 * Failpoints allow for injecting fake errors into running code on the fly,
 * without modifying code or recompiling with flags.  Failpoints are always
 * present, and are very efficient when disabled.  Failpoints are described
 * in man fail(9).
 */
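/*
 * Example: a fail point declared with KFAIL_POINT_RETURN(DEBUG_FP,
 * test_fail_point), as is done at the bottom of this file, can be armed
 * at run time through its sysctl, e.g.:
 *
 *	sysctl debug.fail_point.test_fail_point='2.5%return(5)'
 *
 * which makes the hook return error 5 with probability 2.5%.  See
 * fail(9) and parse_fail_point() below for the full setting syntax.
 */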
/**
 * @defgroup failpoint_private Private fail(9) Implementation functions
 *
 * Private implementations for the actual failpoint code.
 *
 * @ingroup failpoint
 */
/**
 * @addtogroup failpoint_private
 * @{
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/ctype.h>
#include <sys/errno.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>
#ifdef ILOG_DEFINE_FOR_FILE
ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point);
#endif
static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system");
#define fp_free(ptr) free(ptr, M_FAIL_POINT)
#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags))
#define fs_free(ptr) fp_free(ptr)
#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \
	M_WAITOK | M_ZERO)
/**
 * These define the wchans that are used for sleeping, pausing respectively.
 * They are chosen arbitrarily but need to be distinct to the failpoint and
 * the sleep/pause distinction.
 */
#define FP_SLEEP_CHANNEL(fp) (void*)(fp)
#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void*, &fp->fp_setting)
/**
 * Don't allow more than this many entries in a fail point set by sysctl.
 * The 99.99...% case is to have 1 entry.  I can't imagine having this many
 * entries, so it should not limit us.  Saves on re-mallocs while holding
 * a non-sleepable lock.
 */
#define FP_MAX_ENTRY_COUNT 20
/* Used to drain sbufs to the sysctl output */
int fail_sysctl_drain_func(void *, const char *, int);

/* Head of tailq of struct fail_point_entry */
TAILQ_HEAD(fail_point_entry_queue, fail_point_entry);
/**
 * fp entries garbage list; outstanding entries are cleaned up in the
 * garbage collector
 */
STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting);
static struct fail_point_setting_garbage fp_setting_garbage =
	STAILQ_HEAD_INITIALIZER(fp_setting_garbage);
static struct mtx mtx_garbage_list;
MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx",
	MTX_SPIN);

static struct sx sx_fp_set;
SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx");
/**
 * Failpoint types.
 * Don't change these without changing fail_type_strings in fail.c.
 * @ingroup failpoint_private
 */
enum fail_point_t {
	FAIL_POINT_OFF,		/**< don't fail */
	FAIL_POINT_PANIC,	/**< panic */
	FAIL_POINT_RETURN,	/**< return an errorcode */
	FAIL_POINT_BREAK,	/**< break into the debugger */
	FAIL_POINT_PRINT,	/**< print a message */
	FAIL_POINT_SLEEP,	/**< sleep for some msecs */
	FAIL_POINT_PAUSE,	/**< sleep until failpoint is set to off */
	FAIL_POINT_YIELD,	/**< yield the cpu */
	FAIL_POINT_DELAY,	/**< busy wait the cpu */

	FAIL_POINT_NUMTYPES,
	FAIL_POINT_INVALID = -1
};
static struct {
	const char *name;
	int nmlen;
} fail_type_strings[] = {
#define FP_TYPE_NM_LEN(s) { s, sizeof(s) - 1 }
	[FAIL_POINT_OFF] = FP_TYPE_NM_LEN("off"),
	[FAIL_POINT_PANIC] = FP_TYPE_NM_LEN("panic"),
	[FAIL_POINT_RETURN] = FP_TYPE_NM_LEN("return"),
	[FAIL_POINT_BREAK] = FP_TYPE_NM_LEN("break"),
	[FAIL_POINT_PRINT] = FP_TYPE_NM_LEN("print"),
	[FAIL_POINT_SLEEP] = FP_TYPE_NM_LEN("sleep"),
	[FAIL_POINT_PAUSE] = FP_TYPE_NM_LEN("pause"),
	[FAIL_POINT_YIELD] = FP_TYPE_NM_LEN("yield"),
	[FAIL_POINT_DELAY] = FP_TYPE_NM_LEN("delay"),
};
#define FE_COUNT_UNTRACKED (INT_MIN)
/**
 * Internal structure tracking a single term of a complete failpoint.
 * @ingroup failpoint_private
 */
struct fail_point_entry {
	volatile bool fe_stale;
	enum fail_point_t fe_type;	/**< type of entry */
	int fe_arg;	/**< argument to type (e.g. return value) */
	int fe_prob;	/**< likelihood of firing in millionths */
	int fe_count;	/**< times to fire, FE_COUNT_UNTRACKED means no limit */
	pid_t fe_pid;	/**< only fail for this process */
	struct fail_point *fe_parent;	/**< backpointer to fp */
	TAILQ_ENTRY(fail_point_entry) fe_entries; /**< next entry ptr */
};
struct fail_point_setting {
	STAILQ_ENTRY(fail_point_setting) fs_garbage_link;
	struct fail_point_entry_queue fp_entry_queue;
	struct fail_point * fs_parent;
	struct mtx feq_mtx; /* Gives fail_point_pause something to do. */
};

/**
 * Defines stating the equivalent of probability one (100%)
 */
enum {
	PROB_MAX = 1000000, /* probability between zero and this number */
	PROB_DIGITS = 6 /* number of zero's in above number */
};
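/*
 * Worked example: parse_term() below stores "25%" as fe_prob =
 * 25 * (PROB_MAX / 100) = 250000, i.e. 250000 firings per PROB_MAX
 * (one million) random draws, and "2.5%" as 25000.  PROB_DIGITS - 2
 * digits are available for the fractional part of the percentage.
 */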
/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *fail_point_setting_get_ref(
	struct fail_point *fp);
/* Release a ref on an fp_setting */
static inline void fail_point_setting_release_ref(struct fail_point *fp);
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *fail_point_setting_new(struct
	fail_point *fp);
/* Free a struct fail_point_setting */
static void fail_point_setting_destroy(struct fail_point_setting *fp_setting);
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *fail_point_entry_new(struct
	fail_point_setting *);
/* Free a struct fail_point_entry */
static void fail_point_entry_destroy(struct fail_point_entry *fp_entry);
/* Append fp setting to garbage list */
static inline void fail_point_setting_garbage_append(
	struct fail_point_setting *fp_setting);
/* Swap fp's setting with fp_setting_new */
static inline struct fail_point_setting *
	fail_point_swap_settings(struct fail_point *fp,
	struct fail_point_setting *fp_setting_new);
/* Free up any zero-ref setting in the garbage queue */
static void fail_point_garbage_collect(void);
/* If this fail point's setting is empty, then swap it out to NULL. */
static inline void fail_point_eval_swap_out(struct fail_point *fp,
	struct fail_point_setting *fp_setting);
bool
fail_point_is_off(struct fail_point *fp)
{
	bool return_val;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *ent;

	return_val = true;

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue,
		    fe_entries) {
			if (!ent->fe_stale) {
				return_val = false;
				break;
			}
		}
	}
	fail_point_setting_release_ref(fp);

	return (return_val);
}
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *
fail_point_setting_new(struct fail_point *fp)
{
	struct fail_point_setting *fs_new;

	fs_new = fs_malloc();
	fs_new->fs_parent = fp;
	TAILQ_INIT(&fs_new->fp_entry_queue);
	mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN);

	fail_point_setting_garbage_append(fs_new);

	return (fs_new);
}
/* Free a struct fail_point_setting */
static void
fail_point_setting_destroy(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *ent;

	while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) {
		ent = TAILQ_FIRST(&fp_setting->fp_entry_queue);
		TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries);
		fail_point_entry_destroy(ent);
	}

	fs_free(fp_setting);
}
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *
fail_point_entry_new(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *fp_entry;

	fp_entry = fp_malloc(sizeof(struct fail_point_entry),
	    M_WAITOK | M_ZERO);
	fp_entry->fe_parent = fp_setting->fs_parent;
	fp_entry->fe_prob = PROB_MAX;
	fp_entry->fe_pid = NO_PID;
	fp_entry->fe_count = FE_COUNT_UNTRACKED;
	TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry,
	    fe_entries);

	return (fp_entry);
}

/* Free a struct fail_point_entry */
static void
fail_point_entry_destroy(struct fail_point_entry *fp_entry)
{

	fp_free(fp_entry);
}
/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *
fail_point_setting_get_ref(struct fail_point *fp)
{
	struct fail_point_setting *fp_setting;

	/* Invariant: if we have a ref, our pointer to fp_setting is safe */
	atomic_add_acq_32(&fp->fp_ref_cnt, 1);
	fp_setting = fp->fp_setting;

	return (fp_setting);
}

/* Release a ref on an fp_setting */
static inline void
fail_point_setting_release_ref(struct fail_point *fp)
{

	KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs"));
	atomic_subtract_rel_32(&fp->fp_ref_cnt, 1);
}
/* Append fp entries to fp garbage list */
static inline void
fail_point_setting_garbage_append(struct fail_point_setting *fp_setting)
{

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting,
	    fs_garbage_link);
	mtx_unlock_spin(&mtx_garbage_list);
}
/* Swap fp's entries with fp_setting_new */
static struct fail_point_setting *
fail_point_swap_settings(struct fail_point *fp,
    struct fail_point_setting *fp_setting_new)
{
	struct fail_point_setting *fp_setting_old;

	fp_setting_old = fp->fp_setting;
	fp->fp_setting = fp_setting_new;

	return (fp_setting_old);
}
static inline void
fail_point_eval_swap_out(struct fail_point *fp,
    struct fail_point_setting *fp_setting)
{

	/* We may have already been swapped out and replaced; ignore. */
	if (fp->fp_setting == fp_setting)
		fail_point_swap_settings(fp, NULL);
}
/* Free up any zero-ref entries in the garbage queue */
static void
fail_point_garbage_collect(void)
{
	struct fail_point_setting *fs_current, *fs_next;
	struct fail_point_setting_garbage fp_ents_free_list;

	/**
	 * We will transfer the entries to free to fp_ents_free_list while holding
	 * the spin mutex, then free it after we drop the lock. This avoids
	 * triggering witness due to sleepable mutexes in the memory
	 * allocator.
	 */
	STAILQ_INIT(&fp_ents_free_list);

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link,
	    fs_next) {
		if (fs_current->fs_parent->fp_setting != fs_current &&
		    fs_current->fs_parent->fp_ref_cnt == 0) {
			STAILQ_REMOVE(&fp_setting_garbage, fs_current,
			    fail_point_setting, fs_garbage_link);
			STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current,
			    fs_garbage_link);
		}
	}
	mtx_unlock_spin(&mtx_garbage_list);

	STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link,
	    fs_next)
		fail_point_setting_destroy(fs_current);
}
/* Drain out all refs from this fail point */
void
fail_point_drain(struct fail_point *fp, int expected_ref)
{
	struct fail_point_setting *entries;

	entries = fail_point_swap_settings(fp, NULL);
	/**
	 * We have unpaused all threads; so we will wait no longer
	 * than the time taken for the longest remaining sleep, or
	 * the length of time of a long-running code block.
	 */
	while (fp->fp_ref_cnt > expected_ref) {
		wakeup(FP_PAUSE_CHANNEL(fp));
		tsleep(&fp, PWAIT, "fail_point_drain", hz / 100);
	}
	fail_point_swap_settings(fp, entries);
}
static void
fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret,
    struct mtx *mtx_sleep)
{

	if (fp->fp_pre_sleep_fn)
		fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

	msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0);

	if (fp->fp_post_sleep_fn)
		fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
}
static void
fail_point_sleep(struct fail_point *fp, int msecs,
    enum fail_point_return_code *pret)
{
	int timo;

	/* Convert from millisecs to ticks, rounding up */
	timo = howmany(msecs * hz, 1000);

	if (timo > 0) {
		if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo);

			if (fp->fp_post_sleep_fn)
				fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
		} else {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			timeout(fp->fp_post_sleep_fn, fp->fp_post_sleep_arg,
			    timo);
			*pret = FAIL_POINT_RC_QUEUED;
		}
	}
}
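/*
 * Worked example for the tick conversion above: with hz = 1000, a 15ms
 * sleep maps to howmany(15 * 1000, 1000) = 15 ticks; with hz = 100 the
 * same 15ms rounds up to howmany(15 * 100, 1000) = 2 ticks rather than
 * truncating down to 1.
 */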
static char *parse_fail_point(struct fail_point_setting *, char *);
static char *parse_term(struct fail_point_setting *, char *);
static char *parse_number(int *out_units, int *out_decimal, char *);
static char *parse_type(struct fail_point_entry *, char *);
/**
 * Initialize a fail_point.  The name is formed in a printf-like fashion
 * from "fmt" and subsequent arguments.  This function is generally used
 * for custom failpoints located at odd places in the sysctl tree, and is
 * not explicitly needed for standard in-line-declared failpoints.
 *
 * @ingroup failpoint
 */
void
fail_point_init(struct fail_point *fp, const char *fmt, ...)
{
	va_list ap;
	char *name;
	int n;

	fp->fp_setting = NULL;
	fp->fp_flags = 0;

	/* Figure out the size of the name. */
	va_start(ap, fmt);
	n = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);

	/* Allocate the name and fill it in. */
	name = fp_malloc(n + 1, M_WAITOK);
	va_start(ap, fmt);
	vsnprintf(name, n + 1, fmt, ap);
	va_end(ap);

	fp->fp_name = name;
	fp->fp_location = "";
	fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
	fp->fp_pre_sleep_fn = NULL;
	fp->fp_pre_sleep_arg = NULL;
	fp->fp_post_sleep_fn = NULL;
	fp->fp_post_sleep_arg = NULL;
}
/**
 * Free the resources held by a fail_point, and wake any paused threads.
 * Thou shalt not allow threads to hit this fail point after you enter this
 * function, nor shall you call this multiple times for a given fp.
 *
 * @ingroup failpoint
 */
void
fail_point_destroy(struct fail_point *fp)
{

	fail_point_drain(fp, 0);

	if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) {
		fp_free(__DECONST(void *, fp->fp_name));
		fp->fp_name = NULL;
	}
	fp->fp_flags = 0;

	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);
}
/**
 * This does the real work of evaluating a fail point. If the fail point tells
 * us to return a value, this function returns FAIL_POINT_RC_RETURN and fills
 * in 'return_value' (return_value is allowed to be null). If the fail point
 * tells us to panic, we never return. Otherwise we just return
 * FAIL_POINT_RC_CONTINUE after doing some work, which means "keep going".
 */
enum fail_point_return_code
fail_point_eval_nontrivial(struct fail_point *fp, int *return_value)
{
	bool execute = false;
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	enum fail_point_return_code ret;
	int cont;
	int count;
	int msecs;
	int usecs;

	ret = FAIL_POINT_RC_CONTINUE;
	cont = 0; /* don't continue by default */

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting == NULL)
		goto abort;

	TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
		if (ent->fe_stale)
			continue;

		if (ent->fe_prob < PROB_MAX &&
		    ent->fe_prob < random() % PROB_MAX)
			continue;

		if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid)
			continue;

		if (ent->fe_count != FE_COUNT_UNTRACKED) {
			count = ent->fe_count;
			while (count > 0) {
				if (atomic_cmpset_32(&ent->fe_count, count, count - 1)) {
					count--;
					execute = true;
					break;
				}
				count = ent->fe_count;
			}
			if (execute == false)
				/* We lost the race; consider the entry stale and bail now */
				continue;
			if (count == 0)
				ent->fe_stale = true;
		} else
			execute = true;

		switch (ent->fe_type) {
		case FAIL_POINT_PANIC:
			panic("fail point %s panicking", fp->fp_name);
			/* NOTREACHED */

		case FAIL_POINT_RETURN:
			if (return_value != NULL)
				*return_value = ent->fe_arg;
			ret = FAIL_POINT_RC_RETURN;
			break;

		case FAIL_POINT_BREAK:
			printf("fail point %s breaking to debugger\n",
			    fp->fp_name);
			breakpoint();
			break;

		case FAIL_POINT_PRINT:
			printf("fail point %s executing\n", fp->fp_name);
			cont = ent->fe_arg;
			break;

		case FAIL_POINT_SLEEP:
			msecs = ent->fe_arg;
			if (msecs)
				fail_point_sleep(fp, msecs, &ret);
			break;

		case FAIL_POINT_PAUSE:
			/**
			 * Pausing is inherently strange with multiple
			 * entries given our design.  That is because some
			 * entries could be unreachable, for instance in cases like:
			 * pause->return. We can never reach the return entry.
			 * The sysctl layer actually truncates all entries after
			 * a pause for this reason.
			 */
			mtx_lock_spin(&fp_setting->feq_mtx);
			fail_point_pause(fp, &ret, &fp_setting->feq_mtx);
			mtx_unlock_spin(&fp_setting->feq_mtx);
			break;

		case FAIL_POINT_YIELD:
			kern_yield(PRI_UNCHANGED);
			break;

		case FAIL_POINT_DELAY:
			usecs = ent->fe_arg;
			DELAY(usecs);
			break;

		default:
			break;
		}
	}

	if (fail_point_is_off(fp))
		fail_point_eval_swap_out(fp, fp_setting);

abort:
	fail_point_setting_release_ref(fp);

	return (ret);
}
/**
 * Translate internal fail_point structure into human-readable text.
 */
static void
fail_point_get(struct fail_point *fp, struct sbuf *sb,
    bool verbose)
{
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *fp_entry_cpy;
	int cnt_sleeping;
	int idx;
	int printed_entry_count;

	cnt_sleeping = 0;
	idx = 0;
	printed_entry_count = 0;

	fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) *
	    (FP_MAX_ENTRY_COUNT + 1), M_WAITOK);

	fp_setting = fail_point_setting_get_ref(fp);

	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
			if (ent->fe_stale)
				continue;

			KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT,
			    ("FP entry list larger than allowed"));

			fp_entry_cpy[printed_entry_count] = *ent;
			++printed_entry_count;
		}
	}
	fail_point_setting_release_ref(fp);

	/* This is our equivalent of a NULL terminator */
	fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID;

	while (idx < printed_entry_count) {
		ent = &fp_entry_cpy[idx];
		++idx;
		if (ent->fe_prob < PROB_MAX) {
			int decimal = ent->fe_prob % (PROB_MAX / 100);
			int units = ent->fe_prob / (PROB_MAX / 100);

			sbuf_printf(sb, "%d", units);
			if (decimal) {
				int digits = PROB_DIGITS - 2;

				while (!(decimal % 10)) {
					digits--;
					decimal /= 10;
				}
				sbuf_printf(sb, ".%0*d", digits, decimal);
			}
			sbuf_printf(sb, "%%");
		}
		if (ent->fe_count >= 0)
			sbuf_printf(sb, "%d*", ent->fe_count);
		sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name);
		if (ent->fe_arg)
			sbuf_printf(sb, "(%d)", ent->fe_arg);
		if (ent->fe_pid != NO_PID)
			sbuf_printf(sb, "[pid %d]", ent->fe_pid);
		if (TAILQ_NEXT(ent, fe_entries))
			sbuf_printf(sb, "->");
	}
	if (!printed_entry_count)
		sbuf_printf(sb, "off");

	fp_free(fp_entry_cpy);
	if (verbose) {
		/* Print number of sleeping threads. queue=0 is the argument
		 * used by msleep when sending our threads to sleep. */
		sbuf_printf(sb, "\nsleeping_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0,
		    &cnt_sleeping);

		sbuf_printf(sb, "},\n");
		sbuf_printf(sb, "sleeping_thread_count = %d,\n",
		    cnt_sleeping);

		sbuf_printf(sb, "paused_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0,
		    &cnt_sleeping);

		sbuf_printf(sb, "},\n");
		sbuf_printf(sb, "paused_thread_count = %d\n",
		    cnt_sleeping);
	}
}
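/*
 * Example of the rendered format: a setting parsed from
 * "1.2%10*return(5)[pid 1234]" is printed back in the same shape:
 * probability first, then a remaining hit count followed by '*', the
 * type name with its argument in parentheses, and an optional pid
 * restriction; successive terms are joined with "->".
 */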
/**
 * Set an internal fail_point structure from a human-readable failpoint string
 * in a lock-safe manner.
 */
static int
fail_point_set(struct fail_point *fp, char *buf)
{
	struct fail_point_entry *ent, *ent_next;
	struct fail_point_setting *entries;
	bool should_wake_paused;
	bool should_truncate;
	int error;

	error = 0;
	should_wake_paused = false;
	should_truncate = false;

	/* Parse new entries. */
	/**
	 * ref protects our new malloc'd stuff from being garbage collected
	 * before we can link it to the fail point.
	 */
	fail_point_setting_get_ref(fp);
	entries = fail_point_setting_new(fp);
	if (parse_fail_point(entries, buf) == NULL) {
		STAILQ_REMOVE(&fp_setting_garbage, entries,
		    fail_point_setting, fs_garbage_link);
		fail_point_setting_destroy(entries);
		error = EINVAL;
		goto end;
	}

	/**
	 * Transfer the entries we are going to keep to a new list.
	 * Get rid of useless zero probability entries, and entries with hit
	 * count 0.
	 * If 'off' is present, and it has no hit count set, then all entries
	 * after it are discarded since they are unreachable.
	 */
	TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) {
		if (ent->fe_prob == 0 || ent->fe_count == 0) {
			printf("Discarding entry which cannot execute %s\n",
			    fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			    fe_entries);
			fp_free(ent);
			continue;
		} else if (should_truncate) {
			printf("Discarding unreachable entry %s\n",
			    fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			    fe_entries);
			fp_free(ent);
			continue;
		}

		if (ent->fe_type == FAIL_POINT_OFF) {
			should_wake_paused = true;
			if (ent->fe_count == FE_COUNT_UNTRACKED) {
				should_truncate = true;
				TAILQ_REMOVE(&entries->fp_entry_queue, ent,
				    fe_entries);
				fp_free(ent);
			}
		} else if (ent->fe_type == FAIL_POINT_PAUSE) {
			should_truncate = true;
		} else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags &
		    FAIL_POINT_NONSLEEPABLE)) {
			/**
			 * If this fail point is annotated as being in a
			 * non-sleepable ctx, convert sleep to delay and
			 * convert the msec argument to usecs.
			 */
			printf("Sleep call request on fail point in "
			    "non-sleepable context; using delay instead "
			    "of sleep\n");
			ent->fe_type = FAIL_POINT_DELAY;
			ent->fe_arg *= 1000;
		}
	}

	if (TAILQ_EMPTY(&entries->fp_entry_queue)) {
		entries = fail_point_swap_settings(fp, NULL);
		if (entries != NULL)
			wakeup(FP_PAUSE_CHANNEL(fp));
	} else {
		if (should_wake_paused)
			wakeup(FP_PAUSE_CHANNEL(fp));
		fail_point_swap_settings(fp, entries);
	}

end:
#ifdef IWARNING
	if (error)
		IWARNING("Failed to set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
	else
		INOTICE("Set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
#endif /* IWARNING */

	fail_point_setting_release_ref(fp);
	return (error);
}
#define MAX_FAIL_POINT_BUF 1023

/**
 * Handle kernel failpoint set/get.
 */
int
fail_point_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	char *buf;
	struct sbuf sb;
	struct sbuf *sb_check;
	int error;

	buf = NULL;
	error = 0;
	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Setting */
	/**
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	if (req->newptr) {
		if (req->newlen > MAX_FAIL_POINT_BUF) {
			error = EINVAL;
			goto out;
		}

		buf = fp_malloc(req->newlen + 1, M_WAITOK);

		error = SYSCTL_IN(req, buf, req->newlen);
		if (error)
			goto out;
		buf[req->newlen] = '\0';

		error = fail_point_set(fp, buf);
	}

out:
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	/* Retrieving. */
	fail_point_get(fp, &sb, false);

	sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (buf)
		fp_free(buf);

	return (error);
}
int
fail_point_sysctl_status(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	struct sbuf sb, *sb_check;

	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Retrieving. */
	fail_point_get(fp, &sb, true);

	sbuf_finish(&sb);
	sbuf_delete(&sb);

	/**
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	return (0);
}
int
fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len)
{
	struct sysctl_req *sa;
	int error;

	sa = sysctl_args;

	error = SYSCTL_OUT(sa, buf, len);

	if (error == ENOMEM)
		return (-1);
	else
		return (len);
}
/**
 * Internal helper function to translate a human-readable failpoint string
 * into an internally-parsable fail_point structure.
 */
static char *
parse_fail_point(struct fail_point_setting *ents, char *p)
{
	/*  <fail_point> ::=
	 *      <term> ( "->" <term> )*
	 */
	int term_count;

	term_count = 1;

	p = parse_term(ents, p);
	if (p == NULL)
		return (NULL);

	while (*p != '\0') {
		term_count++;
		if (p[0] != '-' || p[1] != '>' ||
		    (p = parse_term(ents, p+2)) == NULL ||
		    term_count > FP_MAX_ENTRY_COUNT)
			return (NULL);
	}
	return (p);
}
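/*
 * Illustrative parse: "5%return(28)->10*sleep(100)" produces two
 * entries evaluated in order on each hit of the fail point: the first
 * returns error 28 with probability 5%, the second sleeps for 100ms
 * and is count-tracked so that it fires at most 10 times.
 */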
/**
 * Internal helper function to parse an individual term from a failpoint.
 */
static char *
parse_term(struct fail_point_setting *ents, char *p)
{
	struct fail_point_entry *ent;

	ent = fail_point_entry_new(ents);

	/*
	 * <term> ::=
	 *     ( (<float> "%") | (<integer> "*" ) )*
	 *     <type>
	 *     [ "(" <integer> ")" ]
	 *     [ "[pid " <integer> "]" ]
	 */

	/* ( (<float> "%") | (<integer> "*" ) )* */
	while (isdigit(*p) || *p == '.') {
		int units, decimal;

		p = parse_number(&units, &decimal, p);
		if (p == NULL)
			return (NULL);

		if (*p == '%') {
			if (units > 100) /* prevent overflow early */
				units = 100;
			ent->fe_prob = units * (PROB_MAX / 100) + decimal;
			if (ent->fe_prob > PROB_MAX)
				ent->fe_prob = PROB_MAX;
		} else if (*p == '*') {
			if (!units || units < 0 || decimal)
				return (NULL);
			ent->fe_count = units;
		} else
			return (NULL);
		p++;
	}

	/* <type> */
	p = parse_type(ent, p);
	if (p == NULL)
		return (NULL);
	if (*p == '\0')
		return (p);

	/* [ "(" <integer> ")" ] */
	if (*p != '(')
		return (p);
	p++;
	if (!isdigit(*p) && *p != '-')
		return (NULL);
	ent->fe_arg = strtol(p, &p, 0);
	if (*p++ != ')')
		return (NULL);

	/* [ "[pid " <integer> "]" ] */
#define PID_STRING "[pid "
	if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0)
		return (p);
	p += sizeof(PID_STRING) - 1;
	if (!isdigit(*p))
		return (NULL);
	ent->fe_pid = strtol(p, &p, 0);
	if (*p++ != ']')
		return (NULL);

	return (p);
}
/**
 * Internal helper function to parse a numeric for a failpoint term.
 */
static char *
parse_number(int *out_units, int *out_decimal, char *p)
{
	char *old_p;

	/**
	 * <number> ::=
	 *     <integer> [ "." <integer> ] |
	 *     "." <integer>
	 */

	/* whole part */
	old_p = p;
	*out_units = strtol(p, &p, 10);
	if (p == old_p && *p != '.')
		return (NULL);

	/* fractional part */
	*out_decimal = 0;
	if (*p == '.') {
		int digits = 0;

		p++;
		while (isdigit(*p)) {
			int digit = *p - '0';

			if (digits < PROB_DIGITS - 2)
				*out_decimal = *out_decimal * 10 + digit;
			else if (digits == PROB_DIGITS - 2 && digit >= 5)
				(*out_decimal)++;
			digits++;
			p++;
		}
		if (!digits) /* need at least one digit after '.' */
			return (NULL);
		while (digits++ < PROB_DIGITS - 2) /* add implicit zeros */
			*out_decimal *= 10;
	}

	return (p); /* success */
}
/**
 * Internal helper function to parse an individual type for a failpoint term.
 */
static char *
parse_type(struct fail_point_entry *ent, char *beg)
{
	enum fail_point_t type;
	int len;

	for (type = FAIL_POINT_OFF; type < FAIL_POINT_NUMTYPES; type++) {
		len = fail_type_strings[type].nmlen;
		if (strncmp(fail_type_strings[type].name, beg, len) == 0) {
			ent->fe_type = type;
			return (beg + len);
		}
	}
	return (NULL);
}
/* The fail point sysctl tree. */
SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW, 0, "fail points");

/* Debugging/testing stuff for fail point */
static int
sysctl_test_fail_point(SYSCTL_HANDLER_ARGS)
{

	KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point);
}

SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point,
    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, sysctl_test_fail_point, "A",
    "Trigger test fail points");