/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/**
 * fail(9) Facility.
 *
 * @ingroup failpoint_private
 */
/**
 * @defgroup failpoint fail(9) Facility
 *
 * Failpoints allow for injecting fake errors into running code on the fly,
 * without modifying code or recompiling with flags.  Failpoints are always
 * present, and are very efficient when disabled.  Failpoints are described
 * in fail(9).
 */

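/*
 * Illustrative sketch (not part of the original source; the fail point and
 * sysctl names below are hypothetical): code typically declares a fail point
 * with one of the KFAIL_POINT_*() macros and arms it at run time through the
 * debug.fail_point sysctl tree described in fail(9), e.g.:
 *
 *	KFAIL_POINT_RETURN(DEBUG_FP, example_fp);
 *
 *	# sysctl debug.fail_point.example_fp='1%return(5)->sleep(100)'
 */
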
/**
 * @defgroup failpoint_private Private fail(9) Implementation functions
 *
 * Private implementations for the actual failpoint code.
 *
 * @ingroup failpoint
 */
/**
 * @addtogroup failpoint_private
 * @{
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/ctype.h>
#include <sys/errno.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#ifdef ILOG_DEFINE_FOR_FILE
ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point);
#endif

static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system");
#define fp_free(ptr) free(ptr, M_FAIL_POINT)
#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags))
#define fs_free(ptr) fp_free(ptr)
#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \
    M_WAITOK | M_ZERO)

/**
 * These define the wchans that are used for sleeping and pausing
 * respectively.  They are chosen arbitrarily but need to be distinct per
 * fail point, and distinct between the sleep and pause channels.
 */
#define FP_SLEEP_CHANNEL(fp) (void*)(fp)
#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void*, &fp->fp_setting)

/**
 * Don't allow more than this many entries in a fail point set by sysctl.
 * The 99.99...% case is to have 1 entry.  I can't imagine having this many
 * entries, so it should not limit us.  Saves on re-mallocs while holding
 * a non-sleepable lock.
 */
#define FP_MAX_ENTRY_COUNT 20

/* Used to drain sbufs to the sysctl output */
int fail_sysctl_drain_func(void *, const char *, int);

/* Head of tailq of struct fail_point_entry */
TAILQ_HEAD(fail_point_entry_queue, fail_point_entry);

/**
 * fp entries garbage list; outstanding entries are cleaned up in the
 * garbage collector.
 */
STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting);
static struct fail_point_setting_garbage fp_setting_garbage =
    STAILQ_HEAD_INITIALIZER(fp_setting_garbage);

static struct mtx mtx_garbage_list;
MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx",
    MTX_SPIN);

static struct sx sx_fp_set;
SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx");

/**
 * Failpoint types.
 * Don't change these without changing fail_type_strings in fail.c.
 * @ingroup failpoint_private
 */
enum fail_point_t {
	FAIL_POINT_OFF,		/**< don't fail */
	FAIL_POINT_PANIC,	/**< panic */
	FAIL_POINT_RETURN,	/**< return an error code */
	FAIL_POINT_BREAK,	/**< break into the debugger */
	FAIL_POINT_PRINT,	/**< print a message */
	FAIL_POINT_SLEEP,	/**< sleep for some msecs */
	FAIL_POINT_PAUSE,	/**< sleep until failpoint is set to off */
	FAIL_POINT_YIELD,	/**< yield the cpu */
	FAIL_POINT_DELAY,	/**< busy wait the cpu */
	FAIL_POINT_NUMTYPES,
	FAIL_POINT_INVALID = -1
};

static struct {
	const char *name;
	int nmlen;
} fail_type_strings[] = {
#define FP_TYPE_NM_LEN(s) { s, sizeof(s) - 1 }
	[FAIL_POINT_OFF] =	FP_TYPE_NM_LEN("off"),
	[FAIL_POINT_PANIC] =	FP_TYPE_NM_LEN("panic"),
	[FAIL_POINT_RETURN] =	FP_TYPE_NM_LEN("return"),
	[FAIL_POINT_BREAK] =	FP_TYPE_NM_LEN("break"),
	[FAIL_POINT_PRINT] =	FP_TYPE_NM_LEN("print"),
	[FAIL_POINT_SLEEP] =	FP_TYPE_NM_LEN("sleep"),
	[FAIL_POINT_PAUSE] =	FP_TYPE_NM_LEN("pause"),
	[FAIL_POINT_YIELD] =	FP_TYPE_NM_LEN("yield"),
	[FAIL_POINT_DELAY] =	FP_TYPE_NM_LEN("delay"),
};

#define FE_COUNT_UNTRACKED (INT_MIN)

/**
 * Internal structure tracking a single term of a complete failpoint.
 * @ingroup failpoint_private
 */
struct fail_point_entry {
	volatile bool fe_stale;
	enum fail_point_t fe_type;	/**< type of entry */
	int fe_arg;			/**< argument to type (e.g. return value) */
	int fe_prob;			/**< likelihood of firing in millionths */
	int32_t fe_count;		/**< number of times to fire; FE_COUNT_UNTRACKED means unlimited */
	pid_t fe_pid;			/**< only fail for this process */
	struct fail_point *fe_parent;	/**< backpointer to fp */
	TAILQ_ENTRY(fail_point_entry) fe_entries; /**< next entry ptr */
};

struct fail_point_setting {
	STAILQ_ENTRY(fail_point_setting) fs_garbage_link;
	struct fail_point_entry_queue fp_entry_queue;
	struct fail_point *fs_parent;
	struct mtx feq_mtx; /* Gives fail_point_pause something to do. */
};

/**
 * Defines stating the equivalent of probability one (100%)
 */
enum {
	PROB_MAX = 1000000,	/* probability between zero and this number */
	PROB_DIGITS = 6		/* number of zeros in above number */
};

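/*
 * Worked example (added for illustration): a term probability of "2.5%"
 * parses to fe_prob = 2 * (PROB_MAX / 100) + 5000 = 25000, i.e. the entry
 * fires 25000 times per million (PROB_MAX) evaluations.
 */
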
/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *fail_point_setting_get_ref(
    struct fail_point *fp);
/* Release a ref on an fp_setting */
static inline void fail_point_setting_release_ref(struct fail_point *fp);
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *fail_point_setting_new(struct
    fail_point *);
/* Free a struct fail_point_setting */
static void fail_point_setting_destroy(struct fail_point_setting *fp_setting);
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *fail_point_entry_new(struct
    fail_point_setting *);
/* Free a struct fail_point_entry */
static void fail_point_entry_destroy(struct fail_point_entry *fp_entry);
/* Append fp setting to garbage list */
static inline void fail_point_setting_garbage_append(
    struct fail_point_setting *fp_setting);
/* Swap fp's setting with fp_setting_new */
static inline struct fail_point_setting *
    fail_point_swap_settings(struct fail_point *fp,
    struct fail_point_setting *fp_setting_new);
/* Free up any zero-ref setting in the garbage queue */
static void fail_point_garbage_collect(void);
/* If this fail point's setting is empty, swap it out to NULL. */
static inline void fail_point_eval_swap_out(struct fail_point *fp,
    struct fail_point_setting *fp_setting);

/* Return true if the fail point currently has no live (non-stale) entries. */
bool
fail_point_is_off(struct fail_point *fp)
{
	bool return_val;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *ent;

	return_val = true;

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue,
		    fe_entries) {
			if (!ent->fe_stale) {
				return_val = false;
				break;
			}
		}
	}
	fail_point_setting_release_ref(fp);

	return (return_val);
}

/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *
fail_point_setting_new(struct fail_point *fp)
{
	struct fail_point_setting *fs_new;

	fs_new = fs_malloc();
	fs_new->fs_parent = fp;
	TAILQ_INIT(&fs_new->fp_entry_queue);
	mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN);

	fail_point_setting_garbage_append(fs_new);

	return (fs_new);
}

/* Free a struct fail_point_setting */
static void
fail_point_setting_destroy(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *ent;

	while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) {
		ent = TAILQ_FIRST(&fp_setting->fp_entry_queue);
		TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries);
		fail_point_entry_destroy(ent);
	}

	fs_free(fp_setting);
}

/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *
fail_point_entry_new(struct fail_point_setting *fp_setting)
{
	struct fail_point_entry *fp_entry;

	fp_entry = fp_malloc(sizeof(struct fail_point_entry),
	    M_WAITOK | M_ZERO);
	fp_entry->fe_parent = fp_setting->fs_parent;
	fp_entry->fe_prob = PROB_MAX;
	fp_entry->fe_pid = NO_PID;
	fp_entry->fe_count = FE_COUNT_UNTRACKED;
	TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry,
	    fe_entries);

	return (fp_entry);
}

/* Free a struct fail_point_entry */
static void
fail_point_entry_destroy(struct fail_point_entry *fp_entry)
{

	fp_free(fp_entry);
}

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *
fail_point_setting_get_ref(struct fail_point *fp)
{
	struct fail_point_setting *fp_setting;

	/* Invariant: if we have a ref, our pointer to fp_setting is safe */
	atomic_add_acq_32(&fp->fp_ref_cnt, 1);
	fp_setting = fp->fp_setting;

	return (fp_setting);
}

/* Release a ref on an fp_setting */
static inline void
fail_point_setting_release_ref(struct fail_point *fp)
{

	KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs"));
	atomic_subtract_rel_32(&fp->fp_ref_cnt, 1);
}

/* Append fp entries to fp garbage list */
static inline void
fail_point_setting_garbage_append(struct fail_point_setting *fp_setting)
{

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting,
	    fs_garbage_link);
	mtx_unlock_spin(&mtx_garbage_list);
}

/* Swap fp's entries with fp_setting_new */
static struct fail_point_setting *
fail_point_swap_settings(struct fail_point *fp,
    struct fail_point_setting *fp_setting_new)
{
	struct fail_point_setting *fp_setting_old;

	fp_setting_old = fp->fp_setting;
	fp->fp_setting = fp_setting_new;

	return (fp_setting_old);
}

static inline void
fail_point_eval_swap_out(struct fail_point *fp,
    struct fail_point_setting *fp_setting)
{

	/* We may have already been swapped out and replaced; ignore. */
	if (fp->fp_setting == fp_setting)
		fail_point_swap_settings(fp, NULL);
}

/* Free up any zero-ref entries in the garbage queue */
static void
fail_point_garbage_collect(void)
{
	struct fail_point_setting *fs_current, *fs_next;
	struct fail_point_setting_garbage fp_ents_free_list;

	/*
	 * We will transfer the entries to free to fp_ents_free_list while holding
	 * the spin mutex, then free it after we drop the lock. This avoids
	 * triggering witness due to sleepable mutexes in the memory
	 * allocator.
	 */
	STAILQ_INIT(&fp_ents_free_list);

	mtx_lock_spin(&mtx_garbage_list);
	STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link,
	    fs_next) {
		if (fs_current->fs_parent->fp_setting != fs_current &&
		    fs_current->fs_parent->fp_ref_cnt == 0) {
			STAILQ_REMOVE(&fp_setting_garbage, fs_current,
			    fail_point_setting, fs_garbage_link);
			STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current,
			    fs_garbage_link);
		}
	}
	mtx_unlock_spin(&mtx_garbage_list);

	STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link,
	    fs_next)
		fail_point_setting_destroy(fs_current);
}

/* Drain out all refs from this fail point */
static inline void
fail_point_drain(struct fail_point *fp, int expected_ref)
{
	struct fail_point_setting *entries;

	entries = fail_point_swap_settings(fp, NULL);
	/*
	 * We have unpaused all threads; so we will wait no longer
	 * than the time taken for the longest remaining sleep, or
	 * the length of time of a long-running code block.
	 */
	while (fp->fp_ref_cnt > expected_ref) {
		wakeup(FP_PAUSE_CHANNEL(fp));
		tsleep(&fp, PWAIT, "fail_point_drain", hz / 100);
	}
	if (fp->fp_callout)
		callout_drain(fp->fp_callout);
	fail_point_swap_settings(fp, entries);
}

static void
fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret,
    struct mtx *mtx_sleep)
{

	if (fp->fp_pre_sleep_fn)
		fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

	msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0);

	if (fp->fp_post_sleep_fn)
		fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
}

static void
fail_point_sleep(struct fail_point *fp, int msecs,
    enum fail_point_return_code *pret)
{
	int timo;

	/* Convert from millisecs to ticks, rounding up */
	timo = howmany((int64_t)msecs * hz, 1000L);

	if (timo > 0) {
		if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo);

			if (fp->fp_post_sleep_fn)
				fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
		} else {
			if (fp->fp_pre_sleep_fn)
				fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

			callout_reset(fp->fp_callout, timo,
			    fp->fp_post_sleep_fn, fp->fp_post_sleep_arg);
			*pret = FAIL_POINT_RC_QUEUED;
		}
	}
}

static char *parse_fail_point(struct fail_point_setting *, char *);
static char *parse_term(struct fail_point_setting *, char *);
static char *parse_number(int *out_units, int *out_decimal, char *);
static char *parse_type(struct fail_point_entry *, char *);

/**
 * Initialize a fail_point.  The name is formed in a printf-like fashion
 * from "fmt" and subsequent arguments.  This function is generally used
 * for custom failpoints located at odd places in the sysctl tree, and is
 * not explicitly needed for standard in-line-declared failpoints.
 *
 * @ingroup failpoint
 */
void
fail_point_init(struct fail_point *fp, const char *fmt, ...)
{
	va_list ap;
	char *name;
	int n;

	fp->fp_setting = NULL;
	fp->fp_flags = 0;

	/* Figure out the size of the name. */
	va_start(ap, fmt);
	n = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);

	/* Allocate the name and fill it in. */
	name = fp_malloc(n + 1, M_WAITOK);
	va_start(ap, fmt);
	vsnprintf(name, n + 1, fmt, ap);
	va_end(ap);

	fp->fp_name = name;
	fp->fp_location = "";
	fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
	fp->fp_pre_sleep_fn = NULL;
	fp->fp_pre_sleep_arg = NULL;
	fp->fp_post_sleep_fn = NULL;
	fp->fp_post_sleep_arg = NULL;
}

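/*
 * Illustrative sketch (not part of the original source; "sc" and "unit" are
 * hypothetical driver state): a dynamically named fail point could be set up
 * and later torn down as
 *
 *	fail_point_init(&sc->sc_io_fail, "example_dev_%d_io_error", unit);
 *	...
 *	fail_point_destroy(&sc->sc_io_fail);
 */
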
void
fail_point_alloc_callout(struct fail_point *fp)
{

	/*
	 * This assumes that calls to fail_point_use_timeout_path()
	 * cannot race with each other.
	 */
	if (fp->fp_callout != NULL)
		return;
	fp->fp_callout = fp_malloc(sizeof(*fp->fp_callout), M_WAITOK);
	callout_init(fp->fp_callout, CALLOUT_MPSAFE);
}

/**
 * Free the resources held by a fail_point, and wake any paused threads.
 * Thou shalt not allow threads to hit this fail point after you enter this
 * function, nor shall you call this multiple times for a given fp.
 * @ingroup failpoint
 */
void
fail_point_destroy(struct fail_point *fp)
{

	fail_point_drain(fp, 0);

	if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) {
		fp_free(__DECONST(void *, fp->fp_name));
		fp->fp_name = NULL;
	}
	fp->fp_flags = 0;
	if (fp->fp_callout) {
		fp_free(fp->fp_callout);
		fp->fp_callout = NULL;
	}

	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);
}

/**
 * This does the real work of evaluating a fail point.  If the fail point
 * tells us to return a value, this function returns FAIL_POINT_RC_RETURN and
 * fills in 'return_value' (return_value is allowed to be null).  If the fail
 * point tells us to panic, we never return.  Otherwise we just return
 * FAIL_POINT_RC_CONTINUE after doing some work, which means "keep going".
 */
enum fail_point_return_code
fail_point_eval_nontrivial(struct fail_point *fp, int *return_value)
{
	bool execute = false;
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	enum fail_point_return_code ret;
	int cont;
	int count;
	int msecs;
	int usecs;

	ret = FAIL_POINT_RC_CONTINUE;
	cont = 0; /* don't continue by default */

	fp_setting = fail_point_setting_get_ref(fp);
	if (fp_setting == NULL)
		goto abort;

	TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
		if (ent->fe_stale)
			continue;

		if (ent->fe_prob < PROB_MAX &&
		    ent->fe_prob < random() % PROB_MAX)
			continue;

		if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid)
			continue;

		if (ent->fe_count != FE_COUNT_UNTRACKED) {
			count = ent->fe_count;
			while (count > 0) {
				if (atomic_cmpset_32(&ent->fe_count, count, count - 1)) {
					count--;
					execute = true;
					break;
				}
				count = ent->fe_count;
			}

			if (execute == false)
				/* We lost the race; consider the entry stale and bail now */
				continue;

			if (count == 0)
				ent->fe_stale = true;
		}

		switch (ent->fe_type) {
		case FAIL_POINT_PANIC:
			panic("fail point %s panicking", fp->fp_name);
			/* NOTREACHED */

		case FAIL_POINT_RETURN:
			if (return_value != NULL)
				*return_value = ent->fe_arg;
			ret = FAIL_POINT_RC_RETURN;
			break;

		case FAIL_POINT_BREAK:
			printf("fail point %s breaking to debugger\n",
			    fp->fp_name);
			breakpoint();
			break;

		case FAIL_POINT_PRINT:
			printf("fail point %s executing\n", fp->fp_name);
			cont = ent->fe_arg;
			break;

		case FAIL_POINT_SLEEP:
			msecs = ent->fe_arg;
			if (msecs)
				fail_point_sleep(fp, msecs, &ret);
			break;

		case FAIL_POINT_PAUSE:
			/*
			 * Pausing is inherently strange with multiple
			 * entries given our design.  That is because some
			 * entries could be unreachable, for instance in cases like:
			 * pause->return. We can never reach the return entry.
			 * The sysctl layer actually truncates all entries after
			 * a pause for this reason.
			 */
			mtx_lock_spin(&fp_setting->feq_mtx);
			fail_point_pause(fp, &ret, &fp_setting->feq_mtx);
			mtx_unlock_spin(&fp_setting->feq_mtx);
			break;

		case FAIL_POINT_YIELD:
			kern_yield(PRI_UNCHANGED);
			break;

		case FAIL_POINT_DELAY:
			usecs = ent->fe_arg;
			DELAY(usecs);
			break;

		default:
			break;
		}

		if (cont == 0)
			break;
	}

	if (fail_point_is_off(fp))
		fail_point_eval_swap_out(fp, fp_setting);

abort:
	fail_point_setting_release_ref(fp);

	return (ret);
}

/**
 * Translate internal fail_point structure into human-readable text.
 */
static void
fail_point_get(struct fail_point *fp, struct sbuf *sb,
    bool verbose)
{
	struct fail_point_entry *ent;
	struct fail_point_setting *fp_setting;
	struct fail_point_entry *fp_entry_cpy;
	int cnt_sleeping;
	int idx;
	int printed_entry_count;

	cnt_sleeping = 0;
	idx = 0;
	printed_entry_count = 0;

	fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) *
	    (FP_MAX_ENTRY_COUNT + 1), M_WAITOK);

	fp_setting = fail_point_setting_get_ref(fp);

	if (fp_setting != NULL) {
		TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
			if (ent->fe_stale)
				continue;

			KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT,
			    ("FP entry list larger than allowed"));

			fp_entry_cpy[printed_entry_count] = *ent;
			++printed_entry_count;
		}
	}
	fail_point_setting_release_ref(fp);

	/* This is our equivalent of a NULL terminator */
	fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID;

	while (idx < printed_entry_count) {
		ent = &fp_entry_cpy[idx];
		++idx;
		if (ent->fe_prob < PROB_MAX) {
			int decimal = ent->fe_prob % (PROB_MAX / 100);
			int units = ent->fe_prob / (PROB_MAX / 100);
			sbuf_printf(sb, "%d", units);
			if (decimal) {
				int digits = PROB_DIGITS - 2;
				while (!(decimal % 10)) {
					digits--;
					decimal /= 10;
				}
				sbuf_printf(sb, ".%0*d", digits, decimal);
			}
			sbuf_printf(sb, "%%");
		}
		if (ent->fe_count >= 0)
			sbuf_printf(sb, "%d*", ent->fe_count);
		sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name);
		if (ent->fe_arg)
			sbuf_printf(sb, "(%d)", ent->fe_arg);
		if (ent->fe_pid != NO_PID)
			sbuf_printf(sb, "[pid %d]", ent->fe_pid);
		if (TAILQ_NEXT(ent, fe_entries))
			sbuf_printf(sb, "->");
	}
	if (!printed_entry_count)
		sbuf_printf(sb, "off");

	fp_free(fp_entry_cpy);
	if (verbose) {
#ifdef STACK
		/* Print number of sleeping threads. queue=0 is the argument
		 * used by msleep when sending our threads to sleep. */
		sbuf_printf(sb, "\nsleeping_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0,
		    &cnt_sleeping);

		sbuf_printf(sb, "},\n");
#endif
		sbuf_printf(sb, "sleeping_thread_count = %d,\n",
		    cnt_sleeping);
#ifdef STACK
		sbuf_printf(sb, "paused_thread_stacks = {\n");
		sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0,
		    &cnt_sleeping);

		sbuf_printf(sb, "},\n");
#endif
		sbuf_printf(sb, "paused_thread_count = %d\n",
		    cnt_sleeping);
	}
}

/**
 * Set an internal fail_point structure from a human-readable failpoint string
 * in a lock-safe manner.
 */
static int
fail_point_set(struct fail_point *fp, char *buf)
{
	struct fail_point_entry *ent, *ent_next;
	struct fail_point_setting *entries;
	bool should_wake_paused;
	bool should_truncate;
	int error;

	error = 0;
	should_wake_paused = false;
	should_truncate = false;

	/* Parse new entries. */
	/*
	 * ref protects our new malloc'd stuff from being garbage collected
	 * before we can link it to the fail point.
	 */
	fail_point_setting_get_ref(fp);
	entries = fail_point_setting_new(fp);
	if (parse_fail_point(entries, buf) == NULL) {
		STAILQ_REMOVE(&fp_setting_garbage, entries,
		    fail_point_setting, fs_garbage_link);
		fail_point_setting_destroy(entries);
		error = EINVAL;
		goto end;
	}

	/*
	 * Transfer the entries we are going to keep to a new list.
	 * Get rid of useless zero probability entries, and entries with hit
	 * count 0.
	 * If 'off' is present, and it has no hit count set, then all entries
	 * after it are discarded since they are unreachable.
	 */
	TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) {
		if (ent->fe_prob == 0 || ent->fe_count == 0) {
			printf("Discarding entry which cannot execute %s\n",
			    fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			    fe_entries);
			fail_point_entry_destroy(ent);
			continue;
		} else if (should_truncate) {
			printf("Discarding unreachable entry %s\n",
			    fail_type_strings[ent->fe_type].name);
			TAILQ_REMOVE(&entries->fp_entry_queue, ent,
			    fe_entries);
			fail_point_entry_destroy(ent);
			continue;
		}

		if (ent->fe_type == FAIL_POINT_OFF) {
			should_wake_paused = true;
			if (ent->fe_count == FE_COUNT_UNTRACKED) {
				should_truncate = true;
				TAILQ_REMOVE(&entries->fp_entry_queue, ent,
				    fe_entries);
				fail_point_entry_destroy(ent);
			}
		} else if (ent->fe_type == FAIL_POINT_PAUSE) {
			should_truncate = true;
		} else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags &
		    FAIL_POINT_NONSLEEPABLE)) {
			/*
			 * If this fail point is annotated as being in a
			 * non-sleepable ctx, convert sleep to delay and
			 * convert the msec argument to usecs.
			 */
			printf("Sleep call request on fail point in "
			    "non-sleepable context; using delay instead "
			    "of sleep\n");
			ent->fe_type = FAIL_POINT_DELAY;
			ent->fe_arg *= 1000;
		}
	}

	if (TAILQ_EMPTY(&entries->fp_entry_queue)) {
		entries = fail_point_swap_settings(fp, NULL);
		if (entries != NULL)
			wakeup(FP_PAUSE_CHANNEL(fp));
	} else {
		if (should_wake_paused)
			wakeup(FP_PAUSE_CHANNEL(fp));
		fail_point_swap_settings(fp, entries);
	}

end:
#ifdef IWARNING
	if (error)
		IWARNING("Failed to set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
	else
		INOTICE("Set %s %s to %s",
		    fp->fp_name, fp->fp_location, buf);
#endif /* IWARNING */

	fail_point_setting_release_ref(fp);

	return (error);
}

#define MAX_FAIL_POINT_BUF 1023

/**
 * Handle kernel failpoint set/get.
 */
int
fail_point_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	char *buf;
	struct sbuf sb, *sb_check;
	int error;

	buf = NULL;
	error = 0;
	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Setting */
	/*
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	if (req->newptr) {
		if (req->newlen > MAX_FAIL_POINT_BUF) {
			error = EINVAL;
			sx_xunlock(&sx_fp_set);
			goto out;
		}

		buf = fp_malloc(req->newlen + 1, M_WAITOK);

		error = SYSCTL_IN(req, buf, req->newlen);
		if (error) {
			sx_xunlock(&sx_fp_set);
			goto out;
		}
		buf[req->newlen] = '\0';

		error = fail_point_set(fp, buf);
	}

	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	/* Retrieving. */
	fail_point_get(fp, &sb, false);

out:
	sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (buf != NULL)
		fp_free(buf);

	return (error);
}

int
fail_point_sysctl_status(SYSCTL_HANDLER_ARGS)
{
	struct fail_point *fp;
	struct sbuf sb, *sb_check;

	fp = arg1;

	sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
	if (sb_check != &sb)
		return (ENOMEM);

	sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

	/* Retrieving. */
	fail_point_get(fp, &sb, true);

	sbuf_finish(&sb);
	sbuf_delete(&sb);

	/*
	 * Lock protects any new entries from being garbage collected before we
	 * can link them to the fail point.
	 */
	sx_xlock(&sx_fp_set);
	fail_point_garbage_collect();
	sx_xunlock(&sx_fp_set);

	return (0);
}

int
fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len)
{
	struct sysctl_req *sa;
	int error;

	sa = sysctl_args;

	error = SYSCTL_OUT(sa, buf, len);

	if (error == ENOMEM)
		return (-1);
	else
		return (len);
}

/**
 * Internal helper function to translate a human-readable failpoint string
 * into an internally-parsable fail_point structure.
 */
static char *
parse_fail_point(struct fail_point_setting *ents, char *p)
{
	/*  <fail_point> ::=
	 *      <term> ( "->" <term> )*
	 */
	int term_count;

	term_count = 1;

	p = parse_term(ents, p);
	if (p == NULL)
		return (NULL);

	while (*p != '\0') {
		term_count++;
		if (p[0] != '-' || p[1] != '>' ||
		    (p = parse_term(ents, p+2)) == NULL ||
		    term_count > FP_MAX_ENTRY_COUNT)
			return (NULL);
	}
	return (p);
}

/**
 * Internal helper function to parse an individual term from a failpoint.
 */
static char *
parse_term(struct fail_point_setting *ents, char *p)
{
	struct fail_point_entry *ent;

	ent = fail_point_entry_new(ents);

	/*
	 * <term> ::=
	 *     ( (<float> "%") | (<integer> "*" ) )*
	 *     <type>
	 *     [ "(" <integer> ")" ]
	 *     [ "[pid " <integer> "]" ]
	 */

	/* ( (<float> "%") | (<integer> "*" ) )* */
	while (isdigit(*p) || *p == '.') {
		int units, decimal;

		p = parse_number(&units, &decimal, p);
		if (p == NULL)
			return (NULL);

		if (*p == '%') {
			if (units > 100) /* prevent overflow early */
				units = 100;
			ent->fe_prob = units * (PROB_MAX / 100) + decimal;
			if (ent->fe_prob > PROB_MAX)
				ent->fe_prob = PROB_MAX;
		} else if (*p == '*') {
			if (!units || units < 0 || decimal)
				return (NULL);
			ent->fe_count = units;
		} else
			return (NULL);
		p++;
	}

	/* <type> */
	p = parse_type(ent, p);
	if (p == NULL)
		return (NULL);
	if (*p == '\0')
		return (p);

	/* [ "(" <integer> ")" ] */
	if (*p != '(')
		return (p);
	p++;
	if (!isdigit(*p) && *p != '-')
		return (NULL);
	ent->fe_arg = strtol(p, &p, 0);
	if (*p++ != ')')
		return (NULL);

	/* [ "[pid " <integer> "]" ] */
#define PID_STRING "[pid "
	if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0)
		return (p);
	p += sizeof(PID_STRING) - 1;
	if (!isdigit(*p))
		return (NULL);
	ent->fe_pid = strtol(p, &p, 0);
	if (*p++ != ']')
		return (NULL);

	return (p);
}

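/*
 * Examples of strings accepted by the grammar above (added for
 * illustration):
 *
 *	"off"                       disable the fail point
 *	"5*return(55)"              return 55 for the next five evaluations
 *	"0.1%break"                 break into the debugger 0.1% of the time
 *	"1%return(5)->sleep(100)"   return 5 one percent of the time,
 *	                            otherwise sleep 100 ms
 *	"return(22)[pid 1234]"      return 22, but only for pid 1234
 */
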
/**
 * Internal helper function to parse a numeric for a failpoint term.
 */
static char *
parse_number(int *out_units, int *out_decimal, char *p)
{
	char *old_p;

	/*
	 *  <number> ::=
	 *      <integer> [ "." <integer> ] |
	 *      "." <integer>
	 */

	/* whole part */
	old_p = p;
	*out_units = strtol(p, &p, 10);
	if (p == old_p && *p != '.')
		return (NULL);

	/* fractional part */
	*out_decimal = 0;
	if (*p == '.') {
		int digits = 0;
		p++;
		while (isdigit(*p)) {
			int digit = *p - '0';
			if (digits < PROB_DIGITS - 2)
				*out_decimal = *out_decimal * 10 + digit;
			else if (digits == PROB_DIGITS - 2 && digit >= 5)
				(*out_decimal)++;
			digits++;
			p++;
		}
		if (!digits) /* need at least one digit after '.' */
			return (NULL);
		while (digits++ < PROB_DIGITS - 2) /* add implicit zeros */
			*out_decimal *= 10;
	}

	return (p); /* success */
}

/**
 * Internal helper function to parse an individual type for a failpoint term.
 */
static char *
parse_type(struct fail_point_entry *ent, char *beg)
{
	enum fail_point_t type;
	int len;

	for (type = FAIL_POINT_OFF; type < FAIL_POINT_NUMTYPES; type++) {
		len = fail_type_strings[type].nmlen;
		if (strncmp(fail_type_strings[type].name, beg, len) == 0) {
			ent->fe_type = type;
			return (beg + len);
		}
	}
	return (NULL);
}

/* The fail point sysctl tree. */
SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "fail points");

/* Debugging/testing stuff for fail point */
static int
sysctl_test_fail_point(SYSCTL_HANDLER_ARGS)
{

	KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point);
	return (0);
}
SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    sysctl_test_fail_point, "A",
    "Trigger test fail points");
