/*-
 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/**
 * @file
 *
 * fail(9) Facility.
 *
 * @ingroup failpoint_private
 */
/**
 * @defgroup failpoint fail(9) Facility
 *
 * Failpoints allow for injecting fake errors into running code on the fly,
 * without modifying code or recompiling with flags.  Failpoints are always
 * present, and are very efficient when disabled.  Failpoints are described
 * in man fail(9).
 */
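/**
 * Example usage (an illustrative sketch, not part of this file's code):
 * a failpoint is declared at the point of interest and driven via sysctl.
 * The fail point name "example_fp" and the enclosing function are
 * hypothetical; see fail(9) and sys/fail.h for the authoritative macros.
 *
 * @code
 *      static int
 *      example_operation(void)
 *      {
 *              KFAIL_POINT_RETURN(DEBUG_FP, example_fp);
 *              return (0);
 *      }
 * @endcode
 *
 * From userland, the fail point is then armed with e.g.:
 *      sysctl debug.fail_point.example_fp='5%return(35)'
 */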
/**
 * @defgroup failpoint_private Private fail(9) Implementation functions
 *
 * Private implementations for the actual failpoint code.
 *
 * @ingroup failpoint
 */
/**
 * @addtogroup failpoint_private
 * @{
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/ctype.h>
#include <sys/errno.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#ifdef ILOG_DEFINE_FOR_FILE
ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point);
#endif

static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system");
#define fp_free(ptr) free(ptr, M_FAIL_POINT)
#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags))
#define fs_free(ptr) fp_free(ptr)
#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \
        M_WAITOK | M_ZERO)

/**
 * These define the wchans that are used for sleeping and pausing,
 * respectively.  They are chosen arbitrarily but need to be distinct
 * per failpoint and per the sleep/pause distinction.
 */
#define FP_SLEEP_CHANNEL(fp) (void*)(fp)
#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void*, &fp->fp_setting)

/**
 * Don't allow more than this many entries in a fail point set by sysctl.
 * The 99.99...% case is to have 1 entry.  I can't imagine having this many
 * entries, so it should not limit us.  Saves on re-mallocs while holding
 * a non-sleepable lock.
 */
#define FP_MAX_ENTRY_COUNT 20

/* Used to drain sbufs to the sysctl output */
int fail_sysctl_drain_func(void *, const char *, int);

/* Head of tailq of struct fail_point_entry */
TAILQ_HEAD(fail_point_entry_queue, fail_point_entry);

/**
 * fp entries garbage list; outstanding entries are cleaned up in the
 * garbage collector
 */
STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting);
static struct fail_point_setting_garbage fp_setting_garbage =
        STAILQ_HEAD_INITIALIZER(fp_setting_garbage);
static struct mtx mtx_garbage_list;
MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx",
        MTX_SPIN);

static struct sx sx_fp_set;
SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx");

/**
 * Failpoint types.
 * Don't change these without changing fail_type_strings, below.
 * @ingroup failpoint_private
 */
enum fail_point_t {
        FAIL_POINT_OFF,         /**< don't fail */
        FAIL_POINT_PANIC,       /**< panic */
        FAIL_POINT_RETURN,      /**< return an error code */
        FAIL_POINT_BREAK,       /**< break into the debugger */
        FAIL_POINT_PRINT,       /**< print a message */
        FAIL_POINT_SLEEP,       /**< sleep for some msecs */
        FAIL_POINT_PAUSE,       /**< sleep until failpoint is set to off */
        FAIL_POINT_YIELD,       /**< yield the cpu */
        FAIL_POINT_DELAY,       /**< busy wait the cpu */
        FAIL_POINT_NUMTYPES,
        FAIL_POINT_INVALID = -1
};

static struct {
        const char *name;
        int     nmlen;
} fail_type_strings[] = {
#define FP_TYPE_NM_LEN(s)       { s, sizeof(s) - 1 }
        [FAIL_POINT_OFF] =      FP_TYPE_NM_LEN("off"),
        [FAIL_POINT_PANIC] =    FP_TYPE_NM_LEN("panic"),
        [FAIL_POINT_RETURN] =   FP_TYPE_NM_LEN("return"),
        [FAIL_POINT_BREAK] =    FP_TYPE_NM_LEN("break"),
        [FAIL_POINT_PRINT] =    FP_TYPE_NM_LEN("print"),
        [FAIL_POINT_SLEEP] =    FP_TYPE_NM_LEN("sleep"),
        [FAIL_POINT_PAUSE] =    FP_TYPE_NM_LEN("pause"),
        [FAIL_POINT_YIELD] =    FP_TYPE_NM_LEN("yield"),
        [FAIL_POINT_DELAY] =    FP_TYPE_NM_LEN("delay"),
};

#define FE_COUNT_UNTRACKED (INT_MIN)

/**
 * Internal structure tracking a single term of a complete failpoint.
 * @ingroup failpoint_private
 */
struct fail_point_entry {
        volatile bool   fe_stale;
        enum fail_point_t       fe_type;        /**< type of entry */
        int             fe_arg;         /**< argument to type (e.g. return value) */
        int             fe_prob;        /**< likelihood of firing in millionths */
        int             fe_count;       /**< times to fire, FE_COUNT_UNTRACKED means infinite */
        pid_t           fe_pid;         /**< only fail for this process */
        struct fail_point       *fe_parent;     /**< backpointer to fp */
        TAILQ_ENTRY(fail_point_entry)   fe_entries; /**< next entry ptr */
};

struct fail_point_setting {
        STAILQ_ENTRY(fail_point_setting) fs_garbage_link;
        struct fail_point_entry_queue fp_entry_queue;
        struct fail_point *fs_parent;
        struct mtx feq_mtx; /* Gives fail_point_pause something to do.  */
};

/**
 * Defines stating the equivalent of probability one (100%)
 */
enum {
        PROB_MAX = 1000000,     /* probability between zero and this number */
        PROB_DIGITS = 6         /* number of zeros in the above number */
};
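
/*
 * Worked example (illustrative): a term written as "25%" stores
 * fe_prob = 25 * (PROB_MAX / 100) = 250000, i.e. 250000 chances in
 * 1000000 of firing.
 */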

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *fail_point_setting_get_ref(
        struct fail_point *fp);
/* Release a ref on an fp_setting */
static inline void fail_point_setting_release_ref(struct fail_point *fp);
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *fail_point_setting_new(struct
        fail_point *);
/* Free a struct fail_point_setting */
static void fail_point_setting_destroy(struct fail_point_setting *fp_setting);
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *fail_point_entry_new(struct
        fail_point_setting *);
/* Free a struct fail_point_entry */
static void fail_point_entry_destroy(struct fail_point_entry *fp_entry);
/* Append fp setting to garbage list */
static inline void fail_point_setting_garbage_append(
        struct fail_point_setting *fp_setting);
/* Swap fp's setting with fp_setting_new */
static inline struct fail_point_setting *
        fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new);
/* Free up any zero-ref setting in the garbage queue */
static void fail_point_garbage_collect(void);
/* If this fail point's setting is empty, then swap it out to NULL. */
static inline void fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting);

bool
fail_point_is_off(struct fail_point *fp)
{
        bool return_val;
        struct fail_point_setting *fp_setting;
        struct fail_point_entry *ent;

        return_val = true;

        fp_setting = fail_point_setting_get_ref(fp);
        if (fp_setting != NULL) {
                TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue,
                    fe_entries) {
                        if (!ent->fe_stale) {
                                return_val = false;
                                break;
                        }
                }
        }
        fail_point_setting_release_ref(fp);

        return (return_val);
}

/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *
fail_point_setting_new(struct fail_point *fp)
{
        struct fail_point_setting *fs_new;

        fs_new = fs_malloc();
        fs_new->fs_parent = fp;
        TAILQ_INIT(&fs_new->fp_entry_queue);
        mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN);

        fail_point_setting_garbage_append(fs_new);

        return (fs_new);
}

/* Free a struct fail_point_setting */
static void
fail_point_setting_destroy(struct fail_point_setting *fp_setting)
{
        struct fail_point_entry *ent;

        while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) {
                ent = TAILQ_FIRST(&fp_setting->fp_entry_queue);
                TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries);
                fail_point_entry_destroy(ent);
        }

        mtx_destroy(&fp_setting->feq_mtx);
        fs_free(fp_setting);
}

/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *
fail_point_entry_new(struct fail_point_setting *fp_setting)
{
        struct fail_point_entry *fp_entry;

        fp_entry = fp_malloc(sizeof(struct fail_point_entry),
                M_WAITOK | M_ZERO);
        fp_entry->fe_parent = fp_setting->fs_parent;
        fp_entry->fe_prob = PROB_MAX;
        fp_entry->fe_pid = NO_PID;
        fp_entry->fe_count = FE_COUNT_UNTRACKED;
        TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry,
                fe_entries);

        return (fp_entry);
}

/* Free a struct fail_point_entry */
static void
fail_point_entry_destroy(struct fail_point_entry *fp_entry)
{

        fp_free(fp_entry);
}

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *
fail_point_setting_get_ref(struct fail_point *fp)
{
        struct fail_point_setting *fp_setting;

        /* Invariant: if we have a ref, our pointer to fp_setting is safe */
        atomic_add_acq_32(&fp->fp_ref_cnt, 1);
        fp_setting = fp->fp_setting;

        return (fp_setting);
}

/* Release a ref on an fp_setting */
static inline void
fail_point_setting_release_ref(struct fail_point *fp)
{

        KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs"));
        atomic_subtract_rel_32(&fp->fp_ref_cnt, 1);
}

/* Append fp setting to the garbage list */
static inline void
fail_point_setting_garbage_append(struct fail_point_setting *fp_setting)
{

        mtx_lock_spin(&mtx_garbage_list);
        STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting,
                fs_garbage_link);
        mtx_unlock_spin(&mtx_garbage_list);
}

/* Swap fp's setting with fp_setting_new */
static struct fail_point_setting *
fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new)
{
        struct fail_point_setting *fp_setting_old;

        fp_setting_old = fp->fp_setting;
        fp->fp_setting = fp_setting_new;

        return (fp_setting_old);
}

static inline void
fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting)
{

        /* We may have already been swapped out and replaced; ignore. */
        if (fp->fp_setting == fp_setting)
                fail_point_swap_settings(fp, NULL);
}

/* Free up any zero-ref entries in the garbage queue */
static void
fail_point_garbage_collect(void)
{
        struct fail_point_setting *fs_current, *fs_next;
        struct fail_point_setting_garbage fp_ents_free_list;

        /**
         * We will transfer the entries to be freed to fp_ents_free_list
         * while holding the spin mutex, then free them after we drop the
         * lock.  This avoids triggering witness due to sleepable mutexes
         * in the memory allocator.
         */
        STAILQ_INIT(&fp_ents_free_list);

        mtx_lock_spin(&mtx_garbage_list);
        STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link,
            fs_next) {
                if (fs_current->fs_parent->fp_setting != fs_current &&
                        fs_current->fs_parent->fp_ref_cnt == 0) {
                        STAILQ_REMOVE(&fp_setting_garbage, fs_current,
                                fail_point_setting, fs_garbage_link);
                        STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current,
                                fs_garbage_link);
                }
        }
        mtx_unlock_spin(&mtx_garbage_list);

        STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link,
                fs_next)
                fail_point_setting_destroy(fs_current);
}

/* Drain out all refs from this fail point */
static inline void
fail_point_drain(struct fail_point *fp, int expected_ref)
{
        struct fail_point_setting *entries;

        entries = fail_point_swap_settings(fp, NULL);
        /**
         * We have unpaused all threads, so we will wait no longer
         * than the time taken for the longest remaining sleep, or
         * the length of time of a long-running code block.
         */
        while (fp->fp_ref_cnt > expected_ref) {
                wakeup(FP_PAUSE_CHANNEL(fp));
                tsleep(&fp, PWAIT, "fail_point_drain", hz / 100);
        }
        fail_point_swap_settings(fp, entries);
}

static inline void
fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret,
        struct mtx *mtx_sleep)
{

        if (fp->fp_pre_sleep_fn)
                fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

        msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0);

        if (fp->fp_post_sleep_fn)
                fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
}

static inline void
fail_point_sleep(struct fail_point *fp, int msecs,
        enum fail_point_return_code *pret)
{
        int timo;

        /* Convert from millisecs to ticks, rounding up */
        timo = howmany(msecs * hz, 1000);
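        /*
         * Illustrative arithmetic (not executed): with hz = 100 and
         * msecs = 15, timo = howmany(15 * 100, 1000) = 2 ticks.
         */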

        if (timo > 0) {
                if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) {
                        if (fp->fp_pre_sleep_fn)
                                fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

                        tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo);

                        if (fp->fp_post_sleep_fn)
                                fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
                } else {
                        if (fp->fp_pre_sleep_fn)
                                fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

                        timeout(fp->fp_post_sleep_fn, fp->fp_post_sleep_arg,
                                timo);
                        *pret = FAIL_POINT_RC_QUEUED;
                }
        }
}

static char *parse_fail_point(struct fail_point_setting *, char *);
static char *parse_term(struct fail_point_setting *, char *);
static char *parse_number(int *out_units, int *out_decimal, char *);
static char *parse_type(struct fail_point_entry *, char *);

/**
 * Initialize a fail_point.  The name is formed in a printf-like fashion
 * from "fmt" and subsequent arguments.  This function is generally used
 * for custom failpoints located at odd places in the sysctl tree, and is
 * not explicitly needed for standard in-line-declared failpoints.
 *
 * @ingroup failpoint
 */
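/*
 * Example (illustrative sketch): a dynamically named failpoint for a
 * hypothetical per-device softc 'sc':
 *
 *      fail_point_init(&sc->sc_io_fp, "%s_io_error",
 *          device_get_nameunit(sc->sc_dev));
 */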
void
fail_point_init(struct fail_point *fp, const char *fmt, ...)
{
        va_list ap;
        char *name;
        int n;

        fp->fp_setting = NULL;
        fp->fp_flags = 0;

        /* Figure out the size of the name. */
        va_start(ap, fmt);
        n = vsnprintf(NULL, 0, fmt, ap);
        va_end(ap);

        /* Allocate the name and fill it in. */
        name = fp_malloc(n + 1, M_WAITOK);
        if (name != NULL) {
                va_start(ap, fmt);
                vsnprintf(name, n + 1, fmt, ap);
                va_end(ap);
        }
        fp->fp_name = name;
        fp->fp_location = "";
        fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
        fp->fp_pre_sleep_fn = NULL;
        fp->fp_pre_sleep_arg = NULL;
        fp->fp_post_sleep_fn = NULL;
        fp->fp_post_sleep_arg = NULL;
}

/**
 * Free the resources held by a fail_point, and wake any paused threads.
 * Thou shalt not allow threads to hit this fail point after you enter this
 * function, nor shall you call this multiple times for a given fp.
 * @ingroup failpoint
 */
void
fail_point_destroy(struct fail_point *fp)
{

        fail_point_drain(fp, 0);

        if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) {
                fp_free(__DECONST(void *, fp->fp_name));
                fp->fp_name = NULL;
        }
        fp->fp_flags = 0;

        sx_xlock(&sx_fp_set);
        fail_point_garbage_collect();
        sx_xunlock(&sx_fp_set);
}

/**
 * This does the real work of evaluating a fail point.  If the fail point
 * tells us to return a value, this function returns FAIL_POINT_RC_RETURN
 * and fills in 'return_value' (return_value is allowed to be null).  If
 * the fail point tells us to panic, we never return.  Otherwise we just
 * return FAIL_POINT_RC_CONTINUE after doing some work, which means
 * "keep going".
 */
enum fail_point_return_code
fail_point_eval_nontrivial(struct fail_point *fp, int *return_value)
{
        bool execute = false;
        struct fail_point_entry *ent;
        struct fail_point_setting *fp_setting;
        enum fail_point_return_code ret;
        int cont;
        int count;
        int msecs;
        int usecs;

        ret = FAIL_POINT_RC_CONTINUE;
        cont = 0; /* don't continue by default */

        fp_setting = fail_point_setting_get_ref(fp);
        if (fp_setting == NULL)
                goto abort;

        TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
                if (ent->fe_stale)
                        continue;

                if (ent->fe_prob < PROB_MAX &&
                    ent->fe_prob < random() % PROB_MAX)
                        continue;

                if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid)
                        continue;

                if (ent->fe_count != FE_COUNT_UNTRACKED) {
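                        /*
                         * Claim one firing by atomically decrementing
                         * fe_count; if we lose every race, another thread
                         * consumed the remaining firings.
                         */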
                        count = ent->fe_count;
                        while (count > 0) {
                                if (atomic_cmpset_32(&ent->fe_count, count, count - 1)) {
                                        count--;
                                        execute = true;
                                        break;
                                }
                                count = ent->fe_count;
                        }
                        if (!execute)
                                /* We lost the race; consider the entry stale and bail now */
                                continue;
                        if (count == 0)
                                ent->fe_stale = true;
                }

                switch (ent->fe_type) {
                case FAIL_POINT_PANIC:
                        panic("fail point %s panicking", fp->fp_name);
                        /* NOTREACHED */

                case FAIL_POINT_RETURN:
                        if (return_value != NULL)
                                *return_value = ent->fe_arg;
                        ret = FAIL_POINT_RC_RETURN;
                        break;

                case FAIL_POINT_BREAK:
                        printf("fail point %s breaking to debugger\n",
                                fp->fp_name);
                        breakpoint();
                        break;

                case FAIL_POINT_PRINT:
                        printf("fail point %s executing\n", fp->fp_name);
                        cont = ent->fe_arg;
                        break;

                case FAIL_POINT_SLEEP:
                        msecs = ent->fe_arg;
                        if (msecs)
                                fail_point_sleep(fp, msecs, &ret);
                        break;

                case FAIL_POINT_PAUSE:
                        /**
                         * Pausing is inherently strange with multiple
                         * entries given our design.  That is because some
                         * entries could be unreachable, for instance in
                         * cases like pause->return: we can never reach the
                         * return entry.  The sysctl layer actually
                         * truncates all entries after a pause for this
                         * reason.
                         */
                        mtx_lock_spin(&fp_setting->feq_mtx);
                        fail_point_pause(fp, &ret, &fp_setting->feq_mtx);
                        mtx_unlock_spin(&fp_setting->feq_mtx);
                        break;

                case FAIL_POINT_YIELD:
                        kern_yield(PRI_UNCHANGED);
                        break;

                case FAIL_POINT_DELAY:
                        usecs = ent->fe_arg;
                        DELAY(usecs);
                        break;

                default:
                        break;
                }

                if (cont == 0)
                        break;
        }

        if (fail_point_is_off(fp))
                fail_point_eval_swap_out(fp, fp_setting);

abort:
        fail_point_setting_release_ref(fp);

        return (ret);
}

/**
 * Translate internal fail_point structure into human-readable text.
 */
static void
fail_point_get(struct fail_point *fp, struct sbuf *sb,
        bool verbose)
{
        struct fail_point_entry *ent;
        struct fail_point_setting *fp_setting;
        struct fail_point_entry *fp_entry_cpy;
        int cnt_sleeping;
        int idx;
        int printed_entry_count;

        cnt_sleeping = 0;
        idx = 0;
        printed_entry_count = 0;

        fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) *
                (FP_MAX_ENTRY_COUNT + 1), M_WAITOK);

        fp_setting = fail_point_setting_get_ref(fp);

        if (fp_setting != NULL) {
                TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
                        if (ent->fe_stale)
                                continue;

                        KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT,
                                ("FP entry list larger than allowed"));

                        fp_entry_cpy[printed_entry_count] = *ent;
                        ++printed_entry_count;
                }
        }
        fail_point_setting_release_ref(fp);

        /* This is our equivalent of a NULL terminator */
        fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID;

        while (idx < printed_entry_count) {
                ent = &fp_entry_cpy[idx];
                ++idx;
                if (ent->fe_prob < PROB_MAX) {
                        int decimal = ent->fe_prob % (PROB_MAX / 100);
                        int units = ent->fe_prob / (PROB_MAX / 100);
                        sbuf_printf(sb, "%d", units);
                        if (decimal) {
                                int digits = PROB_DIGITS - 2;
                                while (!(decimal % 10)) {
                                        digits--;
                                        decimal /= 10;
                                }
                                sbuf_printf(sb, ".%0*d", digits, decimal);
                        }
                        sbuf_printf(sb, "%%");
                }
                if (ent->fe_count >= 0)
                        sbuf_printf(sb, "%d*", ent->fe_count);
                sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name);
                if (ent->fe_arg)
                        sbuf_printf(sb, "(%d)", ent->fe_arg);
                if (ent->fe_pid != NO_PID)
                        sbuf_printf(sb, "[pid %d]", ent->fe_pid);
                if (idx < printed_entry_count)
                        sbuf_printf(sb, "->");
        }
        if (!printed_entry_count)
                sbuf_printf(sb, "off");

        fp_free(fp_entry_cpy);
        if (verbose) {
#ifdef STACK
                /*
                 * Print the stacks of any threads sleeping on this fail
                 * point.  queue=0 is the sleepqueue index used by msleep
                 * when sending our threads to sleep.
                 */
                sbuf_printf(sb, "\nsleeping_thread_stacks = {\n");
                sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0,
                        &cnt_sleeping);

                sbuf_printf(sb, "},\n");
#endif
                sbuf_printf(sb, "sleeping_thread_count = %d,\n",
                        cnt_sleeping);

#ifdef STACK
                sbuf_printf(sb, "paused_thread_stacks = {\n");
                sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0,
                        &cnt_sleeping);

                sbuf_printf(sb, "},\n");
#endif
                sbuf_printf(sb, "paused_thread_count = %d\n",
                        cnt_sleeping);
        }
}

/**
 * Set an internal fail_point structure from a human-readable failpoint
 * string in a lock-safe manner.
 */
static int
fail_point_set(struct fail_point *fp, char *buf)
{
        struct fail_point_entry *ent, *ent_next;
        struct fail_point_setting *entries;
        bool should_wake_paused;
        bool should_truncate;
        int error;

        error = 0;
        should_wake_paused = false;
        should_truncate = false;

        /* Parse new entries. */
        /**
         * The ref protects our new malloc'd stuff from being garbage
         * collected before we link it.
         */
        fail_point_setting_get_ref(fp);
        entries = fail_point_setting_new(fp);
        if (parse_fail_point(entries, buf) == NULL) {
                mtx_lock_spin(&mtx_garbage_list);
                STAILQ_REMOVE(&fp_setting_garbage, entries,
                        fail_point_setting, fs_garbage_link);
                mtx_unlock_spin(&mtx_garbage_list);
                fail_point_setting_destroy(entries);
                error = EINVAL;
                goto end;
        }

        /**
         * Transfer the entries we are going to keep to a new list.
         * Get rid of useless zero probability entries, and entries with hit
         * count 0.
         * If 'off' is present, and it has no hit count set, then all entries
         * after it are discarded since they are unreachable.
         */
        TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) {
                if (ent->fe_prob == 0 || ent->fe_count == 0) {
                        printf("Discarding entry which cannot execute %s\n",
                                fail_type_strings[ent->fe_type].name);
                        TAILQ_REMOVE(&entries->fp_entry_queue, ent,
                                fe_entries);
                        fp_free(ent);
                        continue;
                } else if (should_truncate) {
                        printf("Discarding unreachable entry %s\n",
                                fail_type_strings[ent->fe_type].name);
                        TAILQ_REMOVE(&entries->fp_entry_queue, ent,
                                fe_entries);
                        fp_free(ent);
                        continue;
                }

                if (ent->fe_type == FAIL_POINT_OFF) {
                        should_wake_paused = true;
                        if (ent->fe_count == FE_COUNT_UNTRACKED) {
                                should_truncate = true;
                                TAILQ_REMOVE(&entries->fp_entry_queue, ent,
                                        fe_entries);
                                fp_free(ent);
                        }
                } else if (ent->fe_type == FAIL_POINT_PAUSE) {
                        should_truncate = true;
                } else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags &
                        FAIL_POINT_NONSLEEPABLE)) {
                        /**
                         * If this fail point is annotated as being in a
                         * non-sleepable ctx, convert sleep to delay and
                         * convert the msec argument to usecs.
                         */
                        printf("Sleep call request on fail point in "
                                "non-sleepable context; using delay instead "
                                "of sleep\n");
                        ent->fe_type = FAIL_POINT_DELAY;
                        ent->fe_arg *= 1000;
                }
        }

        if (TAILQ_EMPTY(&entries->fp_entry_queue)) {
                entries = fail_point_swap_settings(fp, NULL);
                if (entries != NULL)
                        wakeup(FP_PAUSE_CHANNEL(fp));
        } else {
                if (should_wake_paused)
                        wakeup(FP_PAUSE_CHANNEL(fp));
                fail_point_swap_settings(fp, entries);
        }

end:
#ifdef IWARNING
        if (error)
                IWARNING("Failed to set %s %s to %s",
                    fp->fp_name, fp->fp_location, buf);
        else
                INOTICE("Set %s %s to %s",
                    fp->fp_name, fp->fp_location, buf);
#endif /* IWARNING */

        fail_point_setting_release_ref(fp);
        return (error);
}

#define MAX_FAIL_POINT_BUF      1023

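/*
 * Example (illustrative): the test fail point declared at the bottom of
 * this file is set and cleared from userland with:
 *
 *      sysctl debug.fail_point.test_fail_point='2%return(5)->5*sleep(100)'
 *      sysctl debug.fail_point.test_fail_point='off'
 */
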
/**
 * Handle kernel failpoint set/get.
 */
int
fail_point_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct fail_point *fp;
        char *buf;
        struct sbuf *sb_check;
        struct sbuf sb;
        int error;

        error = 0;
        fp = arg1;
        buf = NULL;

        sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
        if (sb_check != &sb)
                return (ENOMEM);

        sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

        /* Setting */
        /**
         * Lock protects any new entries from being garbage collected
         * before we can link them to the fail point.
         */
        sx_xlock(&sx_fp_set);
        if (req->newptr) {
                if (req->newlen > MAX_FAIL_POINT_BUF) {
                        sx_xunlock(&sx_fp_set);
                        error = EINVAL;
                        goto out;
                }

                buf = fp_malloc(req->newlen + 1, M_WAITOK);

                error = SYSCTL_IN(req, buf, req->newlen);
                if (error) {
                        sx_xunlock(&sx_fp_set);
                        goto out;
                }
                buf[req->newlen] = '\0';

                error = fail_point_set(fp, buf);
        }

        fail_point_garbage_collect();
        sx_xunlock(&sx_fp_set);

        /* Retrieving. */
        fail_point_get(fp, &sb, false);

out:
        sbuf_finish(&sb);
        sbuf_delete(&sb);

        if (buf)
                fp_free(buf);

        return (error);
}

int
fail_point_sysctl_status(SYSCTL_HANDLER_ARGS)
{
        struct fail_point *fp;
        struct sbuf sb, *sb_check;

        fp = arg1;

        sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
        if (sb_check != &sb)
                return (ENOMEM);

        sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

        /* Retrieving. */
        fail_point_get(fp, &sb, true);

        sbuf_finish(&sb);
        sbuf_delete(&sb);

        /**
         * The lock serializes garbage collection against setters that may
         * still be linking their new entries to fail points.
         */
        sx_xlock(&sx_fp_set);
        fail_point_garbage_collect();
        sx_xunlock(&sx_fp_set);

        return (0);
}

int
fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len)
{
        struct sysctl_req *sa;
        int error;

        sa = sysctl_args;

        error = SYSCTL_OUT(sa, buf, len);

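        /*
         * Per the sbuf(9) drain contract, a negative return value aborts
         * the drain; otherwise report the number of bytes consumed.
         */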
        if (error == ENOMEM)
                return (-1);
        else
                return (len);
}

/**
 * Internal helper function to translate a human-readable failpoint string
 * into an internally-parsable fail_point structure.
 */
static char *
parse_fail_point(struct fail_point_setting *ents, char *p)
{
        /*  <fail_point> ::
         *      <term> ( "->" <term> )*
         */
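        /* e.g. "1.5%return(5)->sleep(100)" (illustrative) */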
        uint8_t term_count;

        term_count = 1;

        p = parse_term(ents, p);
        if (p == NULL)
                return (NULL);

        while (*p != '\0') {
                term_count++;
                if (p[0] != '-' || p[1] != '>' ||
                        (p = parse_term(ents, p+2)) == NULL ||
                        term_count > FP_MAX_ENTRY_COUNT)
                        return (NULL);
        }
        return (p);
}

/**
 * Internal helper function to parse an individual term from a failpoint.
 */
static char *
parse_term(struct fail_point_setting *ents, char *p)
{
        struct fail_point_entry *ent;

        ent = fail_point_entry_new(ents);

        /*
         * <term> ::
         *     ( (<float> "%") | (<integer> "*" ) )*
         *     <type>
         *     [ "(" <integer> ")" ]
         *     [ "[pid " <integer> "]" ]
         */
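        /* e.g. "2.5%break" or "10*return(35)[pid 42]" (illustrative) */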

        /* ( (<float> "%") | (<integer> "*" ) )* */
        while (isdigit(*p) || *p == '.') {
                int units, decimal;

                p = parse_number(&units, &decimal, p);
                if (p == NULL)
                        return (NULL);

                if (*p == '%') {
                        if (units > 100) /* prevent overflow early */
                                units = 100;
                        ent->fe_prob = units * (PROB_MAX / 100) + decimal;
                        if (ent->fe_prob > PROB_MAX)
                                ent->fe_prob = PROB_MAX;
                } else if (*p == '*') {
                        if (units <= 0 || decimal != 0)
                                return (NULL);
                        ent->fe_count = units;
                } else
                        return (NULL);
                p++;
        }

        /* <type> */
        p = parse_type(ent, p);
        if (p == NULL)
                return (NULL);
        if (*p == '\0')
                return (p);

        /* [ "(" <integer> ")" ] */
        if (*p != '(')
                return (p);
        p++;
        if (!isdigit(*p) && *p != '-')
                return (NULL);
        ent->fe_arg = strtol(p, &p, 0);
        if (*p++ != ')')
                return (NULL);

        /* [ "[pid " <integer> "]" ] */
#define PID_STRING "[pid "
        if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0)
                return (p);
        p += sizeof(PID_STRING) - 1;
        if (!isdigit(*p))
                return (NULL);
        ent->fe_pid = strtol(p, &p, 0);
        if (*p++ != ']')
                return (NULL);

        return (p);
}

/**
 * Internal helper function to parse a number for a failpoint term.
 */
static char *
parse_number(int *out_units, int *out_decimal, char *p)
{
        char *old_p;

        /**
         *  <number> ::
         *      <integer> [ "." <integer> ] |
         *      "." <integer>
         */
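        /*
         * e.g. "0.05" parses to *out_units = 0 and *out_decimal = 500,
         * since the decimal part is scaled to PROB_DIGITS - 2 digits
         * (illustrative).
         */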

        /* whole part */
        old_p = p;
        *out_units = strtol(p, &p, 10);
        if (p == old_p && *p != '.')
                return (NULL);

        /* fractional part */
        *out_decimal = 0;
        if (*p == '.') {
                int digits = 0;
                p++;
                while (isdigit(*p)) {
                        int digit = *p - '0';
                        if (digits < PROB_DIGITS - 2)
                                *out_decimal = *out_decimal * 10 + digit;
                        else if (digits == PROB_DIGITS - 2 && digit >= 5)
                                (*out_decimal)++;
                        digits++;
                        p++;
                }
                if (!digits) /* need at least one digit after '.' */
                        return (NULL);
                while (digits++ < PROB_DIGITS - 2) /* add implicit zeros */
                        *out_decimal *= 10;
        }

        return (p); /* success */
}

/**
 * Internal helper function to parse an individual type for a failpoint term.
 */
static char *
parse_type(struct fail_point_entry *ent, char *beg)
{
        enum fail_point_t type;
        int len;

        for (type = FAIL_POINT_OFF; type < FAIL_POINT_NUMTYPES; type++) {
                len = fail_type_strings[type].nmlen;
                if (strncmp(fail_type_strings[type].name, beg, len) == 0) {
                        ent->fe_type = type;
                        return (beg + len);
                }
        }
        return (NULL);
}

/* The fail point sysctl tree. */
SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW, 0, "fail points");

/* Debugging/testing stuff for fail point */
static int
sysctl_test_fail_point(SYSCTL_HANDLER_ARGS)
{

        KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point);
        return (0);
}
SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point,
        CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, sysctl_test_fail_point, "A",
        "Trigger test fail points");
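
/*
 * Example (illustrative): arm the test fail point above, then read the
 * trigger sysctl to fire it:
 *
 *      sysctl debug.fail_point.test_fail_point='return(42)'
 *      sysctl debug.fail_point.test_trigger_fail_point
 */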