/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Isilon Inc http://www.isilon.com/
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/**
 * @file
 *
 * fail(9) Facility.
 *
 * @ingroup failpoint_private
 */
/**
 * @defgroup failpoint fail(9) Facility
 *
 * Failpoints allow for injecting fake errors into running code on the fly,
 * without modifying code or recompiling with flags.  Failpoints are always
 * present, and are very efficient when disabled.  Failpoints are described
 * in man fail(9).
 */
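/**
 * Example usage (an illustrative sketch, not part of this file's build;
 * the function and failpoint names are hypothetical).  A failpoint is
 * normally declared inline at the site to be failed and driven from
 * userland through its sysctl, just like the KFAIL_POINT_RETURN() user
 * at the bottom of this file:
 *
 *      int
 *      hypothetical_io_start(void)
 *      {
 *              KFAIL_POINT_RETURN(DEBUG_FP, hypothetical_io_error);
 *              return (0);
 *      }
 *
 * Then, from userland:
 *
 *      # sysctl debug.fail_point.hypothetical_io_error='5%return(5)'
 */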
/**
 * @defgroup failpoint_private Private fail(9) Implementation functions
 *
 * Private implementations for the actual failpoint code.
 *
 * @ingroup failpoint
 */
/**
 * @addtogroup failpoint_private
 * @{
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/ctype.h>
#include <sys/errno.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#ifdef ILOG_DEFINE_FOR_FILE
ILOG_DEFINE_FOR_FILE(L_ISI_FAIL_POINT, L_ILOG, fail_point);
#endif

static MALLOC_DEFINE(M_FAIL_POINT, "Fail Points", "fail points system");
#define fp_free(ptr) free(ptr, M_FAIL_POINT)
#define fp_malloc(size, flags) malloc((size), M_FAIL_POINT, (flags))
#define fs_free(ptr) fp_free(ptr)
#define fs_malloc() fp_malloc(sizeof(struct fail_point_setting), \
    M_WAITOK | M_ZERO)

/**
 * These define the wchans used for sleeping and pausing, respectively.
 * The values are arbitrary, but each must be unique to its failpoint
 * and must distinguish a sleep channel from a pause channel.
 */
#define FP_SLEEP_CHANNEL(fp) (void*)(fp)
#define FP_PAUSE_CHANNEL(fp) __DEVOLATILE(void*, &fp->fp_setting)

/**
 * Don't allow more than this many entries in a fail point set by sysctl.
 * The 99.99...% case is to have 1 entry, so this limit should never be
 * reached in practice.  Capping the count also saves on re-mallocs while
 * holding a non-sleepable lock.
 */
#define FP_MAX_ENTRY_COUNT 20

/* Used to drain sbufs to the sysctl output */
int fail_sysctl_drain_func(void *, const char *, int);

/* Head of tailq of struct fail_point_entry */
TAILQ_HEAD(fail_point_entry_queue, fail_point_entry);

/**
 * fp settings garbage list; outstanding settings are cleaned up by the
 * garbage collector
 */
STAILQ_HEAD(fail_point_setting_garbage, fail_point_setting);
static struct fail_point_setting_garbage fp_setting_garbage =
        STAILQ_HEAD_INITIALIZER(fp_setting_garbage);
static struct mtx mtx_garbage_list;
MTX_SYSINIT(mtx_garbage_list, &mtx_garbage_list, "fail point garbage mtx",
        MTX_SPIN);

static struct sx sx_fp_set;
SX_SYSINIT(sx_fp_set, &sx_fp_set, "fail point set sx");

/**
 * Failpoint types.
 * Don't change these without changing fail_type_strings below.
 * @ingroup failpoint_private
 */
enum fail_point_t {
        FAIL_POINT_OFF,         /**< don't fail */
        FAIL_POINT_PANIC,       /**< panic */
        FAIL_POINT_RETURN,      /**< return an error code */
        FAIL_POINT_BREAK,       /**< break into the debugger */
        FAIL_POINT_PRINT,       /**< print a message */
        FAIL_POINT_SLEEP,       /**< sleep for some msecs */
        FAIL_POINT_PAUSE,       /**< sleep until failpoint is set to off */
        FAIL_POINT_YIELD,       /**< yield the cpu */
        FAIL_POINT_DELAY,       /**< busy wait the cpu */
        FAIL_POINT_NUMTYPES,
        FAIL_POINT_INVALID = -1
};

static struct {
        const char *name;
        int     nmlen;
} fail_type_strings[] = {
#define FP_TYPE_NM_LEN(s)       { s, sizeof(s) - 1 }
        [FAIL_POINT_OFF] =      FP_TYPE_NM_LEN("off"),
        [FAIL_POINT_PANIC] =    FP_TYPE_NM_LEN("panic"),
        [FAIL_POINT_RETURN] =   FP_TYPE_NM_LEN("return"),
        [FAIL_POINT_BREAK] =    FP_TYPE_NM_LEN("break"),
        [FAIL_POINT_PRINT] =    FP_TYPE_NM_LEN("print"),
        [FAIL_POINT_SLEEP] =    FP_TYPE_NM_LEN("sleep"),
        [FAIL_POINT_PAUSE] =    FP_TYPE_NM_LEN("pause"),
        [FAIL_POINT_YIELD] =    FP_TYPE_NM_LEN("yield"),
        [FAIL_POINT_DELAY] =    FP_TYPE_NM_LEN("delay"),
};

#define FE_COUNT_UNTRACKED (INT_MIN)

/**
 * Internal structure tracking a single term of a complete failpoint.
 * @ingroup failpoint_private
 */
struct fail_point_entry {
        volatile bool   fe_stale;
        enum fail_point_t       fe_type;        /**< type of entry */
        int             fe_arg;         /**< argument to type (e.g. return value) */
        int             fe_prob;        /**< likelihood of firing in millionths */
        int32_t         fe_count;       /**< number of times to fire; FE_COUNT_UNTRACKED means no limit */
        pid_t           fe_pid;         /**< only fail for this process */
        struct fail_point       *fe_parent;     /**< backpointer to fp */
        TAILQ_ENTRY(fail_point_entry)   fe_entries; /**< next entry ptr */
};

struct fail_point_setting {
        STAILQ_ENTRY(fail_point_setting) fs_garbage_link;
        struct fail_point_entry_queue fp_entry_queue;
        struct fail_point *fs_parent;
        struct mtx feq_mtx; /* Gives fail_point_pause something to do.  */
};

/**
 * Defines stating the equivalent of probability one (100%)
 */
enum {
        PROB_MAX = 1000000,     /* probability between zero and this number */
        PROB_DIGITS = 6         /* number of zeros in above number */
};
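
/**
 * For example, a "2.5%" term is stored as fe_prob = 25000: the entry
 * fires on 25000 out of every PROB_MAX (one million) evaluations.
 */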

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *fail_point_setting_get_ref(
        struct fail_point *fp);
/* Release a ref on an fp_setting */
static inline void fail_point_setting_release_ref(struct fail_point *fp);
/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *fail_point_setting_new(struct
        fail_point *);
/* Free a struct fail_point_setting */
static void fail_point_setting_destroy(struct fail_point_setting *fp_setting);
/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *fail_point_entry_new(struct
        fail_point_setting *);
/* Free a struct fail_point_entry */
static void fail_point_entry_destroy(struct fail_point_entry *fp_entry);
/* Append fp setting to garbage list */
static inline void fail_point_setting_garbage_append(
        struct fail_point_setting *fp_setting);
/* Swap fp's setting with fp_setting_new */
static inline struct fail_point_setting *
        fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new);
/* Free up any zero-ref setting in the garbage queue */
static void fail_point_garbage_collect(void);
/* If this fail point's setting is empty, swap it out to NULL. */
static inline void fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting);

bool
fail_point_is_off(struct fail_point *fp)
{
        bool return_val;
        struct fail_point_setting *fp_setting;
        struct fail_point_entry *ent;

        return_val = true;

        fp_setting = fail_point_setting_get_ref(fp);
        if (fp_setting != NULL) {
                TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue,
                    fe_entries) {
                        if (!ent->fe_stale) {
                                return_val = false;
                                break;
                        }
                }
        }
        fail_point_setting_release_ref(fp);

        return (return_val);
}

/* Allocate and initialize a struct fail_point_setting */
static struct fail_point_setting *
fail_point_setting_new(struct fail_point *fp)
{
        struct fail_point_setting *fs_new;

        fs_new = fs_malloc();
        fs_new->fs_parent = fp;
        TAILQ_INIT(&fs_new->fp_entry_queue);
        mtx_init(&fs_new->feq_mtx, "fail point entries", NULL, MTX_SPIN);

        fail_point_setting_garbage_append(fs_new);

        return (fs_new);
}

/* Free a struct fail_point_setting */
static void
fail_point_setting_destroy(struct fail_point_setting *fp_setting)
{
        struct fail_point_entry *ent;

        while (!TAILQ_EMPTY(&fp_setting->fp_entry_queue)) {
                ent = TAILQ_FIRST(&fp_setting->fp_entry_queue);
                TAILQ_REMOVE(&fp_setting->fp_entry_queue, ent, fe_entries);
                fail_point_entry_destroy(ent);
        }

        fs_free(fp_setting);
}

/* Allocate and initialize a struct fail_point_entry */
static struct fail_point_entry *
fail_point_entry_new(struct fail_point_setting *fp_setting)
{
        struct fail_point_entry *fp_entry;

        fp_entry = fp_malloc(sizeof(struct fail_point_entry),
                M_WAITOK | M_ZERO);
        fp_entry->fe_parent = fp_setting->fs_parent;
        fp_entry->fe_prob = PROB_MAX;
        fp_entry->fe_pid = NO_PID;
        fp_entry->fe_count = FE_COUNT_UNTRACKED;
        TAILQ_INSERT_TAIL(&fp_setting->fp_entry_queue, fp_entry,
                fe_entries);

        return (fp_entry);
}

/* Free a struct fail_point_entry */
static void
fail_point_entry_destroy(struct fail_point_entry *fp_entry)
{

        fp_free(fp_entry);
}

/* Get a ref on an fp's fp_setting */
static inline struct fail_point_setting *
fail_point_setting_get_ref(struct fail_point *fp)
{
        struct fail_point_setting *fp_setting;

        /* Invariant: if we have a ref, our pointer to fp_setting is safe */
        atomic_add_acq_32(&fp->fp_ref_cnt, 1);
        fp_setting = fp->fp_setting;

        return (fp_setting);
}

/* Release a ref on an fp_setting */
static inline void
fail_point_setting_release_ref(struct fail_point *fp)
{

        KASSERT(fp->fp_ref_cnt > 0, ("Attempting to deref w/no refs"));
        atomic_subtract_rel_32(&fp->fp_ref_cnt, 1);
}

/* Append fp setting to the garbage list */
static inline void
fail_point_setting_garbage_append(struct fail_point_setting *fp_setting)
{

        mtx_lock_spin(&mtx_garbage_list);
        STAILQ_INSERT_TAIL(&fp_setting_garbage, fp_setting,
                fs_garbage_link);
        mtx_unlock_spin(&mtx_garbage_list);
}

/* Swap fp's setting with fp_setting_new */
static struct fail_point_setting *
fail_point_swap_settings(struct fail_point *fp,
        struct fail_point_setting *fp_setting_new)
{
        struct fail_point_setting *fp_setting_old;

        fp_setting_old = fp->fp_setting;
        fp->fp_setting = fp_setting_new;

        return (fp_setting_old);
}

static inline void
fail_point_eval_swap_out(struct fail_point *fp,
        struct fail_point_setting *fp_setting)
{

        /* We may have already been swapped out and replaced; ignore. */
        if (fp->fp_setting == fp_setting)
                fail_point_swap_settings(fp, NULL);
}

/* Free up any zero-ref settings in the garbage queue */
static void
fail_point_garbage_collect(void)
{
        struct fail_point_setting *fs_current, *fs_next;
        struct fail_point_setting_garbage fp_ents_free_list;

        /**
         * Transfer the settings to be freed to fp_ents_free_list while
         * holding the spin mutex, then free them after dropping the lock.
         * This avoids triggering witness due to sleepable mutexes in the
         * memory allocator.
         */
        STAILQ_INIT(&fp_ents_free_list);

        mtx_lock_spin(&mtx_garbage_list);
        STAILQ_FOREACH_SAFE(fs_current, &fp_setting_garbage, fs_garbage_link,
            fs_next) {
                if (fs_current->fs_parent->fp_setting != fs_current &&
                        fs_current->fs_parent->fp_ref_cnt == 0) {
                        STAILQ_REMOVE(&fp_setting_garbage, fs_current,
                                fail_point_setting, fs_garbage_link);
                        STAILQ_INSERT_HEAD(&fp_ents_free_list, fs_current,
                                fs_garbage_link);
                }
        }
        mtx_unlock_spin(&mtx_garbage_list);

        STAILQ_FOREACH_SAFE(fs_current, &fp_ents_free_list, fs_garbage_link,
                fs_next)
                fail_point_setting_destroy(fs_current);
}

/* Drain out all refs from this fail point */
static inline void
fail_point_drain(struct fail_point *fp, int expected_ref)
{
        struct fail_point_setting *entries;

        entries = fail_point_swap_settings(fp, NULL);
        /**
         * We have unpaused all threads, so we will wait no longer
         * than the time taken for the longest remaining sleep, or
         * the length of time of a long-running code block.
         */
        while (fp->fp_ref_cnt > expected_ref) {
                wakeup(FP_PAUSE_CHANNEL(fp));
                tsleep(&fp, PWAIT, "fail_point_drain", hz / 100);
        }
        if (fp->fp_callout)
                callout_drain(fp->fp_callout);
        fail_point_swap_settings(fp, entries);
}

static inline void
fail_point_pause(struct fail_point *fp, enum fail_point_return_code *pret,
        struct mtx *mtx_sleep)
{

        if (fp->fp_pre_sleep_fn)
                fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

        msleep_spin(FP_PAUSE_CHANNEL(fp), mtx_sleep, "failpt", 0);

        if (fp->fp_post_sleep_fn)
                fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
}

static inline void
fail_point_sleep(struct fail_point *fp, int msecs,
        enum fail_point_return_code *pret)
{
        int timo;

        /* Convert from millisecs to ticks, rounding up */
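        /* (e.g., msecs = 15 at hz = 100 gives howmany(1500, 1000) = 2 ticks) */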
        timo = howmany((int64_t)msecs * hz, 1000L);

        if (timo > 0) {
                if (!(fp->fp_flags & FAIL_POINT_USE_TIMEOUT_PATH)) {
                        if (fp->fp_pre_sleep_fn)
                                fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

                        tsleep(FP_SLEEP_CHANNEL(fp), PWAIT, "failpt", timo);

                        if (fp->fp_post_sleep_fn)
                                fp->fp_post_sleep_fn(fp->fp_post_sleep_arg);
                } else {
                        if (fp->fp_pre_sleep_fn)
                                fp->fp_pre_sleep_fn(fp->fp_pre_sleep_arg);

                        callout_reset(fp->fp_callout, timo,
                            fp->fp_post_sleep_fn, fp->fp_post_sleep_arg);
                        *pret = FAIL_POINT_RC_QUEUED;
                }
        }
}

static char *parse_fail_point(struct fail_point_setting *, char *);
static char *parse_term(struct fail_point_setting *, char *);
static char *parse_number(int *out_units, int *out_decimal, char *);
static char *parse_type(struct fail_point_entry *, char *);

/**
 * Initialize a fail_point.  The name is formed in a printf-like fashion
 * from "fmt" and subsequent arguments.  This function is generally used
 * for custom failpoints located at odd places in the sysctl tree, and is
 * not explicitly needed for standard in-line-declared failpoints.
 *
 * @ingroup failpoint
 */
void
fail_point_init(struct fail_point *fp, const char *fmt, ...)
{
        va_list ap;
        char *name;
        int n;

        fp->fp_setting = NULL;
        fp->fp_flags = 0;

        /* Figure out the size of the name. */
        va_start(ap, fmt);
        n = vsnprintf(NULL, 0, fmt, ap);
        va_end(ap);

        /* Allocate the name and fill it in. */
        name = fp_malloc(n + 1, M_WAITOK);
        if (name != NULL) {
                va_start(ap, fmt);
                vsnprintf(name, n + 1, fmt, ap);
                va_end(ap);
        }
        fp->fp_name = name;
        fp->fp_location = "";
        fp->fp_flags |= FAIL_POINT_DYNAMIC_NAME;
        fp->fp_pre_sleep_fn = NULL;
        fp->fp_pre_sleep_arg = NULL;
        fp->fp_post_sleep_fn = NULL;
        fp->fp_post_sleep_arg = NULL;
}
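
/**
 * Usage sketch (hypothetical names): a driver wanting one failpoint per
 * device instance might pair this with fail_point_destroy(), exposing
 * the point through an OID whose handler is fail_point_sysctl() below:
 *
 *      fail_point_init(&sc->sc_io_fp, "dev_%d_io_error", unit);
 *      ...
 *      fail_point_destroy(&sc->sc_io_fp);
 */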

void
fail_point_alloc_callout(struct fail_point *fp)
{

        /**
         * This assumes that calls to fail_point_use_timeout_path()
         * will not race.
         */
        if (fp->fp_callout != NULL)
                return;
        fp->fp_callout = fp_malloc(sizeof(*fp->fp_callout), M_WAITOK);
        callout_init(fp->fp_callout, CALLOUT_MPSAFE);
}

/**
 * Free the resources held by a fail_point, and wake any paused threads.
 * Thou shalt not allow threads to hit this fail point after you enter this
 * function, nor shall you call this multiple times for a given fp.
 * @ingroup failpoint
 */
void
fail_point_destroy(struct fail_point *fp)
{

        fail_point_drain(fp, 0);

        if ((fp->fp_flags & FAIL_POINT_DYNAMIC_NAME) != 0) {
                fp_free(__DECONST(void *, fp->fp_name));
                fp->fp_name = NULL;
        }
        fp->fp_flags = 0;
        if (fp->fp_callout) {
                fp_free(fp->fp_callout);
                fp->fp_callout = NULL;
        }

        sx_xlock(&sx_fp_set);
        fail_point_garbage_collect();
        sx_xunlock(&sx_fp_set);
}

/**
 * This does the real work of evaluating a fail point.  If the fail point
 * tells us to return a value, this function returns FAIL_POINT_RC_RETURN
 * and fills in 'return_value' (return_value is allowed to be null).  If
 * the fail point tells us to panic, we never return.  Otherwise we just
 * return FAIL_POINT_RC_CONTINUE after doing some work, which means
 * "keep going".
 */
enum fail_point_return_code
fail_point_eval_nontrivial(struct fail_point *fp, int *return_value)
{
        bool execute = false;
        struct fail_point_entry *ent;
        struct fail_point_setting *fp_setting;
        enum fail_point_return_code ret;
        int cont;
        int count;
        int msecs;
        int usecs;

        ret = FAIL_POINT_RC_CONTINUE;
        cont = 0; /* don't continue by default */

        fp_setting = fail_point_setting_get_ref(fp);
        if (fp_setting == NULL)
                goto abort;

        TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
                if (ent->fe_stale)
                        continue;

                if (ent->fe_prob < PROB_MAX &&
                    ent->fe_prob < random() % PROB_MAX)
                        continue;

                if (ent->fe_pid != NO_PID && ent->fe_pid != curproc->p_pid)
                        continue;

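                /*
                 * Claim one firing atomically: concurrently evaluating
                 * threads race to CAS fe_count down by one, and only a
                 * winner executes the entry; whoever takes the count to
                 * zero marks the entry stale.
                 */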
                if (ent->fe_count != FE_COUNT_UNTRACKED) {
                        count = ent->fe_count;
                        while (count > 0) {
                                if (atomic_cmpset_32(&ent->fe_count, count, count - 1)) {
                                        count--;
                                        execute = true;
                                        break;
                                }
                                count = ent->fe_count;
                        }
                        if (!execute)
                                /* We lost the race; consider the entry stale and bail now */
                                continue;
                        if (count == 0)
                                ent->fe_stale = true;
                }

                switch (ent->fe_type) {
                case FAIL_POINT_PANIC:
                        panic("fail point %s panicking", fp->fp_name);
                        /* NOTREACHED */

                case FAIL_POINT_RETURN:
                        if (return_value != NULL)
                                *return_value = ent->fe_arg;
                        ret = FAIL_POINT_RC_RETURN;
                        break;

                case FAIL_POINT_BREAK:
                        printf("fail point %s breaking to debugger\n",
                                fp->fp_name);
                        breakpoint();
                        break;

                case FAIL_POINT_PRINT:
                        printf("fail point %s executing\n", fp->fp_name);
                        cont = ent->fe_arg;
                        break;

                case FAIL_POINT_SLEEP:
                        msecs = ent->fe_arg;
                        if (msecs)
                                fail_point_sleep(fp, msecs, &ret);
                        break;

                case FAIL_POINT_PAUSE:
                        /**
                         * Pausing is inherently strange with multiple
                         * entries given our design, because some entries
                         * can be unreachable: in a setting like
                         * pause->return, the return entry can never be
                         * reached.  The sysctl layer therefore truncates
                         * all entries after a pause.
                         */
                        mtx_lock_spin(&fp_setting->feq_mtx);
                        fail_point_pause(fp, &ret, &fp_setting->feq_mtx);
                        mtx_unlock_spin(&fp_setting->feq_mtx);
                        break;

                case FAIL_POINT_YIELD:
                        kern_yield(PRI_UNCHANGED);
                        break;

                case FAIL_POINT_DELAY:
                        usecs = ent->fe_arg;
                        DELAY(usecs);
                        break;

                default:
                        break;
                }

                if (cont == 0)
                        break;
        }

        if (fail_point_is_off(fp))
                fail_point_eval_swap_out(fp, fp_setting);

abort:
        fail_point_setting_release_ref(fp);

        return (ret);
}
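
/**
 * Note on chaining, as implemented above: evaluation stops after the
 * first entry that fires unless that entry is a print with a nonzero
 * argument.  For example, "print(1)->return(5)" prints and then returns
 * 5, while "print->return(5)" stops after printing.
 */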

/**
 * Translate internal fail_point structure into human-readable text.
 */
static void
fail_point_get(struct fail_point *fp, struct sbuf *sb,
        bool verbose)
{
        struct fail_point_entry *ent;
        struct fail_point_setting *fp_setting;
        struct fail_point_entry *fp_entry_cpy;
        int cnt_sleeping;
        int idx;
        int printed_entry_count;

        cnt_sleeping = 0;
        idx = 0;
        printed_entry_count = 0;

        fp_entry_cpy = fp_malloc(sizeof(struct fail_point_entry) *
                (FP_MAX_ENTRY_COUNT + 1), M_WAITOK);

        fp_setting = fail_point_setting_get_ref(fp);

        if (fp_setting != NULL) {
                TAILQ_FOREACH(ent, &fp_setting->fp_entry_queue, fe_entries) {
                        if (ent->fe_stale)
                                continue;

                        KASSERT(printed_entry_count < FP_MAX_ENTRY_COUNT,
                                ("FP entry list larger than allowed"));

                        fp_entry_cpy[printed_entry_count] = *ent;
                        ++printed_entry_count;
                }
        }
        fail_point_setting_release_ref(fp);

        /* This is our equivalent of a NULL terminator */
        fp_entry_cpy[printed_entry_count].fe_type = FAIL_POINT_INVALID;

        while (idx < printed_entry_count) {
                ent = &fp_entry_cpy[idx];
                ++idx;
                if (ent->fe_prob < PROB_MAX) {
                        int decimal = ent->fe_prob % (PROB_MAX / 100);
                        int units = ent->fe_prob / (PROB_MAX / 100);
                        sbuf_printf(sb, "%d", units);
                        if (decimal) {
                                int digits = PROB_DIGITS - 2;
                                while (!(decimal % 10)) {
                                        digits--;
                                        decimal /= 10;
                                }
                                sbuf_printf(sb, ".%0*d", digits, decimal);
                        }
                        sbuf_printf(sb, "%%");
                }
                if (ent->fe_count >= 0)
                        sbuf_printf(sb, "%d*", ent->fe_count);
                sbuf_printf(sb, "%s", fail_type_strings[ent->fe_type].name);
                if (ent->fe_arg)
                        sbuf_printf(sb, "(%d)", ent->fe_arg);
                if (ent->fe_pid != NO_PID)
                        sbuf_printf(sb, "[pid %d]", ent->fe_pid);
                if (TAILQ_NEXT(ent, fe_entries))
                        sbuf_printf(sb, "->");
        }
        if (!printed_entry_count)
                sbuf_printf(sb, "off");

        fp_free(fp_entry_cpy);
        if (verbose) {
#ifdef STACK
                /*
                 * Print the stacks of any threads asleep on this fail
                 * point.  queue=0 is the sleepqueue index used by msleep
                 * when putting threads to sleep.
                 */
                sbuf_printf(sb, "\nsleeping_thread_stacks = {\n");
                sleepq_sbuf_print_stacks(sb, FP_SLEEP_CHANNEL(fp), 0,
                        &cnt_sleeping);

                sbuf_printf(sb, "},\n");
#endif
                sbuf_printf(sb, "sleeping_thread_count = %d,\n",
                        cnt_sleeping);

#ifdef STACK
                sbuf_printf(sb, "paused_thread_stacks = {\n");
                sleepq_sbuf_print_stacks(sb, FP_PAUSE_CHANNEL(fp), 0,
                        &cnt_sleeping);

                sbuf_printf(sb, "},\n");
#endif
                sbuf_printf(sb, "paused_thread_count = %d\n",
                        cnt_sleeping);
        }
}

/**
 * Set an internal fail_point structure from a human-readable failpoint string
 * in a lock-safe manner.
 */
static int
fail_point_set(struct fail_point *fp, char *buf)
{
        struct fail_point_entry *ent, *ent_next;
        struct fail_point_setting *entries;
        bool should_wake_paused;
        bool should_truncate;
        int error;

        error = 0;
        should_wake_paused = false;
        should_truncate = false;

        /* Parse new entries. */
        /**
         * The ref protects our newly allocated setting from being
         * garbage collected before we link it to the fail point.
         */
        fail_point_setting_get_ref(fp);
        entries = fail_point_setting_new(fp);
        if (parse_fail_point(entries, buf) == NULL) {
                STAILQ_REMOVE(&fp_setting_garbage, entries,
                        fail_point_setting, fs_garbage_link);
                fail_point_setting_destroy(entries);
                error = EINVAL;
                goto end;
        }

        /**
         * Prune the newly parsed entries.  Discard entries which can
         * never fire (zero probability, or a hit count of 0).
         * If 'off' is present with no hit count set, all entries after
         * it are discarded, since they are unreachable.
         */
        TAILQ_FOREACH_SAFE(ent, &entries->fp_entry_queue, fe_entries, ent_next) {
                if (ent->fe_prob == 0 || ent->fe_count == 0) {
                        printf("Discarding entry which cannot execute %s\n",
                                fail_type_strings[ent->fe_type].name);
                        TAILQ_REMOVE(&entries->fp_entry_queue, ent,
                                fe_entries);
                        fp_free(ent);
                        continue;
                } else if (should_truncate) {
                        printf("Discarding unreachable entry %s\n",
                                fail_type_strings[ent->fe_type].name);
                        TAILQ_REMOVE(&entries->fp_entry_queue, ent,
                                fe_entries);
                        fp_free(ent);
                        continue;
                }

                if (ent->fe_type == FAIL_POINT_OFF) {
                        should_wake_paused = true;
                        if (ent->fe_count == FE_COUNT_UNTRACKED) {
                                should_truncate = true;
                                TAILQ_REMOVE(&entries->fp_entry_queue, ent,
                                        fe_entries);
                                fp_free(ent);
                        }
                } else if (ent->fe_type == FAIL_POINT_PAUSE) {
                        should_truncate = true;
                } else if (ent->fe_type == FAIL_POINT_SLEEP && (fp->fp_flags &
                        FAIL_POINT_NONSLEEPABLE)) {
                        /**
                         * If this fail point is annotated as being in a
                         * non-sleepable ctx, convert sleep to delay and
                         * convert the msec argument to usecs.
                         */
                        printf("Sleep call request on fail point in "
                                "non-sleepable context; using delay instead "
                                "of sleep\n");
                        ent->fe_type = FAIL_POINT_DELAY;
                        ent->fe_arg *= 1000;
                }
        }

        if (TAILQ_EMPTY(&entries->fp_entry_queue)) {
                entries = fail_point_swap_settings(fp, NULL);
                if (entries != NULL)
                        wakeup(FP_PAUSE_CHANNEL(fp));
        } else {
                if (should_wake_paused)
                        wakeup(FP_PAUSE_CHANNEL(fp));
                fail_point_swap_settings(fp, entries);
        }

end:
#ifdef IWARNING
        if (error)
                IWARNING("Failed to set %s %s to %s",
                    fp->fp_name, fp->fp_location, buf);
        else
                INOTICE("Set %s %s to %s",
                    fp->fp_name, fp->fp_location, buf);
#endif /* IWARNING */

        fail_point_setting_release_ref(fp);
        return (error);
}

#define MAX_FAIL_POINT_BUF      1023

/**
 * Handle kernel failpoint set/get.
 */
int
fail_point_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct fail_point *fp;
        char *buf;
        struct sbuf sb, *sb_check;
        int error;

        buf = NULL;
        error = 0;
        fp = arg1;

        sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
        if (sb_check != &sb)
                return (ENOMEM);

        sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

        /* Setting */
        /**
         * Lock protects any new entries from being garbage collected before we
         * can link them to the fail point.
         */
        sx_xlock(&sx_fp_set);
        if (req->newptr) {
                if (req->newlen > MAX_FAIL_POINT_BUF) {
                        error = EINVAL;
                        sx_xunlock(&sx_fp_set);
                        goto out;
                }

                buf = fp_malloc(req->newlen + 1, M_WAITOK);

                error = SYSCTL_IN(req, buf, req->newlen);
                if (error) {
                        sx_xunlock(&sx_fp_set);
                        goto out;
                }
                buf[req->newlen] = '\0';

                error = fail_point_set(fp, buf);
        }

        fail_point_garbage_collect();
        sx_xunlock(&sx_fp_set);

        /* Retrieving. */
        fail_point_get(fp, &sb, false);

out:
        sbuf_finish(&sb);
        sbuf_delete(&sb);

        if (buf)
                fp_free(buf);

        return (error);
}
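
/**
 * Illustrative userland interaction with the handler above, using the
 * test failpoint defined at the end of this file:
 *
 *      # sysctl debug.fail_point.test_fail_point='2%return(5)->print'
 *      # sysctl debug.fail_point.test_fail_point
 *      debug.fail_point.test_fail_point: 2%return(5)->print
 */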

int
fail_point_sysctl_status(SYSCTL_HANDLER_ARGS)
{
        struct fail_point *fp;
        struct sbuf sb, *sb_check;

        fp = arg1;

        sb_check = sbuf_new(&sb, NULL, 1024, SBUF_AUTOEXTEND);
        if (sb_check != &sb)
                return (ENOMEM);

        sbuf_set_drain(&sb, (sbuf_drain_func *)fail_sysctl_drain_func, req);

        /* Retrieving. */
        fail_point_get(fp, &sb, true);

        sbuf_finish(&sb);
        sbuf_delete(&sb);

        /**
         * The lock serializes the garbage collector against setters
         * that are still linking new entries to their fail points.
         */
        sx_xlock(&sx_fp_set);
        fail_point_garbage_collect();
        sx_xunlock(&sx_fp_set);

        return (0);
}

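/**
 * sbuf drain callback used by the handlers above: pushes buffered bytes
 * to userland via SYSCTL_OUT().  A negative return value tells the sbuf
 * layer to abort the drain, so ENOMEM (out of space in the request's
 * output buffer) is mapped to -1; otherwise all 'len' bytes are reported
 * as consumed.
 */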
int
fail_sysctl_drain_func(void *sysctl_args, const char *buf, int len)
{
        struct sysctl_req *sa;
        int error;

        sa = sysctl_args;

        error = SYSCTL_OUT(sa, buf, len);

        if (error == ENOMEM)
                return (-1);
        else
                return (len);
}

/**
 * Internal helper function to translate a human-readable failpoint string
 * into an internally-parsable fail_point structure.
 */
static char *
parse_fail_point(struct fail_point_setting *ents, char *p)
{
        /*  <fail_point> ::
         *      <term> ( "->" <term> )*
         */
        uint8_t term_count;

        term_count = 1;

        p = parse_term(ents, p);
        if (p == NULL)
                return (NULL);

        while (*p != '\0') {
                term_count++;
                if (p[0] != '-' || p[1] != '>' ||
                        (p = parse_term(ents, p+2)) == NULL ||
                        term_count > FP_MAX_ENTRY_COUNT)
                        return (NULL);
        }
        return (p);
}
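
/**
 * Some strings accepted by this grammar (illustrative):
 *
 *      "off"
 *      "return(5)"
 *      "2.5%return(5)->sleep(100)"
 *      "100*return(35)[pid 1234]"
 */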

/**
 * Internal helper function to parse an individual term from a failpoint.
 */
static char *
parse_term(struct fail_point_setting *ents, char *p)
{
        struct fail_point_entry *ent;

        ent = fail_point_entry_new(ents);

        /*
         * <term> ::
         *     ( (<float> "%") | (<integer> "*" ) )*
         *     <type>
         *     [ "(" <integer> ")" ]
         *     [ "[pid " <integer> "]" ]
         */

        /* ( (<float> "%") | (<integer> "*" ) )* */
        while (isdigit(*p) || *p == '.') {
                int units, decimal;

                p = parse_number(&units, &decimal, p);
                if (p == NULL)
                        return (NULL);

                if (*p == '%') {
                        if (units > 100) /* prevent overflow early */
                                units = 100;
                        ent->fe_prob = units * (PROB_MAX / 100) + decimal;
                        if (ent->fe_prob > PROB_MAX)
                                ent->fe_prob = PROB_MAX;
                } else if (*p == '*') {
                        if (!units || units < 0 || decimal)
                                return (NULL);
                        ent->fe_count = units;
                } else
                        return (NULL);
                p++;
        }

        /* <type> */
        p = parse_type(ent, p);
        if (p == NULL)
                return (NULL);
        if (*p == '\0')
                return (p);

        /* [ "(" <integer> ")" ] */
        if (*p != '(')
                return (p);
        p++;
        if (!isdigit(*p) && *p != '-')
                return (NULL);
        ent->fe_arg = strtol(p, &p, 0);
        if (*p++ != ')')
                return (NULL);

        /* [ "[pid " <integer> "]" ] */
#define PID_STRING "[pid "
        if (strncmp(p, PID_STRING, sizeof(PID_STRING) - 1) != 0)
                return (p);
        p += sizeof(PID_STRING) - 1;
        if (!isdigit(*p))
                return (NULL);
        ent->fe_pid = strtol(p, &p, 0);
        if (*p++ != ']')
                return (NULL);

        return (p);
}

/**
 * Internal helper function to parse a numeric for a failpoint term.
 */
static char *
parse_number(int *out_units, int *out_decimal, char *p)
{
        char *old_p;

        /**
         *  <number> ::
         *      <integer> [ "." <integer> ] |
         *      "." <integer>
         */

        /* whole part */
        old_p = p;
        *out_units = strtol(p, &p, 10);
        if (p == old_p && *p != '.')
                return (NULL);

        /* fractional part */
        *out_decimal = 0;
        if (*p == '.') {
                int digits = 0;
                p++;
                while (isdigit(*p)) {
                        int digit = *p - '0';
                        if (digits < PROB_DIGITS - 2)
                                *out_decimal = *out_decimal * 10 + digit;
                        else if (digits == PROB_DIGITS - 2 && digit >= 5)
                                (*out_decimal)++;
                        digits++;
                        p++;
                }
                if (!digits) /* need at least one digit after '.' */
                        return (NULL);
                while (digits++ < PROB_DIGITS - 2) /* add implicit zeros */
                        *out_decimal *= 10;
        }

        return (p); /* success */
}
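
/**
 * Worked example: parse_number("2.5", ...) yields units = 2 and
 * decimal = 5000 (the fractional part scaled to millionths of the
 * whole), so a "2.5%" term becomes fe_prob = 2 * (PROB_MAX / 100) +
 * 5000 = 25000 in parse_term() above.
 */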

/**
 * Internal helper function to parse an individual type for a failpoint term.
 */
static char *
parse_type(struct fail_point_entry *ent, char *beg)
{
        enum fail_point_t type;
        int len;

        for (type = FAIL_POINT_OFF; type < FAIL_POINT_NUMTYPES; type++) {
                len = fail_type_strings[type].nmlen;
                if (strncmp(fail_type_strings[type].name, beg, len) == 0) {
                        ent->fe_type = type;
                        return (beg + len);
                }
        }
        return (NULL);
}

/* The fail point sysctl tree. */
SYSCTL_NODE(_debug, OID_AUTO, fail_point, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "fail points");

/* Debugging/testing stuff for fail point */
static int
sysctl_test_fail_point(SYSCTL_HANDLER_ARGS)
{

        KFAIL_POINT_RETURN(DEBUG_FP, test_fail_point);
        return (0);
}
SYSCTL_OID(_debug_fail_point, OID_AUTO, test_trigger_fail_point,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    sysctl_test_fail_point, "A",
    "Trigger test fail points");