/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Fault Management Architecture (FMA) Resource and Protocol Support
 *
 * The routines contained herein provide services to support kernel subsystems
 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
 *
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t).  FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators.  Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
 * buffer, useful in constrained contexts like high-level interrupt routines.
 *
 * Protocol Event and FMRI Construction
 *
 * Convenience routines are provided to construct nvlist events according to
 * the FMA Event Protocol and Naming Schema specification for ereports and
 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
 *
 * ENA Manipulation
 *
 * Routines to generate ENA formats 0, 1 and 2 are available as well as
 * routines to increment formats 1 and 2.  Individual fields within the
 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
 * fm_ena_format_get() and fm_ena_generation_get().
 */
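/*
 * Illustrative sketch (not part of the original source): a kernel
 * subsystem might combine these services roughly as follows; pool_guid,
 * vdev_guid and ena stand in for caller-supplied values.
 *
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *	nvlist_t *ereport = fm_nvlist_create(NULL);
 *
 *	fm_fmri_zfs_set(detector, ZFS_SCHEME_VERSION0, pool_guid, vdev_guid);
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example", ena,
 *	    detector, NULL);
 *
 * The finished lists are then handed to zfs_zevent_post(), which takes
 * ownership through its cleanup callback (see below).
 */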
#include <sys/types.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/kstat.h>
#include <sys/zfs_context.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/console.h>
#include <sys/zfs_ioctl.h>
int zfs_zevent_len_max = 0;
int zfs_zevent_cols = 80;
int zfs_zevent_console = 0;

static int zevent_len_cur = 0;
static int zevent_waiters = 0;
static int zevent_flags = 0;

/* Num events rate limited since the last time zfs_zevent_next() was called */
static uint64_t ratelimit_dropped = 0;
/*
 * The EID (Event IDentifier) is used to uniquely tag a zevent when it is
 * posted.  The posted EIDs are monotonically increasing but not persistent.
 * They will be reset to the initial value (1) each time the kernel module is
 * loaded.
 */
static uint64_t zevent_eid = 0;

static kmutex_t zevent_lock;
static list_t zevent_list;
static kcondvar_t zevent_cv;
/*
 * Common fault management kstats to record event generation failures
 */

struct erpt_kstat {
	kstat_named_t	erpt_dropped;		/* num erpts dropped on post */
	kstat_named_t	erpt_set_failed;	/* num erpt set failures */
	kstat_named_t	fmri_set_failed;	/* num fmri set failures */
	kstat_named_t	payload_set_failed;	/* num payload set failures */
	kstat_named_t	erpt_duplicates;	/* num duplicate erpts */
};

static struct erpt_kstat erpt_kstat_data = {
	{ "erpt-dropped", KSTAT_DATA_UINT64 },
	{ "erpt-set-failed", KSTAT_DATA_UINT64 },
	{ "fmri-set-failed", KSTAT_DATA_UINT64 },
	{ "payload-set-failed", KSTAT_DATA_UINT64 },
	{ "erpt-duplicates", KSTAT_DATA_UINT64 }
};

kstat_t *fm_ksp;
/*
 * Formatting utility function for fm_nvprintr.  We attempt to wrap chunks of
 * output so they aren't split across console lines, and return the end column.
 */
static int
fm_printf(int depth, int c, int cols, const char *format, ...)
{
	va_list ap;
	int width;
	char c1;

	va_start(ap, format);
	width = vsnprintf(&c1, sizeof (c1), format, ap);
	va_end(ap);

	if (c + width >= cols) {
		console_printf("\n");
		c = 0;

		if (format[0] != ' ' && depth > 0) {
			console_printf(" ");
			c++;
		}
	}

	va_start(ap, format);
	console_vprintf(format, ap);
	va_end(ap);

	return ((c + width) % cols);
}
/*
 * Recursively print an nvlist in the specified column width and return the
 * column we end up in.  This function is called recursively by fm_nvprint(),
 * below.  We generically format the entire nvpair using hexadecimal
 * integers and strings, and elide any integer arrays.  Arrays are basically
 * used for cache dumps right now, so we suppress them so as not to overwhelm
 * the amount of console output we produce at panic time.  This can be further
 * enhanced as FMA technology grows based upon the needs of consumers.  All
 * FMA telemetry is logged using the dump device transport, so the console
 * output serves only as a fallback in case this procedure is unsuccessful.
 */
static int
fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
{
	nvpair_t *nvp;

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		char *str;
		nvlist_t *cnv;

		if (strcmp(name, FM_CLASS) == 0)
			continue;	/* already printed by caller */

		c = fm_printf(d, c, cols, " %s=", name);
		switch (type) {
		case DATA_TYPE_BOOLEAN:
			c = fm_printf(d + 1, c, cols, " 1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			c = fm_printf(d + 1, c, cols, b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "0x%x", i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			c = fm_printf(d + 1, c, cols, "0x%x", i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "0x%x", i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			c = fm_printf(d + 1, c, cols, "0x%x", i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			c = fm_printf(d + 1, c, cols, "0x%x", i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			c = fm_printf(d + 1, c, cols, "0x%x", i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			c = fm_printf(d + 1, c, cols, "0x%x", i32);
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "0x%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			c = fm_printf(d + 1, c, cols, "0x%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "0x%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			c = fm_printf(d + 1, c, cols, "\"%s\"",
			    str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist(nvp, &cnv);
			c = fm_nvprintr(cnv, d + 1, c, cols);
			c = fm_printf(d + 1, c, cols, " ]");
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++) {
				c = fm_nvprintr(val[i], d + 1, c, cols);
			}
			c = fm_printf(d + 1, c, cols, " ]");
			break;
		}

		case DATA_TYPE_INT8_ARRAY: {
			int8_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_int8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_UINT8_ARRAY: {
			uint8_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_uint8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_INT16_ARRAY: {
			int16_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_int16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_UINT16_ARRAY: {
			uint16_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_uint16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_INT32_ARRAY: {
			int32_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_int32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_UINT32_ARRAY: {
			uint32_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_uint32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_INT64_ARRAY: {
			int64_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_int64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_UINT64_ARRAY: {
			uint64_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_uint64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_STRING_ARRAY:
		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
			c = fm_printf(d + 1, c, cols, "[...]");
			break;

		case DATA_TYPE_UNKNOWN:
		case DATA_TYPE_DONTCARE:
			c = fm_printf(d + 1, c, cols, "<unknown>");
			break;
		}
	}

	return (c);
}
static void
fm_nvprint(nvlist_t *nvl)
{
	char *class;
	int c = 0;

	console_printf("\n");

	if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
		c = fm_printf(0, c, zfs_zevent_cols, "%s", class);

	if (fm_nvprintr(nvl, 0, c, zfs_zevent_cols) != 0)
		console_printf("\n");

	console_printf("\n");
}
static zevent_t *
zfs_zevent_alloc(void)
{
	zevent_t *ev;

	ev = kmem_zalloc(sizeof (zevent_t), KM_SLEEP);

	list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
	    offsetof(zfs_zevent_t, ze_node));
	list_link_init(&ev->ev_node);

	return (ev);
}

static void
zfs_zevent_free(zevent_t *ev)
{
	/* Run provided cleanup callback */
	ev->ev_cb(ev->ev_nvl, ev->ev_detector);

	list_destroy(&ev->ev_ze_list);
	kmem_free(ev, sizeof (zevent_t));
}
static void
zfs_zevent_drain(zevent_t *ev)
{
	zfs_zevent_t *ze;

	ASSERT(MUTEX_HELD(&zevent_lock));
	list_remove(&zevent_list, ev);

	/* Remove references to this event in all private file data */
	while ((ze = list_head(&ev->ev_ze_list)) != NULL) {
		list_remove(&ev->ev_ze_list, ze);
		ze->ze_zevent = NULL;
		ze->ze_dropped++;
	}

	zfs_zevent_free(ev);
}

void
zfs_zevent_drain_all(int *count)
{
	zevent_t *ev;

	mutex_enter(&zevent_lock);
	while ((ev = list_head(&zevent_list)) != NULL)
		zfs_zevent_drain(ev);

	*count = zevent_len_cur;
	zevent_len_cur = 0;
	mutex_exit(&zevent_lock);
}
/*
 * New zevents are inserted at the head.  If the maximum queue
 * length is exceeded a zevent will be drained from the tail.
 * As part of this any user space processes which currently have
 * a reference to this zevent_t in their private data will have
 * this reference set to NULL.
 */
static void
zfs_zevent_insert(zevent_t *ev)
{
	ASSERT(MUTEX_HELD(&zevent_lock));
	list_insert_head(&zevent_list, ev);

	if (zevent_len_cur >= zfs_zevent_len_max)
		zfs_zevent_drain(list_tail(&zevent_list));
	else
		zevent_len_cur++;
}
/*
 * Post a zevent. The cb will be called when nvl and detector are no longer
 * needed, i.e.:
 * - An error happened and a zevent can't be posted. In this case, cb is called
 *   before zfs_zevent_post() returns.
 * - The event is being drained and freed.
 */
int
zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
{
	inode_timespec_t tv;
	int64_t tv_array[2];
	uint64_t eid;
	size_t nvl_size = 0;
	zevent_t *ev;
	int error;

	ASSERT(cb != NULL);

	gethrestime(&tv);
	tv_array[0] = tv.tv_sec;
	tv_array[1] = tv.tv_nsec;

	error = nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2);
	if (error) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		goto out;
	}

	eid = atomic_inc_64_nv(&zevent_eid);
	error = nvlist_add_uint64(nvl, FM_EREPORT_EID, eid);
	if (error) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		goto out;
	}

	error = nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
	if (error) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		goto out;
	}

	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		error = EOVERFLOW;
		goto out;
	}

	if (zfs_zevent_console)
		fm_nvprint(nvl);

	ev = zfs_zevent_alloc();
	if (ev == NULL) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		error = ENOMEM;
		goto out;
	}

	ev->ev_nvl = nvl;
	ev->ev_detector = detector;
	ev->ev_cb = cb;
	ev->ev_eid = eid;

	mutex_enter(&zevent_lock);
	zfs_zevent_insert(ev);
	cv_broadcast(&zevent_cv);
	mutex_exit(&zevent_lock);

out:
	if (error)
		cb(nvl, detector);

	return (error);
}
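/*
 * Example (illustrative only): a caller might post an event like this,
 * releasing nvl and detector in the callback so they are freed whether
 * the post succeeds or fails.  The callback name is hypothetical.
 *
 *	static void
 *	example_zevent_cb(nvlist_t *nvl, nvlist_t *detector)
 *	{
 *		fm_nvlist_destroy(nvl, FM_NVA_FREE);
 *		if (detector != NULL)
 *			fm_nvlist_destroy(detector, FM_NVA_FREE);
 *	}
 *
 *	(void) zfs_zevent_post(nvl, detector, example_zevent_cb);
 */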
void
zfs_zevent_track_duplicate(void)
{
	atomic_inc_64(&erpt_kstat_data.erpt_duplicates.value.ui64);
}

static int
zfs_zevent_minor_to_state(minor_t minor, zfs_zevent_t **ze)
{
	*ze = zfsdev_get_state(minor, ZST_ZEVENT);
	if (*ze == NULL)
		return (SET_ERROR(EBADF));

	return (0);
}

int
zfs_zevent_fd_hold(int fd, minor_t *minorp, zfs_zevent_t **ze)
{
	int error;

	error = zfsdev_getminor(fd, minorp);
	if (error == 0)
		error = zfs_zevent_minor_to_state(*minorp, ze);

	if (error)
		zfs_zevent_fd_rele(fd);

	return (error);
}

void
zfs_zevent_fd_rele(int fd)
{
	zfs_file_put(fd);
}
/*
 * Get the next zevent in the stream and place a copy in 'event'.  This
 * may fail with ENOMEM if the encoded nvlist size exceeds the passed
 * 'event_size'.  In this case the stream pointer is not advanced and
 * 'event_size' is set to the minimum required buffer size.
 */
int
zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
    uint64_t *dropped)
{
	zevent_t *ev;
	size_t size;
	int error = 0;

	mutex_enter(&zevent_lock);
	if (ze->ze_zevent == NULL) {
		/* New stream start at the beginning/tail */
		ev = list_tail(&zevent_list);
		if (ev == NULL) {
			error = ENOENT;
			goto out;
		}
	} else {
		/*
		 * Existing stream continue with the next element and remove
		 * ourselves from the wait queue for the previous element
		 */
		ev = list_prev(&zevent_list, ze->ze_zevent);
		if (ev == NULL) {
			error = ENOENT;
			goto out;
		}
	}

	VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
	if (size > *event_size) {
		*event_size = size;
		error = ENOMEM;
		goto out;
	}

	if (ze->ze_zevent)
		list_remove(&ze->ze_zevent->ev_ze_list, ze);

	ze->ze_zevent = ev;
	list_insert_head(&ev->ev_ze_list, ze);
	(void) nvlist_dup(ev->ev_nvl, event, KM_SLEEP);
	*dropped = ze->ze_dropped;

	/* Include events dropped due to rate limiting */
	*dropped += ratelimit_dropped;
	ratelimit_dropped = 0;

	ze->ze_dropped = 0;
out:
	mutex_exit(&zevent_lock);

	return (error);
}
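/*
 * Example (illustrative only): since ENOMEM updates 'event_size' to the
 * required size without advancing the stream, a consumer can simply
 * retry.  'ze' and the initial size are hypothetical.
 *
 *	uint64_t size = 1024, dropped;
 *	nvlist_t *event;
 *	int error = zfs_zevent_next(ze, &event, &size, &dropped);
 *	if (error == ENOMEM)
 *		error = zfs_zevent_next(ze, &event, &size, &dropped);
 */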
/*
 * Wait in an interruptible state for any new events.
 */
int
zfs_zevent_wait(zfs_zevent_t *ze)
{
	int error = EAGAIN;

	mutex_enter(&zevent_lock);
	zevent_waiters++;

	while (error == EAGAIN) {
		if (zevent_flags & ZEVENT_SHUTDOWN) {
			error = SET_ERROR(ESHUTDOWN);
			break;
		}

		error = cv_wait_sig(&zevent_cv, &zevent_lock);
		if (signal_pending(current)) {
			error = SET_ERROR(EINTR);
			break;
		} else if (!list_is_empty(&zevent_list)) {
			error = 0;
			continue;
		} else {
			error = EAGAIN;
		}
	}

	zevent_waiters--;
	mutex_exit(&zevent_lock);

	return (error);
}
/*
 * The caller may seek to a specific EID by passing that EID.  If the EID
 * is still available in the posted list of events the cursor is positioned
 * there.  Otherwise ENOENT is returned and the cursor is not moved.
 *
 * There are two reserved EIDs which may be passed and will never fail.
 * ZEVENT_SEEK_START positions the cursor at the start of the list, and
 * ZEVENT_SEEK_END positions the cursor at the end of the list.
 */
int
zfs_zevent_seek(zfs_zevent_t *ze, uint64_t eid)
{
	zevent_t *ev;
	int error = 0;

	mutex_enter(&zevent_lock);

	if (eid == ZEVENT_SEEK_START) {
		if (ze->ze_zevent)
			list_remove(&ze->ze_zevent->ev_ze_list, ze);

		ze->ze_zevent = NULL;
		goto out;
	}

	if (eid == ZEVENT_SEEK_END) {
		if (ze->ze_zevent)
			list_remove(&ze->ze_zevent->ev_ze_list, ze);

		ev = list_head(&zevent_list);
		if (ev) {
			ze->ze_zevent = ev;
			list_insert_head(&ev->ev_ze_list, ze);
		} else {
			ze->ze_zevent = NULL;
		}

		goto out;
	}

	for (ev = list_tail(&zevent_list); ev != NULL;
	    ev = list_prev(&zevent_list, ev)) {
		if (ev->ev_eid == eid) {
			if (ze->ze_zevent)
				list_remove(&ze->ze_zevent->ev_ze_list, ze);

			ze->ze_zevent = ev;
			list_insert_head(&ev->ev_ze_list, ze);
			break;
		}
	}

	if (ev == NULL)
		error = ENOENT;

out:
	mutex_exit(&zevent_lock);

	return (error);
}
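/*
 * Example (illustrative only): ZEVENT_SEEK_START rewinds a consumer so
 * the entire queue is replayed, ZEVENT_SEEK_END skips ahead so only new
 * events are seen, and seeking to an EID that has already been drained
 * fails:
 *
 *	(void) zfs_zevent_seek(ze, ZEVENT_SEEK_START);
 *	(void) zfs_zevent_seek(ze, ZEVENT_SEEK_END);
 *	if (zfs_zevent_seek(ze, eid) == ENOENT)
 *		... eid is no longer in the posted list ...
 */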
void
zfs_zevent_init(zfs_zevent_t **zep)
{
	zfs_zevent_t *ze;

	ze = *zep = kmem_zalloc(sizeof (zfs_zevent_t), KM_SLEEP);
	list_link_init(&ze->ze_node);
}

void
zfs_zevent_destroy(zfs_zevent_t *ze)
{
	mutex_enter(&zevent_lock);
	if (ze->ze_zevent)
		list_remove(&ze->ze_zevent->ev_ze_list, ze);
	mutex_exit(&zevent_lock);

	kmem_free(ze, sizeof (zfs_zevent_t));
}
/*
 * Wrappers for FM nvlist allocators
 */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	return (kmem_zalloc(size, KM_SLEEP));
}

static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	kmem_free(buf, size);
}

const nv_alloc_ops_t fm_mem_alloc_ops = {
	.nv_ao_init = NULL,
	.nv_ao_fini = NULL,
	.nv_ao_alloc = i_fm_alloc,
	.nv_ao_free = i_fm_free,
	.nv_ao_reset = NULL
};
/*
 * Create and initialize a new nv_alloc_t for a fixed buffer, buf.  A pointer
 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
 * is returned to indicate that the nv_alloc structure could not be created.
 */
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
	nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

	if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
		kmem_free(nvhdl, sizeof (nv_alloc_t));
		return (NULL);
	}

	return (nvhdl);
}

/*
 * Destroy a previously allocated nv_alloc structure.  The fixed buffer
 * associated with nva must be freed by the caller.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}
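/*
 * Example (illustrative only): building FMA nvlists out of a
 * caller-supplied fixed buffer.  Note that fm_nva_xcreate() itself
 * sleeps, so the nv_alloc_t would be set up ahead of time and only the
 * nvlist operations run in the constrained context.  'buf' is a
 * hypothetical preallocated buffer.
 *
 *	static char buf[ERPT_DATA_SZ];
 *	nv_alloc_t *nva = fm_nva_xcreate(buf, sizeof (buf));
 *	if (nva != NULL) {
 *		nvlist_t *nvl = fm_nvlist_create(nva);
 *		...
 *		fm_nvlist_destroy(nvl, FM_NVA_RETAIN);
 *		fm_nva_xdestroy(nva);
 *	}
 */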
/*
 * Create a new nv list.  A pointer to a new nv list structure is returned
 * upon success or NULL is returned to indicate that the structure could
 * not be created.  The newly created nv list is created and managed by the
 * operations installed in nva.  If nva is NULL, the default FMA nva
 * operations are installed and used.
 *
 * When called from the kernel and nva == NULL, this function must be called
 * from passive kernel context with no locks held that can prevent a
 * sleeping memory allocation from occurring.  Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_xcreate() is supplied.
 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}
/*
 * Destroy a previously allocated nvlist structure.  flag indicates whether
 * or not the associated nva structure should be freed (FM_NVA_FREE) or
 * retained (FM_NVA_RETAIN).  Retaining the nv alloc structure allows
 * it to be re-used for future nvlist creation operations.
 */
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);

	nvlist_free(nvl);

	if (nva != NULL) {
		if (flag == FM_NVA_FREE)
			fm_nva_xdestroy(nva);
	}
}
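/*
 * Example (illustrative only): with the default (nva == NULL) allocator
 * the backing nv_alloc_t is owned by the nvlist, so FM_NVA_FREE releases
 * both the list and its allocator in one call.
 *
 *	nvlist_t *nvl = fm_nvlist_create(NULL);
 *	if (nvl != NULL)
 *		fm_nvlist_destroy(nvl, FM_NVA_FREE);
 */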
static int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		name = va_arg(ap, char *);
	}
	return (ret);
}
void
fm_payload_set(nvlist_t *payload, ...)
{
	int ret;
	const char *name;
	va_list ap;

	va_start(ap, payload);
	name = va_arg(ap, char *);
	ret = i_fm_payload_set(payload, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}
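/*
 * Example (illustrative only): the varargs are consumed as repeating
 * (name, type, value) groups, with an element count preceding array
 * values, and are terminated by a NULL name.  The member names below
 * are hypothetical.
 *
 *	uint64_t lanes[2] = { 0, 1 };
 *	fm_payload_set(payload,
 *	    "example_retries", DATA_TYPE_UINT32, 3,
 *	    "example_lanes", DATA_TYPE_UINT64_ARRAY, 2, lanes,
 *	    NULL);
 */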
/*
 * Set-up and validate the members of an ereport event according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	class			string		ereport
 *	version			uint8_t		0
 *	ena			uint64_t	<ena>
 *	detector		nvlist_t	<detector>
 *	ereport-payload		nvlist_t	<var args>
 *
 * We don't actually add a 'version' member to the payload.  Really,
 * the version quoted to us by our caller is that of the category 1
 * "ereport" event class (and we require FM_EREPORT_VERS0) but
 * the payload version of the actual leaf class event under construction
 * may be something else.  Callers should supply a version in the varargs,
 * or (better) we could take two version arguments - one for the
 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
 * for the leaf class.
 */
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	if (version != FM_EREPORT_VERS0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
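/*
 * Example (illustrative only): the leaf class is prefixed with
 * FM_EREPORT_CLASS ("ereport"), so the hypothetical call below yields
 * an event of class "ereport.fs.example".  Payload varargs follow the
 * detector and are terminated by a NULL name.
 *
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "fs.example",
 *	    fm_ena_generate(0, FM_ENA_FMT1), detector,
 *	    "example_size", DATA_TYPE_UINT64, size,
 *	    NULL);
 */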
/*
 * Set-up and validate the members of an hc fmri according to:
 *
 *	Member name		Type		Value
 *	===================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	hc-name			string		<name>
 *	hc-id			string		<id>
 *
 * Note that auth and hc-id are optional members.
 */

#define	HC_MAXPAIRS	20
#define	HC_MAXNAMELEN	50
static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	return (1);
}
void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);

	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}
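/*
 * Example (illustrative only): each (name, id) vararg pair becomes one
 * hc-name/hc-id element of the hc-list, so the hypothetical call
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 2,
 *	    "motherboard", 0, "cpu", 1);
 *
 * describes the path hc:///motherboard=0/cpu=1.
 */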
void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	nvlist_t **hcl;
	uint_t n;
	int i, j;
	va_list ap;
	char *hcname, *hcid;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	/*
	 * copy the bboard nvpairs to the pairs array
	 */
	if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
	    != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	for (i = 0; i < n; i++) {
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
		    &hcname) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}

	/*
	 * create the pairs from passed in pairs
	 */
	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = n; i < npairs + n; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];
		(void) snprintf(idstr, sizeof (idstr), "%u", id);
		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}
	va_end(ap);

	/*
	 * Create the fmri hc list
	 */
	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
	    npairs + n) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	for (i = 0; i < npairs + n; i++) {
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
	}

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}
}
/*
 * Set-up and validate the members of a dev fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	devpath			string		<devpath>
 *	[devid]			string		<devid>
 *	[target-port-l0id]	string		<target-port-lun0-id>
 *
 * Note that auth and devid are optional members.
 */
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
    const char *devpath, const char *devid, const char *tpl0)
{
	int err = 0;

	if (version != DEV_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
	err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);

	if (auth != NULL) {
		err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth);
	}

	err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);

	if (devid != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);

	if (tpl0 != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);

	if (err)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
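/*
 * Example (illustrative only): a minimal dev fmri carries just the
 * device path; the devid and target-port members may be omitted by
 * passing NULL.  The path below is hypothetical.
 *
 *	fm_fmri_dev_set(fmri, DEV_SCHEME_VERSION0, NULL,
 *	    "/pci@0,0/pci8086,2829@d/disk@1,0", NULL, NULL);
 */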
/*
 * Set-up and validate the members of a cpu fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	cpuid			uint32_t	<cpu_id>
 *	cpumask			uint8_t		<cpu_mask>
 *	serial			uint64_t	<serial_id>
 *
 * Note that auth, cpumask, and serial are optional members.
 */
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
    uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
	uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;

	if (version < CPU_SCHEME_VERSION1) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_CPU) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0)
		atomic_inc_64(failedp);

	if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
		atomic_inc_64(failedp);

	if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
	    *cpu_maskp) != 0)
		atomic_inc_64(failedp);

	if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
	    FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
		atomic_inc_64(failedp);
}
/*
 * Set-up and validate the members of a mem fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>		[optional]
 *	unum			string		<unum>
 *	serial			string		<serial>	[optional*]
 *	offset			uint64_t	<offset>	[optional]
 *
 *	* serial is required if offset is present
 */
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    const char *unum, const char *serial, uint64_t offset)
{
	if (version != MEM_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (!serial && (offset != (uint64_t)-1)) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}

	if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (serial != NULL) {
		if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
		    (char **)&serial, 1) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
		if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
		    FM_FMRI_MEM_OFFSET, offset) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}
void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
    uint64_t vdev_guid)
{
	if (version != ZFS_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (vdev_guid != 0) {
		if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}
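/*
 * Example (illustrative only): a pool-level fmri passes a vdev guid of
 * zero so only the pool member is added, while a vdev-level fmri
 * supplies both guids.  'spa' and 'vdev_guid' are hypothetical.
 *
 *	fm_fmri_zfs_set(fmri, ZFS_SCHEME_VERSION0, spa_guid(spa), 0);
 *	fm_fmri_zfs_set(fmri, ZFS_SCHEME_VERSION0, spa_guid(spa),
 *	    vdev_guid);
 */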
uint64_t
fm_ena_increment(uint64_t ena)
{
	uint64_t new_ena;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
		break;
	case FM_ENA_FMT2:
		new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
		break;
	default:
		new_ena = 0;
	}

	return (new_ena);
}

uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
	uint64_t ena = 0;

	switch (format) {
	case FM_ENA_FMT1:
		if (timestamp) {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((timestamp << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		} else {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((gethrtime() << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		}
		break;
	case FM_ENA_FMT2:
		ena = (uint64_t)((format & ENA_FORMAT_MASK) |
		    ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
		break;
	default:
		break;
	}

	return (ena);
}
uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
	uint64_t ena;

	kpreempt_disable();
	ena = fm_ena_generate_cpu(timestamp, getcpuid(), format);
	kpreempt_enable();

	return (ena);
}

uint64_t
fm_ena_generation_get(uint64_t ena)
{
	uint64_t gen;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
		break;
	case FM_ENA_FMT2:
		gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
		break;
	default:
		gen = 0;
	}

	return (gen);
}

uchar_t
fm_ena_format_get(uint64_t ena)
{
	return (ENA_FORMAT(ena));
}

uint64_t
fm_ena_id_get(uint64_t ena)
{
	uint64_t id;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
		break;
	case FM_ENA_FMT2:
		id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
		break;
	default:
		id = 0;
	}

	return (id);
}

uint64_t
fm_ena_time_get(uint64_t ena)
{
	uint64_t time;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
		break;
	case FM_ENA_FMT2:
		time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
		break;
	default:
		time = 0;
	}

	return (time);
}
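/*
 * Example (illustrative only): fields of a format 1 ENA round-trip
 * through the accessors above, and fm_ena_increment() bumps only the
 * generation field.
 *
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *	ASSERT3U(fm_ena_format_get(ena), ==, FM_ENA_FMT1);
 *	ena = fm_ena_increment(ena);
 *	ASSERT3U(fm_ena_generation_get(ena), ==, 1);
 */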
/*
 * Helper function to increment ereport dropped count.  Used by the event
 * rate limiting code to give feedback to the user about how many events were
 * rate limited by including them in the 'dropped' count.
 */
void
fm_erpt_dropped_increment(void)
{
	atomic_inc_64(&ratelimit_dropped);
}
void
fm_init(void)
{
	zevent_len_cur = 0;
	zevent_flags = 0;

	if (zfs_zevent_len_max == 0)
		zfs_zevent_len_max = ERPT_MAX_ERRS * MAX(max_ncpus, 4);

	/* Initialize zevent allocation and generation kstats */
	fm_ksp = kstat_create("zfs", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (fm_ksp != NULL) {
		fm_ksp->ks_data = &erpt_kstat_data;
		kstat_install(fm_ksp);
	} else {
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
	}

	mutex_init(&zevent_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zevent_list, sizeof (zevent_t),
	    offsetof(zevent_t, ev_node));
	cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL);

	zfs_ereport_init();
}
void
fm_fini(void)
{
	int count;

	zfs_ereport_fini();

	zfs_zevent_drain_all(&count);

	mutex_enter(&zevent_lock);
	cv_broadcast(&zevent_cv);

	zevent_flags |= ZEVENT_SHUTDOWN;
	while (zevent_waiters > 0) {
		mutex_exit(&zevent_lock);
		schedule();
		mutex_enter(&zevent_lock);
	}
	mutex_exit(&zevent_lock);

	cv_destroy(&zevent_cv);
	list_destroy(&zevent_list);
	mutex_destroy(&zevent_lock);

	if (fm_ksp != NULL) {
		kstat_delete(fm_ksp);
		fm_ksp = NULL;
	}
}
#endif /* _KERNEL */
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, INT, ZMOD_RW,
	"Max event queue length");

ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, cols, INT, ZMOD_RW,
	"Max event column width");

ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, console, INT, ZMOD_RW,
	"Log events to the console");