4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
25 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
29 #pragma ident "%Z%%M% %I% %E% SMI"
32 * DTrace - Dynamic Tracing for Solaris
34 * This is the implementation of the Solaris Dynamic Tracing framework
35 * (DTrace). The user-visible interface to DTrace is described at length in
36 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
37 * library, the in-kernel DTrace framework, and the DTrace providers are
38 * described in the block comments in the <sys/dtrace.h> header file. The
39 * internal architecture of DTrace is described in the block comments in the
40 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
41 * implementation very much assume mastery of all of these sources; if one has
42 * an unanswered question about the implementation, one should consult them
45 * The functions here are ordered roughly as follows:
47 * - Probe context functions
48 * - Probe hashing functions
49 * - Non-probe context utility functions
50 * - Matching functions
51 * - Provider-to-Framework API functions
52 * - Probe management functions
53 * - DIF object functions
55 * - Predicate functions
58 * - Enabling functions
60 * - Anonymous enabling functions
61 * - Consumer state functions
64 * - Driver cookbook functions
66 * Each group of functions begins with a block comment labelled the "DTrace
67 * [Group] Functions", allowing one to find each block by searching forward
68 * on capital-f functions.
70 #include <sys/errno.h>
75 #include <sys/modctl.h>
77 #include <sys/systm.h>
80 #include <sys/sunddi.h>
82 #include <sys/cpuvar.h>
85 #include <sys/strsubr.h>
87 #include <sys/sysmacros.h>
88 #include <sys/dtrace_impl.h>
89 #include <sys/atomic.h>
90 #include <sys/cmn_err.h>
92 #include <sys/mutex_impl.h>
93 #include <sys/rwlock_impl.h>
95 #include <sys/ctf_api.h>
97 #include <sys/panic.h>
98 #include <sys/priv_impl.h>
100 #include <sys/policy.h>
102 #include <sys/cred_impl.h>
103 #include <sys/procfs_isa.h>
105 #include <sys/taskq.h>
107 #include <sys/mkdev.h>
110 #include <sys/zone.h>
111 #include <sys/socket.h>
112 #include <netinet/in.h>
114 /* FreeBSD includes: */
116 #include <sys/callout.h>
117 #include <sys/ctype.h>
118 #include <sys/limits.h>
120 #include <sys/kernel.h>
121 #include <sys/malloc.h>
122 #include <sys/sysctl.h>
123 #include <sys/lock.h>
124 #include <sys/mutex.h>
126 #include <sys/dtrace_bsd.h>
127 #include <netinet/in.h>
128 #include "dtrace_cddl.h"
129 #include "dtrace_debug.c"
133 * DTrace Tunable Variables
135 * The following variables may be tuned by adding a line to /etc/system that
136 * includes both the name of the DTrace module ("dtrace") and the name of the
137 * variable. For example:
139 * set dtrace:dtrace_destructive_disallow = 1
141 * In general, the only variables that one should be tuning this way are those
142 * that affect system-wide DTrace behavior, and for which the default behavior
143 * is undesirable. Most of these variables are tunable on a per-consumer
144 * basis using DTrace options, and need not be tuned on a system-wide basis.
145 * When tuning these variables, avoid pathological values; while some attempt
146 * is made to verify the integrity of these variables, they are not considered
147 * part of the supported interface to DTrace, and they are therefore not
148 * checked comprehensively. Further, these variables should not be tuned
149 * dynamically via "mdb -kw" or other means; they should only be tuned via /etc/system.
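/*
 * As an illustrative sketch of the per-consumer alternative mentioned above
 * (not an exhaustive mapping of tunables to options), a consumer can set the
 * corresponding DTrace option instead of tuning the module-wide default; for
 * example, to raise the string limit for a single D program rather than
 * changing dtrace_strsize_default:
 *
 *	#pragma D option strsize=512
 *
 * or, equivalently, on the dtrace command line:
 *
 *	# dtrace -x strsize=512 -n 'syscall::open:entry { trace(copyinstr(arg0)); }'
 */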
152 int dtrace_destructive_disallow = 0;
153 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
154 size_t dtrace_difo_maxsize = (256 * 1024);
155 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
156 size_t dtrace_global_maxsize = (16 * 1024);
157 size_t dtrace_actions_max = (16 * 1024);
158 size_t dtrace_retain_max = 1024;
159 dtrace_optval_t dtrace_helper_actions_max = 32;
160 dtrace_optval_t dtrace_helper_providers_max = 32;
161 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
162 size_t dtrace_strsize_default = 256;
163 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
164 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
165 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
166 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
167 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
168 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
169 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
170 dtrace_optval_t dtrace_nspec_default = 1;
171 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
172 dtrace_optval_t dtrace_stackframes_default = 20;
173 dtrace_optval_t dtrace_ustackframes_default = 20;
174 dtrace_optval_t dtrace_jstackframes_default = 50;
175 dtrace_optval_t dtrace_jstackstrsize_default = 512;
176 int dtrace_msgdsize_max = 128;
177 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
178 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
179 int dtrace_devdepth_max = 32;
180 int dtrace_err_verbose;
181 hrtime_t dtrace_deadman_interval = NANOSEC;
182 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
183 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
186 * DTrace External Variables
188 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
189 * available to DTrace consumers via the backtick (`) syntax. One of these,
190 * dtrace_zero, is made deliberately so: it is provided as a source of
191 * well-known, zero-filled memory. While this variable is not documented,
192 * it is used by some translators as an implementation detail.
194 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
197 * DTrace Internal Variables
200 static dev_info_t *dtrace_devi; /* device info */
203 static vmem_t *dtrace_arena; /* probe ID arena */
204 static vmem_t *dtrace_minor; /* minor number arena */
205 static taskq_t *dtrace_taskq; /* task queue */
207 static struct unrhdr *dtrace_arena; /* Probe ID number. */
209 static dtrace_probe_t **dtrace_probes; /* array of all probes */
210 static int dtrace_nprobes; /* number of probes */
211 static dtrace_provider_t *dtrace_provider; /* provider list */
212 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
213 static int dtrace_opens; /* number of opens */
214 static int dtrace_helpers; /* number of helpers */
216 static void *dtrace_softstate; /* softstate pointer */
218 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
219 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
220 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
221 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
222 static int dtrace_toxranges; /* number of toxic ranges */
223 static int dtrace_toxranges_max; /* size of toxic range array */
224 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
225 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
226 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
227 static kthread_t *dtrace_panicked; /* panicking thread */
228 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
229 static dtrace_genid_t dtrace_probegen; /* current probe generation */
230 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
231 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
232 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
234 static struct mtx dtrace_unr_mtx;
235 MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
236 int dtrace_in_probe; /* non-zero if executing a probe */
237 #if defined(__i386__) || defined(__amd64__)
238 uintptr_t dtrace_in_probe_addr; /* Address of invop when already in probe */
244 * DTrace is protected by three (relatively coarse-grained) locks:
246 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
247 * including enabling state, probes, ECBs, consumer state, helper state,
248 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
249 * probe context is lock-free -- synchronization is handled via the
250 * dtrace_sync() cross call mechanism.
252 * (2) dtrace_provider_lock is required when manipulating provider state, or
253 * when provider state must be held constant.
255 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
256 * when meta provider state must be held constant.
258 * The lock ordering between these three locks is dtrace_meta_lock before
259 * dtrace_provider_lock before dtrace_lock. (In particular, there are
260 * several places where dtrace_provider_lock is held by the framework as it
261 * calls into the providers -- which then call back into the framework,
262 * grabbing dtrace_lock.)
264 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
265 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
266 * role as a coarse-grained lock; it is acquired before both of these locks.
267 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
268 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
269 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
270 * acquired _between_ dtrace_provider_lock and dtrace_lock.
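/*
 * As a minimal sketch (not a verbatim excerpt from this file), a framework
 * path that needs all three DTrace locks would therefore acquire and release
 * them as follows:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&dtrace_lock);
 *	... manipulate framework, provider and meta provider state ...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&dtrace_meta_lock);
 */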
272 static kmutex_t dtrace_lock; /* probe state lock */
273 static kmutex_t dtrace_provider_lock; /* provider state lock */
274 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
277 /* XXX FreeBSD hacks. */
278 static kmutex_t mod_lock;
280 #define cr_suid cr_svuid
281 #define cr_sgid cr_svgid
282 #define ipaddr_t in_addr_t
283 #define mod_modname pathname
284 #define vuprintf vprintf
285 #define ttoproc(_a) ((_a)->td_proc)
286 #define crgetzoneid(_a) 0
289 #define CPU_ON_INTR(_a) 0
291 #define PRIV_EFFECTIVE (1 << 0)
292 #define PRIV_DTRACE_KERNEL (1 << 1)
293 #define PRIV_DTRACE_PROC (1 << 2)
294 #define PRIV_DTRACE_USER (1 << 3)
295 #define PRIV_PROC_OWNER (1 << 4)
296 #define PRIV_PROC_ZONE (1 << 5)
299 SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
303 #define curcpu CPU->cpu_id
308 * DTrace Provider Variables
310 * These are the variables relating to DTrace as a provider (that is, the
311 * provider of the BEGIN, END, and ERROR probes).
313 static dtrace_pattr_t dtrace_provider_attr = {
314 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
315 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
316 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
317 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
318 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
325 static dtrace_pops_t dtrace_provider_ops = {
326 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
327 (void (*)(void *, modctl_t *))dtrace_nullop,
328 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
329 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
330 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
331 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
335 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
338 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
339 static dtrace_id_t dtrace_probeid_end; /* special END probe */
340 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
343 * DTrace Helper Tracing Variables
345 uint32_t dtrace_helptrace_next = 0;
346 uint32_t dtrace_helptrace_nlocals;
347 char *dtrace_helptrace_buffer;
348 int dtrace_helptrace_bufsize = 512 * 1024;
351 int dtrace_helptrace_enabled = 1;
353 int dtrace_helptrace_enabled = 0;
357 * DTrace Error Hashing
359 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
360 * table. This is very useful for checking coverage of tests that are
361 * expected to induce DIF or DOF processing errors, and may be useful for
362 * debugging problems in the DIF code generator or in DOF generation. The
363 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
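/*
 * As a usage sketch, the accumulated error counts on a DEBUG kernel can be
 * inspected from the kernel debugger (assuming mdb is available on the
 * platform):
 *
 *	# mdb -k
 *	> ::dtrace_errhash
 */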
366 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
367 static const char *dtrace_errlast;
368 static kthread_t *dtrace_errthread;
369 static kmutex_t dtrace_errlock;
373 * DTrace Macros and Constants
375 * These are various macros that are useful in various spots in the
376 * implementation, along with a few random constants that have no meaning
377 * outside of the implementation. There is no real structure to this cpp
378 * mishmash -- but is there ever?
380 #define DTRACE_HASHSTR(hash, probe) \
381 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
383 #define DTRACE_HASHNEXT(hash, probe) \
384 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
386 #define DTRACE_HASHPREV(hash, probe) \
387 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
389 #define DTRACE_HASHEQ(hash, lhs, rhs) \
390 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
391 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
393 #define DTRACE_AGGHASHSIZE_SLEW 17
395 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
398 * The key for a thread-local variable consists of the lower 61 bits of the
399 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
400 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
401 * equal to a variable identifier. This is necessary (but not sufficient) to
402 * assure that global associative arrays never collide with thread-local
403 * variables. To guarantee that they cannot collide, we must also define the
404 * order for keying dynamic variables. That order is:
406 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
408 * Because the variable-key and the tls-key are in orthogonal spaces, there is
409 * no way for a global variable key signature to match a thread-local key signature.
413 #define DTRACE_TLS_THRKEY(where) { \
415 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
416 for (; actv; actv >>= 1) \
418 ASSERT(intr < (1 << 3)); \
419 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
420 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
423 #define DTRACE_TLS_THRKEY(where) { \
424 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
426 uint_t actv = _c->cpu_intr_actv; \
427 for (; actv; actv >>= 1) \
429 ASSERT(intr < (1 << 3)); \
430 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
431 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
435 #define DT_BSWAP_8(x) ((x) & 0xff)
436 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
437 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
438 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
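/*
 * A few worked examples of the byte-swap macros above (assuming unsigned,
 * 64-bit-clean operands):
 *
 *	DT_BSWAP_16(0x1234)                == 0x3412
 *	DT_BSWAP_32(0x12345678)            == 0x78563412
 *	DT_BSWAP_64(0x0123456789abcdefULL) == 0xefcdab8967452301ULL
 */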
440 #define DT_MASK_LO 0x00000000FFFFFFFFULL
442 #define DTRACE_STORE(type, tomax, offset, what) \
443 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
446 #define DTRACE_ALIGNCHECK(addr, size, flags) \
447 if (addr & (size - 1)) { \
448 *flags |= CPU_DTRACE_BADALIGN; \
449 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
453 #define DTRACE_ALIGNCHECK(addr, size, flags)
457 * Test whether a range of memory starting at testaddr of size testsz falls
458 * within the range of memory described by addr, sz. We take care to avoid
459 * problems with overflow and underflow of the unsigned quantities, and
460 * disallow all negative sizes. Ranges of size 0 are allowed.
462 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
463 ((testaddr) - (baseaddr) < (basesz) && \
464 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \
465 (testaddr) + (testsz) >= (testaddr))
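/*
 * A sketch of the wraparound case that the final clause above guards
 * against: with testaddr == (uintptr_t)-8 and testsz == 16, the sum
 * testaddr + testsz wraps to 8, so a naive "testaddr + testsz - baseaddr <=
 * basesz" check could spuriously succeed; because the wrapped sum is less
 * than testaddr, the final clause rejects the range instead.
 */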
468 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
469 * alloc_sz on the righthand side of the comparison in order to avoid overflow
470 * or underflow in the comparison with it. This is simpler than the INRANGE
471 * check above, because we know that the dtms_scratch_ptr is valid in the
472 * range. Allocations of size zero are allowed.
474 #define DTRACE_INSCRATCH(mstate, alloc_sz) \
475 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
476 (mstate)->dtms_scratch_ptr >= (alloc_sz))
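/*
 * A concrete illustration of why alloc_sz is kept alone on the righthand
 * side: with a pathological alloc_sz such as (size_t)-1, a naive check of
 * "dtms_scratch_ptr + alloc_sz <= dtms_scratch_base + dtms_scratch_size"
 * would wrap and could succeed; the remaining-space quantity on the left of
 * the comparison above cannot wrap, so the oversized request is refused.
 */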
478 #define DTRACE_LOADFUNC(bits) \
481 dtrace_load##bits(uintptr_t addr) \
483 size_t size = bits / NBBY; \
485 uint##bits##_t rval; \
487 volatile uint16_t *flags = (volatile uint16_t *) \
488 &cpu_core[curcpu].cpuc_dtrace_flags; \
490 DTRACE_ALIGNCHECK(addr, size, flags); \
492 for (i = 0; i < dtrace_toxranges; i++) { \
493 if (addr >= dtrace_toxrange[i].dtt_limit) \
496 if (addr + size <= dtrace_toxrange[i].dtt_base) \
500 * This address falls within a toxic region; return 0. \
502 *flags |= CPU_DTRACE_BADADDR; \
503 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
507 *flags |= CPU_DTRACE_NOFAULT; \
509 rval = *((volatile uint##bits##_t *)addr); \
510 *flags &= ~CPU_DTRACE_NOFAULT; \
512 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
516 #define dtrace_loadptr dtrace_load64
518 #define dtrace_loadptr dtrace_load32
521 #define DTRACE_DYNHASH_FREE 0
522 #define DTRACE_DYNHASH_SINK 1
523 #define DTRACE_DYNHASH_VALID 2
525 #define DTRACE_MATCH_NEXT 0
526 #define DTRACE_MATCH_DONE 1
527 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
528 #define DTRACE_STATE_ALIGN 64
530 #define DTRACE_FLAGS2FLT(flags) \
531 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
532 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
533 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
534 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
535 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
536 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
537 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
538 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
539 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
542 #define DTRACEACT_ISSTRING(act) \
543 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
544 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
546 /* Function prototype definitions: */
547 static size_t dtrace_strlen(const char *, size_t);
548 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
549 static void dtrace_enabling_provide(dtrace_provider_t *);
550 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
551 static void dtrace_enabling_matchall(void);
552 static dtrace_state_t *dtrace_anon_grab(void);
554 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
555 dtrace_state_t *, uint64_t, uint64_t);
556 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
558 static void dtrace_buffer_drop(dtrace_buffer_t *);
559 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
560 dtrace_state_t *, dtrace_mstate_t *);
561 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
563 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
565 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
567 uint16_t dtrace_load16(uintptr_t);
568 uint32_t dtrace_load32(uintptr_t);
569 uint64_t dtrace_load64(uintptr_t);
570 uint8_t dtrace_load8(uintptr_t);
571 void dtrace_dynvar_clean(dtrace_dstate_t *);
572 dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
573 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
574 uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
577 * DTrace Probe Context Functions
579 * These functions are called from probe context. Because probe context is
580 * any context in which C may be called, arbitrary locks may be held,
581 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
582 * As a result, functions called from probe context may only call other DTrace
583 * support functions -- they may not interact at all with the system at large.
584 * (Note that the ASSERT macro is made probe-context safe by redefining it in
585 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
586 * loads are to be performed from probe context, they _must_ be in terms of
587 * the safe dtrace_load*() variants.
589 * Some functions in this block are not actually called from probe context;
590 * for these functions, there will be a comment above the function reading
591 * "Note: not called from probe context."
594 dtrace_panic(const char *format, ...)
598 va_start(alist, format);
599 dtrace_vpanic(format, alist);
604 dtrace_assfail(const char *a, const char *f, int l)
606 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
609 * We just need something here that even the most clever compiler
610 * cannot optimize away.
612 return (a[(uintptr_t)f]);
616 * Atomically increment a specified error counter from probe context.
619 dtrace_error(uint32_t *counter)
622 * Most counters stored to in probe context are per-CPU counters.
623 * However, there are some error conditions that are sufficiently
624 * arcane that they don't merit per-CPU storage. If these counters
625 * are incremented concurrently on different CPUs, scalability will be
626 * adversely affected -- but we don't expect them to be white-hot in a
627 * correctly constructed enabling...
634 if ((nval = oval + 1) == 0) {
636 * If the counter would wrap, set it to 1 -- assuring
637 * that the counter is never zero when we have seen
638 * errors. (The counter must be 32-bits because we
639 * aren't guaranteed a 64-bit compare&swap operation.)
640 * To save this code both the infamy of being fingered
641 * by a priggish news story and the indignity of being
642 * the target of a neo-puritan witch trial, we're
643 * carefully avoiding any colorful description of the
644 * likelihood of this condition -- but suffice it to
645 * say that it is only slightly more likely than the
646 * overflow of predicate cache IDs, as discussed in
647 * dtrace_predicate_create().
651 } while (dtrace_cas32(counter, oval, nval) != oval);
655 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
656 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
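/*
 * A sketch of those definitions -- each invocation of the macro emits one
 * safe load routine (dtrace_load8(), dtrace_load16(), and so on) with the
 * toxic-range and fault handling shown in DTRACE_LOADFUNC above:
 *
 *	DTRACE_LOADFUNC(8)
 *	DTRACE_LOADFUNC(16)
 *	DTRACE_LOADFUNC(32)
 *	DTRACE_LOADFUNC(64)
 */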
664 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
666 if (dest < mstate->dtms_scratch_base)
669 if (dest + size < dest)
672 if (dest + size > mstate->dtms_scratch_ptr)
679 dtrace_canstore_statvar(uint64_t addr, size_t sz,
680 dtrace_statvar_t **svars, int nsvars)
684 for (i = 0; i < nsvars; i++) {
685 dtrace_statvar_t *svar = svars[i];
687 if (svar == NULL || svar->dtsv_size == 0)
690 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
698 * Check to see if the address is within a memory region to which a store may
699 * be issued. This includes the DTrace scratch areas, and any DTrace variable
700 * region. The caller of dtrace_canstore() is responsible for performing any
701 * alignment checks that are needed before stores are actually executed.
704 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
705 dtrace_vstate_t *vstate)
708 * First, check to see if the address is in scratch space...
710 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
711 mstate->dtms_scratch_size))
715 * Now check to see if it's a dynamic variable. This check will pick
716 * up both thread-local variables and any global dynamically-allocated
719 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
720 vstate->dtvs_dynvars.dtds_size)) {
721 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
722 uintptr_t base = (uintptr_t)dstate->dtds_base +
723 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
727 * Before we assume that we can store here, we need to make
728 * sure that it isn't in our metadata -- storing to our
729 * dynamic variable metadata would corrupt our state. For
730 * the range to not include any dynamic variable metadata,
733 * (1) Start above the hash table that is at the base of
734 * the dynamic variable space
736 * (2) Have a starting chunk offset that is beyond the
737 * dtrace_dynvar_t that is at the base of every chunk
739 * (3) Not span a chunk boundary
745 chunkoffs = (addr - base) % dstate->dtds_chunksize;
747 if (chunkoffs < sizeof (dtrace_dynvar_t))
750 if (chunkoffs + sz > dstate->dtds_chunksize)
757 * Finally, check the static local and global variables. These checks
758 * take the longest, so we perform them last.
760 if (dtrace_canstore_statvar(addr, sz,
761 vstate->dtvs_locals, vstate->dtvs_nlocals))
764 if (dtrace_canstore_statvar(addr, sz,
765 vstate->dtvs_globals, vstate->dtvs_nglobals))
773 * Convenience routine to check to see if the address is within a memory
774 * region in which a load may be issued given the user's privilege level;
775 * if not, it sets the appropriate error flags and loads 'addr' into the
776 * illegal value slot.
778 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
779 * appropriate memory access protection.
782 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
783 dtrace_vstate_t *vstate)
785 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
788 * If we hold the privilege to read from kernel memory, then
789 * everything is readable.
791 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
795 * You can obviously read that which you can store.
797 if (dtrace_canstore(addr, sz, mstate, vstate))
801 * We're allowed to read from our own string table.
803 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
804 mstate->dtms_difo->dtdo_strlen))
807 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
813 * Convenience routine to check to see if a given string is within a memory
814 * region in which a load may be issued given the user's privilege level;
815 * this exists so that we don't need to issue unnecessary dtrace_strlen()
816 * calls in the event that the user has all privileges.
819 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
820 dtrace_vstate_t *vstate)
825 * If we hold the privilege to read from kernel memory, then
826 * everything is readable.
828 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
831 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
832 if (dtrace_canload(addr, strsz, mstate, vstate))
839 * Convenience routine to check to see if a given variable is within a memory
840 * region in which a load may be issued given the user's privilege level.
843 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
844 dtrace_vstate_t *vstate)
847 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
850 * If we hold the privilege to read from kernel memory, then
851 * everything is readable.
853 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
856 if (type->dtdt_kind == DIF_TYPE_STRING)
857 sz = dtrace_strlen(src,
858 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
860 sz = type->dtdt_size;
862 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
866 * Compare two strings using safe loads.
869 dtrace_strncmp(char *s1, char *s2, size_t limit)
872 volatile uint16_t *flags;
874 if (s1 == s2 || limit == 0)
877 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
883 c1 = dtrace_load8((uintptr_t)s1++);
889 c2 = dtrace_load8((uintptr_t)s2++);
894 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
900 * Compute strlen(s) for a string using safe memory accesses. The additional
901 * lim parameter is used to specify a maximum length to ensure completion.
904 dtrace_strlen(const char *s, size_t lim)
908 for (len = 0; len != lim; len++) {
909 if (dtrace_load8((uintptr_t)s++) == '\0')
917 * Check if an address falls within a toxic region.
920 dtrace_istoxic(uintptr_t kaddr, size_t size)
922 uintptr_t taddr, tsize;
925 for (i = 0; i < dtrace_toxranges; i++) {
926 taddr = dtrace_toxrange[i].dtt_base;
927 tsize = dtrace_toxrange[i].dtt_limit - taddr;
929 if (kaddr - taddr < tsize) {
930 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
931 cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
935 if (taddr - kaddr < size) {
936 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
937 cpu_core[curcpu].cpuc_dtrace_illval = taddr;
946 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
947 * memory specified by the DIF program. The dst is assumed to be safe memory
948 * that we can store to directly because it is managed by DTrace. As with
949 * standard bcopy, overlapping copies are handled properly.
952 dtrace_bcopy(const void *src, void *dst, size_t len)
956 const uint8_t *s2 = src;
960 *s1++ = dtrace_load8((uintptr_t)s2++);
961 } while (--len != 0);
967 *--s1 = dtrace_load8((uintptr_t)--s2);
968 } while (--len != 0);
974 * Copy src to dst using safe memory accesses, up to either the specified
975 * length, or the point that a nul byte is encountered. The src is assumed to
976 * be unsafe memory specified by the DIF program. The dst is assumed to be
977 * safe memory that we can store to directly because it is managed by DTrace.
978 * Unlike dtrace_bcopy(), overlapping regions are not handled.
981 dtrace_strcpy(const void *src, void *dst, size_t len)
984 uint8_t *s1 = dst, c;
985 const uint8_t *s2 = src;
988 *s1++ = c = dtrace_load8((uintptr_t)s2++);
989 } while (--len != 0 && c != '\0');
994 * Copy src to dst, deriving the size and type from the specified (BYREF)
995 * variable type. The src is assumed to be unsafe memory specified by the DIF
996 * program. The dst is assumed to be DTrace variable memory that is of the
997 * specified type; we assume that we can store to it directly.
1000 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
1002 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1004 if (type->dtdt_kind == DIF_TYPE_STRING) {
1005 dtrace_strcpy(src, dst, type->dtdt_size);
1007 dtrace_bcopy(src, dst, type->dtdt_size);
1012 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1013 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1014 * safe memory that we can access directly because it is managed by DTrace.
1017 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1019 volatile uint16_t *flags;
1021 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
1026 if (s1 == NULL || s2 == NULL)
1029 if (s1 != s2 && len != 0) {
1030 const uint8_t *ps1 = s1;
1031 const uint8_t *ps2 = s2;
1034 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1036 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1042 * Zero the specified region using a simple byte-by-byte loop. Note that this
1043 * is for safe DTrace-managed memory only.
1046 dtrace_bzero(void *dst, size_t len)
1050 for (cp = dst; len != 0; len--)
1055 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1059 result[0] = addend1[0] + addend2[0];
1060 result[1] = addend1[1] + addend2[1] +
1061 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1068 * Shift the 128-bit value in a by b. If b is positive, shift left.
1069 * If b is negative, shift right.
1072 dtrace_shift_128(uint64_t *a, int b)
1082 a[0] = a[1] >> (b - 64);
1086 mask = 1LL << (64 - b);
1088 a[0] |= ((a[1] & mask) << (64 - b));
1093 a[1] = a[0] << (b - 64);
1097 mask = a[0] >> (64 - b);
1105 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1106 * use native multiplication on those, and then re-combine into the
1107 * resulting 128-bit value.
1109 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 + hi1 * lo2 << 32 + hi2 * lo1 << 32 + lo1 * lo2
1116 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1118 uint64_t hi1, hi2, lo1, lo2;
1121 hi1 = factor1 >> 32;
1122 hi2 = factor2 >> 32;
1124 lo1 = factor1 & DT_MASK_LO;
1125 lo2 = factor2 & DT_MASK_LO;
1127 product[0] = lo1 * lo2;
1128 product[1] = hi1 * hi2;
1132 dtrace_shift_128(tmp, 32);
1133 dtrace_add_128(product, tmp, product);
1137 dtrace_shift_128(tmp, 32);
1138 dtrace_add_128(product, tmp, product);
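/*
 * A worked example of the decomposition above: squaring UINT64_MAX
 * (0xffffffffffffffff) yields 2^128 - 2^65 + 1, i.e.
 *
 *	product[1] == 0xfffffffffffffffe	(high 64 bits)
 *	product[0] == 0x0000000000000001	(low 64 bits)
 */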
1142 * This privilege check should be used by actions and subroutines to
1143 * verify that the user credentials of the process that enabled the
1144 * invoking ECB match the target credentials
1147 dtrace_priv_proc_common_user(dtrace_state_t *state)
1149 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1152 * We should always have a non-NULL state cred here, since if cred
1153 * is null (anonymous tracing), we fast-path bypass this routine.
1155 ASSERT(s_cr != NULL);
1157 if ((cr = CRED()) != NULL &&
1158 s_cr->cr_uid == cr->cr_uid &&
1159 s_cr->cr_uid == cr->cr_ruid &&
1160 s_cr->cr_uid == cr->cr_suid &&
1161 s_cr->cr_gid == cr->cr_gid &&
1162 s_cr->cr_gid == cr->cr_rgid &&
1163 s_cr->cr_gid == cr->cr_sgid)
1170 * This privilege check should be used by actions and subroutines to
1171 * verify that the zone of the process that enabled the invoking ECB
1172 * matches the target credentials
1175 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1178 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1181 * We should always have a non-NULL state cred here, since if cred
1182 * is null (anonymous tracing), we fast-path bypass this routine.
1184 ASSERT(s_cr != NULL);
1186 if ((cr = CRED()) != NULL &&
1187 s_cr->cr_zone == cr->cr_zone)
1197 * This privilege check should be used by actions and subroutines to
1198 * verify that the process has not setuid or changed credentials.
1201 dtrace_priv_proc_common_nocd(void)
1205 if ((proc = ttoproc(curthread)) != NULL &&
1206 !(proc->p_flag & SNOCD))
1213 dtrace_priv_proc_destructive(dtrace_state_t *state)
1215 int action = state->dts_cred.dcr_action;
1217 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1218 dtrace_priv_proc_common_zone(state) == 0)
1221 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1222 dtrace_priv_proc_common_user(state) == 0)
1225 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1226 dtrace_priv_proc_common_nocd() == 0)
1232 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1238 dtrace_priv_proc_control(dtrace_state_t *state)
1240 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1243 if (dtrace_priv_proc_common_zone(state) &&
1244 dtrace_priv_proc_common_user(state) &&
1245 dtrace_priv_proc_common_nocd())
1248 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1254 dtrace_priv_proc(dtrace_state_t *state)
1256 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1259 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1265 dtrace_priv_kernel(dtrace_state_t *state)
1267 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1270 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1276 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1278 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1281 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1287 * Note: not called from probe context. This function is called
1288 * asynchronously (and at a regular interval) from outside of probe context to
1289 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1290 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1293 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1295 dtrace_dynvar_t *dirty;
1296 dtrace_dstate_percpu_t *dcpu;
1299 for (i = 0; i < NCPU; i++) {
1300 dcpu = &dstate->dtds_percpu[i];
1302 ASSERT(dcpu->dtdsc_rinsing == NULL);
1305 * If the dirty list is NULL, there is no dirty work to do.
1307 if (dcpu->dtdsc_dirty == NULL)
1311 * If the clean list is non-NULL, then we're not going to do
1312 * any work for this CPU -- it means that there has not been
1313 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1314 * since the last time we cleaned house.
1316 if (dcpu->dtdsc_clean != NULL)
1322 * Atomically move the dirty list aside.
1325 dirty = dcpu->dtdsc_dirty;
1328 * Before we zap the dirty list, set the rinsing list.
1329 * (This allows for a potential assertion in
1330 * dtrace_dynvar(): if a free dynamic variable appears
1331 * on a hash chain, either the dirty list or the
1332 * rinsing list for some CPU must be non-NULL.)
1334 dcpu->dtdsc_rinsing = dirty;
1335 dtrace_membar_producer();
1336 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1337 dirty, NULL) != dirty);
1342 * We have no work to do; we can simply return.
1349 for (i = 0; i < NCPU; i++) {
1350 dcpu = &dstate->dtds_percpu[i];
1352 if (dcpu->dtdsc_rinsing == NULL)
1356 * We are now guaranteed that no hash chain contains a pointer
1357 * into this dirty list; we can make it clean.
1359 ASSERT(dcpu->dtdsc_clean == NULL);
1360 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1361 dcpu->dtdsc_rinsing = NULL;
1365 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1366 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1367 * This prevents a race whereby a CPU incorrectly decides that
1368 * the state should be something other than DTRACE_DSTATE_CLEAN
1369 * after dtrace_dynvar_clean() has completed.
1373 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1377 * Depending on the value of the op parameter, this function looks up,
1378 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1379 * allocation is requested, this function will return a pointer to a
1380 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1381 * variable can be allocated. If NULL is returned, the appropriate counter
1382 * will be incremented.
1385 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1386 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1387 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1389 uint64_t hashval = DTRACE_DYNHASH_VALID;
1390 dtrace_dynhash_t *hash = dstate->dtds_hash;
1391 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1392 processorid_t me = curcpu, cpu = me;
1393 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1394 size_t bucket, ksize;
1395 size_t chunksize = dstate->dtds_chunksize;
1396 uintptr_t kdata, lock, nstate;
1402 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1403 * algorithm. For the by-value portions, we perform the algorithm in
1404 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1405 * bit, and seems to have only a minute effect on distribution. For
1406 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1407 * over each referenced byte. It's painful to do this, but it's much
1408 * better than pathological hash distribution. The efficacy of the
1409 * hashing algorithm (and a comparison with other algorithms) may be
1410 * found by running the ::dtrace_dynstat MDB dcmd.
1412 for (i = 0; i < nkeys; i++) {
1413 if (key[i].dttk_size == 0) {
1414 uint64_t val = key[i].dttk_value;
1416 hashval += (val >> 48) & 0xffff;
1417 hashval += (hashval << 10);
1418 hashval ^= (hashval >> 6);
1420 hashval += (val >> 32) & 0xffff;
1421 hashval += (hashval << 10);
1422 hashval ^= (hashval >> 6);
1424 hashval += (val >> 16) & 0xffff;
1425 hashval += (hashval << 10);
1426 hashval ^= (hashval >> 6);
1428 hashval += val & 0xffff;
1429 hashval += (hashval << 10);
1430 hashval ^= (hashval >> 6);
1433 * This is incredibly painful, but it beats the hell
1434 * out of the alternative.
1436 uint64_t j, size = key[i].dttk_size;
1437 uintptr_t base = (uintptr_t)key[i].dttk_value;
1439 if (!dtrace_canload(base, size, mstate, vstate))
1442 for (j = 0; j < size; j++) {
1443 hashval += dtrace_load8(base + j);
1444 hashval += (hashval << 10);
1445 hashval ^= (hashval >> 6);
1450 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1453 hashval += (hashval << 3);
1454 hashval ^= (hashval >> 11);
1455 hashval += (hashval << 15);
1458 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1459 * comes out to be one of our two sentinel hash values. If this
1460 * actually happens, we set the hashval to be a value known to be a
1461 * non-sentinel value.
1463 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1464 hashval = DTRACE_DYNHASH_VALID;
1467 * Yes, it's painful to do a divide here. If the cycle count becomes
1468 * important here, tricks can be pulled to reduce it. (However, it's
1469 * critical that hash collisions be kept to an absolute minimum;
1470 * they're much more painful than a divide.) It's better to have a
1471 * solution that generates few collisions and still keeps things
1472 * relatively simple.
1474 bucket = hashval % dstate->dtds_hashsize;
1476 if (op == DTRACE_DYNVAR_DEALLOC) {
1477 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1480 while ((lock = *lockp) & 1)
1483 if (dtrace_casptr((volatile void *)lockp,
1484 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock)
1488 dtrace_membar_producer();
1493 lock = hash[bucket].dtdh_lock;
1495 dtrace_membar_consumer();
1497 start = hash[bucket].dtdh_chain;
1498 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1499 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1500 op != DTRACE_DYNVAR_DEALLOC));
1502 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1503 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1504 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1506 if (dvar->dtdv_hashval != hashval) {
1507 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1509 * We've reached the sink, and therefore the
1510 * end of the hash chain; we can kick out of
1511 * the loop knowing that we have seen a valid
1512 * snapshot of state.
1514 ASSERT(dvar->dtdv_next == NULL);
1515 ASSERT(dvar == &dtrace_dynhash_sink);
1519 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1521 * We've gone off the rails: somewhere along
1522 * the line, one of the members of this hash
1523 * chain was deleted. Note that we could also
1524 * detect this by simply letting this loop run
1525 * to completion, as we would eventually hit
1526 * the end of the dirty list. However, we
1527 * want to avoid running the length of the
1528 * dirty list unnecessarily (it might be quite
1529 * long), so we catch this as early as
1530 * possible by detecting the hash marker. In
1531 * this case, we simply set dvar to NULL and
1532 * break; the conditional after the loop will
1533 * send us back to top.
1542 if (dtuple->dtt_nkeys != nkeys)
1545 for (i = 0; i < nkeys; i++, dkey++) {
1546 if (dkey->dttk_size != key[i].dttk_size)
1547 goto next; /* size or type mismatch */
1549 if (dkey->dttk_size != 0) {
1551 (void *)(uintptr_t)key[i].dttk_value,
1552 (void *)(uintptr_t)dkey->dttk_value,
1556 if (dkey->dttk_value != key[i].dttk_value)
1561 if (op != DTRACE_DYNVAR_DEALLOC)
1564 ASSERT(dvar->dtdv_next == NULL ||
1565 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1568 ASSERT(hash[bucket].dtdh_chain != dvar);
1569 ASSERT(start != dvar);
1570 ASSERT(prev->dtdv_next == dvar);
1571 prev->dtdv_next = dvar->dtdv_next;
1573 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1574 start, dvar->dtdv_next) != start) {
1576 * We have failed to atomically swing the
1577 * hash table head pointer, presumably because
1578 * of a conflicting allocation on another CPU.
1579 * We need to reread the hash chain and try
1586 dtrace_membar_producer();
1589 * Now set the hash value to indicate that it's free.
1591 ASSERT(hash[bucket].dtdh_chain != dvar);
1592 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1594 dtrace_membar_producer();
1597 * Set the next pointer to point at the dirty list, and
1598 * atomically swing the dirty pointer to the newly freed dvar.
1601 next = dcpu->dtdsc_dirty;
1602 dvar->dtdv_next = next;
1603 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1606 * Finally, unlock this hash bucket.
1608 ASSERT(hash[bucket].dtdh_lock == lock);
1610 hash[bucket].dtdh_lock++;
1620 * If dvar is NULL, it is because we went off the rails:
1621 * one of the elements that we traversed in the hash chain
1622 * was deleted while we were traversing it. In this case,
1623 * we assert that we aren't doing a dealloc (deallocs lock
1624 * the hash bucket to prevent themselves from racing with
1625 * one another), and retry the hash chain traversal.
1627 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1631 if (op != DTRACE_DYNVAR_ALLOC) {
1633 * If we are not to allocate a new variable, we want to
1634 * return NULL now. Before we return, check that the value
1635 * of the lock word hasn't changed. If it has, we may have
1636 * seen an inconsistent snapshot.
1638 if (op == DTRACE_DYNVAR_NOALLOC) {
1639 if (hash[bucket].dtdh_lock != lock)
1642 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1643 ASSERT(hash[bucket].dtdh_lock == lock);
1645 hash[bucket].dtdh_lock++;
1652 * We need to allocate a new dynamic variable. The size we need is the
1653 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1654 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1655 * the size of any referred-to data (dsize). We then round the final
1656 * size up to the chunksize for allocation.
1658 for (ksize = 0, i = 0; i < nkeys; i++)
1659 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1662 * This should be pretty much impossible, but could happen if, say,
1663 * strange DIF specified the tuple. Ideally, this should be an
1664 * assertion and not an error condition -- but that requires that the
1665 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1666 * bullet-proof. (That is, it must not be able to be fooled by
1667 * malicious DIF.) Given the lack of backwards branches in DIF,
1668 * solving this would presumably not amount to solving the Halting
1669 * Problem -- but it still seems awfully hard.
1671 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1672 ksize + dsize > chunksize) {
1673 dcpu->dtdsc_drops++;
1677 nstate = DTRACE_DSTATE_EMPTY;
1681 free = dcpu->dtdsc_free;
1684 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1687 if (clean == NULL) {
1689 * We're out of dynamic variable space on
1690 * this CPU. Unless we have tried all CPUs,
1691 * we'll try to allocate from a different
1694 switch (dstate->dtds_state) {
1695 case DTRACE_DSTATE_CLEAN: {
1696 void *sp = &dstate->dtds_state;
1701 if (dcpu->dtdsc_dirty != NULL &&
1702 nstate == DTRACE_DSTATE_EMPTY)
1703 nstate = DTRACE_DSTATE_DIRTY;
1705 if (dcpu->dtdsc_rinsing != NULL)
1706 nstate = DTRACE_DSTATE_RINSING;
1708 dcpu = &dstate->dtds_percpu[cpu];
1713 (void) dtrace_cas32(sp,
1714 DTRACE_DSTATE_CLEAN, nstate);
1717 * To increment the correct bean
1718 * counter, take another lap.
1723 case DTRACE_DSTATE_DIRTY:
1724 dcpu->dtdsc_dirty_drops++;
1727 case DTRACE_DSTATE_RINSING:
1728 dcpu->dtdsc_rinsing_drops++;
1731 case DTRACE_DSTATE_EMPTY:
1732 dcpu->dtdsc_drops++;
1736 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1741 * The clean list appears to be non-empty. We want to
1742 * move the clean list to the free list; we start by
1743 * moving the clean pointer aside.
1745 if (dtrace_casptr(&dcpu->dtdsc_clean,
1746 clean, NULL) != clean) {
1748 * We are in one of two situations:
1750 * (a) The clean list was switched to the
1751 * free list by another CPU.
1753 * (b) The clean list was added to by the
1756 * In either of these situations, we can
1757 * just reattempt the free list allocation.
1762 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1765 * Now we'll move the clean list to the free list.
1766 * It's impossible for this to fail: the only way
1767 * the free list can be updated is through this
1768 * code path, and only one CPU can own the clean list.
1769 * Thus, it would only be possible for this to fail if
1770 * this code were racing with dtrace_dynvar_clean().
1771 * (That is, if dtrace_dynvar_clean() updated the clean
1772 * list, and we ended up racing to update the free
1773 * list.) This race is prevented by the dtrace_sync()
1774 * in dtrace_dynvar_clean() -- which flushes the
1775 * owners of the clean lists out before resetting
1778 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1779 ASSERT(rval == NULL);
1784 new_free = dvar->dtdv_next;
1785 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1788 * We have now allocated a new chunk. We copy the tuple keys into the
1789 * tuple array and copy any referenced key data into the data space
1790 * following the tuple array. As we do this, we relocate dttk_value
1791 * in the final tuple to point to the key data address in the chunk.
1793 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1794 dvar->dtdv_data = (void *)(kdata + ksize);
1795 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1797 for (i = 0; i < nkeys; i++) {
1798 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1799 size_t kesize = key[i].dttk_size;
1803 (const void *)(uintptr_t)key[i].dttk_value,
1804 (void *)kdata, kesize);
1805 dkey->dttk_value = kdata;
1806 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1808 dkey->dttk_value = key[i].dttk_value;
1811 dkey->dttk_size = kesize;
1814 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1815 dvar->dtdv_hashval = hashval;
1816 dvar->dtdv_next = start;
1818 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1822 * The cas has failed. Either another CPU is adding an element to
1823 * this hash chain, or another CPU is deleting an element from this
1824 * hash chain. The simplest way to deal with both of these cases
1825 * (though not necessarily the most efficient) is to free our
1826 * allocated block and tail-call ourselves. Note that the free is
1827 * to the dirty list and _not_ to the free list. This is to prevent
1828 * races with allocators, above.
1830 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1832 dtrace_membar_producer();
1835 free = dcpu->dtdsc_dirty;
1836 dvar->dtdv_next = free;
1837 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1839 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1844 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1846 if ((int64_t)nval < (int64_t)*oval)
1852 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1854 if ((int64_t)nval > (int64_t)*oval)
1859 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1861 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1862 int64_t val = (int64_t)nval;
1865 for (i = 0; i < zero; i++) {
1866 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1872 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1873 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1874 quanta[i - 1] += incr;
1879 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1887 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1889 uint64_t arg = *lquanta++;
1890 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1891 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1892 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1893 int32_t val = (int32_t)nval, level;
1896 ASSERT(levels != 0);
1900 * This is an underflow.
1906 level = (val - base) / step;
1908 if (level < levels) {
1909 lquanta[level + 1] += incr;
1914 * This is an overflow.
1916 lquanta[levels + 1] += incr;
1921 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1929 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1931 int64_t snval = (int64_t)nval;
1938 * What we want to say here is:
1940 * data[2] += nval * nval;
1942 * But given that nval is 64-bit, we could easily overflow, so
1943 * we do this as 128-bit arithmetic.
1948 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
1949 dtrace_add_128(data + 2, tmp, data + 2);
1954 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
1961 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
1967 * Aggregate given the tuple in the principal data buffer, and the aggregating
1968 * action denoted by the specified dtrace_aggregation_t. The aggregation
1969 * buffer is specified as the buf parameter. This routine does not return
1970 * failure; if there is no space in the aggregation buffer, the data will be
1971 * dropped, and a corresponding counter incremented.
1974 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1975 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1977 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1978 uint32_t i, ndx, size, fsize;
1979 uint32_t align = sizeof (uint64_t) - 1;
1980 dtrace_aggbuffer_t *agb;
1981 dtrace_aggkey_t *key;
1982 uint32_t hashval = 0, limit, isstr;
1983 caddr_t tomax, data, kdata;
1984 dtrace_actkind_t action;
1985 dtrace_action_t *act;
1991 if (!agg->dtag_hasarg) {
1993 * Currently, only quantize() and lquantize() take additional
1994 * arguments, and they have the same semantics: an increment
1995 * value that defaults to 1 when not present. If additional
1996 * aggregating actions take arguments, the setting of the
1997 * default argument value will presumably have to become more
2003 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2004 size = rec->dtrd_offset - agg->dtag_base;
2005 fsize = size + rec->dtrd_size;
2007 ASSERT(dbuf->dtb_tomax != NULL);
2008 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2010 if ((tomax = buf->dtb_tomax) == NULL) {
2011 dtrace_buffer_drop(buf);
2016 * The metastructure is always at the bottom of the buffer.
2018 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2019 sizeof (dtrace_aggbuffer_t));
2021 if (buf->dtb_offset == 0) {
2023 * We just kludge up approximately 1/8th of the size to be
2024 * buckets. If this guess ends up being routinely
2025 * off-the-mark, we may need to dynamically readjust this
2026 * based on past performance.
2028 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2030 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2031 (uintptr_t)tomax || hashsize == 0) {
2033 * We've been given a ludicrously small buffer;
2034 * increment our drop count and leave.
2036 dtrace_buffer_drop(buf);
2041 * And now, a pathetic attempt to try to get an odd (or
2042 * perchance, a prime) hash size for better hash distribution.
2044 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2045 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2047 agb->dtagb_hashsize = hashsize;
2048 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2049 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2050 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2052 for (i = 0; i < agb->dtagb_hashsize; i++)
2053 agb->dtagb_hash[i] = NULL;
2056 ASSERT(agg->dtag_first != NULL);
2057 ASSERT(agg->dtag_first->dta_intuple);
2060 * Calculate the hash value based on the key. Note that we _don't_
2061 * include the aggid in the hashing (but we will store it as part of
2062 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2063 * algorithm: a simple, quick algorithm that has no known funnels, and
2064 * gets good distribution in practice. The efficacy of the hashing
2065 * algorithm (and a comparison with other algorithms) may be found by
2066 * running the ::dtrace_aggstat MDB dcmd.
2068 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2069 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2070 limit = i + act->dta_rec.dtrd_size;
2071 ASSERT(limit <= size);
2072 isstr = DTRACEACT_ISSTRING(act);
2074 for (; i < limit; i++) {
2076 hashval += (hashval << 10);
2077 hashval ^= (hashval >> 6);
2079 if (isstr && data[i] == '\0')
2084 hashval += (hashval << 3);
2085 hashval ^= (hashval >> 11);
2086 hashval += (hashval << 15);
2089 * Yes, the divide here is expensive -- but it's generally the least
2090 * of the performance issues given the amount of data that we iterate
2091 * over to compute hash values, compare data, etc.
2093 ndx = hashval % agb->dtagb_hashsize;
2095 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2096 ASSERT((caddr_t)key >= tomax);
2097 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2099 if (hashval != key->dtak_hashval || key->dtak_size != size)
2102 kdata = key->dtak_data;
2103 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2105 for (act = agg->dtag_first; act->dta_intuple;
2106 act = act->dta_next) {
2107 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2108 limit = i + act->dta_rec.dtrd_size;
2109 ASSERT(limit <= size);
2110 isstr = DTRACEACT_ISSTRING(act);
2112 for (; i < limit; i++) {
2113 if (kdata[i] != data[i])
2116 if (isstr && data[i] == '\0')
2121 if (action != key->dtak_action) {
2123 * We are aggregating on the same value in the same
2124 * aggregation with two different aggregating actions.
2125 * (This should have been picked up in the compiler,
2126 * so we may be dealing with errant or devious DIF.)
2127 * This is an error condition; we indicate as much,
2130 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2135 * This is a hit: we need to apply the aggregator to
2136 * the value at this key.
2138 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2145 * We didn't find it. We need to allocate some zero-filled space,
2146 * link it into the hash table appropriately, and apply the aggregator
2147 * to the (zero-filled) value.
2149 offs = buf->dtb_offset;
2150 while (offs & (align - 1))
2151 offs += sizeof (uint32_t);
2154 * If we don't have enough room to both allocate a new key _and_
2155 * its associated data, increment the drop count and return.
2157 if ((uintptr_t)tomax + offs + fsize >
2158 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2159 dtrace_buffer_drop(buf);
2164 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2165 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2166 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2168 key->dtak_data = kdata = tomax + offs;
2169 buf->dtb_offset = offs + fsize;
2172 * Now copy the data across.
2174 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2176 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2180 * Because strings are not zeroed out by default, we need to iterate
2181 * looking for actions that store strings, and we need to explicitly
2182 * pad these strings out with zeroes.
2184 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2187 if (!DTRACEACT_ISSTRING(act))
2190 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2191 limit = i + act->dta_rec.dtrd_size;
2192 ASSERT(limit <= size);
2194 for (nul = 0; i < limit; i++) {
2200 if (data[i] != '\0')
2207 for (i = size; i < fsize; i++)
2210 key->dtak_hashval = hashval;
2211 key->dtak_size = size;
2212 key->dtak_action = action;
2213 key->dtak_next = agb->dtagb_hash[ndx];
2214 agb->dtagb_hash[ndx] = key;
2217 * Finally, apply the aggregator.
2219 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2220 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
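/*
 * dtag_aggregate() is the per-action aggregating function; oval points at
 * the running value stored just past the key data (initialized from
 * dtag_initial above) and nval is the evaluated expression.  As an
 * illustrative sketch (not the authoritative definitions), the simplest
 * aggregators look roughly like:
 *
 *	static void
 *	example_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
 *	{
 *		*oval = *oval + 1;
 *	}
 *
 *	static void
 *	example_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
 *	{
 *		*oval += nval;
 *	}
 */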
2224 * Given consumer state, this routine finds a speculation in the INACTIVE
2225 * state and transitions it into the ACTIVE state. If there is no speculation
2226 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2227 * incremented -- it is up to the caller to take appropriate action.
2230 dtrace_speculation(dtrace_state_t *state)
2233 dtrace_speculation_state_t current;
2234 uint32_t *stat = &state->dts_speculations_unavail, count;
2236 while (i < state->dts_nspeculations) {
2237 dtrace_speculation_t *spec = &state->dts_speculations[i];
2239 current = spec->dtsp_state;
2241 if (current != DTRACESPEC_INACTIVE) {
2242 if (current == DTRACESPEC_COMMITTINGMANY ||
2243 current == DTRACESPEC_COMMITTING ||
2244 current == DTRACESPEC_DISCARDING)
2245 stat = &state->dts_speculations_busy;
2250 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2251 current, DTRACESPEC_ACTIVE) == current)
2256 * We couldn't find a speculation. If we found as much as a single
2257 * busy speculation buffer, we'll attribute this failure as "busy"
2258 * instead of "unavail".
2262 } while (dtrace_cas32(stat, count, count + 1) != count);
2268 * This routine commits an active speculation. If the specified speculation
2269 * is not in a valid state to perform a commit(), this routine will silently do
2270 * nothing. The state of the specified speculation is transitioned according
2271 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2274 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2275 dtrace_specid_t which)
2277 dtrace_speculation_t *spec;
2278 dtrace_buffer_t *src, *dest;
2279 uintptr_t daddr, saddr, dlimit;
2280 dtrace_speculation_state_t current, new = 0;
2286 if (which > state->dts_nspeculations) {
2287 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2291 spec = &state->dts_speculations[which - 1];
2292 src = &spec->dtsp_buffer[cpu];
2293 dest = &state->dts_buffer[cpu];
2296 current = spec->dtsp_state;
2298 if (current == DTRACESPEC_COMMITTINGMANY)
2302 case DTRACESPEC_INACTIVE:
2303 case DTRACESPEC_DISCARDING:
2306 case DTRACESPEC_COMMITTING:
2308 * This is only possible if we are (a) commit()'ing
2309 * without having done a prior speculate() on this CPU
2310 * and (b) racing with another commit() on a different
2311 * CPU. There's nothing to do -- we just assert that
2314 ASSERT(src->dtb_offset == 0);
2317 case DTRACESPEC_ACTIVE:
2318 new = DTRACESPEC_COMMITTING;
2321 case DTRACESPEC_ACTIVEONE:
2323 * This speculation is active on one CPU. If our
2324 * buffer offset is non-zero, we know that the one CPU
2325 * must be us. Otherwise, we are committing on a
2326 * different CPU from the speculate(), and we must
2327 * rely on being asynchronously cleaned.
2329 if (src->dtb_offset != 0) {
2330 new = DTRACESPEC_COMMITTING;
2335 case DTRACESPEC_ACTIVEMANY:
2336 new = DTRACESPEC_COMMITTINGMANY;
2342 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2343 current, new) != current);
2346 * We have set the state to indicate that we are committing this
2347 * speculation. Now reserve the necessary space in the destination
2350 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2351 sizeof (uint64_t), state, NULL)) < 0) {
2352 dtrace_buffer_drop(dest);
2357 * We have the space; copy the buffer across. (Note that this is a
2358	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2359 * a serious performance issue, a high-performance DTrace-specific
2360 * bcopy() should obviously be invented.)
2362 daddr = (uintptr_t)dest->dtb_tomax + offs;
2363 dlimit = daddr + src->dtb_offset;
2364 saddr = (uintptr_t)src->dtb_tomax;
2367 * First, the aligned portion.
2369 while (dlimit - daddr >= sizeof (uint64_t)) {
2370 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2372 daddr += sizeof (uint64_t);
2373 saddr += sizeof (uint64_t);
2377 * Now any left-over bit...
2379 while (dlimit - daddr)
2380 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2383 * Finally, commit the reserved space in the destination buffer.
2385 dest->dtb_offset = offs + src->dtb_offset;
2389 * If we're lucky enough to be the only active CPU on this speculation
2390 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2392 if (current == DTRACESPEC_ACTIVE ||
2393 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2394 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2395 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2397 ASSERT(rval == DTRACESPEC_COMMITTING);
2400 src->dtb_offset = 0;
2401 src->dtb_xamot_drops += src->dtb_drops;
2406 * This routine discards an active speculation. If the specified speculation
2407 * is not in a valid state to perform a discard(), this routine will silently
2408 * do nothing. The state of the specified speculation is transitioned
2409 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
2412 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2413 dtrace_specid_t which)
2415 dtrace_speculation_t *spec;
2416 dtrace_speculation_state_t current, new = 0;
2417 dtrace_buffer_t *buf;
2422 if (which > state->dts_nspeculations) {
2423 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2427 spec = &state->dts_speculations[which - 1];
2428 buf = &spec->dtsp_buffer[cpu];
2431 current = spec->dtsp_state;
2434 case DTRACESPEC_INACTIVE:
2435 case DTRACESPEC_COMMITTINGMANY:
2436 case DTRACESPEC_COMMITTING:
2437 case DTRACESPEC_DISCARDING:
2440 case DTRACESPEC_ACTIVE:
2441 case DTRACESPEC_ACTIVEMANY:
2442 new = DTRACESPEC_DISCARDING;
2445 case DTRACESPEC_ACTIVEONE:
2446 if (buf->dtb_offset != 0) {
2447 new = DTRACESPEC_INACTIVE;
2449 new = DTRACESPEC_DISCARDING;
2456 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2457 current, new) != current);
2459 buf->dtb_offset = 0;
2464 * Note: not called from probe context. This function is called
2465 * asynchronously from cross call context to clean any speculations that are
2466 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2467 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2471 dtrace_speculation_clean_here(dtrace_state_t *state)
2473 dtrace_icookie_t cookie;
2474 processorid_t cpu = curcpu;
2475 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2478 cookie = dtrace_interrupt_disable();
2480 if (dest->dtb_tomax == NULL) {
2481 dtrace_interrupt_enable(cookie);
2485 for (i = 0; i < state->dts_nspeculations; i++) {
2486 dtrace_speculation_t *spec = &state->dts_speculations[i];
2487 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2489 if (src->dtb_tomax == NULL)
2492 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2493 src->dtb_offset = 0;
2497 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2500 if (src->dtb_offset == 0)
2503 dtrace_speculation_commit(state, cpu, i + 1);
2506 dtrace_interrupt_enable(cookie);
2510 * Note: not called from probe context. This function is called
2511 * asynchronously (and at a regular interval) to clean any speculations that
2512 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2513 * is work to be done, it cross calls all CPUs to perform that work;
2514	 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2515 * INACTIVE state until they have been cleaned by all CPUs.
2518 dtrace_speculation_clean(dtrace_state_t *state)
2523 for (i = 0; i < state->dts_nspeculations; i++) {
2524 dtrace_speculation_t *spec = &state->dts_speculations[i];
2526 ASSERT(!spec->dtsp_cleaning);
2528 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2529 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2533 spec->dtsp_cleaning = 1;
2539 dtrace_xcall(DTRACE_CPUALL,
2540 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2543 * We now know that all CPUs have committed or discarded their
2544 * speculation buffers, as appropriate. We can now set the state
2547 for (i = 0; i < state->dts_nspeculations; i++) {
2548 dtrace_speculation_t *spec = &state->dts_speculations[i];
2549 dtrace_speculation_state_t current, new;
2551 if (!spec->dtsp_cleaning)
2554 current = spec->dtsp_state;
2555 ASSERT(current == DTRACESPEC_DISCARDING ||
2556 current == DTRACESPEC_COMMITTINGMANY);
2558 new = DTRACESPEC_INACTIVE;
2560 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2561 ASSERT(rv == current);
2562 spec->dtsp_cleaning = 0;
2567 * Called as part of a speculate() to get the speculative buffer associated
2568 * with a given speculation. Returns NULL if the specified speculation is not
2569 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2570 * the active CPU is not the specified CPU -- the speculation will be
2571 * atomically transitioned into the ACTIVEMANY state.
2573 static dtrace_buffer_t *
2574 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2575 dtrace_specid_t which)
2577 dtrace_speculation_t *spec;
2578 dtrace_speculation_state_t current, new = 0;
2579 dtrace_buffer_t *buf;
2584 if (which > state->dts_nspeculations) {
2585 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2589 spec = &state->dts_speculations[which - 1];
2590 buf = &spec->dtsp_buffer[cpuid];
2593 current = spec->dtsp_state;
2596 case DTRACESPEC_INACTIVE:
2597 case DTRACESPEC_COMMITTINGMANY:
2598 case DTRACESPEC_DISCARDING:
2601 case DTRACESPEC_COMMITTING:
2602 ASSERT(buf->dtb_offset == 0);
2605 case DTRACESPEC_ACTIVEONE:
2607 * This speculation is currently active on one CPU.
2608 * Check the offset in the buffer; if it's non-zero,
2609 * that CPU must be us (and we leave the state alone).
2610 * If it's zero, assume that we're starting on a new
2611 * CPU -- and change the state to indicate that the
2612 * speculation is active on more than one CPU.
2614 if (buf->dtb_offset != 0)
2617 new = DTRACESPEC_ACTIVEMANY;
2620 case DTRACESPEC_ACTIVEMANY:
2623 case DTRACESPEC_ACTIVE:
2624 new = DTRACESPEC_ACTIVEONE;
2630 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2631 current, new) != current);
2633 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2638 * Return a string. In the event that the user lacks the privilege to access
2639 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2640 * don't fail access checking.
2642 * dtrace_dif_variable() uses this routine as a helper for various
2643 * builtin values such as 'execname' and 'probefunc.'
2646 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2647 dtrace_mstate_t *mstate)
2649 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2654 * The easy case: this probe is allowed to read all of memory, so
2655 * we can just return this as a vanilla pointer.
2657 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2661 * This is the tougher case: we copy the string in question from
2662 * kernel memory into scratch memory and return it that way: this
2663 * ensures that we won't trip up when access checking tests the
2664 * BYREF return value.
2666 strsz = dtrace_strlen((char *)addr, size) + 1;
2668 if (mstate->dtms_scratch_ptr + strsz >
2669 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2670 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2674 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2676 ret = mstate->dtms_scratch_ptr;
2677 mstate->dtms_scratch_ptr += strsz;
2682	 * Return a string from a memory address which is known to have one or
2683	 * more concatenated, individually zero-terminated, sub-strings.
2684 * In the event that the user lacks the privilege to access
2685 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2686 * don't fail access checking.
2688 * dtrace_dif_variable() uses this routine as a helper for various
2689 * builtin values such as 'execargs'.
2692 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
2693 dtrace_mstate_t *mstate)
2699 if (mstate->dtms_scratch_ptr + strsz >
2700 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2701 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2705 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2708 /* Replace sub-string termination characters with a space. */
2709 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
2714 ret = mstate->dtms_scratch_ptr;
2715 mstate->dtms_scratch_ptr += strsz;
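/*
 * Worked example for 'execargs': an argument vector stored as the six
 * bytes "ls\0-l\0" is copied into scratch and the embedded NUL is
 * rewritten as a space, so the returned string is "ls -l" (the final
 * terminator is left intact).
 */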
2720 * This function implements the DIF emulator's variable lookups. The emulator
2721 * passes a reserved variable identifier and optional built-in array index.
2724 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2728 * If we're accessing one of the uncached arguments, we'll turn this
2729 * into a reference in the args array.
2731 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2732 ndx = v - DIF_VAR_ARG0;
2738 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2739 if (ndx >= sizeof (mstate->dtms_arg) /
2740 sizeof (mstate->dtms_arg[0])) {
2741 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2742 dtrace_provider_t *pv;
2745 pv = mstate->dtms_probe->dtpr_provider;
2746 if (pv->dtpv_pops.dtps_getargval != NULL)
2747 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2748 mstate->dtms_probe->dtpr_id,
2749 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2751 val = dtrace_getarg(ndx, aframes);
2754 * This is regrettably required to keep the compiler
2755 * from tail-optimizing the call to dtrace_getarg().
2756 * The condition always evaluates to true, but the
2757 * compiler has no way of figuring that out a priori.
2758 * (None of this would be necessary if the compiler
2759 * could be relied upon to _always_ tail-optimize
2760 * the call to dtrace_getarg() -- but it can't.)
2762 if (mstate->dtms_probe != NULL)
2768 return (mstate->dtms_arg[ndx]);
2771 case DIF_VAR_UREGS: {
2774 if (!dtrace_priv_proc(state))
2777 if ((lwp = curthread->t_lwp) == NULL) {
2778 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2779 cpu_core[curcpu].cpuc_dtrace_illval = NULL;
2783 return (dtrace_getreg(lwp->lwp_regs, ndx));
2788 case DIF_VAR_CURTHREAD:
2789 if (!dtrace_priv_kernel(state))
2791 return ((uint64_t)(uintptr_t)curthread);
2793 case DIF_VAR_TIMESTAMP:
2794 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2795 mstate->dtms_timestamp = dtrace_gethrtime();
2796 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2798 return (mstate->dtms_timestamp);
2800 case DIF_VAR_VTIMESTAMP:
2801 ASSERT(dtrace_vtime_references != 0);
2802 return (curthread->t_dtrace_vtime);
2804 case DIF_VAR_WALLTIMESTAMP:
2805 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2806 mstate->dtms_walltimestamp = dtrace_gethrestime();
2807 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2809 return (mstate->dtms_walltimestamp);
2813 if (!dtrace_priv_kernel(state))
2815 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2816 mstate->dtms_ipl = dtrace_getipl();
2817 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2819 return (mstate->dtms_ipl);
2823 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2824 return (mstate->dtms_epid);
2827 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2828 return (mstate->dtms_probe->dtpr_id);
2830 case DIF_VAR_STACKDEPTH:
2831 if (!dtrace_priv_kernel(state))
2833 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2834 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2836 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2837 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2839 return (mstate->dtms_stackdepth);
2842 case DIF_VAR_USTACKDEPTH:
2843 if (!dtrace_priv_proc(state))
2845 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2847 * See comment in DIF_VAR_PID.
2849 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2851 mstate->dtms_ustackdepth = 0;
2853 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2854 mstate->dtms_ustackdepth =
2855 dtrace_getustackdepth();
2856 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2858 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2860 return (mstate->dtms_ustackdepth);
2863 case DIF_VAR_CALLER:
2864 if (!dtrace_priv_kernel(state))
2866 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2867 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2869 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2871 * If this is an unanchored probe, we are
2872 * required to go through the slow path:
2873 * dtrace_caller() only guarantees correct
2874 * results for anchored probes.
2876 pc_t caller[2] = {0, 0};
2878 dtrace_getpcstack(caller, 2, aframes,
2879 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2880 mstate->dtms_caller = caller[1];
2881 } else if ((mstate->dtms_caller =
2882 dtrace_caller(aframes)) == -1) {
2884 * We have failed to do this the quick way;
2885 * we must resort to the slower approach of
2886 * calling dtrace_getpcstack().
2890 dtrace_getpcstack(&caller, 1, aframes, NULL);
2891 mstate->dtms_caller = caller;
2894 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2896 return (mstate->dtms_caller);
2899 case DIF_VAR_UCALLER:
2900 if (!dtrace_priv_proc(state))
2903 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2907 * dtrace_getupcstack() fills in the first uint64_t
2908 * with the current PID. The second uint64_t will
2909 * be the program counter at user-level. The third
2910 * uint64_t will contain the caller, which is what
2914 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2915 dtrace_getupcstack(ustack, 3);
2916 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2917 mstate->dtms_ucaller = ustack[2];
2918 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2921 return (mstate->dtms_ucaller);
2924 case DIF_VAR_PROBEPROV:
2925 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2926 return (dtrace_dif_varstr(
2927 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
2930 case DIF_VAR_PROBEMOD:
2931 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2932 return (dtrace_dif_varstr(
2933 (uintptr_t)mstate->dtms_probe->dtpr_mod,
2936 case DIF_VAR_PROBEFUNC:
2937 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2938 return (dtrace_dif_varstr(
2939 (uintptr_t)mstate->dtms_probe->dtpr_func,
2942 case DIF_VAR_PROBENAME:
2943 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2944 return (dtrace_dif_varstr(
2945 (uintptr_t)mstate->dtms_probe->dtpr_name,
2949 if (!dtrace_priv_proc(state))
2954 * Note that we are assuming that an unanchored probe is
2955 * always due to a high-level interrupt. (And we're assuming
2956 * that there is only a single high level interrupt.)
2958 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2959 return (pid0.pid_id);
2962 * It is always safe to dereference one's own t_procp pointer:
2963 * it always points to a valid, allocated proc structure.
2964 * Further, it is always safe to dereference the p_pidp member
2965	 * of one's own proc structure. (These are truisms because
2966 * threads and processes don't clean up their own state --
2967 * they leave that task to whomever reaps them.)
2969 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2971 return ((uint64_t)curproc->p_pid);
2975 if (!dtrace_priv_proc(state))
2980 * See comment in DIF_VAR_PID.
2982 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2983 return (pid0.pid_id);
2986 * It is always safe to dereference one's own t_procp pointer:
2987 * it always points to a valid, allocated proc structure.
2988 * (This is true because threads don't clean up their own
2989 * state -- they leave that task to whomever reaps them.)
2991 return ((uint64_t)curthread->t_procp->p_ppid);
2993 return ((uint64_t)curproc->p_pptr->p_pid);
2999 * See comment in DIF_VAR_PID.
3001 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3005 return ((uint64_t)curthread->t_tid);
3007 case DIF_VAR_EXECARGS: {
3008 struct pargs *p_args = curthread->td_proc->p_args;
3010 return (dtrace_dif_varstrz(
3011 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3014 case DIF_VAR_EXECNAME:
3016 if (!dtrace_priv_proc(state))
3020 * See comment in DIF_VAR_PID.
3022 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3023 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3026 * It is always safe to dereference one's own t_procp pointer:
3027 * it always points to a valid, allocated proc structure.
3028 * (This is true because threads don't clean up their own
3029 * state -- they leave that task to whomever reaps them.)
3031 return (dtrace_dif_varstr(
3032 (uintptr_t)curthread->t_procp->p_user.u_comm,
3035 return (dtrace_dif_varstr(
3036 (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3039 case DIF_VAR_ZONENAME:
3041 if (!dtrace_priv_proc(state))
3045 * See comment in DIF_VAR_PID.
3047 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3048 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3051 * It is always safe to dereference one's own t_procp pointer:
3052 * it always points to a valid, allocated proc structure.
3053 * (This is true because threads don't clean up their own
3054 * state -- they leave that task to whomever reaps them.)
3056 return (dtrace_dif_varstr(
3057 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3064 if (!dtrace_priv_proc(state))
3069 * See comment in DIF_VAR_PID.
3071 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3072 return ((uint64_t)p0.p_cred->cr_uid);
3076 * It is always safe to dereference one's own t_procp pointer:
3077 * it always points to a valid, allocated proc structure.
3078 * (This is true because threads don't clean up their own
3079 * state -- they leave that task to whomever reaps them.)
3081 * Additionally, it is safe to dereference one's own process
3082 * credential, since this is never NULL after process birth.
3084 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3087 if (!dtrace_priv_proc(state))
3092 * See comment in DIF_VAR_PID.
3094 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3095 return ((uint64_t)p0.p_cred->cr_gid);
3099 * It is always safe to dereference one's own t_procp pointer:
3100 * it always points to a valid, allocated proc structure.
3101 * (This is true because threads don't clean up their own
3102 * state -- they leave that task to whomever reaps them.)
3104 * Additionally, it is safe to dereference one's own process
3105 * credential, since this is never NULL after process birth.
3107 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3109 case DIF_VAR_ERRNO: {
3112 if (!dtrace_priv_proc(state))
3116 * See comment in DIF_VAR_PID.
3118 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3122 * It is always safe to dereference one's own t_lwp pointer in
3123 * the event that this pointer is non-NULL. (This is true
3124 * because threads and lwps don't clean up their own state --
3125 * they leave that task to whomever reaps them.)
3127 if ((lwp = curthread->t_lwp) == NULL)
3130 return ((uint64_t)lwp->lwp_errno);
3132 return (curthread->td_errno);
3136 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3142 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
3143 * Notice that we don't bother validating the proper number of arguments or
3144	 * their types in the tuple stack. This isn't needed: all argument
3145	 * interpretation is safe because of our load safety -- the worst that can
3146 * happen is that a bogus program can obtain bogus results.
3149 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3150 dtrace_key_t *tupregs, int nargs,
3151 dtrace_mstate_t *mstate, dtrace_state_t *state)
3153 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
3154 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
3155 dtrace_vstate_t *vstate = &state->dts_vstate;
3180 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
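/*
 * For the curious: the line above applies a single linear-congruential
 * step,
 *
 *	X' = (a * X + c) mod m,   with a = 2416, c = 374441, m = 1771875,
 *
 * to the high-resolution timestamp.  It is meant to be cheap and "random
 * enough" for tracing purposes, not to be a statistically strong (let
 * alone cryptographic) generator.
 */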
3184 case DIF_SUBR_MUTEX_OWNED:
3185 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3191 m.mx = dtrace_load64(tupregs[0].dttk_value);
3192 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3193 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3195 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3198 case DIF_SUBR_MUTEX_OWNER:
3199 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3205 m.mx = dtrace_load64(tupregs[0].dttk_value);
3206 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3207 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3208 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3213 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3214 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3220 m.mx = dtrace_load64(tupregs[0].dttk_value);
3221 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3224 case DIF_SUBR_MUTEX_TYPE_SPIN:
3225 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3231 m.mx = dtrace_load64(tupregs[0].dttk_value);
3232 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3235 case DIF_SUBR_RW_READ_HELD: {
3238 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3244 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3245 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3249 case DIF_SUBR_RW_WRITE_HELD:
3250 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3256 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3257 regs[rd] = _RW_WRITE_HELD(&r.ri);
3260 case DIF_SUBR_RW_ISWRITER:
3261 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3267 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3268 regs[rd] = _RW_ISWRITER(&r.ri);
3273 * XXX - The following code works because mutex, rwlocks, & sxlocks
3274 * all have similar data structures in FreeBSD. This may not be
3275 * good if someone changes one of the lock data structures.
3276 * Ideally, it would be nice if all these shared a common lock
3279 case DIF_SUBR_MUTEX_OWNED:
3280 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */
3281 m.mx = tupregs[0].dttk_value;
3284 if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) {
3285 regs[rd] = !(m.mi->mtx_lock & MTX_UNOWNED);
3287 regs[rd] = !(m.mi->mtx_lock & SX_UNLOCKED);
3292 case DIF_SUBR_MUTEX_OWNER:
3293 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */
3294 m.mx = tupregs[0].dttk_value;
3296 if (LO_CLASSINDEX(&(m.mi->lock_object)) < 2) {
3297 regs[rd] = m.mi->mtx_lock & ~MTX_FLAGMASK;
3299 if (!(m.mi->mtx_lock & SX_LOCK_SHARED))
3300 regs[rd] = SX_OWNER(m.mi->mtx_lock);
3306 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3307 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */
3308 m.mx = tupregs[0].dttk_value;
3310 regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) != 0);
3313 case DIF_SUBR_MUTEX_TYPE_SPIN:
3314 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */
3315 m.mx = tupregs[0].dttk_value;
3317 regs[rd] = (LO_CLASSINDEX(&(m.mi->lock_object)) == 0);
3320 case DIF_SUBR_RW_READ_HELD:
3321 case DIF_SUBR_SX_SHARED_HELD:
3322 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */
3323 s.sx = tupregs[0].dttk_value;
3324 regs[rd] = ((s.si->sx_lock & SX_LOCK_SHARED) &&
3325 (SX_OWNER(s.si->sx_lock) >> SX_SHARERS_SHIFT) != 0);
3328 case DIF_SUBR_RW_WRITE_HELD:
3329 case DIF_SUBR_SX_EXCLUSIVE_HELD:
3330 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */
3331 s.sx = tupregs[0].dttk_value;
3332 regs[rd] = (SX_OWNER(s.si->sx_lock) == (uintptr_t) curthread);
3335 case DIF_SUBR_RW_ISWRITER:
3336 case DIF_SUBR_SX_ISEXCLUSIVE:
3337 /* XXX - need to use dtrace_canload() and dtrace_loadptr() */
3338 s.sx = tupregs[0].dttk_value;
3339 regs[rd] = ((s.si->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS) ||
3340 !(s.si->sx_lock & SX_LOCK_SHARED));
3342 #endif /* ! defined(sun) */
3344 case DIF_SUBR_BCOPY: {
3346 * We need to be sure that the destination is in the scratch
3347 * region -- no other region is allowed.
3349 uintptr_t src = tupregs[0].dttk_value;
3350 uintptr_t dest = tupregs[1].dttk_value;
3351 size_t size = tupregs[2].dttk_value;
3353 if (!dtrace_inscratch(dest, size, mstate)) {
3354 *flags |= CPU_DTRACE_BADADDR;
3359 if (!dtrace_canload(src, size, mstate, vstate)) {
3364 dtrace_bcopy((void *)src, (void *)dest, size);
3368 case DIF_SUBR_ALLOCA:
3369 case DIF_SUBR_COPYIN: {
3370 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3372 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3373 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3376 * This action doesn't require any credential checks since
3377 * probes will not activate in user contexts to which the
3378 * enabling user does not have permissions.
3382 * Rounding up the user allocation size could have overflowed
3383 * a large, bogus allocation (like -1ULL) to 0.
3385 if (scratch_size < size ||
3386 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3387 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3392 if (subr == DIF_SUBR_COPYIN) {
3393 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3394 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3395 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3398 mstate->dtms_scratch_ptr += scratch_size;
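/*
 * Worked example of the overflow check above: a bogus request with
 * size = (uint64_t)-1 makes the unsigned addition wrap, so scratch_size
 * ends up smaller than size (or, when no alignment slop is added, equal
 * to -1 and rejected by DTRACE_INSCRATCH()); either way the request
 * fails with CPU_DTRACE_NOSCRATCH instead of appearing to succeed as a
 * zero-byte allocation.
 */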
3403 case DIF_SUBR_COPYINTO: {
3404 uint64_t size = tupregs[1].dttk_value;
3405 uintptr_t dest = tupregs[2].dttk_value;
3408 * This action doesn't require any credential checks since
3409 * probes will not activate in user contexts to which the
3410 * enabling user does not have permissions.
3412 if (!dtrace_inscratch(dest, size, mstate)) {
3413 *flags |= CPU_DTRACE_BADADDR;
3418 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3419 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3420 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3424 case DIF_SUBR_COPYINSTR: {
3425 uintptr_t dest = mstate->dtms_scratch_ptr;
3426 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3428 if (nargs > 1 && tupregs[1].dttk_value < size)
3429 size = tupregs[1].dttk_value + 1;
3432 * This action doesn't require any credential checks since
3433 * probes will not activate in user contexts to which the
3434 * enabling user does not have permissions.
3436 if (!DTRACE_INSCRATCH(mstate, size)) {
3437 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3442 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3443 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3444 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3446 ((char *)dest)[size - 1] = '\0';
3447 mstate->dtms_scratch_ptr += size;
3453 case DIF_SUBR_MSGSIZE:
3454 case DIF_SUBR_MSGDSIZE: {
3455 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3456 uintptr_t wptr, rptr;
3460 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
3462 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3468 wptr = dtrace_loadptr(baddr +
3469 offsetof(mblk_t, b_wptr));
3471 rptr = dtrace_loadptr(baddr +
3472 offsetof(mblk_t, b_rptr));
3475 *flags |= CPU_DTRACE_BADADDR;
3476 *illval = tupregs[0].dttk_value;
3480 daddr = dtrace_loadptr(baddr +
3481 offsetof(mblk_t, b_datap));
3483 baddr = dtrace_loadptr(baddr +
3484 offsetof(mblk_t, b_cont));
3487	 * We want to guard against denial-of-service here,
3488 * so we're only going to search the list for
3489 * dtrace_msgdsize_max mblks.
3491 if (cont++ > dtrace_msgdsize_max) {
3492 *flags |= CPU_DTRACE_ILLOP;
3496 if (subr == DIF_SUBR_MSGDSIZE) {
3497 if (dtrace_load8(daddr +
3498 offsetof(dblk_t, db_type)) != M_DATA)
3502 count += wptr - rptr;
3505 if (!(*flags & CPU_DTRACE_FAULT))
3512 case DIF_SUBR_PROGENYOF: {
3513 pid_t pid = tupregs[0].dttk_value;
3517 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3519 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3521 if (p->p_pidp->pid_id == pid) {
3523 if (p->p_pid == pid) {
3530 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3536 case DIF_SUBR_SPECULATION:
3537 regs[rd] = dtrace_speculation(state);
3540 case DIF_SUBR_COPYOUT: {
3541 uintptr_t kaddr = tupregs[0].dttk_value;
3542 uintptr_t uaddr = tupregs[1].dttk_value;
3543 uint64_t size = tupregs[2].dttk_value;
3545 if (!dtrace_destructive_disallow &&
3546 dtrace_priv_proc_control(state) &&
3547 !dtrace_istoxic(kaddr, size)) {
3548 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3549 dtrace_copyout(kaddr, uaddr, size, flags);
3550 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3555 case DIF_SUBR_COPYOUTSTR: {
3556 uintptr_t kaddr = tupregs[0].dttk_value;
3557 uintptr_t uaddr = tupregs[1].dttk_value;
3558 uint64_t size = tupregs[2].dttk_value;
3560 if (!dtrace_destructive_disallow &&
3561 dtrace_priv_proc_control(state) &&
3562 !dtrace_istoxic(kaddr, size)) {
3563 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3564 dtrace_copyoutstr(kaddr, uaddr, size, flags);
3565 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3570 case DIF_SUBR_STRLEN: {
3572 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3573 sz = dtrace_strlen((char *)addr,
3574 state->dts_options[DTRACEOPT_STRSIZE]);
3576 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3586 case DIF_SUBR_STRCHR:
3587 case DIF_SUBR_STRRCHR: {
3589 * We're going to iterate over the string looking for the
3590 * specified character. We will iterate until we have reached
3591 * the string length or we have found the character. If this
3592 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3593 * of the specified character instead of the first.
3595 uintptr_t saddr = tupregs[0].dttk_value;
3596 uintptr_t addr = tupregs[0].dttk_value;
3597 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3598 char c, target = (char)tupregs[1].dttk_value;
3600 for (regs[rd] = 0; addr < limit; addr++) {
3601 if ((c = dtrace_load8(addr)) == target) {
3604 if (subr == DIF_SUBR_STRCHR)
3612 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3620 case DIF_SUBR_STRSTR:
3621 case DIF_SUBR_INDEX:
3622 case DIF_SUBR_RINDEX: {
3624 * We're going to iterate over the string looking for the
3625 * specified string. We will iterate until we have reached
3626 * the string length or we have found the string. (Yes, this
3627 * is done in the most naive way possible -- but considering
3628 * that the string we're searching for is likely to be
3629 * relatively short, the complexity of Rabin-Karp or similar
3630 * hardly seems merited.)
3632 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3633 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3634 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3635 size_t len = dtrace_strlen(addr, size);
3636 size_t sublen = dtrace_strlen(substr, size);
3637 char *limit = addr + len, *orig = addr;
3638 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3641 regs[rd] = notfound;
3643 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3648 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3655 * strstr() and index()/rindex() have similar semantics if
3656 * both strings are the empty string: strstr() returns a
3657 * pointer to the (empty) string, and index() and rindex()
3658 * both return index 0 (regardless of any position argument).
3660 if (sublen == 0 && len == 0) {
3661 if (subr == DIF_SUBR_STRSTR)
3662 regs[rd] = (uintptr_t)addr;
3668 if (subr != DIF_SUBR_STRSTR) {
3669 if (subr == DIF_SUBR_RINDEX) {
3676 * Both index() and rindex() take an optional position
3677 * argument that denotes the starting position.
3680 int64_t pos = (int64_t)tupregs[2].dttk_value;
3683 * If the position argument to index() is
3684 * negative, Perl implicitly clamps it at
3685 * zero. This semantic is a little surprising
3686 * given the special meaning of negative
3687 * positions to similar Perl functions like
3688 * substr(), but it appears to reflect a
3689 * notion that index() can start from a
3690 * negative index and increment its way up to
3691 * the string. Given this notion, Perl's
3692 * rindex() is at least self-consistent in
3693 * that it implicitly clamps positions greater
3694 * than the string length to be the string
3695 * length. Where Perl completely loses
3696 * coherence, however, is when the specified
3697 * substring is the empty string (""). In
3698 * this case, even if the position is
3699 * negative, rindex() returns 0 -- and even if
3700 * the position is greater than the length,
3701 * index() returns the string length. These
3702 * semantics violate the notion that index()
3703 * should never return a value less than the
3704 * specified position and that rindex() should
3705 * never return a value greater than the
3706 * specified position. (One assumes that
3707 * these semantics are artifacts of Perl's
3708 * implementation and not the results of
3709 * deliberate design -- it beggars belief that
3710 * even Larry Wall could desire such oddness.)
3711 * While in the abstract one would wish for
3712 * consistent position semantics across
3713 * substr(), index() and rindex() -- or at the
3714 * very least self-consistent position
3715 * semantics for index() and rindex() -- we
3716 * instead opt to keep with the extant Perl
3717 * semantics, in all their broken glory. (Do
3718 * we have more desire to maintain Perl's
3719 * semantics than Perl does? Probably.)
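/*
 * A few concrete results, per the documented D semantics (illustrative
 * only, not exhaustive):
 *
 *	strstr("dtracedtrace", "race")		-> pointer at offset 2
 *	index("dtracedtrace", "race")		-> 2
 *	rindex("dtracedtrace", "race")		-> 8
 *	index("dtracedtrace", "race", 3)	-> 8
 *	rindex("dtracedtrace", "race", 7)	-> 2
 */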
3721 if (subr == DIF_SUBR_RINDEX) {
3745 for (regs[rd] = notfound; addr != limit; addr += inc) {
3746 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3747 if (subr != DIF_SUBR_STRSTR) {
3749 * As D index() and rindex() are
3750 * modeled on Perl (and not on awk),
3751 * we return a zero-based (and not a
3752 * one-based) index. (For you Perl
3753 * weenies: no, we're not going to add
3754 * $[ -- and shouldn't you be at a con
3757 regs[rd] = (uintptr_t)(addr - orig);
3761 ASSERT(subr == DIF_SUBR_STRSTR);
3762 regs[rd] = (uintptr_t)addr;
3770 case DIF_SUBR_STRTOK: {
3771 uintptr_t addr = tupregs[0].dttk_value;
3772 uintptr_t tokaddr = tupregs[1].dttk_value;
3773 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3774 uintptr_t limit, toklimit = tokaddr + size;
3775 uint8_t c = 0, tokmap[32]; /* 256 / 8 */
3776 char *dest = (char *)mstate->dtms_scratch_ptr;
3780 * Check both the token buffer and (later) the input buffer,
3781 * since both could be non-scratch addresses.
3783 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3788 if (!DTRACE_INSCRATCH(mstate, size)) {
3789 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3796 * If the address specified is NULL, we use our saved
3797 * strtok pointer from the mstate. Note that this
3798 * means that the saved strtok pointer is _only_
3799 * valid within multiple enablings of the same probe --
3800 * it behaves like an implicit clause-local variable.
3802 addr = mstate->dtms_strtok;
3805 * If the user-specified address is non-NULL we must
3806 * access check it. This is the only time we have
3807 * a chance to do so, since this address may reside
3808	 * in the string table of this clause -- future calls
3809 * (when we fetch addr from mstate->dtms_strtok)
3810 * would fail this access check.
3812 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3819 * First, zero the token map, and then process the token
3820 * string -- setting a bit in the map for every character
3821 * found in the token string.
3823 for (i = 0; i < sizeof (tokmap); i++)
3826 for (; tokaddr < toklimit; tokaddr++) {
3827 if ((c = dtrace_load8(tokaddr)) == '\0')
3830 ASSERT((c >> 3) < sizeof (tokmap));
3831 tokmap[c >> 3] |= (1 << (c & 0x7));
3834 for (limit = addr + size; addr < limit; addr++) {
3836 * We're looking for a character that is _not_ contained
3837 * in the token string.
3839 if ((c = dtrace_load8(addr)) == '\0')
3842 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3848 * We reached the end of the string without finding
3849 * any character that was not in the token string.
3850 * We return NULL in this case, and we set the saved
3851 * address to NULL as well.
3854 mstate->dtms_strtok = 0;
3859 * From here on, we're copying into the destination string.
3861 for (i = 0; addr < limit && i < size - 1; addr++) {
3862 if ((c = dtrace_load8(addr)) == '\0')
3865 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3874 regs[rd] = (uintptr_t)dest;
3875 mstate->dtms_scratch_ptr += size;
3876 mstate->dtms_strtok = addr;
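/*
 * The token map above is a 256-bit membership bitmap packed into 32
 * bytes.  Worked example for the delimiter '/' (0x2f): c >> 3 == 5 and
 * c & 0x7 == 7, so building the map sets bit 7 of tokmap[5], and the
 * input scan tests that same bit with (tokmap[c >> 3] & (1 << (c & 0x7))).
 */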
3880 case DIF_SUBR_SUBSTR: {
3881 uintptr_t s = tupregs[0].dttk_value;
3882 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3883 char *d = (char *)mstate->dtms_scratch_ptr;
3884 int64_t index = (int64_t)tupregs[1].dttk_value;
3885 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3886 size_t len = dtrace_strlen((char *)s, size);
3889 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3894 if (!DTRACE_INSCRATCH(mstate, size)) {
3895 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3901 remaining = (int64_t)size;
3906 if (index < 0 && index + remaining > 0) {
3912 if (index >= len || index < 0) {
3914 } else if (remaining < 0) {
3915 remaining += len - index;
3916 } else if (index + remaining > size) {
3917 remaining = size - index;
3920 for (i = 0; i < remaining; i++) {
3921 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
3927 mstate->dtms_scratch_ptr += size;
3928 regs[rd] = (uintptr_t)d;
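/*
 * Illustrative results of the index/remaining clamping above, per the
 * documented D substr() semantics:
 *
 *	substr("dtrace", 2)	-> "race"
 *	substr("dtrace", 1, 3)	-> "tra"
 *	substr("dtrace", -4)	-> "race" (a negative index counts back
 *				    from the end of the string)
 */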
3933 case DIF_SUBR_GETMAJOR:
3935 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
3937 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
3941 case DIF_SUBR_GETMINOR:
3943 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
3945 regs[rd] = tupregs[0].dttk_value & MAXMIN;
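/*
 * Both subroutines assume the usual packed dev_t encoding, roughly:
 *
 *	dev   = (major << NBITSMINOR) | minor
 *	major = (dev >> NBITSMINOR) & MAXMAJ
 *	minor = dev & MAXMIN
 *
 * (with the NBITSMINOR64/MAXMAJ64/MAXMIN64 variants used for the 64-bit
 * encoding on Solaris).  getminor() is thus just the mask of the low
 * minor bits, as above.
 */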
3949 case DIF_SUBR_DDI_PATHNAME: {
3951 * This one is a galactic mess. We are going to roughly
3952 * emulate ddi_pathname(), but it's made more complicated
3953 * by the fact that we (a) want to include the minor name and
3954 * (b) must proceed iteratively instead of recursively.
3956 uintptr_t dest = mstate->dtms_scratch_ptr;
3957 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3958 char *start = (char *)dest, *end = start + size - 1;
3959 uintptr_t daddr = tupregs[0].dttk_value;
3960 int64_t minor = (int64_t)tupregs[1].dttk_value;
3962 int i, len, depth = 0;
3965 * Due to all the pointer jumping we do and context we must
3966 * rely upon, we just mandate that the user must have kernel
3967 * read privileges to use this routine.
3969 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
3970 *flags |= CPU_DTRACE_KPRIV;
3975 if (!DTRACE_INSCRATCH(mstate, size)) {
3976 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3984 * We want to have a name for the minor. In order to do this,
3985 * we need to walk the minor list from the devinfo. We want
3986 * to be sure that we don't infinitely walk a circular list,
3987 * so we check for circularity by sending a scout pointer
3988 * ahead two elements for every element that we iterate over;
3989 * if the list is circular, these will ultimately point to the
3990 * same element. You may recognize this little trick as the
3991 * answer to a stupid interview question -- one that always
3992 * seems to be asked by those who had to have it laboriously
3993 * explained to them, and who can't even concisely describe
3994 * the conditions under which one would be forced to resort to
3995 * this technique. Needless to say, those conditions are
3996 * found here -- and probably only here. Is this the only use
3997 * of this infamous trick in shipping, production code? If it
3998 * isn't, it probably should be...
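/*
 * A stripped-down sketch of the technique, for a garden-variety singly
 * linked list (the loop below expresses the same idea in terms of
 * dtrace_loadptr() so that a corrupt or hostile list cannot fault us in
 * probe context):
 *
 *	walker = head;
 *	scout = (head != NULL) ? head->next : NULL;
 *
 *	while (walker != NULL) {
 *		walker = walker->next;
 *		if (scout != NULL)
 *			scout = scout->next;
 *		if (scout != NULL)
 *			scout = scout->next;
 *		if (scout != NULL && scout == walker)
 *			return (B_TRUE);
 *	}
 *	return (B_FALSE);
 *
 * with B_TRUE indicating that the list is circular.
 */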
4001 uintptr_t maddr = dtrace_loadptr(daddr +
4002 offsetof(struct dev_info, devi_minor));
4004 uintptr_t next = offsetof(struct ddi_minor_data, next);
4005 uintptr_t name = offsetof(struct ddi_minor_data,
4006 d_minor) + offsetof(struct ddi_minor, name);
4007 uintptr_t dev = offsetof(struct ddi_minor_data,
4008 d_minor) + offsetof(struct ddi_minor, dev);
4012 scout = dtrace_loadptr(maddr + next);
4014 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4017 m = dtrace_load64(maddr + dev) & MAXMIN64;
4019 m = dtrace_load32(maddr + dev) & MAXMIN;
4022 maddr = dtrace_loadptr(maddr + next);
4027 scout = dtrace_loadptr(scout + next);
4032 scout = dtrace_loadptr(scout + next);
4037 if (scout == maddr) {
4038 *flags |= CPU_DTRACE_ILLOP;
4046 * We have the minor data. Now we need to
4047 * copy the minor's name into the end of the
4050 s = (char *)dtrace_loadptr(maddr + name);
4051 len = dtrace_strlen(s, size);
4053 if (*flags & CPU_DTRACE_FAULT)
4057 if ((end -= (len + 1)) < start)
4063 for (i = 1; i <= len; i++)
4064 end[i] = dtrace_load8((uintptr_t)s++);
4069 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4070 ddi_node_state_t devi_state;
4072 devi_state = dtrace_load32(daddr +
4073 offsetof(struct dev_info, devi_node_state));
4075 if (*flags & CPU_DTRACE_FAULT)
4078 if (devi_state >= DS_INITIALIZED) {
4079 s = (char *)dtrace_loadptr(daddr +
4080 offsetof(struct dev_info, devi_addr));
4081 len = dtrace_strlen(s, size);
4083 if (*flags & CPU_DTRACE_FAULT)
4087 if ((end -= (len + 1)) < start)
4093 for (i = 1; i <= len; i++)
4094 end[i] = dtrace_load8((uintptr_t)s++);
4098 * Now for the node name...
4100 s = (char *)dtrace_loadptr(daddr +
4101 offsetof(struct dev_info, devi_node_name));
4103 daddr = dtrace_loadptr(daddr +
4104 offsetof(struct dev_info, devi_parent));
4107 * If our parent is NULL (that is, if we're the root
4108 * node), we're going to use the special path
4114 len = dtrace_strlen(s, size);
4115 if (*flags & CPU_DTRACE_FAULT)
4118 if ((end -= (len + 1)) < start)
4121 for (i = 1; i <= len; i++)
4122 end[i] = dtrace_load8((uintptr_t)s++);
4125 if (depth++ > dtrace_devdepth_max) {
4126 *flags |= CPU_DTRACE_ILLOP;
4132 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4135 regs[rd] = (uintptr_t)end;
4136 mstate->dtms_scratch_ptr += size;
4143 case DIF_SUBR_STRJOIN: {
4144 char *d = (char *)mstate->dtms_scratch_ptr;
4145 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4146 uintptr_t s1 = tupregs[0].dttk_value;
4147 uintptr_t s2 = tupregs[1].dttk_value;
4150 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4151 !dtrace_strcanload(s2, size, mstate, vstate)) {
4156 if (!DTRACE_INSCRATCH(mstate, size)) {
4157 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4164 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4169 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4177 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4182 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4187 mstate->dtms_scratch_ptr += i;
4188 regs[rd] = (uintptr_t)d;
4194 case DIF_SUBR_LLTOSTR: {
4195 int64_t i = (int64_t)tupregs[0].dttk_value;
4196 int64_t val = i < 0 ? i * -1 : i;
4197 uint64_t size = 22; /* enough room for 2^64 in decimal */
4198 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4200 if (!DTRACE_INSCRATCH(mstate, size)) {
4201 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4206 for (*end-- = '\0'; val; val /= 10)
4207 *end-- = '0' + (val % 10);
4215 regs[rd] = (uintptr_t)end + 1;
4216 mstate->dtms_scratch_ptr += size;
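/*
 * Worked example: for i = -321, val is 321, so the digit loop above
 * emits '1', '2', '3' backwards after the terminating '\0'; with the
 * sign prepended, regs[rd] ends up pointing at the string "-321".
 */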
4220 case DIF_SUBR_HTONS:
4221 case DIF_SUBR_NTOHS:
4222 #if BYTE_ORDER == BIG_ENDIAN
4223 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4225 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4230 case DIF_SUBR_HTONL:
4231 case DIF_SUBR_NTOHL:
4232 #if BYTE_ORDER == BIG_ENDIAN
4233 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4235 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
4240 case DIF_SUBR_HTONLL:
4241 case DIF_SUBR_NTOHLL:
4242 #if BYTE_ORDER == BIG_ENDIAN
4243 regs[rd] = (uint64_t)tupregs[0].dttk_value;
4245 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
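/*
 * On a little-endian host these three pairs reduce to plain byte swaps,
 * e.g. DT_BSWAP_16(0x1234) == 0x3412 and DT_BSWAP_32(0x11223344) ==
 * 0x44332211; on a big-endian host they are the identity, as the cases
 * above show.
 */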
4250 case DIF_SUBR_DIRNAME:
4251 case DIF_SUBR_BASENAME: {
4252 char *dest = (char *)mstate->dtms_scratch_ptr;
4253 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4254 uintptr_t src = tupregs[0].dttk_value;
4255 int i, j, len = dtrace_strlen((char *)src, size);
4256 int lastbase = -1, firstbase = -1, lastdir = -1;
4259 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
4264 if (!DTRACE_INSCRATCH(mstate, size)) {
4265 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4271 * The basename and dirname for a zero-length string is
4276 src = (uintptr_t)".";
4280 * Start from the back of the string, moving back toward the
4281 * front until we see a character that isn't a slash. That
4282 * character is the last character in the basename.
4284 for (i = len - 1; i >= 0; i--) {
4285 if (dtrace_load8(src + i) != '/')
4293 * Starting from the last character in the basename, move
4294 * towards the front until we find a slash. The character
4295 * that we processed immediately before that is the first
4296 * character in the basename.
4298 for (; i >= 0; i--) {
4299 if (dtrace_load8(src + i) == '/')
4307 * Now keep going until we find a non-slash character. That
4308 * character is the last character in the dirname.
4310 for (; i >= 0; i--) {
4311 if (dtrace_load8(src + i) != '/')
4318 ASSERT(!(lastbase == -1 && firstbase != -1));
4319 ASSERT(!(firstbase == -1 && lastdir != -1));
4321 if (lastbase == -1) {
4323 * We didn't find a non-slash character. We know that
4324 * the length is non-zero, so the whole string must be
4325 * slashes. In either the dirname or the basename
4326 * case, we return '/'.
4328 ASSERT(firstbase == -1);
4329 firstbase = lastbase = lastdir = 0;
4332 if (firstbase == -1) {
4334 * The entire string consists only of a basename
4335 * component. If we're looking for dirname, we need
4336 * to change our string to be just "."; if we're
4337 * looking for a basename, we'll just set the first
4338 * character of the basename to be 0.
4340 if (subr == DIF_SUBR_DIRNAME) {
4341 ASSERT(lastdir == -1);
4342 src = (uintptr_t)".";
4349 if (subr == DIF_SUBR_DIRNAME) {
4350 if (lastdir == -1) {
4352 * We know that we have a slash in the name --
4353 * or lastdir would be set to 0, above. And
4354 * because lastdir is -1, we know that this
4355 * slash must be the first character. (That
4356 * is, the full string must be of the form
4357 * "/basename".) In this case, the last
4358 * character of the directory name is 0.
4366 ASSERT(subr == DIF_SUBR_BASENAME);
4367 ASSERT(firstbase != -1 && lastbase != -1);
4372 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4373 dest[j] = dtrace_load8(src + i);
4376 regs[rd] = (uintptr_t)dest;
4377 mstate->dtms_scratch_ptr += size;
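/*
 * A few illustrative results of the scan above:
 *
 *	basename("/foo/bar/baz")	-> "baz"
 *	dirname("/foo/bar/baz")		-> "/foo/bar"
 *	basename("/foo/bar///")		-> "bar"
 *	dirname("/baz")			-> "/"
 *	dirname("baz")			-> "."
 *	basename("///")			-> "/"
 */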
4381 case DIF_SUBR_CLEANPATH: {
4382 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4383 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4384 uintptr_t src = tupregs[0].dttk_value;
4387 if (!dtrace_strcanload(src, size, mstate, vstate)) {
4392 if (!DTRACE_INSCRATCH(mstate, size)) {
4393 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4399 * Move forward, loading each character.
4402 c = dtrace_load8(src + i++);
4404 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
4412 c = dtrace_load8(src + i++);
4416 * We have two slashes -- we can just advance
4417 * to the next character.
4424 * This is not "." and it's not ".." -- we can
4425 * just store the "/" and this character and
4433 c = dtrace_load8(src + i++);
4437 * This is a "/./" component. We're not going
4438 * to store anything in the destination buffer;
4439 * we're just going to go to the next component.
4446 * This is not ".." -- we can just store the
4447 * "/." and this character and continue
4456 c = dtrace_load8(src + i++);
4458 if (c != '/' && c != '\0') {
4460 * This is not ".." -- it's "..[mumble]".
4461 * We'll store the "/.." and this character
4462 * and continue processing.
4472 * This is "/../" or "/..\0". We need to back up
4473 * our destination pointer until we find a "/".
4476 while (j != 0 && dest[--j] != '/')
4481 } while (c != '\0');
4484 regs[rd] = (uintptr_t)dest;
4485 mstate->dtms_scratch_ptr += size;
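/*
 * Illustrative results of the component scan above (cleanpath() is
 * purely lexical -- it does not consult the file system, so symbolic
 * links are not taken into account):
 *
 *	cleanpath("/foo//bar/./baz")	-> "/foo/bar/baz"
 *	cleanpath("/foo/bar/../baz")	-> "/foo/baz"
 */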
4489 case DIF_SUBR_INET_NTOA:
4490 case DIF_SUBR_INET_NTOA6:
4491 case DIF_SUBR_INET_NTOP: {
4496 if (subr == DIF_SUBR_INET_NTOP) {
4497 af = (int)tupregs[0].dttk_value;
4500 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4504 if (af == AF_INET) {
4509 * Safely load the IPv4 address.
4511 ip4 = dtrace_load32(tupregs[argi].dttk_value);
4514	 * Check that an IPv4 string will fit in scratch.
4516 size = INET_ADDRSTRLEN;
4517 if (!DTRACE_INSCRATCH(mstate, size)) {
4518 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4522 base = (char *)mstate->dtms_scratch_ptr;
4523 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4526 * Stringify as a dotted decimal quad.
4529 ptr8 = (uint8_t *)&ip4;
4530 for (i = 3; i >= 0; i--) {
4536 for (; val; val /= 10) {
4537 *end-- = '0' + (val % 10);
4544 ASSERT(end + 1 >= base);
4546 } else if (af == AF_INET6) {
4547 struct in6_addr ip6;
4548 int firstzero, tryzero, numzero, v6end;
4550 const char digits[] = "0123456789abcdef";
4553 * Stringify using RFC 1884 convention 2 - 16 bit
4554 * hexadecimal values with a zero-run compression.
4555 * Lower case hexadecimal digits are used.
4556 * eg, fe80::214:4fff:fe0b:76c8.
4557 * The IPv4 embedded form is returned for inet_ntop,
4558 * just the IPv4 string is returned for inet_ntoa6.
4562 * Safely load the IPv6 address.
4565 (void *)(uintptr_t)tupregs[argi].dttk_value,
4566 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4569	 * Check that an IPv6 string will fit in scratch.
4571 size = INET6_ADDRSTRLEN;
4572 if (!DTRACE_INSCRATCH(mstate, size)) {
4573 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4577 base = (char *)mstate->dtms_scratch_ptr;
4578 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4582 * Find the longest run of 16 bit zero values
4583 * for the single allowed zero compression - "::".
4588 for (i = 0; i < sizeof (struct in6_addr); i++) {
4590 if (ip6._S6_un._S6_u8[i] == 0 &&
4592 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4594 tryzero == -1 && i % 2 == 0) {
4599 if (tryzero != -1 &&
4601 (ip6._S6_un._S6_u8[i] != 0 ||
4603 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
4605 i == sizeof (struct in6_addr) - 1)) {
4607 if (i - tryzero <= numzero) {
4612 firstzero = tryzero;
4613 numzero = i - i % 2 - tryzero;
4617 if (ip6._S6_un._S6_u8[i] == 0 &&
4619 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
4621 i == sizeof (struct in6_addr) - 1)
4625 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4628 * Check for an IPv4 embedded address.
4630 v6end = sizeof (struct in6_addr) - 2;
4631 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4632 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4633 for (i = sizeof (struct in6_addr) - 1;
4634 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4635 ASSERT(end >= base);
4638 val = ip6._S6_un._S6_u8[i];
4640 val = ip6.__u6_addr.__u6_addr8[i];
4646 for (; val; val /= 10) {
4647 *end-- = '0' + val % 10;
4651 if (i > DTRACE_V4MAPPED_OFFSET)
4655 if (subr == DIF_SUBR_INET_NTOA6)
4659 * Set v6end to skip the IPv4 address that
4660 * we have already stringified.
4666 * Build the IPv6 string by working through the
4667 * address in reverse.
4669 for (i = v6end; i >= 0; i -= 2) {
4670 ASSERT(end >= base);
4672 if (i == firstzero + numzero - 2) {
4679 if (i < 14 && i != firstzero - 2)
4683 val = (ip6._S6_un._S6_u8[i] << 8) +
4684 ip6._S6_un._S6_u8[i + 1];
4686 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
4687 ip6.__u6_addr.__u6_addr8[i + 1];
4693 for (; val; val /= 16) {
4694 *end-- = digits[val % 16];
4698 ASSERT(end + 1 >= base);
4702	 * The user didn't use AF_INET or AF_INET6.
4704 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4709 inetout: regs[rd] = (uintptr_t)end + 1;
4710 mstate->dtms_scratch_ptr += size;
4714 case DIF_SUBR_MEMREF: {
4715 uintptr_t size = 2 * sizeof(uintptr_t);
4716 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4717 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size;
4719 /* address and length */
4720 memref[0] = tupregs[0].dttk_value;
4721 memref[1] = tupregs[1].dttk_value;
4723 regs[rd] = (uintptr_t) memref;
4724 mstate->dtms_scratch_ptr += scratch_size;
4728 case DIF_SUBR_TYPEREF: {
4729 uintptr_t size = 4 * sizeof(uintptr_t);
4730 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
4731 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size;
4733 /* address, num_elements, type_str, type_len */
4734 typeref[0] = tupregs[0].dttk_value;
4735 typeref[1] = tupregs[1].dttk_value;
4736 typeref[2] = tupregs[2].dttk_value;
4737 typeref[3] = tupregs[3].dttk_value;
4739 regs[rd] = (uintptr_t) typeref;
4740 mstate->dtms_scratch_ptr += scratch_size;
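/*
 * The memref and typeref arrays built above live in per-CPU scratch and
 * are consumed below by the DTRACEACT_PRINTM and DTRACEACT_PRINTT
 * actions respectively: a memref is { address, length } and a typeref
 * is { address, number of elements, type string, type size }.
 */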
4747 * Emulate the execution of DTrace IR instructions specified by the given
4748 * DIF object. This function is deliberately void of assertions as all of
4749 * the necessary checks are handled by a call to dtrace_difo_validate().
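 *
 * The emulator models a small register machine: regs[] holds the DIF
 * registers with %r0 hardwired to zero, the cc_* variables carry
 * comparison results for the branch opcodes, tupregs[] is the tuple
 * stack used for keys and subroutine arguments, and pc simply walks
 * dtdo_buf until the text is exhausted or a fault is raised.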
4752 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4753 dtrace_vstate_t *vstate, dtrace_state_t *state)
4755 const dif_instr_t *text = difo->dtdo_buf;
4756 const uint_t textlen = difo->dtdo_len;
4757 const char *strtab = difo->dtdo_strtab;
4758 const uint64_t *inttab = difo->dtdo_inttab;
4761 dtrace_statvar_t *svar;
4762 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4764 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
4765 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
4767 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4768 uint64_t regs[DIF_DIR_NREGS];
4771 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4773 uint_t pc = 0, id, opc = 0;
4779 * We stash the current DIF object into the machine state: we need it
4780 * for subsequent access checking.
4782 mstate->dtms_difo = difo;
4784 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4786 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4790 r1 = DIF_INSTR_R1(instr);
4791 r2 = DIF_INSTR_R2(instr);
4792 rd = DIF_INSTR_RD(instr);
4794 switch (DIF_INSTR_OP(instr)) {
4796 regs[rd] = regs[r1] | regs[r2];
4799 regs[rd] = regs[r1] ^ regs[r2];
4802 regs[rd] = regs[r1] & regs[r2];
4805 regs[rd] = regs[r1] << regs[r2];
4808 regs[rd] = regs[r1] >> regs[r2];
4811 regs[rd] = regs[r1] - regs[r2];
4814 regs[rd] = regs[r1] + regs[r2];
4817 regs[rd] = regs[r1] * regs[r2];
4820 if (regs[r2] == 0) {
4822 *flags |= CPU_DTRACE_DIVZERO;
4824 regs[rd] = (int64_t)regs[r1] /
4830 if (regs[r2] == 0) {
4832 *flags |= CPU_DTRACE_DIVZERO;
4834 regs[rd] = regs[r1] / regs[r2];
4839 if (regs[r2] == 0) {
4841 *flags |= CPU_DTRACE_DIVZERO;
4843 regs[rd] = (int64_t)regs[r1] %
4849 if (regs[r2] == 0) {
4851 *flags |= CPU_DTRACE_DIVZERO;
4853 regs[rd] = regs[r1] % regs[r2];
4858 regs[rd] = ~regs[r1];
4861 regs[rd] = regs[r1];
4864 cc_r = regs[r1] - regs[r2];
4868 cc_c = regs[r1] < regs[r2];
4871 cc_n = cc_v = cc_c = 0;
4872 cc_z = regs[r1] == 0;
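/*
 * The conditional branches below test these flags much as a
 * conventional CPU would: equality checks use cc_z, signed comparisons
 * use cc_n ^ cc_v, and unsigned comparisons use the carry flag cc_c.
 */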
4875 pc = DIF_INSTR_LABEL(instr);
4879 pc = DIF_INSTR_LABEL(instr);
4883 pc = DIF_INSTR_LABEL(instr);
4886 if ((cc_z | (cc_n ^ cc_v)) == 0)
4887 pc = DIF_INSTR_LABEL(instr);
4890 if ((cc_c | cc_z) == 0)
4891 pc = DIF_INSTR_LABEL(instr);
4894 if ((cc_n ^ cc_v) == 0)
4895 pc = DIF_INSTR_LABEL(instr);
4899 pc = DIF_INSTR_LABEL(instr);
4903 pc = DIF_INSTR_LABEL(instr);
4907 pc = DIF_INSTR_LABEL(instr);
4910 if (cc_z | (cc_n ^ cc_v))
4911 pc = DIF_INSTR_LABEL(instr);
4915 pc = DIF_INSTR_LABEL(instr);
4918 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4919 *flags |= CPU_DTRACE_KPRIV;
4925 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4928 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4929 *flags |= CPU_DTRACE_KPRIV;
4935 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4938 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4939 *flags |= CPU_DTRACE_KPRIV;
4945 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4948 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4949 *flags |= CPU_DTRACE_KPRIV;
4955 regs[rd] = dtrace_load8(regs[r1]);
4958 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4959 *flags |= CPU_DTRACE_KPRIV;
4965 regs[rd] = dtrace_load16(regs[r1]);
4968 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4969 *flags |= CPU_DTRACE_KPRIV;
4975 regs[rd] = dtrace_load32(regs[r1]);
4978 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
4979 *flags |= CPU_DTRACE_KPRIV;
4985 regs[rd] = dtrace_load64(regs[r1]);
4989 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4992 regs[rd] = (int16_t)
4993 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
4996 regs[rd] = (int32_t)
4997 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5001 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5005 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5009 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5013 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5022 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5025 regs[rd] = (uint64_t)(uintptr_t)
5026 (strtab + DIF_INSTR_STRING(instr));
5029 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5030 uintptr_t s1 = regs[r1];
5031 uintptr_t s2 = regs[r2];
5034 !dtrace_strcanload(s1, sz, mstate, vstate))
5037 !dtrace_strcanload(s2, sz, mstate, vstate))
5040 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
5048 regs[rd] = dtrace_dif_variable(mstate, state,
5052 id = DIF_INSTR_VAR(instr);
5054 if (id >= DIF_VAR_OTHER_UBASE) {
5057 id -= DIF_VAR_OTHER_UBASE;
5058 svar = vstate->dtvs_globals[id];
5059 ASSERT(svar != NULL);
5060 v = &svar->dtsv_var;
5062 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5063 regs[rd] = svar->dtsv_data;
5067 a = (uintptr_t)svar->dtsv_data;
5069 if (*(uint8_t *)a == UINT8_MAX) {
5071 * If the 0th byte is set to UINT8_MAX
5072 * then this is to be treated as a
5073 * reference to a NULL variable.
5077 regs[rd] = a + sizeof (uint64_t);
5083 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5087 id = DIF_INSTR_VAR(instr);
5089 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5090 id -= DIF_VAR_OTHER_UBASE;
5092 svar = vstate->dtvs_globals[id];
5093 ASSERT(svar != NULL);
5094 v = &svar->dtsv_var;
5096 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5097 uintptr_t a = (uintptr_t)svar->dtsv_data;
5100 ASSERT(svar->dtsv_size != 0);
5102 if (regs[rd] == 0) {
5103 *(uint8_t *)a = UINT8_MAX;
5107 a += sizeof (uint64_t);
5109 if (!dtrace_vcanload(
5110 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5114 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5115 (void *)a, &v->dtdv_type);
5119 svar->dtsv_data = regs[rd];
5124 * There are no DTrace built-in thread-local arrays at
5125 * present. This opcode is saved for future work.
5127 *flags |= CPU_DTRACE_ILLOP;
5132 id = DIF_INSTR_VAR(instr);
5134 if (id < DIF_VAR_OTHER_UBASE) {
5136 * For now, this has no meaning.
5142 id -= DIF_VAR_OTHER_UBASE;
5144 ASSERT(id < vstate->dtvs_nlocals);
5145 ASSERT(vstate->dtvs_locals != NULL);
5147 svar = vstate->dtvs_locals[id];
5148 ASSERT(svar != NULL);
5149 v = &svar->dtsv_var;
5151 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5152 uintptr_t a = (uintptr_t)svar->dtsv_data;
5153 size_t sz = v->dtdv_type.dtdt_size;
5155 sz += sizeof (uint64_t);
5156 ASSERT(svar->dtsv_size == NCPU * sz);
5159 if (*(uint8_t *)a == UINT8_MAX) {
5161 * If the 0th byte is set to UINT8_MAX
5162 * then this is to be treated as a
5163 * reference to a NULL variable.
5167 regs[rd] = a + sizeof (uint64_t);
5173 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5174 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5175 regs[rd] = tmp[curcpu];
5179 id = DIF_INSTR_VAR(instr);
5181 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5182 id -= DIF_VAR_OTHER_UBASE;
5183 ASSERT(id < vstate->dtvs_nlocals);
5185 ASSERT(vstate->dtvs_locals != NULL);
5186 svar = vstate->dtvs_locals[id];
5187 ASSERT(svar != NULL);
5188 v = &svar->dtsv_var;
5190 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5191 uintptr_t a = (uintptr_t)svar->dtsv_data;
5192 size_t sz = v->dtdv_type.dtdt_size;
5194 sz += sizeof (uint64_t);
5195 ASSERT(svar->dtsv_size == NCPU * sz);
5198 if (regs[rd] == 0) {
5199 *(uint8_t *)a = UINT8_MAX;
5203 a += sizeof (uint64_t);
5206 if (!dtrace_vcanload(
5207 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5211 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5212 (void *)a, &v->dtdv_type);
5216 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5217 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5218 tmp[curcpu] = regs[rd];
5222 dtrace_dynvar_t *dvar;
5225 id = DIF_INSTR_VAR(instr);
5226 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5227 id -= DIF_VAR_OTHER_UBASE;
5228 v = &vstate->dtvs_tlocals[id];
5230 key = &tupregs[DIF_DTR_NREGS];
5231 key[0].dttk_value = (uint64_t)id;
5232 key[0].dttk_size = 0;
5233 DTRACE_TLS_THRKEY(key[1].dttk_value);
5234 key[1].dttk_size = 0;
5236 dvar = dtrace_dynvar(dstate, 2, key,
5237 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5245 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5246 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5248 regs[rd] = *((uint64_t *)dvar->dtdv_data);
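/*
 * Thread-local scalars live in the dynamic variable space, keyed by the
 * variable id plus a per-thread key (DTRACE_TLS_THRKEY). Loads use
 * DTRACE_DYNVAR_NOALLOC and therefore never allocate; the store case
 * below allocates when a non-zero value is stored and deallocates when
 * zero is stored.
 */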
5255 dtrace_dynvar_t *dvar;
5258 id = DIF_INSTR_VAR(instr);
5259 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5260 id -= DIF_VAR_OTHER_UBASE;
5262 key = &tupregs[DIF_DTR_NREGS];
5263 key[0].dttk_value = (uint64_t)id;
5264 key[0].dttk_size = 0;
5265 DTRACE_TLS_THRKEY(key[1].dttk_value);
5266 key[1].dttk_size = 0;
5267 v = &vstate->dtvs_tlocals[id];
5269 dvar = dtrace_dynvar(dstate, 2, key,
5270 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5271 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5272 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5273 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5276 * Given that we're storing to thread-local data,
5277 * we need to flush our predicate cache.
5279 curthread->t_predcache = 0;
5284 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5285 if (!dtrace_vcanload(
5286 (void *)(uintptr_t)regs[rd],
5287 &v->dtdv_type, mstate, vstate))
5290 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5291 dvar->dtdv_data, &v->dtdv_type);
5293 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5300 regs[rd] = (int64_t)regs[r1] >> regs[r2];
5304 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
5305 regs, tupregs, ttop, mstate, state);
5309 if (ttop == DIF_DTR_NREGS) {
5310 *flags |= CPU_DTRACE_TUPOFLOW;
5314 if (r1 == DIF_TYPE_STRING) {
5316 * If this is a string type and the size is 0,
5317 * we'll use the system-wide default string
5318 * size. Note that we are _not_ looking at
5319 * the value of the DTRACEOPT_STRSIZE option;
5320 * had this been set, we would expect to have
5321 * a non-zero size value in the "pushtr".
5323 tupregs[ttop].dttk_size =
5324 dtrace_strlen((char *)(uintptr_t)regs[rd],
5325 regs[r2] ? regs[r2] :
5326 dtrace_strsize_default) + 1;
5328 tupregs[ttop].dttk_size = regs[r2];
5331 tupregs[ttop++].dttk_value = regs[rd];
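/*
 * The tuple stack built by the push opcodes supplies the keys for the
 * associative-array loads and stores below and the arguments passed to
 * dtrace_dif_subr() for subroutine calls; it is emptied again by
 * DIF_OP_FLUSHTS.
 */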
5335 if (ttop == DIF_DTR_NREGS) {
5336 *flags |= CPU_DTRACE_TUPOFLOW;
5340 tupregs[ttop].dttk_value = regs[rd];
5341 tupregs[ttop++].dttk_size = 0;
5349 case DIF_OP_FLUSHTS:
5354 case DIF_OP_LDTAA: {
5355 dtrace_dynvar_t *dvar;
5356 dtrace_key_t *key = tupregs;
5357 uint_t nkeys = ttop;
5359 id = DIF_INSTR_VAR(instr);
5360 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5361 id -= DIF_VAR_OTHER_UBASE;
5363 key[nkeys].dttk_value = (uint64_t)id;
5364 key[nkeys++].dttk_size = 0;
5366 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5367 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5368 key[nkeys++].dttk_size = 0;
5369 v = &vstate->dtvs_tlocals[id];
5371 v = &vstate->dtvs_globals[id]->dtsv_var;
5374 dvar = dtrace_dynvar(dstate, nkeys, key,
5375 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5376 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5377 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5384 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5385 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5387 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5394 case DIF_OP_STTAA: {
5395 dtrace_dynvar_t *dvar;
5396 dtrace_key_t *key = tupregs;
5397 uint_t nkeys = ttop;
5399 id = DIF_INSTR_VAR(instr);
5400 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5401 id -= DIF_VAR_OTHER_UBASE;
5403 key[nkeys].dttk_value = (uint64_t)id;
5404 key[nkeys++].dttk_size = 0;
5406 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5407 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5408 key[nkeys++].dttk_size = 0;
5409 v = &vstate->dtvs_tlocals[id];
5411 v = &vstate->dtvs_globals[id]->dtsv_var;
5414 dvar = dtrace_dynvar(dstate, nkeys, key,
5415 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5416 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5417 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5418 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5423 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5424 if (!dtrace_vcanload(
5425 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5429 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5430 dvar->dtdv_data, &v->dtdv_type);
5432 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5438 case DIF_OP_ALLOCS: {
5439 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5440 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5443 * Rounding up the user allocation size could have
5444 * overflowed large, bogus allocations (like -1ULL) to a small value.
5447 if (size < regs[r1] ||
5448 !DTRACE_INSCRATCH(mstate, size)) {
5449 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5454 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5455 mstate->dtms_scratch_ptr += size;
5461 if (!dtrace_canstore(regs[rd], regs[r2],
5463 *flags |= CPU_DTRACE_BADADDR;
5468 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5471 dtrace_bcopy((void *)(uintptr_t)regs[r1],
5472 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5476 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5477 *flags |= CPU_DTRACE_BADADDR;
5481 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5485 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5486 *flags |= CPU_DTRACE_BADADDR;
5491 *flags |= CPU_DTRACE_BADALIGN;
5495 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5499 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5500 *flags |= CPU_DTRACE_BADADDR;
5505 *flags |= CPU_DTRACE_BADALIGN;
5509 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5513 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5514 *flags |= CPU_DTRACE_BADADDR;
5519 *flags |= CPU_DTRACE_BADALIGN;
5523 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5528 if (!(*flags & CPU_DTRACE_FAULT))
5531 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5532 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5538 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5540 dtrace_probe_t *probe = ecb->dte_probe;
5541 dtrace_provider_t *prov = probe->dtpr_provider;
5542 char c[DTRACE_FULLNAMELEN + 80], *str;
5543 char *msg = "dtrace: breakpoint action at probe ";
5544 char *ecbmsg = " (ecb ";
5545 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
5546 uintptr_t val = (uintptr_t)ecb;
5547 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5549 if (dtrace_destructive_disallow)
5553 * It's impossible to be taking action on the NULL probe.
5555 ASSERT(probe != NULL);
5558 * This is a poor man's (destitute man's?) sprintf(): we want to
5559 * print the provider name, module name, function name and name of
5560 * the probe, along with the hex address of the ECB with the breakpoint
5561 * action -- all of which we must place in the character buffer by hand.
5564 while (*msg != '\0')
5567 for (str = prov->dtpv_name; *str != '\0'; str++)
5571 for (str = probe->dtpr_mod; *str != '\0'; str++)
5575 for (str = probe->dtpr_func; *str != '\0'; str++)
5579 for (str = probe->dtpr_name; *str != '\0'; str++)
5582 while (*ecbmsg != '\0')
5585 while (shift >= 0) {
5586 mask = (uintptr_t)0xf << shift;
5588 if (val >= ((uintptr_t)1 << shift))
5589 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5599 kdb_enter(KDB_WHY_DTRACE, "breakpoint action");
5604 dtrace_action_panic(dtrace_ecb_t *ecb)
5606 dtrace_probe_t *probe = ecb->dte_probe;
5609 * It's impossible to be taking action on the NULL probe.
5611 ASSERT(probe != NULL);
5613 if (dtrace_destructive_disallow)
5616 if (dtrace_panicked != NULL)
5619 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5623 * We won the right to panic. (We want to be sure that only one
5624 * thread calls panic() from dtrace_probe(), and that panic() is
5625 * called exactly once.)
5627 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5628 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5629 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5633 dtrace_action_raise(uint64_t sig)
5635 if (dtrace_destructive_disallow)
5639 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5645 * raise() has a queue depth of 1 -- we ignore all subsequent
5646 * invocations of the raise() action.
5648 if (curthread->t_dtrace_sig == 0)
5649 curthread->t_dtrace_sig = (uint8_t)sig;
5651 curthread->t_sig_check = 1;
5654 struct proc *p = curproc;
5662 dtrace_action_stop(void)
5664 if (dtrace_destructive_disallow)
5668 if (!curthread->t_dtrace_stop) {
5669 curthread->t_dtrace_stop = 1;
5670 curthread->t_sig_check = 1;
5674 struct proc *p = curproc;
5676 psignal(p, SIGSTOP);
5682 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5685 volatile uint16_t *flags;
5689 cpu_t *cpu = &solaris_cpu[curcpu];
5692 if (dtrace_destructive_disallow)
5695 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5697 now = dtrace_gethrtime();
5699 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5701 * We need to advance the mark to the current time.
5703 cpu->cpu_dtrace_chillmark = now;
5704 cpu->cpu_dtrace_chilled = 0;
5708 * Now check to see if the requested chill time would take us over
5709 * the maximum amount of time allowed in the chill interval. (Or
5710 * worse, if the calculation itself induces overflow.)
5712 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5713 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5714 *flags |= CPU_DTRACE_ILLOP;
5718 while (dtrace_gethrtime() - now < val)
5722 * Normally, we assure that the value of the variable "timestamp" does
5723 * not change within an ECB. The presence of chill() represents an
5724 * exception to this rule, however.
5726 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5727 cpu->cpu_dtrace_chilled += val;
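/*
 * chill() thus spins for the requested number of nanoseconds, but the
 * total time chilled on a CPU is capped at dtrace_chill_max within each
 * dtrace_chill_interval; a request that would exceed the cap (or
 * overflow the accounting) is refused with CPU_DTRACE_ILLOP.
 */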
5732 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5733 uint64_t *buf, uint64_t arg)
5735 int nframes = DTRACE_USTACK_NFRAMES(arg);
5736 int strsize = DTRACE_USTACK_STRSIZE(arg);
5737 uint64_t *pcs = &buf[1], *fps;
5738 char *str = (char *)&pcs[nframes];
5739 int size, offs = 0, i, j;
5740 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5741 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
5745 * Should be taking a faster path if string space has not been allocated.
5748 ASSERT(strsize != 0);
5751 * We will first allocate some temporary space for the frame pointers.
5753 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5754 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5755 (nframes * sizeof (uint64_t));
5757 if (!DTRACE_INSCRATCH(mstate, size)) {
5759 * Not enough room for our frame pointers -- need to indicate
5760 * that we ran out of scratch space.
5762 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5766 mstate->dtms_scratch_ptr += size;
5767 saved = mstate->dtms_scratch_ptr;
5770 * Now get a stack with both program counters and frame pointers.
5772 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5773 dtrace_getufpstack(buf, fps, nframes + 1);
5774 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5777 * If that faulted, we're cooked.
5779 if (*flags & CPU_DTRACE_FAULT)
5783 * Now we want to walk up the stack, calling the USTACK helper. For
5784 * each iteration, we restore the scratch pointer.
5786 for (i = 0; i < nframes; i++) {
5787 mstate->dtms_scratch_ptr = saved;
5789 if (offs >= strsize)
5792 sym = (char *)(uintptr_t)dtrace_helper(
5793 DTRACE_HELPER_ACTION_USTACK,
5794 mstate, state, pcs[i], fps[i]);
5797 * If we faulted while running the helper, we're going to
5798 * clear the fault and null out the corresponding string.
5800 if (*flags & CPU_DTRACE_FAULT) {
5801 *flags &= ~CPU_DTRACE_FAULT;
5811 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5814 * Now copy in the string that the helper returned to us.
5816 for (j = 0; offs + j < strsize; j++) {
5817 if ((str[offs + j] = sym[j]) == '\0')
5821 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5826 if (offs >= strsize) {
5828 * If we didn't have room for all of the strings, we don't
5829 * abort processing -- this needn't be a fatal error -- but we
5830 * still want to increment a counter (dts_stkstroverflows) to
5831 * allow this condition to be warned about. (If this is from
5832 * a jstack() action, it is easily tuned via jstackstrsize.)
5834 dtrace_error(&state->dts_stkstroverflows);
5837 while (offs < strsize)
5841 mstate->dtms_scratch_ptr = old;
5846 * If you're looking for the epicenter of DTrace, you just found it. This
5847 * is the function called by the provider to fire a probe -- from which all
5848 * subsequent probe-context DTrace activity emanates.
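 *
 * A provider typically guards this call with its own enabled check,
 * along the lines of the following (hypothetical) sketch:
 *
 *	if (foo_probe_enabled)
 *		dtrace_probe(foo_probe_id, (uintptr_t)arg, 0, 0, 0, 0);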
5851 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
5852 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
5854 processorid_t cpuid;
5855 dtrace_icookie_t cookie;
5856 dtrace_probe_t *probe;
5857 dtrace_mstate_t mstate;
5859 dtrace_action_t *act;
5863 volatile uint16_t *flags;
5868 * Kick out immediately if this CPU is still being born (in which case
5869 * curthread will be set to -1) or the current thread can't allow
5870 * probes in its current context.
5872 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
5876 cookie = dtrace_interrupt_disable();
5877 probe = dtrace_probes[id - 1];
5879 onintr = CPU_ON_INTR(CPU);
5881 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5882 probe->dtpr_predcache == curthread->t_predcache) {
5884 * We have hit in the predicate cache; we know that
5885 * this predicate would evaluate to be false.
5887 dtrace_interrupt_enable(cookie);
5892 if (panic_quiesce) {
5894 if (panicstr != NULL) {
5897 * We don't trace anything if we're panicking.
5899 dtrace_interrupt_enable(cookie);
5903 now = dtrace_gethrtime();
5904 vtime = dtrace_vtime_references != 0;
5906 if (vtime && curthread->t_dtrace_start)
5907 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
5909 mstate.dtms_difo = NULL;
5910 mstate.dtms_probe = probe;
5911 mstate.dtms_strtok = 0;
5912 mstate.dtms_arg[0] = arg0;
5913 mstate.dtms_arg[1] = arg1;
5914 mstate.dtms_arg[2] = arg2;
5915 mstate.dtms_arg[3] = arg3;
5916 mstate.dtms_arg[4] = arg4;
5918 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
5920 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
5921 dtrace_predicate_t *pred = ecb->dte_predicate;
5922 dtrace_state_t *state = ecb->dte_state;
5923 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
5924 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
5925 dtrace_vstate_t *vstate = &state->dts_vstate;
5926 dtrace_provider_t *prov = probe->dtpr_provider;
5931 * A little subtlety with the following (seemingly innocuous)
5932 * declaration of the automatic 'val': by looking at the
5933 * code, you might think that it could be declared in the
5934 * action processing loop, below. (That is, it's only used in
5935 * the action processing loop.) However, it must be declared
5936 * out of that scope because in the case of DIF expression
5937 * arguments to aggregating actions, one iteration of the
5938 * action loop will use the last iteration's value.
5942 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
5943 *flags &= ~CPU_DTRACE_ERROR;
5945 if (prov == dtrace_provider) {
5947 * If dtrace itself is the provider of this probe,
5948 * we're only going to continue processing the ECB if
5949 * arg0 (the dtrace_state_t) is equal to the ECB's
5950 * creating state. (This prevents disjoint consumers
5951 * from seeing one another's metaprobes.)
5953 if (arg0 != (uint64_t)(uintptr_t)state)
5957 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
5959 * We're not currently active. If our provider isn't
5960 * the dtrace pseudo provider, we're not interested.
5962 if (prov != dtrace_provider)
5966 * Now we must further check if we are in the BEGIN
5967 * probe. If we are, we will only continue processing
5968 * if we're still in WARMUP -- if one BEGIN enabling
5969 * has invoked the exit() action, we don't want to
5970 * evaluate subsequent BEGIN enablings.
5972 if (probe->dtpr_id == dtrace_probeid_begin &&
5973 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
5974 ASSERT(state->dts_activity ==
5975 DTRACE_ACTIVITY_DRAINING);
5980 if (ecb->dte_cond) {
5982 * If the dte_cond bits indicate that this
5983 * consumer is only allowed to see user-mode firings
5984 * of this probe, call the provider's dtps_usermode()
5985 * entry point to check that the probe was fired
5986 * while in a user context. Skip this ECB if that's not the case.
5989 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
5990 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
5991 probe->dtpr_id, probe->dtpr_arg) == 0)
5996 * This is more subtle than it looks. We have to be
5997 * absolutely certain that CRED() isn't going to
5998 * change out from under us so it's only legit to
5999 * examine that structure if we're in constrained
6000 * situations. Currently, the only times we'll perform this
6001 * check is if a non-super-user has enabled the
6002 * profile or syscall providers -- providers that
6003 * allow visibility of all processes. For the
6004 * profile case, the check above will ensure that
6005 * we're examining a user context.
6007 if (ecb->dte_cond & DTRACE_COND_OWNER) {
6010 ecb->dte_state->dts_cred.dcr_cred;
6013 ASSERT(s_cr != NULL);
6015 if ((cr = CRED()) == NULL ||
6016 s_cr->cr_uid != cr->cr_uid ||
6017 s_cr->cr_uid != cr->cr_ruid ||
6018 s_cr->cr_uid != cr->cr_suid ||
6019 s_cr->cr_gid != cr->cr_gid ||
6020 s_cr->cr_gid != cr->cr_rgid ||
6021 s_cr->cr_gid != cr->cr_sgid ||
6022 (proc = ttoproc(curthread)) == NULL ||
6023 (proc->p_flag & SNOCD))
6027 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
6030 ecb->dte_state->dts_cred.dcr_cred;
6032 ASSERT(s_cr != NULL);
6034 if ((cr = CRED()) == NULL ||
6035 s_cr->cr_zone->zone_id !=
6036 cr->cr_zone->zone_id)
6042 if (now - state->dts_alive > dtrace_deadman_timeout) {
6044 * We seem to be dead. Unless we (a) have kernel
6045 * destructive permissions, (b) have explicitly enabled
6046 * destructive actions and (c) destructive actions have
6047 * not been disabled, we're going to transition into
6048 * the KILLED state, from which no further processing
6049 * on this state will be performed.
6051 if (!dtrace_priv_kernel_destructive(state) ||
6052 !state->dts_cred.dcr_destructive ||
6053 dtrace_destructive_disallow) {
6054 void *activity = &state->dts_activity;
6055 dtrace_activity_t current;
6058 current = state->dts_activity;
6059 } while (dtrace_cas32(activity, current,
6060 DTRACE_ACTIVITY_KILLED) != current);
6066 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6067 ecb->dte_alignment, state, &mstate)) < 0)
6070 tomax = buf->dtb_tomax;
6071 ASSERT(tomax != NULL);
6073 if (ecb->dte_size != 0)
6074 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
6076 mstate.dtms_epid = ecb->dte_epid;
6077 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6079 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6080 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6082 mstate.dtms_access = 0;
6085 dtrace_difo_t *dp = pred->dtp_difo;
6088 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6090 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6091 dtrace_cacheid_t cid = probe->dtpr_predcache;
6093 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6095 * Update the predicate cache...
6097 ASSERT(cid == pred->dtp_cacheid);
6098 curthread->t_predcache = cid;
6105 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6106 act != NULL; act = act->dta_next) {
6109 dtrace_recdesc_t *rec = &act->dta_rec;
6111 size = rec->dtrd_size;
6112 valoffs = offs + rec->dtrd_offset;
6114 if (DTRACEACT_ISAGG(act->dta_kind)) {
6116 dtrace_aggregation_t *agg;
6118 agg = (dtrace_aggregation_t *)act;
6120 if ((dp = act->dta_difo) != NULL)
6121 v = dtrace_dif_emulate(dp,
6122 &mstate, vstate, state);
6124 if (*flags & CPU_DTRACE_ERROR)
6128 * Note that we always pass the expression
6129 * value from the previous iteration of the
6130 * action loop. This value will only be used
6131 * if there is an expression argument to the
6132 * aggregating action, denoted by the
6133 * dtag_hasarg field.
6135 dtrace_aggregate(agg, buf,
6136 offs, aggbuf, v, val);
6140 switch (act->dta_kind) {
6141 case DTRACEACT_STOP:
6142 if (dtrace_priv_proc_destructive(state))
6143 dtrace_action_stop();
6146 case DTRACEACT_BREAKPOINT:
6147 if (dtrace_priv_kernel_destructive(state))
6148 dtrace_action_breakpoint(ecb);
6151 case DTRACEACT_PANIC:
6152 if (dtrace_priv_kernel_destructive(state))
6153 dtrace_action_panic(ecb);
6156 case DTRACEACT_STACK:
6157 if (!dtrace_priv_kernel(state))
6160 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6161 size / sizeof (pc_t), probe->dtpr_aframes,
6162 DTRACE_ANCHORED(probe) ? NULL :
6167 case DTRACEACT_JSTACK:
6168 case DTRACEACT_USTACK:
6169 if (!dtrace_priv_proc(state))
6173 * See comment in DIF_VAR_PID.
6175 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6177 int depth = DTRACE_USTACK_NFRAMES(
6180 dtrace_bzero((void *)(tomax + valoffs),
6181 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6182 + depth * sizeof (uint64_t));
6187 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6188 curproc->p_dtrace_helpers != NULL) {
6190 * This is the slow path -- we have
6191 * allocated string space, and we're
6192 * getting the stack of a process that
6193 * has helpers. Call into a separate
6194 * routine to perform this processing.
6196 dtrace_action_ustack(&mstate, state,
6197 (uint64_t *)(tomax + valoffs),
6202 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6203 dtrace_getupcstack((uint64_t *)
6205 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6206 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6217 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6219 if (*flags & CPU_DTRACE_ERROR)
6222 switch (act->dta_kind) {
6223 case DTRACEACT_SPECULATE:
6224 ASSERT(buf == &state->dts_buffer[cpuid]);
6225 buf = dtrace_speculation_buffer(state,
6229 *flags |= CPU_DTRACE_DROP;
6233 offs = dtrace_buffer_reserve(buf,
6234 ecb->dte_needed, ecb->dte_alignment,
6238 *flags |= CPU_DTRACE_DROP;
6242 tomax = buf->dtb_tomax;
6243 ASSERT(tomax != NULL);
6245 if (ecb->dte_size != 0)
6246 DTRACE_STORE(uint32_t, tomax, offs,
6250 case DTRACEACT_PRINTM: {
6251 /* The DIF returns a 'memref'. */
6252 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
6254 /* Get the size from the memref. */
6258 * Check if the size exceeds the allocated buffer size.
6261 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6263 *flags |= CPU_DTRACE_DROP;
6267 /* Store the size in the buffer first. */
6268 DTRACE_STORE(uintptr_t, tomax,
6272 * Offset the buffer address to the start of the data.
6275 valoffs += sizeof(uintptr_t);
6278 * Reset to the memory address rather than
6279 * the memref array, then let the BYREF
6280 * code below do the work to store the
6281 * memory data in the buffer.
6287 case DTRACEACT_PRINTT: {
6288 /* The DIF returns a 'typeref'. */
6289 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val;
6294 * Get the type string length and round it
6295 * up so that the data that follows is
6296 * aligned for easy access.
6298 size_t typs = strlen((char *) typeref[2]) + 1;
6299 typs = roundup(typs, sizeof(uintptr_t));
6302 * Get the size from the typeref using the
6303 * number of elements and the type size.
6305 size = typeref[1] * typeref[3];
6308 * Check if the size exceeds the allocated buffer size.
6311 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
6313 *flags |= CPU_DTRACE_DROP;
6317 /* Store the size in the buffer first. */
6318 DTRACE_STORE(uintptr_t, tomax,
6320 valoffs += sizeof(uintptr_t);
6322 /* Store the type size in the buffer. */
6323 DTRACE_STORE(uintptr_t, tomax,
6324 valoffs, typeref[3]);
6325 valoffs += sizeof(uintptr_t);
6329 for (s = 0; s < typs; s++) {
6331 c = dtrace_load8(val++);
6333 DTRACE_STORE(uint8_t, tomax,
6338 * Reset to the memory address rather than
6339 * the typeref array, then let the BYREF
6340 * code below do the work to store the
6341 * memory data in the buffer.
6347 case DTRACEACT_CHILL:
6348 if (dtrace_priv_kernel_destructive(state))
6349 dtrace_action_chill(&mstate, val);
6352 case DTRACEACT_RAISE:
6353 if (dtrace_priv_proc_destructive(state))
6354 dtrace_action_raise(val);
6357 case DTRACEACT_COMMIT:
6361 * We need to commit our buffer state.
6364 buf->dtb_offset = offs + ecb->dte_size;
6365 buf = &state->dts_buffer[cpuid];
6366 dtrace_speculation_commit(state, cpuid, val);
6370 case DTRACEACT_DISCARD:
6371 dtrace_speculation_discard(state, cpuid, val);
6374 case DTRACEACT_DIFEXPR:
6375 case DTRACEACT_LIBACT:
6376 case DTRACEACT_PRINTF:
6377 case DTRACEACT_PRINTA:
6378 case DTRACEACT_SYSTEM:
6379 case DTRACEACT_FREOPEN:
6384 if (!dtrace_priv_kernel(state))
6388 case DTRACEACT_USYM:
6389 case DTRACEACT_UMOD:
6390 case DTRACEACT_UADDR: {
6392 struct pid *pid = curthread->t_procp->p_pidp;
6395 if (!dtrace_priv_proc(state))
6398 DTRACE_STORE(uint64_t, tomax,
6400 valoffs, (uint64_t)pid->pid_id);
6402 valoffs, (uint64_t) curproc->p_pid);
6404 DTRACE_STORE(uint64_t, tomax,
6405 valoffs + sizeof (uint64_t), val);
6410 case DTRACEACT_EXIT: {
6412 * For the exit action, we are going to attempt
6413 * to atomically set our activity to be
6414 * draining. If this fails (either because
6415 * another CPU has beat us to the exit action,
6416 * or because our current activity is something
6417 * other than ACTIVE or WARMUP), we will
6418 * continue. This assures that the exit action
6419 * can be successfully recorded at most once
6420 * when we're in the ACTIVE state. If we're
6421 * encountering the exit() action while in
6422 * COOLDOWN, however, we want to honor the new
6423 * status code. (We know that we're the only
6424 * thread in COOLDOWN, so there is no race.)
6426 void *activity = &state->dts_activity;
6427 dtrace_activity_t current = state->dts_activity;
6429 if (current == DTRACE_ACTIVITY_COOLDOWN)
6432 if (current != DTRACE_ACTIVITY_WARMUP)
6433 current = DTRACE_ACTIVITY_ACTIVE;
6435 if (dtrace_cas32(activity, current,
6436 DTRACE_ACTIVITY_DRAINING) != current) {
6437 *flags |= CPU_DTRACE_DROP;
6448 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
6449 uintptr_t end = valoffs + size;
6451 if (!dtrace_vcanload((void *)(uintptr_t)val,
6452 &dp->dtdo_rtype, &mstate, vstate))
6456 * If this is a string, we're going to only
6457 * load until we find the zero byte -- after
6458 * which we'll store zero bytes.
6460 if (dp->dtdo_rtype.dtdt_kind ==
6463 int intuple = act->dta_intuple;
6466 for (s = 0; s < size; s++) {
6468 c = dtrace_load8(val++);
6470 DTRACE_STORE(uint8_t, tomax,
6473 if (c == '\0' && intuple)
6480 while (valoffs < end) {
6481 DTRACE_STORE(uint8_t, tomax, valoffs++,
6482 dtrace_load8(val++));
6492 case sizeof (uint8_t):
6493 DTRACE_STORE(uint8_t, tomax, valoffs, val);
6495 case sizeof (uint16_t):
6496 DTRACE_STORE(uint16_t, tomax, valoffs, val);
6498 case sizeof (uint32_t):
6499 DTRACE_STORE(uint32_t, tomax, valoffs, val);
6501 case sizeof (uint64_t):
6502 DTRACE_STORE(uint64_t, tomax, valoffs, val);
6506 * Any other size should have been returned by
6507 * reference, not by value.
6514 if (*flags & CPU_DTRACE_DROP)
6517 if (*flags & CPU_DTRACE_FAULT) {
6519 dtrace_action_t *err;
6523 if (probe->dtpr_id == dtrace_probeid_error) {
6525 * There's nothing we can do -- we had an
6526 * error on the error probe. We bump an
6527 * error counter to at least indicate that
6528 * this condition happened.
6530 dtrace_error(&state->dts_dblerrors);
6536 * Before recursing on dtrace_probe(), we
6537 * need to explicitly clear out our start
6538 * time to prevent it from being accumulated
6539 * into t_dtrace_vtime.
6541 curthread->t_dtrace_start = 0;
6545 * Iterate over the actions to figure out which action
6546 * we were processing when we experienced the error.
6547 * Note that act points _past_ the faulting action; if
6548 * act is ecb->dte_action, the fault was in the
6549 * predicate, if it's ecb->dte_action->dta_next it's
6550 * in action #1, and so on.
6552 for (err = ecb->dte_action, ndx = 0;
6553 err != act; err = err->dta_next, ndx++)
6556 dtrace_probe_error(state, ecb->dte_epid, ndx,
6557 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6558 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6559 cpu_core[cpuid].cpuc_dtrace_illval);
6565 buf->dtb_offset = offs + ecb->dte_size;
6569 curthread->t_dtrace_start = dtrace_gethrtime();
6571 dtrace_interrupt_enable(cookie);
6575 * DTrace Probe Hashing Functions
6577 * The functions in this section (and indeed, the functions in remaining
6578 * sections) are not _called_ from probe context. (Any exceptions to this are
6579 * marked with a "Note:".) Rather, they are called from elsewhere in the
6580 * DTrace framework to look-up probes in, add probes to and remove probes from
6581 * the DTrace probe hashes. (Each probe is hashed by each element of the
6582 * probe tuple -- allowing for fast lookups, regardless of what was specified.)
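 * Three such hashes are maintained -- dtrace_bymod, dtrace_byfunc and
 * dtrace_byname -- one per string element of the tuple; each is built
 * with dtrace_hash_create(), which records the offset of that string
 * (and of the per-hash next/prev links) within dtrace_probe_t, so a
 * single implementation serves all three tables.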
6586 dtrace_hash_str(const char *p)
6592 hval = (hval << 4) + *p++;
6593 if ((g = (hval & 0xf0000000)) != 0)
6600 static dtrace_hash_t *
6601 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6603 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6605 hash->dth_stroffs = stroffs;
6606 hash->dth_nextoffs = nextoffs;
6607 hash->dth_prevoffs = prevoffs;
6610 hash->dth_mask = hash->dth_size - 1;
6612 hash->dth_tab = kmem_zalloc(hash->dth_size *
6613 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6619 dtrace_hash_destroy(dtrace_hash_t *hash)
6624 for (i = 0; i < hash->dth_size; i++)
6625 ASSERT(hash->dth_tab[i] == NULL);
6628 kmem_free(hash->dth_tab,
6629 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6630 kmem_free(hash, sizeof (dtrace_hash_t));
6634 dtrace_hash_resize(dtrace_hash_t *hash)
6636 int size = hash->dth_size, i, ndx;
6637 int new_size = hash->dth_size << 1;
6638 int new_mask = new_size - 1;
6639 dtrace_hashbucket_t **new_tab, *bucket, *next;
6641 ASSERT((new_size & new_mask) == 0);
6643 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6645 for (i = 0; i < size; i++) {
6646 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6647 dtrace_probe_t *probe = bucket->dthb_chain;
6649 ASSERT(probe != NULL);
6650 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6652 next = bucket->dthb_next;
6653 bucket->dthb_next = new_tab[ndx];
6654 new_tab[ndx] = bucket;
6658 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6659 hash->dth_tab = new_tab;
6660 hash->dth_size = new_size;
6661 hash->dth_mask = new_mask;
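/*
 * Probes that hash to the same string share a bucket (dthb_chain), so a
 * resize only relinks the buckets themselves; dtrace_hash_add() triggers
 * the resize once the number of buckets exceeds twice the table size.
 */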
6665 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6667 int hashval = DTRACE_HASHSTR(hash, new);
6668 int ndx = hashval & hash->dth_mask;
6669 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6670 dtrace_probe_t **nextp, **prevp;
6672 for (; bucket != NULL; bucket = bucket->dthb_next) {
6673 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6677 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6678 dtrace_hash_resize(hash);
6679 dtrace_hash_add(hash, new);
6683 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6684 bucket->dthb_next = hash->dth_tab[ndx];
6685 hash->dth_tab[ndx] = bucket;
6686 hash->dth_nbuckets++;
6689 nextp = DTRACE_HASHNEXT(hash, new);
6690 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6691 *nextp = bucket->dthb_chain;
6693 if (bucket->dthb_chain != NULL) {
6694 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6695 ASSERT(*prevp == NULL);
6699 bucket->dthb_chain = new;
6703 static dtrace_probe_t *
6704 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6706 int hashval = DTRACE_HASHSTR(hash, template);
6707 int ndx = hashval & hash->dth_mask;
6708 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6710 for (; bucket != NULL; bucket = bucket->dthb_next) {
6711 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6712 return (bucket->dthb_chain);
6719 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6721 int hashval = DTRACE_HASHSTR(hash, template);
6722 int ndx = hashval & hash->dth_mask;
6723 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6725 for (; bucket != NULL; bucket = bucket->dthb_next) {
6726 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6727 return (bucket->dthb_len);
6734 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6736 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6737 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6739 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6740 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6743 * Find the bucket that we're removing this probe from.
6745 for (; bucket != NULL; bucket = bucket->dthb_next) {
6746 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6750 ASSERT(bucket != NULL);
6752 if (*prevp == NULL) {
6753 if (*nextp == NULL) {
6755 * The removed probe was the only probe on this
6756 * bucket; we need to remove the bucket.
6758 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6760 ASSERT(bucket->dthb_chain == probe);
6764 hash->dth_tab[ndx] = bucket->dthb_next;
6766 while (b->dthb_next != bucket)
6768 b->dthb_next = bucket->dthb_next;
6771 ASSERT(hash->dth_nbuckets > 0);
6772 hash->dth_nbuckets--;
6773 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6777 bucket->dthb_chain = *nextp;
6779 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6783 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6787 * DTrace Utility Functions
6789 * These are random utility functions that are _not_ called from probe context.
6792 dtrace_badattr(const dtrace_attribute_t *a)
6794 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6795 a->dtat_data > DTRACE_STABILITY_MAX ||
6796 a->dtat_class > DTRACE_CLASS_MAX);
6800 * Return a duplicate copy of a string. If the specified string is NULL,
6801 * this function returns a zero-length string.
6804 dtrace_strdup(const char *str)
6806 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6809 (void) strcpy(new, str);
6814 #define DTRACE_ISALPHA(c) \
6815 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6818 dtrace_badname(const char *s)
6822 if (s == NULL || (c = *s++) == '\0')
6825 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6828 while ((c = *s++) != '\0') {
6829 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
6830 c != '-' && c != '_' && c != '.' && c != '`')
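/*
 * A name must therefore begin with a letter, '-', '_' or '.', and may
 * contain only letters, digits, '-', '_', '.' and '`'; "fbt" is
 * acceptable, for example, while a name beginning with a digit is
 * rejected.
 */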
6838 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
6843 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
6845 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
6847 priv = DTRACE_PRIV_ALL;
6849 *uidp = crgetuid(cr);
6850 *zoneidp = crgetzoneid(cr);
6853 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
6854 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
6855 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
6856 priv |= DTRACE_PRIV_USER;
6857 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
6858 priv |= DTRACE_PRIV_PROC;
6859 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
6860 priv |= DTRACE_PRIV_OWNER;
6861 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
6862 priv |= DTRACE_PRIV_ZONEOWNER;
6865 priv = DTRACE_PRIV_ALL;
6871 #ifdef DTRACE_ERRDEBUG
6873 dtrace_errdebug(const char *str)
6875 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
6878 mutex_enter(&dtrace_errlock);
6879 dtrace_errlast = str;
6880 dtrace_errthread = curthread;
6882 while (occupied++ < DTRACE_ERRHASHSZ) {
6883 if (dtrace_errhash[hval].dter_msg == str) {
6884 dtrace_errhash[hval].dter_count++;
6888 if (dtrace_errhash[hval].dter_msg != NULL) {
6889 hval = (hval + 1) % DTRACE_ERRHASHSZ;
6893 dtrace_errhash[hval].dter_msg = str;
6894 dtrace_errhash[hval].dter_count = 1;
6898 panic("dtrace: undersized error hash");
6900 mutex_exit(&dtrace_errlock);
6905 * DTrace Matching Functions
6907 * These functions are used to match groups of probes, given some elements of
6908 * a probe tuple, or some globbed expressions for elements of a probe tuple.
6911 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
6914 if (priv != DTRACE_PRIV_ALL) {
6915 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
6916 uint32_t match = priv & ppriv;
6919 * No PRIV_DTRACE_* privileges...
6921 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
6922 DTRACE_PRIV_KERNEL)) == 0)
6926 * No matching bits, but there were bits to match...
6928 if (match == 0 && ppriv != 0)
6932 * Need to have permissions to the process, but don't...
6934 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
6935 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
6940 * Need to be in the same zone unless we possess the
6941 * privilege to examine all zones.
6943 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
6944 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
6953 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
6954 * consists of input pattern strings and an ops-vector to evaluate them.
6955 * This function returns >0 for match, 0 for no match, and <0 for error.
6958 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
6959 uint32_t priv, uid_t uid, zoneid_t zoneid)
6961 dtrace_provider_t *pvp = prp->dtpr_provider;
6964 if (pvp->dtpv_defunct)
6967 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
6970 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
6973 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
6976 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
6979 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
6986 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
6987 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
6988 * libc's version, the kernel version only applies to 8-bit ASCII strings.
6989 * In addition, all of the recursion cases except for '*' matching have been
6990 * unwound. For '*', we still implement recursive evaluation, but a depth
6991 * counter is maintained and matching is aborted if we recurse too deep.
6992 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
6995 dtrace_match_glob(const char *s, const char *p, int depth)
7001 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7005 s = ""; /* treat NULL as empty string */
7014 if ((c = *p++) == '\0')
7015 return (s1 == '\0');
7019 int ok = 0, notflag = 0;
7030 if ((c = *p++) == '\0')
7034 if (c == '-' && lc != '\0' && *p != ']') {
7035 if ((c = *p++) == '\0')
7037 if (c == '\\' && (c = *p++) == '\0')
7041 if (s1 < lc || s1 > c)
7045 } else if (lc <= s1 && s1 <= c)
7048 } else if (c == '\\' && (c = *p++) == '\0')
7051 lc = c; /* save left-hand 'c' for next iteration */
7061 if ((c = *p++) == '\0')
7073 if ((c = *p++) == '\0')
7089 p++; /* consecutive *'s are identical to a single one */
7094 for (s = olds; *s != '\0'; s++) {
7095 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7105 dtrace_match_string(const char *s, const char *p, int depth)
7107 return (s != NULL && strcmp(s, p) == 0);
7112 dtrace_match_nul(const char *s, const char *p, int depth)
7114 return (1); /* always match the empty pattern */
7119 dtrace_match_nonzero(const char *s, const char *p, int depth)
7121 return (s != NULL && s[0] != '\0');
7125 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7126 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7128 dtrace_probe_t template, *probe;
7129 dtrace_hash_t *hash = NULL;
7130 int len, best = INT_MAX, nmatched = 0;
7133 ASSERT(MUTEX_HELD(&dtrace_lock));
7136 * If the probe ID is specified in the key, just lookup by ID and
7137 * invoke the match callback once if a matching probe is found.
7139 if (pkp->dtpk_id != DTRACE_IDNONE) {
7140 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7141 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7142 (void) (*matched)(probe, arg);
7148 template.dtpr_mod = (char *)pkp->dtpk_mod;
7149 template.dtpr_func = (char *)pkp->dtpk_func;
7150 template.dtpr_name = (char *)pkp->dtpk_name;
7153 * We want to find the most distinct of the module name, function
7154 * name, and name. So for each one that is not a glob pattern or
7155 * empty string, we perform a lookup in the corresponding hash and
7156 * use the hash table with the fewest collisions to do our search.
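 * For example, a key built from "fbt::malloc:entry" consults both the
 * by-function and by-name hashes and searches whichever of the
 * "malloc" and "entry" chains has fewer collisions.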
7158 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7159 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7161 hash = dtrace_bymod;
7164 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7165 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7167 hash = dtrace_byfunc;
7170 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7171 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7173 hash = dtrace_byname;
7177 * If we did not select a hash table, iterate over every probe and
7178 * invoke our callback for each one that matches our input probe key.
7181 for (i = 0; i < dtrace_nprobes; i++) {
7182 if ((probe = dtrace_probes[i]) == NULL ||
7183 dtrace_match_probe(probe, pkp, priv, uid,
7189 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7197 * If we selected a hash table, iterate over each probe of the same key
7198 * name and invoke the callback for every probe that matches the other
7199 * attributes of our input probe key.
7201 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7202 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7204 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7209 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
7217 * Return the function pointer dtrace_probecmp() should use to compare the
7218 * specified pattern with a string. For NULL or empty patterns, we select
7219 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7220 * For non-empty non-glob strings, we use dtrace_match_string().
7222 static dtrace_probekey_f *
7223 dtrace_probekey_func(const char *p)
7227 if (p == NULL || *p == '\0')
7228 return (&dtrace_match_nul);
7230 while ((c = *p++) != '\0') {
7231 if (c == '[' || c == '?' || c == '*' || c == '\\')
7232 return (&dtrace_match_glob);
7235 return (&dtrace_match_string);
7239 * Build a probe comparison key for use with dtrace_match_probe() from the
7240 * given probe description. By convention, a null key only matches anchored
7241 * probes: if each field is the empty string, reset dtpk_fmatch to
7242 * dtrace_match_nonzero().
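 * For example, the description "syscall::read:entry" yields
 * dtrace_match_string() for its provider, function and name fields and
 * dtrace_match_nul() for its empty module field.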
7245 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7247 pkp->dtpk_prov = pdp->dtpd_provider;
7248 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7250 pkp->dtpk_mod = pdp->dtpd_mod;
7251 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7253 pkp->dtpk_func = pdp->dtpd_func;
7254 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7256 pkp->dtpk_name = pdp->dtpd_name;
7257 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7259 pkp->dtpk_id = pdp->dtpd_id;
7261 if (pkp->dtpk_id == DTRACE_IDNONE &&
7262 pkp->dtpk_pmatch == &dtrace_match_nul &&
7263 pkp->dtpk_mmatch == &dtrace_match_nul &&
7264 pkp->dtpk_fmatch == &dtrace_match_nul &&
7265 pkp->dtpk_nmatch == &dtrace_match_nul)
7266 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7270 * DTrace Provider-to-Framework API Functions
7272 * These functions implement much of the Provider-to-Framework API, as
7273 * described in <sys/dtrace.h>. The parts of the API not in this section are
7274 * the functions in the API for probe management (found below), and
7275 * dtrace_probe() itself (found above).
7279 * Register the calling provider with the DTrace framework. This should
7280 * generally be called by DTrace providers in their attach(9E) entry point.
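 *
 * A minimal sketch of such a call (hypothetical provider "foo"; the
 * attribute and ops structures are assumed to be initialized elsewhere,
 * and the ops vector must supply dtps_provide or dtps_provide_module
 * along with dtps_enable, dtps_disable and dtps_destroy):
 *
 *	static dtrace_provider_id_t foo_id;
 *
 *	if (dtrace_register("foo", &foo_attr, DTRACE_PRIV_USER, NULL,
 *	    &foo_pops, NULL, &foo_id) != 0)
 *		return (DDI_FAILURE);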
7283 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7284 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7286 dtrace_provider_t *provider;
7288 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7289 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7290 "arguments", name ? name : "<NULL>");
7294 if (name[0] == '\0' || dtrace_badname(name)) {
7295 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7296 "provider name", name);
7300 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7301 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7302 pops->dtps_destroy == NULL ||
7303 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7304 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7305 "provider ops", name);
7309 if (dtrace_badattr(&pap->dtpa_provider) ||
7310 dtrace_badattr(&pap->dtpa_mod) ||
7311 dtrace_badattr(&pap->dtpa_func) ||
7312 dtrace_badattr(&pap->dtpa_name) ||
7313 dtrace_badattr(&pap->dtpa_args)) {
7314 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7315 "provider attributes", name);
7319 if (priv & ~DTRACE_PRIV_ALL) {
7320 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7321 "privilege attributes", name);
7325 if ((priv & DTRACE_PRIV_KERNEL) &&
7326 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7327 pops->dtps_usermode == NULL) {
7328 cmn_err(CE_WARN, "failed to register provider '%s': need "
7329 "dtps_usermode() op for given privilege attributes", name);
7333 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7334 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7335 (void) strcpy(provider->dtpv_name, name);
7337 provider->dtpv_attr = *pap;
7338 provider->dtpv_priv.dtpp_flags = priv;
7340 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
7341 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
7343 provider->dtpv_pops = *pops;
7345 if (pops->dtps_provide == NULL) {
7346 ASSERT(pops->dtps_provide_module != NULL);
7347 provider->dtpv_pops.dtps_provide =
7348 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop;
7351 if (pops->dtps_provide_module == NULL) {
7352 ASSERT(pops->dtps_provide != NULL);
7353 provider->dtpv_pops.dtps_provide_module =
7354 (void (*)(void *, modctl_t *))dtrace_nullop;
7357 if (pops->dtps_suspend == NULL) {
7358 ASSERT(pops->dtps_resume == NULL);
7359 provider->dtpv_pops.dtps_suspend =
7360 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7361 provider->dtpv_pops.dtps_resume =
7362 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7365 provider->dtpv_arg = arg;
7366 *idp = (dtrace_provider_id_t)provider;
7368 if (pops == &dtrace_provider_ops) {
7369 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7370 ASSERT(MUTEX_HELD(&dtrace_lock));
7371 ASSERT(dtrace_anon.dta_enabling == NULL);
7374 * We make sure that the DTrace provider is at the head of
7375 * the provider chain.
7377 provider->dtpv_next = dtrace_provider;
7378 dtrace_provider = provider;
7382 mutex_enter(&dtrace_provider_lock);
7383 mutex_enter(&dtrace_lock);
7386 * If there is at least one provider registered, we'll add this
7387 * provider after the first provider.
7389 if (dtrace_provider != NULL) {
7390 provider->dtpv_next = dtrace_provider->dtpv_next;
7391 dtrace_provider->dtpv_next = provider;
7393 dtrace_provider = provider;
7396 if (dtrace_retained != NULL) {
7397 dtrace_enabling_provide(provider);
7400 * Now we need to call dtrace_enabling_matchall() -- which
7401 * will acquire cpu_lock and dtrace_lock. We therefore need
7402 * to drop all of our locks before calling into it...
7404 mutex_exit(&dtrace_lock);
7405 mutex_exit(&dtrace_provider_lock);
7406 dtrace_enabling_matchall();
7411 mutex_exit(&dtrace_lock);
7412 mutex_exit(&dtrace_provider_lock);
7418 * Unregister the specified provider from the DTrace framework. This should
7419 * generally be called by DTrace providers in their detach(9E) entry point.
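 * The call fails if any of this provider's probes still has an ECB, or
 * -- unless the provider has been invalidated -- while /dev/dtrace is
 * held open or anonymous enablings exist, so callers must be prepared
 * for their detach routine to fail.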
7422 dtrace_unregister(dtrace_provider_id_t id)
7424 dtrace_provider_t *old = (dtrace_provider_t *)id;
7425 dtrace_provider_t *prev = NULL;
7427 dtrace_probe_t *probe, *first = NULL;
7429 if (old->dtpv_pops.dtps_enable ==
7430 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
7432 * If DTrace itself is the provider, we're called with locks already held.
7435 ASSERT(old == dtrace_provider);
7437 ASSERT(dtrace_devi != NULL);
7439 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7440 ASSERT(MUTEX_HELD(&dtrace_lock));
7443 if (dtrace_provider->dtpv_next != NULL) {
7445 * There's another provider here; return failure.
7450 mutex_enter(&dtrace_provider_lock);
7451 mutex_enter(&mod_lock);
7452 mutex_enter(&dtrace_lock);
7456 * If anyone has /dev/dtrace open, or if there are anonymous enabled
7457 * probes, we refuse to let providers slither away, unless this
7458 * provider has already been explicitly invalidated.
7460 if (!old->dtpv_defunct &&
7461 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
7462 dtrace_anon.dta_state->dts_necbs > 0))) {
7464 mutex_exit(&dtrace_lock);
7465 mutex_exit(&mod_lock);
7466 mutex_exit(&dtrace_provider_lock);
7472 * Attempt to destroy the probes associated with this provider.
7474 for (i = 0; i < dtrace_nprobes; i++) {
7475 if ((probe = dtrace_probes[i]) == NULL)
7478 if (probe->dtpr_provider != old)
7481 if (probe->dtpr_ecb == NULL)
7485 * We have at least one ECB; we can't remove this provider.
7488 mutex_exit(&dtrace_lock);
7489 mutex_exit(&mod_lock);
7490 mutex_exit(&dtrace_provider_lock);
7496 * All of the probes for this provider are disabled; we can safely
7497 * remove all of them from their hash chains and from the probe array.
7499 for (i = 0; i < dtrace_nprobes; i++) {
7500 if ((probe = dtrace_probes[i]) == NULL)
7503 if (probe->dtpr_provider != old)
7506 dtrace_probes[i] = NULL;
7508 dtrace_hash_remove(dtrace_bymod, probe);
7509 dtrace_hash_remove(dtrace_byfunc, probe);
7510 dtrace_hash_remove(dtrace_byname, probe);
7512 if (first == NULL) {
7514 probe->dtpr_nextmod = NULL;
7516 probe->dtpr_nextmod = first;
7522 * The provider's probes have been removed from the hash chains and
7523 * from the probe array. Now issue a dtrace_sync() to be sure that
7524 * everyone has cleared out from any probe array processing.
7528 for (probe = first; probe != NULL; probe = first) {
7529 first = probe->dtpr_nextmod;
7531 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
7533 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7534 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7535 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7537 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
7539 free_unr(dtrace_arena, probe->dtpr_id);
7541 kmem_free(probe, sizeof (dtrace_probe_t));
7544 if ((prev = dtrace_provider) == old) {
7546 ASSERT(self || dtrace_devi == NULL);
7547 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
7549 dtrace_provider = old->dtpv_next;
7551 while (prev != NULL && prev->dtpv_next != old)
7552 prev = prev->dtpv_next;
7555 panic("attempt to unregister non-existent "
7556 "dtrace provider %p\n", (void *)id);
7559 prev->dtpv_next = old->dtpv_next;
7563 mutex_exit(&dtrace_lock);
7564 mutex_exit(&mod_lock);
7565 mutex_exit(&dtrace_provider_lock);
7568 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
7569 kmem_free(old, sizeof (dtrace_provider_t));
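/*
 * A provider's detach path would typically pair the registration sketched
 * above with something like the following (again using the hypothetical
 * foo_id), bailing out if the framework refuses to let the provider go
 * because its probes are still in use:
 *
 *	if (dtrace_unregister(foo_id) != 0)
 *		return (EBUSY);
 */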
7575 * Invalidate the specified provider. All subsequent probe lookups for the
7576 * specified provider will fail, but its probes will not be removed.
7579 dtrace_invalidate(dtrace_provider_id_t id)
7581 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
7583 ASSERT(pvp->dtpv_pops.dtps_enable !=
7584 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7586 mutex_enter(&dtrace_provider_lock);
7587 mutex_enter(&dtrace_lock);
7589 pvp->dtpv_defunct = 1;
7591 mutex_exit(&dtrace_lock);
7592 mutex_exit(&dtrace_provider_lock);
7596 * Indicate whether or not DTrace has attached.
7599 dtrace_attached(void)
7602 * dtrace_provider will be non-NULL iff the DTrace driver has
7603 * attached. (It's non-NULL because DTrace is always itself a
7606 return (dtrace_provider != NULL);
7610 * Remove all the unenabled probes for the given provider. This function is
7611 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7612 * -- just as many of its associated probes as it can.
7615 dtrace_condense(dtrace_provider_id_t id)
7617 dtrace_provider_t *prov = (dtrace_provider_t *)id;
7619 dtrace_probe_t *probe;
7622 * Make sure this isn't the dtrace provider itself.
7624 ASSERT(prov->dtpv_pops.dtps_enable !=
7625 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
7627 mutex_enter(&dtrace_provider_lock);
7628 mutex_enter(&dtrace_lock);
7631 * Attempt to destroy the probes associated with this provider.
7633 for (i = 0; i < dtrace_nprobes; i++) {
7634 if ((probe = dtrace_probes[i]) == NULL)
7637 if (probe->dtpr_provider != prov)
7640 if (probe->dtpr_ecb != NULL)
7643 dtrace_probes[i] = NULL;
7645 dtrace_hash_remove(dtrace_bymod, probe);
7646 dtrace_hash_remove(dtrace_byfunc, probe);
7647 dtrace_hash_remove(dtrace_byname, probe);
7649 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7651 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7652 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7653 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7654 kmem_free(probe, sizeof (dtrace_probe_t));
7656 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7658 free_unr(dtrace_arena, i + 1);
7662 mutex_exit(&dtrace_lock);
7663 mutex_exit(&dtrace_provider_lock);
7669 * DTrace Probe Management Functions
7671 * The functions in this section perform the DTrace probe management,
7672 * including functions to create probes, look-up probes, and call into the
7673 * providers to request that probes be provided. Some of these functions are
7674 * in the Provider-to-Framework API; these functions can be identified by the
7675 * fact that they are not declared "static".
7679 * Create a probe with the specified module name, function name, and name.
7682 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7683 const char *func, const char *name, int aframes, void *arg)
7685 dtrace_probe_t *probe, **probes;
7686 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7689 if (provider == dtrace_provider) {
7690 ASSERT(MUTEX_HELD(&dtrace_lock));
7692 mutex_enter(&dtrace_lock);
7696 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7697 VM_BESTFIT | VM_SLEEP);
7699 id = alloc_unr(dtrace_arena);
7701 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7703 probe->dtpr_id = id;
7704 probe->dtpr_gen = dtrace_probegen++;
7705 probe->dtpr_mod = dtrace_strdup(mod);
7706 probe->dtpr_func = dtrace_strdup(func);
7707 probe->dtpr_name = dtrace_strdup(name);
7708 probe->dtpr_arg = arg;
7709 probe->dtpr_aframes = aframes;
7710 probe->dtpr_provider = provider;
7712 dtrace_hash_add(dtrace_bymod, probe);
7713 dtrace_hash_add(dtrace_byfunc, probe);
7714 dtrace_hash_add(dtrace_byname, probe);
7716 if (id - 1 >= dtrace_nprobes) {
7717 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7718 size_t nsize = osize << 1;
7722 ASSERT(dtrace_probes == NULL);
7723 nsize = sizeof (dtrace_probe_t *);
7726 probes = kmem_zalloc(nsize, KM_SLEEP);
7728 if (dtrace_probes == NULL) {
7730 dtrace_probes = probes;
7733 dtrace_probe_t **oprobes = dtrace_probes;
7735 bcopy(oprobes, probes, osize);
7736 dtrace_membar_producer();
7737 dtrace_probes = probes;
7742 * All CPUs are now seeing the new probes array; we can
7743 * safely free the old array.
7745 kmem_free(oprobes, osize);
7746 dtrace_nprobes <<= 1;
7749 ASSERT(id - 1 < dtrace_nprobes);
7752 ASSERT(dtrace_probes[id - 1] == NULL);
7753 dtrace_probes[id - 1] = probe;
7755 if (provider != dtrace_provider)
7756 mutex_exit(&dtrace_lock);
7761 static dtrace_probe_t *
7762 dtrace_probe_lookup_id(dtrace_id_t id)
7764 ASSERT(MUTEX_HELD(&dtrace_lock));
7766 if (id == 0 || id > dtrace_nprobes)
7769 return (dtrace_probes[id - 1]);
7773 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7775 *((dtrace_id_t *)arg) = probe->dtpr_id;
7777 return (DTRACE_MATCH_DONE);
7781 * Look up a probe based on provider and one or more of module name, function
7782 * name and probe name.
7785 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
7786 char *func, char *name)
7788 dtrace_probekey_t pkey;
7792 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7793 pkey.dtpk_pmatch = &dtrace_match_string;
7794 pkey.dtpk_mod = mod;
7795 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7796 pkey.dtpk_func = func;
7797 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7798 pkey.dtpk_name = name;
7799 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7800 pkey.dtpk_id = DTRACE_IDNONE;
7802 mutex_enter(&dtrace_lock);
7803 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7804 dtrace_probe_lookup_match, &id);
7805 mutex_exit(&dtrace_lock);
7807 ASSERT(match == 1 || match == 0);
7808 return (match ? id : 0);
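/*
 * Taken together, dtrace_probe_lookup() and dtrace_probe_create() are how
 * a provider's dtps_provide entry point avoids creating duplicate probes.
 * A minimal sketch for the hypothetical "foo" provider might read:
 *
 *	if (dtrace_probe_lookup(foo_id, "foomod", "foofunc", "entry") == 0)
 *		(void) dtrace_probe_create(foo_id, "foomod", "foofunc",
 *		    "entry", 0, NULL);
 */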
7812 * Returns the probe argument associated with the specified probe.
7815 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7817 dtrace_probe_t *probe;
7820 mutex_enter(&dtrace_lock);
7822 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7823 probe->dtpr_provider == (dtrace_provider_t *)id)
7824 rval = probe->dtpr_arg;
7826 mutex_exit(&dtrace_lock);
7832 * Copy a probe into a probe description.
7835 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7837 bzero(pdp, sizeof (dtrace_probedesc_t));
7838 pdp->dtpd_id = prp->dtpr_id;
7840 (void) strncpy(pdp->dtpd_provider,
7841 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
7843 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
7844 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
7845 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
7850 dtrace_probe_provide_cb(linker_file_t lf, void *arg)
7852 dtrace_provider_t *prv = (dtrace_provider_t *) arg;
7854 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf);
7862 * Called to indicate that a probe -- or probes -- should be provided by a
7863 * specified provider. If the specified description is NULL, the provider will
7864 * be told to provide all of its probes. (This is done whenever a new
7865 * consumer comes along, or whenever a retained enabling is to be matched.) If
7866 * the specified description is non-NULL, the provider is given the
7867 * opportunity to dynamically provide the specified probe, allowing providers
7868 * to support the creation of probes on-the-fly. (So-called _autocreated_
7869 * probes.) If the provider is NULL, the operations will be applied to all
7870 * providers; if the provider is non-NULL, the operations will only be applied
7871 * to the specified provider. The dtrace_provider_lock must be held, and the
7872 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
7873 * will need to grab the dtrace_lock when it reenters the framework through
7874 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
7877 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
7884 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7888 prv = dtrace_provider;
7893 * First, call the blanket provide operation.
7895 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
7898 * Now call the per-module provide operation. We will grab
7899 * mod_lock to prevent the list from being modified. Note
7900 * that this also prevents the mod_busy bits from changing.
7901 * (mod_busy can only be changed with mod_lock held.)
7903 mutex_enter(&mod_lock);
7908 if (ctl->mod_busy || ctl->mod_mp == NULL)
7911 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
7913 } while ((ctl = ctl->mod_next) != &modules);
7915 (void) linker_file_foreach(dtrace_probe_provide_cb, prv);
7918 mutex_exit(&mod_lock);
7919 } while (all && (prv = prv->dtpv_next) != NULL);
7924 * Iterate over each probe, and call the Framework-to-Provider API function
7928 dtrace_probe_foreach(uintptr_t offs)
7930 dtrace_provider_t *prov;
7931 void (*func)(void *, dtrace_id_t, void *);
7932 dtrace_probe_t *probe;
7933 dtrace_icookie_t cookie;
7937 * We disable interrupts to walk through the probe array. This is
7938 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
7939 * won't see stale data.
7941 cookie = dtrace_interrupt_disable();
7943 for (i = 0; i < dtrace_nprobes; i++) {
7944 if ((probe = dtrace_probes[i]) == NULL)
7947 if (probe->dtpr_ecb == NULL) {
7949 * This probe isn't enabled -- don't call the function.
7954 prov = probe->dtpr_provider;
7955 func = *((void(**)(void *, dtrace_id_t, void *))
7956 ((uintptr_t)&prov->dtpv_pops + offs));
7958 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
7961 dtrace_interrupt_enable(cookie);
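/*
 * Callers select which provider operation to invoke by passing its byte
 * offset within dtrace_pops_t; for example, suspending every enabled probe
 * amounts to (a sketch, assuming the usual suspend entry point):
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
 */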
7966 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
7968 dtrace_probekey_t pkey;
7973 ASSERT(MUTEX_HELD(&dtrace_lock));
7974 dtrace_ecb_create_cache = NULL;
7978 * If we're passed a NULL description, we're being asked to
7979 * create an ECB with a NULL probe.
7981 (void) dtrace_ecb_create_enable(NULL, enab);
7985 dtrace_probekey(desc, &pkey);
7986 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
7987 &priv, &uid, &zoneid);
7989 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
7994 * DTrace Helper Provider Functions
7997 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
7999 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8000 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8001 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8005 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8006 const dof_provider_t *dofprov, char *strtab)
8008 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8009 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8010 dofprov->dofpv_provattr);
8011 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8012 dofprov->dofpv_modattr);
8013 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8014 dofprov->dofpv_funcattr);
8015 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8016 dofprov->dofpv_nameattr);
8017 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8018 dofprov->dofpv_argsattr);
8022 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8024 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8025 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8026 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8027 dof_provider_t *provider;
8029 uint32_t *off, *enoff;
8033 dtrace_helper_provdesc_t dhpv;
8034 dtrace_helper_probedesc_t dhpb;
8035 dtrace_meta_t *meta = dtrace_meta_pid;
8036 dtrace_mops_t *mops = &meta->dtm_mops;
8039 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8040 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8041 provider->dofpv_strtab * dof->dofh_secsize);
8042 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8043 provider->dofpv_probes * dof->dofh_secsize);
8044 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8045 provider->dofpv_prargs * dof->dofh_secsize);
8046 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8047 provider->dofpv_proffs * dof->dofh_secsize);
8049 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8050 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8051 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8055 * See dtrace_helper_provider_validate().
8057 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8058 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8059 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8060 provider->dofpv_prenoffs * dof->dofh_secsize);
8061 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8064 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8067 * Create the provider.
8069 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8071 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8077 * Create the probes.
8079 for (i = 0; i < nprobes; i++) {
8080 probe = (dof_probe_t *)(uintptr_t)(daddr +
8081 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8083 dhpb.dthpb_mod = dhp->dofhp_mod;
8084 dhpb.dthpb_func = strtab + probe->dofpr_func;
8085 dhpb.dthpb_name = strtab + probe->dofpr_name;
8086 dhpb.dthpb_base = probe->dofpr_addr;
8087 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8088 dhpb.dthpb_noffs = probe->dofpr_noffs;
8089 if (enoff != NULL) {
8090 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8091 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8093 dhpb.dthpb_enoffs = NULL;
8094 dhpb.dthpb_nenoffs = 0;
8096 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8097 dhpb.dthpb_nargc = probe->dofpr_nargc;
8098 dhpb.dthpb_xargc = probe->dofpr_xargc;
8099 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8100 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8102 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8107 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8109 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8110 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8113 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8115 for (i = 0; i < dof->dofh_secnum; i++) {
8116 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8117 dof->dofh_secoff + i * dof->dofh_secsize);
8119 if (sec->dofs_type != DOF_SECT_PROVIDER)
8122 dtrace_helper_provide_one(dhp, sec, pid);
8126 * We may have just created probes, so we must now rematch against
8127 * any retained enablings. Note that this call will acquire both
8128 * cpu_lock and dtrace_lock; the fact that we are holding
8129 * dtrace_meta_lock now is what defines the ordering with respect to
8130 * these three locks.
8132 dtrace_enabling_matchall();
8137 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8139 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8140 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8142 dof_provider_t *provider;
8144 dtrace_helper_provdesc_t dhpv;
8145 dtrace_meta_t *meta = dtrace_meta_pid;
8146 dtrace_mops_t *mops = &meta->dtm_mops;
8148 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8149 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8150 provider->dofpv_strtab * dof->dofh_secsize);
8152 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8155 * Build the provider description so that it can be removed.
8157 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8159 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8165 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8167 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8168 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8171 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8173 for (i = 0; i < dof->dofh_secnum; i++) {
8174 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8175 dof->dofh_secoff + i * dof->dofh_secsize);
8177 if (sec->dofs_type != DOF_SECT_PROVIDER)
8180 dtrace_helper_provider_remove_one(dhp, sec, pid);
8186 * DTrace Meta Provider-to-Framework API Functions
8188 * These functions implement the Meta Provider-to-Framework API, as described
8189 * in <sys/dtrace.h>.
8192 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8193 dtrace_meta_provider_id_t *idp)
8195 dtrace_meta_t *meta;
8196 dtrace_helpers_t *help, *next;
8199 *idp = DTRACE_METAPROVNONE;
8202 * We strictly don't need the name, but we hold onto it for
8203 * debuggability. All hail error queues!
8206 cmn_err(CE_WARN, "failed to register meta-provider: "
8212 mops->dtms_create_probe == NULL ||
8213 mops->dtms_provide_pid == NULL ||
8214 mops->dtms_remove_pid == NULL) {
8215 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8216 "invalid ops", name);
8220 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8221 meta->dtm_mops = *mops;
8222 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8223 (void) strcpy(meta->dtm_name, name);
8224 meta->dtm_arg = arg;
8226 mutex_enter(&dtrace_meta_lock);
8227 mutex_enter(&dtrace_lock);
8229 if (dtrace_meta_pid != NULL) {
8230 mutex_exit(&dtrace_lock);
8231 mutex_exit(&dtrace_meta_lock);
8232 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8233 "user-land meta-provider exists", name);
8234 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8235 kmem_free(meta, sizeof (dtrace_meta_t));
8239 dtrace_meta_pid = meta;
8240 *idp = (dtrace_meta_provider_id_t)meta;
8243 * If there are providers and probes ready to go, pass them
8244 * off to the new meta provider now.
8247 help = dtrace_deferred_pid;
8248 dtrace_deferred_pid = NULL;
8250 mutex_exit(&dtrace_lock);
8252 while (help != NULL) {
8253 for (i = 0; i < help->dthps_nprovs; i++) {
8254 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8258 next = help->dthps_next;
8259 help->dthps_next = NULL;
8260 help->dthps_prev = NULL;
8261 help->dthps_deferred = 0;
8265 mutex_exit(&dtrace_meta_lock);
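/*
 * For illustration, a user-land meta-provider (the fasttrap module plays
 * this role for the pid provider) registers roughly as follows, with
 * foo_mops and foo_meta_id standing in for its ops vector and saved ID:
 *
 *	if (dtrace_meta_register("foo", &foo_mops, NULL, &foo_meta_id) != 0)
 *		return (error);
 */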
8271 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8273 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8275 mutex_enter(&dtrace_meta_lock);
8276 mutex_enter(&dtrace_lock);
8278 if (old == dtrace_meta_pid) {
8279 pp = &dtrace_meta_pid;
8281 panic("attempt to unregister non-existent "
8282 "dtrace meta-provider %p\n", (void *)old);
8285 if (old->dtm_count != 0) {
8286 mutex_exit(&dtrace_lock);
8287 mutex_exit(&dtrace_meta_lock);
8293 mutex_exit(&dtrace_lock);
8294 mutex_exit(&dtrace_meta_lock);
8296 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8297 kmem_free(old, sizeof (dtrace_meta_t));
8304 * DTrace DIF Object Functions
8307 dtrace_difo_err(uint_t pc, const char *format, ...)
8309 if (dtrace_err_verbose) {
8312 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8313 va_start(alist, format);
8314 (void) vuprintf(format, alist);
8318 #ifdef DTRACE_ERRDEBUG
8319 dtrace_errdebug(format);
8325 * Validate a DTrace DIF object by checking the IR instructions. The following
8326 * rules are currently enforced by dtrace_difo_validate():
8328 * 1. Each instruction must have a valid opcode
8329 * 2. Each register, string, variable, or subroutine reference must be valid
8330 * 3. No instruction can modify register %r0 (must be zero)
8331 * 4. All instruction reserved bits must be set to zero
8332 * 5. The last instruction must be a "ret" instruction
8333 * 6. All branch targets must reference a valid instruction _after_ the branch
8336 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8340 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8344 kcheckload = cr == NULL ||
8345 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8347 dp->dtdo_destructive = 0;
8349 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8350 dif_instr_t instr = dp->dtdo_buf[pc];
8352 uint_t r1 = DIF_INSTR_R1(instr);
8353 uint_t r2 = DIF_INSTR_R2(instr);
8354 uint_t rd = DIF_INSTR_RD(instr);
8355 uint_t rs = DIF_INSTR_RS(instr);
8356 uint_t label = DIF_INSTR_LABEL(instr);
8357 uint_t v = DIF_INSTR_VAR(instr);
8358 uint_t subr = DIF_INSTR_SUBR(instr);
8359 uint_t type = DIF_INSTR_TYPE(instr);
8360 uint_t op = DIF_INSTR_OP(instr);
8378 err += efunc(pc, "invalid register %u\n", r1);
8380 err += efunc(pc, "invalid register %u\n", r2);
8382 err += efunc(pc, "invalid register %u\n", rd);
8384 err += efunc(pc, "cannot write to %r0\n");
8390 err += efunc(pc, "invalid register %u\n", r1);
8392 err += efunc(pc, "non-zero reserved bits\n");
8394 err += efunc(pc, "invalid register %u\n", rd);
8396 err += efunc(pc, "cannot write to %r0\n");
8406 err += efunc(pc, "invalid register %u\n", r1);
8408 err += efunc(pc, "non-zero reserved bits\n");
8410 err += efunc(pc, "invalid register %u\n", rd);
8412 err += efunc(pc, "cannot write to %r0\n");
8414 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
8415 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
8425 err += efunc(pc, "invalid register %u\n", r1);
8427 err += efunc(pc, "non-zero reserved bits\n");
8429 err += efunc(pc, "invalid register %u\n", rd);
8431 err += efunc(pc, "cannot write to %r0\n");
8441 err += efunc(pc, "invalid register %u\n", r1);
8443 err += efunc(pc, "non-zero reserved bits\n");
8445 err += efunc(pc, "invalid register %u\n", rd);
8447 err += efunc(pc, "cannot write to %r0\n");
8454 err += efunc(pc, "invalid register %u\n", r1);
8456 err += efunc(pc, "non-zero reserved bits\n");
8458 err += efunc(pc, "invalid register %u\n", rd);
8460 err += efunc(pc, "cannot write to 0 address\n");
8465 err += efunc(pc, "invalid register %u\n", r1);
8467 err += efunc(pc, "invalid register %u\n", r2);
8469 err += efunc(pc, "non-zero reserved bits\n");
8473 err += efunc(pc, "invalid register %u\n", r1);
8474 if (r2 != 0 || rd != 0)
8475 err += efunc(pc, "non-zero reserved bits\n");
8488 if (label >= dp->dtdo_len) {
8489 err += efunc(pc, "invalid branch target %u\n",
8493 err += efunc(pc, "backward branch to %u\n",
8498 if (r1 != 0 || r2 != 0)
8499 err += efunc(pc, "non-zero reserved bits\n");
8501 err += efunc(pc, "invalid register %u\n", rd);
8505 case DIF_OP_FLUSHTS:
8506 if (r1 != 0 || r2 != 0 || rd != 0)
8507 err += efunc(pc, "non-zero reserved bits\n");
8510 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8511 err += efunc(pc, "invalid integer ref %u\n",
8512 DIF_INSTR_INTEGER(instr));
8515 err += efunc(pc, "invalid register %u\n", rd);
8517 err += efunc(pc, "cannot write to %r0\n");
8520 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8521 err += efunc(pc, "invalid string ref %u\n",
8522 DIF_INSTR_STRING(instr));
8525 err += efunc(pc, "invalid register %u\n", rd);
8527 err += efunc(pc, "cannot write to %r0\n");
8531 if (r1 > DIF_VAR_ARRAY_MAX)
8532 err += efunc(pc, "invalid array %u\n", r1);
8534 err += efunc(pc, "invalid register %u\n", r2);
8536 err += efunc(pc, "invalid register %u\n", rd);
8538 err += efunc(pc, "cannot write to %r0\n");
8545 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8546 err += efunc(pc, "invalid variable %u\n", v);
8548 err += efunc(pc, "invalid register %u\n", rd);
8550 err += efunc(pc, "cannot write to %r0\n");
8557 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8558 err += efunc(pc, "invalid variable %u\n", v);
8560 err += efunc(pc, "invalid register %u\n", rd);
8563 if (subr > DIF_SUBR_MAX)
8564 err += efunc(pc, "invalid subr %u\n", subr);
8566 err += efunc(pc, "invalid register %u\n", rd);
8568 err += efunc(pc, "cannot write to %r0\n");
8570 if (subr == DIF_SUBR_COPYOUT ||
8571 subr == DIF_SUBR_COPYOUTSTR) {
8572 dp->dtdo_destructive = 1;
8576 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8577 err += efunc(pc, "invalid ref type %u\n", type);
8579 err += efunc(pc, "invalid register %u\n", r2);
8581 err += efunc(pc, "invalid register %u\n", rs);
8584 if (type != DIF_TYPE_CTF)
8585 err += efunc(pc, "invalid val type %u\n", type);
8587 err += efunc(pc, "invalid register %u\n", r2);
8589 err += efunc(pc, "invalid register %u\n", rs);
8592 err += efunc(pc, "invalid opcode %u\n",
8593 DIF_INSTR_OP(instr));
8597 if (dp->dtdo_len != 0 &&
8598 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8599 err += efunc(dp->dtdo_len - 1,
8600 "expected 'ret' as last DIF instruction\n");
8603 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8605 * If we're not returning by reference, the size must be either
8606 * 0 or the size of one of the base types.
8608 switch (dp->dtdo_rtype.dtdt_size) {
8610 case sizeof (uint8_t):
8611 case sizeof (uint16_t):
8612 case sizeof (uint32_t):
8613 case sizeof (uint64_t):
8617 err += efunc(dp->dtdo_len - 1, "bad return size");
8621 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8622 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8623 dtrace_diftype_t *vt, *et;
8626 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8627 v->dtdv_scope != DIFV_SCOPE_THREAD &&
8628 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8629 err += efunc(i, "unrecognized variable scope %d\n",
8634 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8635 v->dtdv_kind != DIFV_KIND_SCALAR) {
8636 err += efunc(i, "unrecognized variable type %d\n",
8641 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8642 err += efunc(i, "%d exceeds variable id limit\n", id);
8646 if (id < DIF_VAR_OTHER_UBASE)
8650 * For user-defined variables, we need to check that this
8651 * definition is identical to any previous definition that we
8654 ndx = id - DIF_VAR_OTHER_UBASE;
8656 switch (v->dtdv_scope) {
8657 case DIFV_SCOPE_GLOBAL:
8658 if (ndx < vstate->dtvs_nglobals) {
8659 dtrace_statvar_t *svar;
8661 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8662 existing = &svar->dtsv_var;
8667 case DIFV_SCOPE_THREAD:
8668 if (ndx < vstate->dtvs_ntlocals)
8669 existing = &vstate->dtvs_tlocals[ndx];
8672 case DIFV_SCOPE_LOCAL:
8673 if (ndx < vstate->dtvs_nlocals) {
8674 dtrace_statvar_t *svar;
8676 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8677 existing = &svar->dtsv_var;
8685 if (vt->dtdt_flags & DIF_TF_BYREF) {
8686 if (vt->dtdt_size == 0) {
8687 err += efunc(i, "zero-sized variable\n");
8691 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8692 vt->dtdt_size > dtrace_global_maxsize) {
8693 err += efunc(i, "oversized by-ref global\n");
8698 if (existing == NULL || existing->dtdv_id == 0)
8701 ASSERT(existing->dtdv_id == v->dtdv_id);
8702 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8704 if (existing->dtdv_kind != v->dtdv_kind)
8705 err += efunc(i, "%d changed variable kind\n", id);
8707 et = &existing->dtdv_type;
8709 if (vt->dtdt_flags != et->dtdt_flags) {
8710 err += efunc(i, "%d changed variable type flags\n", id);
8714 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8715 err += efunc(i, "%d changed variable type size\n", id);
8725 * Validate a DTrace DIF object that is to be used as a helper. Helpers
8726 * are much more constrained than normal DIFOs. Specifically, they may not:
8729 * 1. Make calls to subroutines other than copyin(), copyinstr() or
8730 * miscellaneous string routines
8731 * 2. Access DTrace variables other than the args[] array, and the
8732 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
8733 * 3. Have thread-local variables.
8734 * 4. Have dynamic variables.
8737 dtrace_difo_validate_helper(dtrace_difo_t *dp)
8739 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8743 for (pc = 0; pc < dp->dtdo_len; pc++) {
8744 dif_instr_t instr = dp->dtdo_buf[pc];
8746 uint_t v = DIF_INSTR_VAR(instr);
8747 uint_t subr = DIF_INSTR_SUBR(instr);
8748 uint_t op = DIF_INSTR_OP(instr);
8803 case DIF_OP_FLUSHTS:
8815 if (v >= DIF_VAR_OTHER_UBASE)
8818 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8821 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8822 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8823 v == DIF_VAR_EXECARGS ||
8824 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8825 v == DIF_VAR_UID || v == DIF_VAR_GID)
8828 err += efunc(pc, "illegal variable %u\n", v);
8835 err += efunc(pc, "illegal dynamic variable load\n");
8841 err += efunc(pc, "illegal dynamic variable store\n");
8845 if (subr == DIF_SUBR_ALLOCA ||
8846 subr == DIF_SUBR_BCOPY ||
8847 subr == DIF_SUBR_COPYIN ||
8848 subr == DIF_SUBR_COPYINTO ||
8849 subr == DIF_SUBR_COPYINSTR ||
8850 subr == DIF_SUBR_INDEX ||
8851 subr == DIF_SUBR_INET_NTOA ||
8852 subr == DIF_SUBR_INET_NTOA6 ||
8853 subr == DIF_SUBR_INET_NTOP ||
8854 subr == DIF_SUBR_LLTOSTR ||
8855 subr == DIF_SUBR_RINDEX ||
8856 subr == DIF_SUBR_STRCHR ||
8857 subr == DIF_SUBR_STRJOIN ||
8858 subr == DIF_SUBR_STRRCHR ||
8859 subr == DIF_SUBR_STRSTR ||
8860 subr == DIF_SUBR_HTONS ||
8861 subr == DIF_SUBR_HTONL ||
8862 subr == DIF_SUBR_HTONLL ||
8863 subr == DIF_SUBR_NTOHS ||
8864 subr == DIF_SUBR_NTOHL ||
8865 subr == DIF_SUBR_NTOHLL ||
8866 subr == DIF_SUBR_MEMREF ||
8867 subr == DIF_SUBR_TYPEREF)
8870 err += efunc(pc, "invalid subr %u\n", subr);
8874 err += efunc(pc, "invalid opcode %u\n",
8875 DIF_INSTR_OP(instr));
8884 * Returns 1 if the expression in the DIF object can be cached on a per-thread
8888 dtrace_difo_cacheable(dtrace_difo_t *dp)
8895 for (i = 0; i < dp->dtdo_varlen; i++) {
8896 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8898 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
8901 switch (v->dtdv_id) {
8902 case DIF_VAR_CURTHREAD:
8905 case DIF_VAR_EXECARGS:
8906 case DIF_VAR_EXECNAME:
8907 case DIF_VAR_ZONENAME:
8916 * This DIF object may be cacheable. Now we need to look for any
8917 * array loading instructions, any memory loading instructions, or
8918 * any stores to thread-local variables.
8920 for (i = 0; i < dp->dtdo_len; i++) {
8921 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
8923 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
8924 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
8925 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
8926 op == DIF_OP_LDGA || op == DIF_OP_STTS)
8934 dtrace_difo_hold(dtrace_difo_t *dp)
8938 ASSERT(MUTEX_HELD(&dtrace_lock));
8941 ASSERT(dp->dtdo_refcnt != 0);
8944 * We need to check this DIF object for references to the variable
8945 * DIF_VAR_VTIMESTAMP.
8947 for (i = 0; i < dp->dtdo_varlen; i++) {
8948 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8950 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8953 if (dtrace_vtime_references++ == 0)
8954 dtrace_vtime_enable();
8959 * This routine calculates the dynamic variable chunksize for a given DIF
8960 * object. The calculation is not fool-proof, and can probably be tricked by
8961 * malicious DIF -- but it works for all compiler-generated DIF. Because this
8962 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
8963 * if a dynamic variable size exceeds the chunksize.
8966 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8969 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
8970 const dif_instr_t *text = dp->dtdo_buf;
8976 for (pc = 0; pc < dp->dtdo_len; pc++) {
8977 dif_instr_t instr = text[pc];
8978 uint_t op = DIF_INSTR_OP(instr);
8979 uint_t rd = DIF_INSTR_RD(instr);
8980 uint_t r1 = DIF_INSTR_R1(instr);
8984 dtrace_key_t *key = tupregs;
8988 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
8993 key = &tupregs[DIF_DTR_NREGS];
8994 key[0].dttk_size = 0;
8995 key[1].dttk_size = 0;
8997 scope = DIFV_SCOPE_THREAD;
9004 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9005 key[nkeys++].dttk_size = 0;
9007 key[nkeys++].dttk_size = 0;
9009 if (op == DIF_OP_STTAA) {
9010 scope = DIFV_SCOPE_THREAD;
9012 scope = DIFV_SCOPE_GLOBAL;
9018 if (ttop == DIF_DTR_NREGS)
9021 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9023 * If the register for the size of the "pushtr"
9024 * is %r0 (or the value is 0) and the type is
9025 * a string, we'll use the system-wide default
9028 tupregs[ttop++].dttk_size =
9029 dtrace_strsize_default;
9034 tupregs[ttop++].dttk_size = sval;
9040 if (ttop == DIF_DTR_NREGS)
9043 tupregs[ttop++].dttk_size = 0;
9046 case DIF_OP_FLUSHTS:
9063 * We have a dynamic variable allocation; calculate its size.
9065 for (ksize = 0, i = 0; i < nkeys; i++)
9066 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9068 size = sizeof (dtrace_dynvar_t);
9069 size += sizeof (dtrace_key_t) * (nkeys - 1);
9073 * Now we need to determine the size of the stored data.
9075 id = DIF_INSTR_VAR(instr);
9077 for (i = 0; i < dp->dtdo_varlen; i++) {
9078 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9080 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9081 size += v->dtdv_type.dtdt_size;
9086 if (i == dp->dtdo_varlen)
9090 * We have the size. If this is larger than the chunk size
9091 * for our dynamic variable state, reset the chunk size.
9093 size = P2ROUNDUP(size, sizeof (uint64_t));
9095 if (size > vstate->dtvs_dynvars.dtds_chunksize)
9096 vstate->dtvs_dynvars.dtds_chunksize = size;
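/*
 * Put differently, each chunk must be able to hold a dtrace_dynvar_t
 * header, one dtrace_key_t per key beyond the first, the rounded-up key
 * data accumulated in ksize above, and the variable's own data, with the
 * grand total rounded up to an eight-byte multiple.
 */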
9101 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9103 int i, oldsvars, osz, nsz, otlocals, ntlocals;
9106 ASSERT(MUTEX_HELD(&dtrace_lock));
9107 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
9109 for (i = 0; i < dp->dtdo_varlen; i++) {
9110 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9111 dtrace_statvar_t *svar, ***svarp = NULL;
9113 uint8_t scope = v->dtdv_scope;
9116 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9119 id -= DIF_VAR_OTHER_UBASE;
9122 case DIFV_SCOPE_THREAD:
9123 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
9124 dtrace_difv_t *tlocals;
9126 if ((ntlocals = (otlocals << 1)) == 0)
9129 osz = otlocals * sizeof (dtrace_difv_t);
9130 nsz = ntlocals * sizeof (dtrace_difv_t);
9132 tlocals = kmem_zalloc(nsz, KM_SLEEP);
9135 bcopy(vstate->dtvs_tlocals,
9137 kmem_free(vstate->dtvs_tlocals, osz);
9140 vstate->dtvs_tlocals = tlocals;
9141 vstate->dtvs_ntlocals = ntlocals;
9144 vstate->dtvs_tlocals[id] = *v;
9147 case DIFV_SCOPE_LOCAL:
9148 np = &vstate->dtvs_nlocals;
9149 svarp = &vstate->dtvs_locals;
9151 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9152 dsize = NCPU * (v->dtdv_type.dtdt_size +
9155 dsize = NCPU * sizeof (uint64_t);
9159 case DIFV_SCOPE_GLOBAL:
9160 np = &vstate->dtvs_nglobals;
9161 svarp = &vstate->dtvs_globals;
9163 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9164 dsize = v->dtdv_type.dtdt_size +
9173 while (id >= (oldsvars = *np)) {
9174 dtrace_statvar_t **statics;
9175 int newsvars, oldsize, newsize;
9177 if ((newsvars = (oldsvars << 1)) == 0)
9180 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9181 newsize = newsvars * sizeof (dtrace_statvar_t *);
9183 statics = kmem_zalloc(newsize, KM_SLEEP);
9186 bcopy(*svarp, statics, oldsize);
9187 kmem_free(*svarp, oldsize);
9194 if ((svar = (*svarp)[id]) == NULL) {
9195 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9196 svar->dtsv_var = *v;
9198 if ((svar->dtsv_size = dsize) != 0) {
9199 svar->dtsv_data = (uint64_t)(uintptr_t)
9200 kmem_zalloc(dsize, KM_SLEEP);
9203 (*svarp)[id] = svar;
9206 svar->dtsv_refcnt++;
9209 dtrace_difo_chunksize(dp, vstate);
9210 dtrace_difo_hold(dp);
9214 static dtrace_difo_t *
9215 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9220 ASSERT(dp->dtdo_buf != NULL);
9221 ASSERT(dp->dtdo_refcnt != 0);
9223 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9225 ASSERT(dp->dtdo_buf != NULL);
9226 sz = dp->dtdo_len * sizeof (dif_instr_t);
9227 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
9228 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
9229 new->dtdo_len = dp->dtdo_len;
9231 if (dp->dtdo_strtab != NULL) {
9232 ASSERT(dp->dtdo_strlen != 0);
9233 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
9234 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
9235 new->dtdo_strlen = dp->dtdo_strlen;
9238 if (dp->dtdo_inttab != NULL) {
9239 ASSERT(dp->dtdo_intlen != 0);
9240 sz = dp->dtdo_intlen * sizeof (uint64_t);
9241 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
9242 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
9243 new->dtdo_intlen = dp->dtdo_intlen;
9246 if (dp->dtdo_vartab != NULL) {
9247 ASSERT(dp->dtdo_varlen != 0);
9248 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
9249 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
9250 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
9251 new->dtdo_varlen = dp->dtdo_varlen;
9254 dtrace_difo_init(new, vstate);
9260 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9264 ASSERT(dp->dtdo_refcnt == 0);
9266 for (i = 0; i < dp->dtdo_varlen; i++) {
9267 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9268 dtrace_statvar_t *svar, **svarp = NULL;
9270 uint8_t scope = v->dtdv_scope;
9274 case DIFV_SCOPE_THREAD:
9277 case DIFV_SCOPE_LOCAL:
9278 np = &vstate->dtvs_nlocals;
9279 svarp = vstate->dtvs_locals;
9282 case DIFV_SCOPE_GLOBAL:
9283 np = &vstate->dtvs_nglobals;
9284 svarp = vstate->dtvs_globals;
9291 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9294 id -= DIF_VAR_OTHER_UBASE;
9298 ASSERT(svar != NULL);
9299 ASSERT(svar->dtsv_refcnt > 0);
9301 if (--svar->dtsv_refcnt > 0)
9304 if (svar->dtsv_size != 0) {
9305 ASSERT(svar->dtsv_data != 0);
9306 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9310 kmem_free(svar, sizeof (dtrace_statvar_t));
9314 if (dp->dtdo_buf != NULL)
9315 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9316 if (dp->dtdo_inttab != NULL)
9317 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9318 if (dp->dtdo_strtab != NULL)
9319 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9320 if (dp->dtdo_vartab != NULL)
9321 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9323 kmem_free(dp, sizeof (dtrace_difo_t));
9327 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9331 ASSERT(MUTEX_HELD(&dtrace_lock));
9332 ASSERT(dp->dtdo_refcnt != 0);
9334 for (i = 0; i < dp->dtdo_varlen; i++) {
9335 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9337 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9340 ASSERT(dtrace_vtime_references > 0);
9341 if (--dtrace_vtime_references == 0)
9342 dtrace_vtime_disable();
9345 if (--dp->dtdo_refcnt == 0)
9346 dtrace_difo_destroy(dp, vstate);
9350 * DTrace Format Functions
9353 dtrace_format_add(dtrace_state_t *state, char *str)
9356 uint16_t ndx, len = strlen(str) + 1;
9358 fmt = kmem_zalloc(len, KM_SLEEP);
9359 bcopy(str, fmt, len);
9361 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
9362 if (state->dts_formats[ndx] == NULL) {
9363 state->dts_formats[ndx] = fmt;
9368 if (state->dts_nformats == USHRT_MAX) {
9370 * This is only likely if a denial-of-service attack is being
9371 * attempted. As such, it's okay to fail silently here.
9373 kmem_free(fmt, len);
9378 * For simplicity, we always resize the formats array to be exactly the
9379 * number of formats.
9381 ndx = state->dts_nformats++;
9382 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
9384 if (state->dts_formats != NULL) {
9386 bcopy(state->dts_formats, new, ndx * sizeof (char *));
9387 kmem_free(state->dts_formats, ndx * sizeof (char *));
9390 state->dts_formats = new;
9391 state->dts_formats[ndx] = fmt;
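/*
 * Note that the value handed back to the caller is a one-based index into
 * dts_formats; dtrace_format_remove() below correspondingly indexes the
 * array with (format - 1).
 */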
9397 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
9401 ASSERT(state->dts_formats != NULL);
9402 ASSERT(format <= state->dts_nformats);
9403 ASSERT(state->dts_formats[format - 1] != NULL);
9405 fmt = state->dts_formats[format - 1];
9406 kmem_free(fmt, strlen(fmt) + 1);
9407 state->dts_formats[format - 1] = NULL;
9411 dtrace_format_destroy(dtrace_state_t *state)
9415 if (state->dts_nformats == 0) {
9416 ASSERT(state->dts_formats == NULL);
9420 ASSERT(state->dts_formats != NULL);
9422 for (i = 0; i < state->dts_nformats; i++) {
9423 char *fmt = state->dts_formats[i];
9428 kmem_free(fmt, strlen(fmt) + 1);
9431 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
9432 state->dts_nformats = 0;
9433 state->dts_formats = NULL;
9437 * DTrace Predicate Functions
9439 static dtrace_predicate_t *
9440 dtrace_predicate_create(dtrace_difo_t *dp)
9442 dtrace_predicate_t *pred;
9444 ASSERT(MUTEX_HELD(&dtrace_lock));
9445 ASSERT(dp->dtdo_refcnt != 0);
9447 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
9448 pred->dtp_difo = dp;
9449 pred->dtp_refcnt = 1;
9451 if (!dtrace_difo_cacheable(dp))
9454 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
9456 * This is only theoretically possible -- we have had 2^32
9457 * cacheable predicates on this machine. We cannot allow any
9458 * more predicates to become cacheable: as unlikely as it is,
9459 * there may be a thread caching a (now stale) predicate cache
9460 * ID. (N.B.: the temptation is being successfully resisted to
9461 * have this cmn_err() "Holy shit -- we executed this code!")
9466 pred->dtp_cacheid = dtrace_predcache_id++;
9472 dtrace_predicate_hold(dtrace_predicate_t *pred)
9474 ASSERT(MUTEX_HELD(&dtrace_lock));
9475 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
9476 ASSERT(pred->dtp_refcnt > 0);
9482 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
9484 dtrace_difo_t *dp = pred->dtp_difo;
9486 ASSERT(MUTEX_HELD(&dtrace_lock));
9487 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
9488 ASSERT(pred->dtp_refcnt > 0);
9490 if (--pred->dtp_refcnt == 0) {
9491 dtrace_difo_release(pred->dtp_difo, vstate);
9492 kmem_free(pred, sizeof (dtrace_predicate_t));
9497 * DTrace Action Description Functions
9499 static dtrace_actdesc_t *
9500 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
9501 uint64_t uarg, uint64_t arg)
9503 dtrace_actdesc_t *act;
9506 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
9507 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
9510 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
9511 act->dtad_kind = kind;
9512 act->dtad_ntuple = ntuple;
9513 act->dtad_uarg = uarg;
9514 act->dtad_arg = arg;
9515 act->dtad_refcnt = 1;
9521 dtrace_actdesc_hold(dtrace_actdesc_t *act)
9523 ASSERT(act->dtad_refcnt >= 1);
9528 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9530 dtrace_actkind_t kind = act->dtad_kind;
9533 ASSERT(act->dtad_refcnt >= 1);
9535 if (--act->dtad_refcnt != 0)
9538 if ((dp = act->dtad_difo) != NULL)
9539 dtrace_difo_release(dp, vstate);
9541 if (DTRACEACT_ISPRINTFLIKE(kind)) {
9542 char *str = (char *)(uintptr_t)act->dtad_arg;
9545 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9546 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9550 kmem_free(str, strlen(str) + 1);
9553 kmem_free(act, sizeof (dtrace_actdesc_t));
9557 * DTrace ECB Functions
9559 static dtrace_ecb_t *
9560 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9565 ASSERT(MUTEX_HELD(&dtrace_lock));
9567 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9568 ecb->dte_predicate = NULL;
9569 ecb->dte_probe = probe;
9572 * The default size is the size of the default action: recording
9575 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9576 ecb->dte_alignment = sizeof (dtrace_epid_t);
9578 epid = state->dts_epid++;
9580 if (epid - 1 >= state->dts_necbs) {
9581 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9582 int necbs = state->dts_necbs << 1;
9584 ASSERT(epid == state->dts_necbs + 1);
9587 ASSERT(oecbs == NULL);
9591 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9594 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9596 dtrace_membar_producer();
9597 state->dts_ecbs = ecbs;
9599 if (oecbs != NULL) {
9601 * If this state is active, we must dtrace_sync()
9602 * before we can free the old dts_ecbs array: we're
9603 * coming in hot, and there may be active ring
9604 * buffer processing (which indexes into the dts_ecbs
9605 * array) on another CPU.
9607 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9610 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9613 dtrace_membar_producer();
9614 state->dts_necbs = necbs;
9617 ecb->dte_state = state;
9619 ASSERT(state->dts_ecbs[epid - 1] == NULL);
9620 dtrace_membar_producer();
9621 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9627 dtrace_ecb_enable(dtrace_ecb_t *ecb)
9629 dtrace_probe_t *probe = ecb->dte_probe;
9631 ASSERT(MUTEX_HELD(&cpu_lock));
9632 ASSERT(MUTEX_HELD(&dtrace_lock));
9633 ASSERT(ecb->dte_next == NULL);
9635 if (probe == NULL) {
9637 * This is the NULL probe -- there's nothing to do.
9642 if (probe->dtpr_ecb == NULL) {
9643 dtrace_provider_t *prov = probe->dtpr_provider;
9646 * We're the first ECB on this probe.
9648 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9650 if (ecb->dte_predicate != NULL)
9651 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9653 prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9654 probe->dtpr_id, probe->dtpr_arg);
9657 * This probe is already active. Swing the last pointer to
9658 * point to the new ECB, and issue a dtrace_sync() to assure
9659 * that all CPUs have seen the change.
9661 ASSERT(probe->dtpr_ecb_last != NULL);
9662 probe->dtpr_ecb_last->dte_next = ecb;
9663 probe->dtpr_ecb_last = ecb;
9664 probe->dtpr_predcache = 0;
9671 dtrace_ecb_resize(dtrace_ecb_t *ecb)
9673 uint32_t maxalign = sizeof (dtrace_epid_t);
9674 uint32_t align = sizeof (uint8_t), offs, diff;
9675 dtrace_action_t *act;
9677 uint32_t aggbase = UINT32_MAX;
9678 dtrace_state_t *state = ecb->dte_state;
9681 * If we record anything, we always record the epid. (And we always
9684 offs = sizeof (dtrace_epid_t);
9685 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9687 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9688 dtrace_recdesc_t *rec = &act->dta_rec;
9690 if ((align = rec->dtrd_alignment) > maxalign)
9693 if (!wastuple && act->dta_intuple) {
9695 * This is the first record in a tuple. Align the
9696 * offset to be at offset 4 in an 8-byte aligned
9699 diff = offs + sizeof (dtrace_aggid_t);
9701 if ((diff = (diff & (sizeof (uint64_t) - 1))))
9702 offs += sizeof (uint64_t) - diff;
9704 aggbase = offs - sizeof (dtrace_aggid_t);
9705 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9709 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9711 * The current offset is not properly aligned; align it.
9713 offs += align - diff;
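/*
 * (For instance, with offs at 12 and an eight-byte record, diff is 4
 * and offs is bumped to 16.)
 */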
9716 rec->dtrd_offset = offs;
9718 if (offs + rec->dtrd_size > ecb->dte_needed) {
9719 ecb->dte_needed = offs + rec->dtrd_size;
9721 if (ecb->dte_needed > state->dts_needed)
9722 state->dts_needed = ecb->dte_needed;
9725 if (DTRACEACT_ISAGG(act->dta_kind)) {
9726 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9727 dtrace_action_t *first = agg->dtag_first, *prev;
9729 ASSERT(rec->dtrd_size != 0 && first != NULL);
9731 ASSERT(aggbase != UINT32_MAX);
9733 agg->dtag_base = aggbase;
9735 while ((prev = first->dta_prev) != NULL &&
9736 DTRACEACT_ISAGG(prev->dta_kind)) {
9737 agg = (dtrace_aggregation_t *)prev;
9738 first = agg->dtag_first;
9742 offs = prev->dta_rec.dtrd_offset +
9743 prev->dta_rec.dtrd_size;
9745 offs = sizeof (dtrace_epid_t);
9749 if (!act->dta_intuple)
9750 ecb->dte_size = offs + rec->dtrd_size;
9752 offs += rec->dtrd_size;
9755 wastuple = act->dta_intuple;
9758 if ((act = ecb->dte_action) != NULL &&
9759 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9760 ecb->dte_size == sizeof (dtrace_epid_t)) {
9762 * If the size is still sizeof (dtrace_epid_t), then all
9763 * actions store no data; set the size to 0.
9765 ecb->dte_alignment = maxalign;
9769 * If the needed space is still sizeof (dtrace_epid_t), then
9770 * all actions need no additional space; set the needed
9773 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9774 ecb->dte_needed = 0;
9780 * Set our alignment, and make sure that the dte_size and dte_needed
9781 * are aligned to the size of an EPID.
9783 ecb->dte_alignment = maxalign;
9784 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9785 ~(sizeof (dtrace_epid_t) - 1);
9786 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9787 ~(sizeof (dtrace_epid_t) - 1);
9788 ASSERT(ecb->dte_size <= ecb->dte_needed);
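/*
 * As a concrete illustration of the rounding above: a 13-byte payload
 * would yield a dte_size of 16, since both dte_size and dte_needed are
 * rounded up to a multiple of sizeof (dtrace_epid_t).
 */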
9791 static dtrace_action_t *
9792 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9794 dtrace_aggregation_t *agg;
9795 size_t size = sizeof (uint64_t);
9796 int ntuple = desc->dtad_ntuple;
9797 dtrace_action_t *act;
9798 dtrace_recdesc_t *frec;
9799 dtrace_aggid_t aggid;
9800 dtrace_state_t *state = ecb->dte_state;
9802 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9803 agg->dtag_ecb = ecb;
9805 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9807 switch (desc->dtad_kind) {
9809 agg->dtag_initial = INT64_MAX;
9810 agg->dtag_aggregate = dtrace_aggregate_min;
9814 agg->dtag_initial = INT64_MIN;
9815 agg->dtag_aggregate = dtrace_aggregate_max;
9818 case DTRACEAGG_COUNT:
9819 agg->dtag_aggregate = dtrace_aggregate_count;
9822 case DTRACEAGG_QUANTIZE:
9823 agg->dtag_aggregate = dtrace_aggregate_quantize;
9824 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9828 case DTRACEAGG_LQUANTIZE: {
9829 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9830 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9832 agg->dtag_initial = desc->dtad_arg;
9833 agg->dtag_aggregate = dtrace_aggregate_lquantize;
9835 if (step == 0 || levels == 0)
9838 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
9843 agg->dtag_aggregate = dtrace_aggregate_avg;
9844 size = sizeof (uint64_t) * 2;
9847 case DTRACEAGG_STDDEV:
9848 agg->dtag_aggregate = dtrace_aggregate_stddev;
9849 size = sizeof (uint64_t) * 4;
9853 agg->dtag_aggregate = dtrace_aggregate_sum;
9860 agg->dtag_action.dta_rec.dtrd_size = size;
9866 * We must make sure that we have enough actions for the n-tuple.
9868 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
9869 if (DTRACEACT_ISAGG(act->dta_kind))
9872 if (--ntuple == 0) {
9874 * This is the action with which our n-tuple begins.
9876 agg->dtag_first = act;
9882 * This n-tuple is short by ntuple elements. Return failure.
9884 ASSERT(ntuple != 0);
9886 kmem_free(agg, sizeof (dtrace_aggregation_t));
9891 * If the last action in the tuple has a size of zero, it's actually
9892 * an expression argument for the aggregating action.
9894 ASSERT(ecb->dte_action_last != NULL);
9895 act = ecb->dte_action_last;
9897 if (act->dta_kind == DTRACEACT_DIFEXPR) {
9898 ASSERT(act->dta_difo != NULL);
9900 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
9901 agg->dtag_hasarg = 1;
9905 * We need to allocate an id for this aggregation.
9908 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
9909 VM_BESTFIT | VM_SLEEP);
9911 aggid = alloc_unr(state->dts_aggid_arena);
9914 if (aggid - 1 >= state->dts_naggregations) {
9915 dtrace_aggregation_t **oaggs = state->dts_aggregations;
9916 dtrace_aggregation_t **aggs;
9917 int naggs = state->dts_naggregations << 1;
9918 int onaggs = state->dts_naggregations;
9920 ASSERT(aggid == state->dts_naggregations + 1);
9923 ASSERT(oaggs == NULL);
9927 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
9929 if (oaggs != NULL) {
9930 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
9931 kmem_free(oaggs, onaggs * sizeof (*aggs));
9934 state->dts_aggregations = aggs;
9935 state->dts_naggregations = naggs;
9938 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
9939 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
9941 frec = &agg->dtag_first->dta_rec;
9942 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
9943 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
9945 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
9946 ASSERT(!act->dta_intuple);
9947 act->dta_intuple = 1;
9950 return (&agg->dtag_action);
9954 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
9956 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9957 dtrace_state_t *state = ecb->dte_state;
9958 dtrace_aggid_t aggid = agg->dtag_id;
9960 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
9962 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
9964 free_unr(state->dts_aggid_arena, aggid);
9967 ASSERT(state->dts_aggregations[aggid - 1] == agg);
9968 state->dts_aggregations[aggid - 1] = NULL;
9970 kmem_free(agg, sizeof (dtrace_aggregation_t));
9974 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9976 dtrace_action_t *action, *last;
9977 dtrace_difo_t *dp = desc->dtad_difo;
9978 uint32_t size = 0, align = sizeof (uint8_t), mask;
9979 uint16_t format = 0;
9980 dtrace_recdesc_t *rec;
9981 dtrace_state_t *state = ecb->dte_state;
9982 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
9983 uint64_t arg = desc->dtad_arg;
9985 ASSERT(MUTEX_HELD(&dtrace_lock));
9986 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
9988 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
9990 * If this is an aggregating action, there must be neither
9991 * a speculate nor a commit on the action chain.
9993 dtrace_action_t *act;
9995 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9996 if (act->dta_kind == DTRACEACT_COMMIT)
9999 if (act->dta_kind == DTRACEACT_SPECULATE)
10003 action = dtrace_ecb_aggregation_create(ecb, desc);
10005 if (action == NULL)
10008 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10009 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10010 dp != NULL && dp->dtdo_destructive)) {
10011 state->dts_destructive = 1;
10014 switch (desc->dtad_kind) {
10015 case DTRACEACT_PRINTF:
10016 case DTRACEACT_PRINTA:
10017 case DTRACEACT_SYSTEM:
10018 case DTRACEACT_FREOPEN:
10020 * We know that our arg is a string -- turn it into a
10024 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA);
10029 ASSERT(arg > KERNELBASE);
10031 format = dtrace_format_add(state,
10032 (char *)(uintptr_t)arg);
10036 case DTRACEACT_LIBACT:
10037 case DTRACEACT_DIFEXPR:
10041 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10044 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10045 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10048 size = opt[DTRACEOPT_STRSIZE];
10053 case DTRACEACT_STACK:
10054 if ((nframes = arg) == 0) {
10055 nframes = opt[DTRACEOPT_STACKFRAMES];
10056 ASSERT(nframes > 0);
10060 size = nframes * sizeof (pc_t);
10063 case DTRACEACT_JSTACK:
10064 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10065 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10067 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10068 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10070 arg = DTRACE_USTACK_ARG(nframes, strsize);
10073 case DTRACEACT_USTACK:
10074 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10075 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10076 strsize = DTRACE_USTACK_STRSIZE(arg);
10077 nframes = opt[DTRACEOPT_USTACKFRAMES];
10078 ASSERT(nframes > 0);
10079 arg = DTRACE_USTACK_ARG(nframes, strsize);
10083 * Save a slot for the pid.
10085 size = (nframes + 1) * sizeof (uint64_t);
10086 size += DTRACE_USTACK_STRSIZE(arg);
10087 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10091 case DTRACEACT_SYM:
10092 case DTRACEACT_MOD:
10093 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
10094 sizeof (uint64_t)) ||
10095 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10099 case DTRACEACT_USYM:
10100 case DTRACEACT_UMOD:
10101 case DTRACEACT_UADDR:
10103 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
10104 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10108 * We have a slot for the pid, plus a slot for the
10109 * argument. To keep things simple (aligned with
10110 * bitness-neutral sizing), we store each as a 64-bit
10113 size = 2 * sizeof (uint64_t);
10116 case DTRACEACT_STOP:
10117 case DTRACEACT_BREAKPOINT:
10118 case DTRACEACT_PANIC:
10121 case DTRACEACT_CHILL:
10122 case DTRACEACT_DISCARD:
10123 case DTRACEACT_RAISE:
10128 case DTRACEACT_EXIT:
10130 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10131 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10135 case DTRACEACT_SPECULATE:
10136 if (ecb->dte_size > sizeof (dtrace_epid_t))
10142 state->dts_speculates = 1;
10145 case DTRACEACT_PRINTM:
10146 size = dp->dtdo_rtype.dtdt_size;
10149 case DTRACEACT_PRINTT:
10150 size = dp->dtdo_rtype.dtdt_size;
10153 case DTRACEACT_COMMIT: {
10154 dtrace_action_t *act = ecb->dte_action;
10156 for (; act != NULL; act = act->dta_next) {
10157 if (act->dta_kind == DTRACEACT_COMMIT)
10170 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10172 * If this is a data-storing action or a speculate,
10173 * we must be sure that there isn't a commit on the action chain.
10176 dtrace_action_t *act = ecb->dte_action;
10178 for (; act != NULL; act = act->dta_next) {
10179 if (act->dta_kind == DTRACEACT_COMMIT)
10184 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10185 action->dta_rec.dtrd_size = size;
10188 action->dta_refcnt = 1;
10189 rec = &action->dta_rec;
10190 size = rec->dtrd_size;
10192 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10193 if (!(size & mask)) {
10199 action->dta_kind = desc->dtad_kind;
10201 if ((action->dta_difo = dp) != NULL)
10202 dtrace_difo_hold(dp);
10204 rec->dtrd_action = action->dta_kind;
10205 rec->dtrd_arg = arg;
10206 rec->dtrd_uarg = desc->dtad_uarg;
10207 rec->dtrd_alignment = (uint16_t)align;
10208 rec->dtrd_format = format;
10210 if ((last = ecb->dte_action_last) != NULL) {
10211 ASSERT(ecb->dte_action != NULL);
10212 action->dta_prev = last;
10213 last->dta_next = action;
10215 ASSERT(ecb->dte_action == NULL);
10216 ecb->dte_action = action;
10219 ecb->dte_action_last = action;
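/*
 * A minimal, self-contained sketch of the alignment rule applied in the
 * mask loop above (illustrative only -- not part of the framework): a
 * record's alignment is the largest power of two no greater than
 * sizeof (uint64_t) that evenly divides its size, falling back to byte
 * alignment.  For example, a 24-byte record aligns to 8, a 12-byte
 * record to 4, and a 7-byte record to 1.
 */
static size_t
dtrace_record_alignment_sketch(size_t size)
{
	size_t mask;

	for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
		if ((size & mask) == 0)
			return (mask + 1);
	}

	return (1);
}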
10225 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10227 dtrace_action_t *act = ecb->dte_action, *next;
10228 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10232 if (act != NULL && act->dta_refcnt > 1) {
10233 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10236 for (; act != NULL; act = next) {
10237 next = act->dta_next;
10238 ASSERT(next != NULL || act == ecb->dte_action_last);
10239 ASSERT(act->dta_refcnt == 1);
10241 if ((format = act->dta_rec.dtrd_format) != 0)
10242 dtrace_format_remove(ecb->dte_state, format);
10244 if ((dp = act->dta_difo) != NULL)
10245 dtrace_difo_release(dp, vstate);
10247 if (DTRACEACT_ISAGG(act->dta_kind)) {
10248 dtrace_ecb_aggregation_destroy(ecb, act);
10250 kmem_free(act, sizeof (dtrace_action_t));
10255 ecb->dte_action = NULL;
10256 ecb->dte_action_last = NULL;
10257 ecb->dte_size = sizeof (dtrace_epid_t);
10261 dtrace_ecb_disable(dtrace_ecb_t *ecb)
10264 * We disable the ECB by removing it from its probe.
10266 dtrace_ecb_t *pecb, *prev = NULL;
10267 dtrace_probe_t *probe = ecb->dte_probe;
10269 ASSERT(MUTEX_HELD(&dtrace_lock));
10271 if (probe == NULL) {
10273 * This is the NULL probe; there is nothing to disable.
10278 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10284 ASSERT(pecb != NULL);
10286 if (prev == NULL) {
10287 probe->dtpr_ecb = ecb->dte_next;
10289 prev->dte_next = ecb->dte_next;
10292 if (ecb == probe->dtpr_ecb_last) {
10293 ASSERT(ecb->dte_next == NULL);
10294 probe->dtpr_ecb_last = prev;
10298 * The ECB has been disconnected from the probe; now sync to assure
10299 * that all CPUs have seen the change before returning.
10303 if (probe->dtpr_ecb == NULL) {
10305 * That was the last ECB on the probe; clear the predicate
10306 * cache ID for the probe, disable it and sync one more time
10307 * to assure that we'll never hit it again.
10309 dtrace_provider_t *prov = probe->dtpr_provider;
10311 ASSERT(ecb->dte_next == NULL);
10312 ASSERT(probe->dtpr_ecb_last == NULL);
10313 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10314 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10315 probe->dtpr_id, probe->dtpr_arg);
10319 * There is at least one ECB remaining on the probe. If there
10320 * is _exactly_ one, set the probe's predicate cache ID to be
10321 * the predicate cache ID of the remaining ECB.
10323 ASSERT(probe->dtpr_ecb_last != NULL);
10324 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10326 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10327 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10329 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10332 probe->dtpr_predcache = p->dtp_cacheid;
10335 ecb->dte_next = NULL;
10340 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10342 dtrace_state_t *state = ecb->dte_state;
10343 dtrace_vstate_t *vstate = &state->dts_vstate;
10344 dtrace_predicate_t *pred;
10345 dtrace_epid_t epid = ecb->dte_epid;
10347 ASSERT(MUTEX_HELD(&dtrace_lock));
10348 ASSERT(ecb->dte_next == NULL);
10349 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10351 if ((pred = ecb->dte_predicate) != NULL)
10352 dtrace_predicate_release(pred, vstate);
10354 dtrace_ecb_action_remove(ecb);
10356 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10357 state->dts_ecbs[epid - 1] = NULL;
10359 kmem_free(ecb, sizeof (dtrace_ecb_t));
10362 static dtrace_ecb_t *
10363 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10364 dtrace_enabling_t *enab)
10367 dtrace_predicate_t *pred;
10368 dtrace_actdesc_t *act;
10369 dtrace_provider_t *prov;
10370 dtrace_ecbdesc_t *desc = enab->dten_current;
10372 ASSERT(MUTEX_HELD(&dtrace_lock));
10373 ASSERT(state != NULL);
10375 ecb = dtrace_ecb_add(state, probe);
10376 ecb->dte_uarg = desc->dted_uarg;
10378 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
10379 dtrace_predicate_hold(pred);
10380 ecb->dte_predicate = pred;
10383 if (probe != NULL) {
10385 * If the provider shows more leg than the consumer is old
10386 * enough to see, we need to enable the appropriate implicit
10387 * predicate bits to prevent the ecb from activating at revealing times.
10390 * Providers specifying DTRACE_PRIV_USER at register time
10391 * are stating that they need the /proc-style privilege
10392 * model to be enforced, and this is what DTRACE_COND_OWNER
10393 * and DTRACE_COND_ZONEOWNER will then do at probe time.
10395 prov = probe->dtpr_provider;
10396 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
10397 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10398 ecb->dte_cond |= DTRACE_COND_OWNER;
10400 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
10401 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10402 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
10405 * If the provider shows us kernel innards and the user
10406 * is lacking sufficient privilege, enable the
10407 * DTRACE_COND_USERMODE implicit predicate.
10409 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
10410 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
10411 ecb->dte_cond |= DTRACE_COND_USERMODE;
10414 if (dtrace_ecb_create_cache != NULL) {
10416 * If we have a cached ecb, we'll use its action list instead
10417 * of creating our own (saving both time and space).
10419 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
10420 dtrace_action_t *act = cached->dte_action;
10423 ASSERT(act->dta_refcnt > 0);
10425 ecb->dte_action = act;
10426 ecb->dte_action_last = cached->dte_action_last;
10427 ecb->dte_needed = cached->dte_needed;
10428 ecb->dte_size = cached->dte_size;
10429 ecb->dte_alignment = cached->dte_alignment;
10435 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
10436 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
10437 dtrace_ecb_destroy(ecb);
10442 dtrace_ecb_resize(ecb);
10444 return (dtrace_ecb_create_cache = ecb);
10448 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
10451 dtrace_enabling_t *enab = arg;
10452 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
10454 ASSERT(state != NULL);
10456 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
10458 * This probe was created in a generation for which this
10459 * enabling has previously created ECBs; we don't want to
10460 * enable it again, so just kick out.
10462 return (DTRACE_MATCH_NEXT);
10465 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
10466 return (DTRACE_MATCH_DONE);
10468 dtrace_ecb_enable(ecb);
10469 return (DTRACE_MATCH_NEXT);
10472 static dtrace_ecb_t *
10473 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
10477 ASSERT(MUTEX_HELD(&dtrace_lock));
10479 if (id == 0 || id > state->dts_necbs)
10482 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
10483 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
10485 return (state->dts_ecbs[id - 1]);
10488 static dtrace_aggregation_t *
10489 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
10491 dtrace_aggregation_t *agg;
10493 ASSERT(MUTEX_HELD(&dtrace_lock));
10495 if (id == 0 || id > state->dts_naggregations)
10498 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
10499 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
10500 agg->dtag_id == id);
10502 return (state->dts_aggregations[id - 1]);
10506 * DTrace Buffer Functions
10508 * The following functions manipulate DTrace buffers. Most of these functions
10509 * are called in the context of establishing or processing consumer state;
10510 * exceptions are explicitly noted.
10514 * Note: called from cross call context. This function switches the two
10515 * buffers on a given CPU. The atomicity of this operation is assured by
10516 * disabling interrupts while the actual switch takes place; the disabling of
10517 * interrupts serializes the execution with any execution of dtrace_probe() on the same CPU.
10521 dtrace_buffer_switch(dtrace_buffer_t *buf)
10523 caddr_t tomax = buf->dtb_tomax;
10524 caddr_t xamot = buf->dtb_xamot;
10525 dtrace_icookie_t cookie;
10527 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10528 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
10530 cookie = dtrace_interrupt_disable();
10531 buf->dtb_tomax = xamot;
10532 buf->dtb_xamot = tomax;
10533 buf->dtb_xamot_drops = buf->dtb_drops;
10534 buf->dtb_xamot_offset = buf->dtb_offset;
10535 buf->dtb_xamot_errors = buf->dtb_errors;
10536 buf->dtb_xamot_flags = buf->dtb_flags;
10537 buf->dtb_offset = 0;
10538 buf->dtb_drops = 0;
10539 buf->dtb_errors = 0;
10540 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
10541 dtrace_interrupt_enable(cookie);
10545 * Note: called from cross call context. This function activates a buffer
10546 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
10547 * is guaranteed by the disabling of interrupts.
10550 dtrace_buffer_activate(dtrace_state_t *state)
10552 dtrace_buffer_t *buf;
10553 dtrace_icookie_t cookie = dtrace_interrupt_disable();
10555 buf = &state->dts_buffer[curcpu];
10557 if (buf->dtb_tomax != NULL) {
10559 * We might like to assert that the buffer is marked inactive,
10560 * but this isn't necessarily true: the buffer for the CPU
10561 * that processes the BEGIN probe has its buffer activated
10562 * manually. In this case, we take the (harmless) action
10563 * of re-clearing the INACTIVE bit.
10565 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10568 dtrace_interrupt_enable(cookie);
10572 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10580 dtrace_buffer_t *buf;
10583 ASSERT(MUTEX_HELD(&cpu_lock));
10584 ASSERT(MUTEX_HELD(&dtrace_lock));
10586 if (size > dtrace_nonroot_maxsize &&
10587 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
10593 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10596 buf = &bufs[cp->cpu_id];
10599 * If there is already a buffer allocated for this CPU, it
10600 * is only possible that this is a DR event. In this case,
10601 * the buffer size must match our specified size.
10603 if (buf->dtb_tomax != NULL) {
10604 ASSERT(buf->dtb_size == size);
10608 ASSERT(buf->dtb_xamot == NULL);
10610 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10613 buf->dtb_size = size;
10614 buf->dtb_flags = flags;
10615 buf->dtb_offset = 0;
10616 buf->dtb_drops = 0;
10618 if (flags & DTRACEBUF_NOSWITCH)
10621 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10623 } while ((cp = cp->cpu_next) != cpu_list);
10631 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10634 buf = &bufs[cp->cpu_id];
10636 if (buf->dtb_xamot != NULL) {
10637 ASSERT(buf->dtb_tomax != NULL);
10638 ASSERT(buf->dtb_size == size);
10639 kmem_free(buf->dtb_xamot, size);
10642 if (buf->dtb_tomax != NULL) {
10643 ASSERT(buf->dtb_size == size);
10644 kmem_free(buf->dtb_tomax, size);
10647 buf->dtb_tomax = NULL;
10648 buf->dtb_xamot = NULL;
10650 } while ((cp = cp->cpu_next) != cpu_list);
10656 #if defined(__amd64__)
10658 * FreeBSD isn't good at limiting the amount of memory we
10659 * ask to malloc, so let's place a limit here before trying
10660 * to do something that might well end in tears at bedtime.
10662 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
10666 ASSERT(MUTEX_HELD(&dtrace_lock));
10667 for (i = 0; i <= mp_maxid; i++) {
10668 if ((cp = pcpu_find(i)) == NULL)
10671 if (cpu != DTRACE_CPUALL && cpu != i)
10677 * If there is already a buffer allocated for this CPU, it
10678 * is only possible that this is a DR event. In this case,
10679 * the buffer size must match our specified size.
10681 if (buf->dtb_tomax != NULL) {
10682 ASSERT(buf->dtb_size == size);
10686 ASSERT(buf->dtb_xamot == NULL);
10688 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10691 buf->dtb_size = size;
10692 buf->dtb_flags = flags;
10693 buf->dtb_offset = 0;
10694 buf->dtb_drops = 0;
10696 if (flags & DTRACEBUF_NOSWITCH)
10699 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10707 * Error allocating memory, so free the buffers that were
10708 * allocated before the failed allocation.
10710 for (i = 0; i <= mp_maxid; i++) {
10711 if ((cp = pcpu_find(i)) == NULL)
10714 if (cpu != DTRACE_CPUALL && cpu != i)
10719 if (buf->dtb_xamot != NULL) {
10720 ASSERT(buf->dtb_tomax != NULL);
10721 ASSERT(buf->dtb_size == size);
10722 kmem_free(buf->dtb_xamot, size);
10725 if (buf->dtb_tomax != NULL) {
10726 ASSERT(buf->dtb_size == size);
10727 kmem_free(buf->dtb_tomax, size);
10730 buf->dtb_tomax = NULL;
10731 buf->dtb_xamot = NULL;
10741 * Note: called from probe context. This function just increments the drop
10742 * count on a buffer. It has been made a function to allow for the
10743 * possibility of understanding the source of mysterious drop counts. (A
10744 * problem for which one may be particularly disappointed that DTrace cannot
10745 * be used to understand DTrace.)
10748 dtrace_buffer_drop(dtrace_buffer_t *buf)
10754 * Note: called from probe context. This function is called to reserve space
10755 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
10756 * mstate. Returns the new offset in the buffer, or a negative value if an
10757 * error has occurred.
10760 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
10761 dtrace_state_t *state, dtrace_mstate_t *mstate)
10763 intptr_t offs = buf->dtb_offset, soffs;
10768 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
10771 if ((tomax = buf->dtb_tomax) == NULL) {
10772 dtrace_buffer_drop(buf);
10776 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
10777 while (offs & (align - 1)) {
10779 * Assert that our alignment is off by a number which
10780 * is itself sizeof (uint32_t) aligned.
10782 ASSERT(!((align - (offs & (align - 1))) &
10783 (sizeof (uint32_t) - 1)));
10784 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10785 offs += sizeof (uint32_t);
10788 if ((soffs = offs + needed) > buf->dtb_size) {
10789 dtrace_buffer_drop(buf);
10793 if (mstate == NULL)
10796 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
10797 mstate->dtms_scratch_size = buf->dtb_size - soffs;
10798 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10803 if (buf->dtb_flags & DTRACEBUF_FILL) {
10804 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
10805 (buf->dtb_flags & DTRACEBUF_FULL))
10810 total = needed + (offs & (align - 1));
10813 * For a ring buffer, life is quite a bit more complicated. Before
10814 * we can store any padding, we need to adjust our wrapping offset.
10815 * (If we've never before wrapped or we're not about to, no adjustment is needed.)
10818 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
10819 offs + total > buf->dtb_size) {
10820 woffs = buf->dtb_xamot_offset;
10822 if (offs + total > buf->dtb_size) {
10824 * We can't fit in the end of the buffer. First, a
10825 * sanity check that we can fit in the buffer at all.
10827 if (total > buf->dtb_size) {
10828 dtrace_buffer_drop(buf);
10833 * We're going to be storing at the top of the buffer,
10834 * so now we need to deal with the wrapped offset. We
10835 * only reset our wrapped offset to 0 if it is
10836 * currently greater than the current offset. If it
10837 * is less than the current offset, it is because a
10838 * previous allocation induced a wrap -- but the
10839 * allocation didn't subsequently take the space due
10840 * to an error or false predicate evaluation. In this
10841 * case, we'll just leave the wrapped offset alone: if
10842 * the wrapped offset hasn't been advanced far enough
10843 * for this allocation, it will be adjusted in the lower loop.
10846 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
10854 * Now we know that we're going to be storing to the
10855 * top of the buffer and that there is room for us
10856 * there. We need to clear the buffer from the current
10857 * offset to the end (there may be old gunk there).
10859 while (offs < buf->dtb_size)
10863 * We need to set our offset to zero. And because we
10864 * are wrapping, we need to set the bit indicating as
10865 * much. We can also adjust our needed space back
10866 * down to the space required by the ECB -- we know
10867 * that the top of the buffer is aligned.
10871 buf->dtb_flags |= DTRACEBUF_WRAPPED;
10874 * There is room for us in the buffer, so we simply
10875 * need to check the wrapped offset.
10877 if (woffs < offs) {
10879 * The wrapped offset is less than the offset.
10880 * This can happen if we allocated buffer space
10881 * that induced a wrap, but then we didn't
10882 * subsequently take the space due to an error
10883 * or false predicate evaluation. This is
10884 * okay; we know that _this_ allocation isn't
10885 * going to induce a wrap. We still can't
10886 * reset the wrapped offset to be zero,
10887 * however: the space may have been trashed in
10888 * the previous failed probe attempt. But at
10889 * least the wrapped offset doesn't need to
10890 * be adjusted at all...
10896 while (offs + total > woffs) {
10897 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
10900 if (epid == DTRACE_EPIDNONE) {
10901 size = sizeof (uint32_t);
10903 ASSERT(epid <= state->dts_necbs);
10904 ASSERT(state->dts_ecbs[epid - 1] != NULL);
10906 size = state->dts_ecbs[epid - 1]->dte_size;
10909 ASSERT(woffs + size <= buf->dtb_size);
10912 if (woffs + size == buf->dtb_size) {
10914 * We've reached the end of the buffer; we want
10915 * to set the wrapped offset to 0 and break
10916 * out. However, if the offs is 0, then we're
10917 * in a strange edge-condition: the amount of
10918 * space that we want to reserve plus the size
10919 * of the record that we're overwriting is
10920 * greater than the size of the buffer. This
10921 * is problematic because if we reserve the
10922 * space but subsequently don't consume it (due
10923 * to a failed predicate or error) the wrapped
10924 * offset will be 0 -- yet the EPID at offset 0
10925 * will not be committed. This situation is
10926 * relatively easy to deal with: if we're in
10927 * this case, the buffer is indistinguishable
10928 * from one that hasn't wrapped; we need only
10929 * finish the job by clearing the wrapped bit,
10930 * explicitly setting the offset to be 0, and
10931 * zero'ing out the old data in the buffer.
10934 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
10935 buf->dtb_offset = 0;
10938 while (woffs < buf->dtb_size)
10939 tomax[woffs++] = 0;
10950 * We have a wrapped offset. It may be that the wrapped offset
10951 * has become zero -- that's okay.
10953 buf->dtb_xamot_offset = woffs;
10958 * Now we can plow the buffer with any necessary padding.
10960 while (offs & (align - 1)) {
10962 * Assert that our alignment is off by a number which
10963 * is itself sizeof (uint32_t) aligned.
10965 ASSERT(!((align - (offs & (align - 1))) &
10966 (sizeof (uint32_t) - 1)));
10967 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10968 offs += sizeof (uint32_t);
10971 if (buf->dtb_flags & DTRACEBUF_FILL) {
10972 if (offs + needed > buf->dtb_size - state->dts_reserve) {
10973 buf->dtb_flags |= DTRACEBUF_FULL;
10978 if (mstate == NULL)
10982 * For ring buffers and fill buffers, the scratch space is always
10983 * the inactive buffer.
10985 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
10986 mstate->dtms_scratch_size = buf->dtb_size;
10987 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
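/*
 * A minimal sketch of the padding rule used by dtrace_buffer_reserve()
 * above (illustrative only): the gap up to an aligned offset is filled
 * with 4-byte DTRACE_EPIDNONE words so that a consumer walking the
 * buffer can recognize and skip the padding.  This assumes, as the
 * loops above assert, that the alignment is a power of two and that the
 * gap is itself a multiple of sizeof (uint32_t).
 */
static intptr_t
dtrace_buffer_pad_sketch(caddr_t tomax, intptr_t offs, size_t align)
{
	while (offs & (align - 1)) {
		DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
		offs += sizeof (uint32_t);
	}

	return (offs);
}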
10993 dtrace_buffer_polish(dtrace_buffer_t *buf)
10995 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
10996 ASSERT(MUTEX_HELD(&dtrace_lock));
10998 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
11002 * We need to polish the ring buffer. There are three cases:
11004 * - The first (and presumably most common) is that there is no gap
11005 * between the buffer offset and the wrapped offset. In this case,
11006 * there is nothing in the buffer that isn't valid data; we can
11007 * mark the buffer as polished and return.
11009 * - The second (less common than the first but still more common
11010 * than the third) is that there is a gap between the buffer offset
11011 * and the wrapped offset, and the wrapped offset is larger than the
11012 * buffer offset. This can happen because of an alignment issue, or
11013 * can happen because of a call to dtrace_buffer_reserve() that
11014 * didn't subsequently consume the buffer space. In this case,
11015 * we need to zero the data from the buffer offset to the wrapped offset.
11018 * - The third (and least common) is that there is a gap between the
11019 * buffer offset and the wrapped offset, but the wrapped offset is
11020 * _less_ than the buffer offset. This can only happen because a
11021 * call to dtrace_buffer_reserve() induced a wrap, but the space
11022 * was not subsequently consumed. In this case, we need to zero the
11023 * space from the offset to the end of the buffer _and_ from the
11024 * top of the buffer to the wrapped offset.
11026 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11027 bzero(buf->dtb_tomax + buf->dtb_offset,
11028 buf->dtb_xamot_offset - buf->dtb_offset);
11031 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11032 bzero(buf->dtb_tomax + buf->dtb_offset,
11033 buf->dtb_size - buf->dtb_offset);
11034 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
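/*
 * A minimal sketch of the three polishing cases enumerated above,
 * expressed against a plain byte array (illustrative only): "size",
 * "offs" and "woffs" stand in for dtb_size, dtb_offset and
 * dtb_xamot_offset respectively.
 */
static void
dtrace_buffer_polish_sketch(char *base, size_t size, size_t offs, size_t woffs)
{
	if (offs == woffs)
		return;				/* case 1: nothing to zero */

	if (offs < woffs) {
		bzero(base + offs, woffs - offs);	/* case 2: zero the gap */
		return;
	}

	bzero(base + offs, size - offs);	/* case 3: zero to the end... */
	bzero(base, woffs);			/* ...and from the top down */
}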
11039 dtrace_buffer_free(dtrace_buffer_t *bufs)
11043 for (i = 0; i < NCPU; i++) {
11044 dtrace_buffer_t *buf = &bufs[i];
11046 if (buf->dtb_tomax == NULL) {
11047 ASSERT(buf->dtb_xamot == NULL);
11048 ASSERT(buf->dtb_size == 0);
11052 if (buf->dtb_xamot != NULL) {
11053 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11054 kmem_free(buf->dtb_xamot, buf->dtb_size);
11057 kmem_free(buf->dtb_tomax, buf->dtb_size);
11059 buf->dtb_tomax = NULL;
11060 buf->dtb_xamot = NULL;
11065 * DTrace Enabling Functions
11067 static dtrace_enabling_t *
11068 dtrace_enabling_create(dtrace_vstate_t *vstate)
11070 dtrace_enabling_t *enab;
11072 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11073 enab->dten_vstate = vstate;
11079 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11081 dtrace_ecbdesc_t **ndesc;
11082 size_t osize, nsize;
11085 * We can't add to enablings after we've enabled them, or after we've retained them.
11088 ASSERT(enab->dten_probegen == 0);
11089 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11091 if (enab->dten_ndesc < enab->dten_maxdesc) {
11092 enab->dten_desc[enab->dten_ndesc++] = ecb;
11096 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11098 if (enab->dten_maxdesc == 0) {
11099 enab->dten_maxdesc = 1;
11101 enab->dten_maxdesc <<= 1;
11104 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
11106 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11107 ndesc = kmem_zalloc(nsize, KM_SLEEP);
11108 bcopy(enab->dten_desc, ndesc, osize);
11109 if (enab->dten_desc != NULL)
11110 kmem_free(enab->dten_desc, osize);
11112 enab->dten_desc = ndesc;
11113 enab->dten_desc[enab->dten_ndesc++] = ecb;
11117 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
11118 dtrace_probedesc_t *pd)
11120 dtrace_ecbdesc_t *new;
11121 dtrace_predicate_t *pred;
11122 dtrace_actdesc_t *act;
11125 * We're going to create a new ECB description that matches the
11126 * specified ECB in every way, but has the specified probe description.
11128 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11130 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
11131 dtrace_predicate_hold(pred);
11133 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
11134 dtrace_actdesc_hold(act);
11136 new->dted_action = ecb->dted_action;
11137 new->dted_pred = ecb->dted_pred;
11138 new->dted_probe = *pd;
11139 new->dted_uarg = ecb->dted_uarg;
11141 dtrace_enabling_add(enab, new);
11145 dtrace_enabling_dump(dtrace_enabling_t *enab)
11149 for (i = 0; i < enab->dten_ndesc; i++) {
11150 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
11152 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
11153 desc->dtpd_provider, desc->dtpd_mod,
11154 desc->dtpd_func, desc->dtpd_name);
11159 dtrace_enabling_destroy(dtrace_enabling_t *enab)
11162 dtrace_ecbdesc_t *ep;
11163 dtrace_vstate_t *vstate = enab->dten_vstate;
11165 ASSERT(MUTEX_HELD(&dtrace_lock));
11167 for (i = 0; i < enab->dten_ndesc; i++) {
11168 dtrace_actdesc_t *act, *next;
11169 dtrace_predicate_t *pred;
11171 ep = enab->dten_desc[i];
11173 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
11174 dtrace_predicate_release(pred, vstate);
11176 for (act = ep->dted_action; act != NULL; act = next) {
11177 next = act->dtad_next;
11178 dtrace_actdesc_release(act, vstate);
11181 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11184 if (enab->dten_desc != NULL)
11185 kmem_free(enab->dten_desc,
11186 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
11189 * If this was a retained enabling, decrement the dts_nretained count
11190 * and take it off of the dtrace_retained list.
11192 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
11193 dtrace_retained == enab) {
11194 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11195 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
11196 enab->dten_vstate->dtvs_state->dts_nretained--;
11199 if (enab->dten_prev == NULL) {
11200 if (dtrace_retained == enab) {
11201 dtrace_retained = enab->dten_next;
11203 if (dtrace_retained != NULL)
11204 dtrace_retained->dten_prev = NULL;
11207 ASSERT(enab != dtrace_retained);
11208 ASSERT(dtrace_retained != NULL);
11209 enab->dten_prev->dten_next = enab->dten_next;
11212 if (enab->dten_next != NULL) {
11213 ASSERT(dtrace_retained != NULL);
11214 enab->dten_next->dten_prev = enab->dten_prev;
11217 kmem_free(enab, sizeof (dtrace_enabling_t));
11221 dtrace_enabling_retain(dtrace_enabling_t *enab)
11223 dtrace_state_t *state;
11225 ASSERT(MUTEX_HELD(&dtrace_lock));
11226 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11227 ASSERT(enab->dten_vstate != NULL);
11229 state = enab->dten_vstate->dtvs_state;
11230 ASSERT(state != NULL);
11233 * We only allow each state to retain dtrace_retain_max enablings.
11235 if (state->dts_nretained >= dtrace_retain_max)
11238 state->dts_nretained++;
11240 if (dtrace_retained == NULL) {
11241 dtrace_retained = enab;
11245 enab->dten_next = dtrace_retained;
11246 dtrace_retained->dten_prev = enab;
11247 dtrace_retained = enab;
11253 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11254 dtrace_probedesc_t *create)
11256 dtrace_enabling_t *new, *enab;
11257 int found = 0, err = ENOENT;
11259 ASSERT(MUTEX_HELD(&dtrace_lock));
11260 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11261 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11262 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11263 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11265 new = dtrace_enabling_create(&state->dts_vstate);
11268 * Iterate over all retained enablings, looking for enablings that
11269 * match the specified state.
11271 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11275 * dtvs_state can only be NULL for helper enablings -- and
11276 * helper enablings can't be retained.
11278 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11280 if (enab->dten_vstate->dtvs_state != state)
11284 * Now iterate over each probe description; we're looking for
11285 * an exact match to the specified probe description.
11287 for (i = 0; i < enab->dten_ndesc; i++) {
11288 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11289 dtrace_probedesc_t *pd = &ep->dted_probe;
11291 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11294 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11297 if (strcmp(pd->dtpd_func, match->dtpd_func))
11300 if (strcmp(pd->dtpd_name, match->dtpd_name))
11304 * We have a winning probe! Add it to our growing list.
11308 dtrace_enabling_addlike(new, ep, create);
11312 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11313 dtrace_enabling_destroy(new);
11321 dtrace_enabling_retract(dtrace_state_t *state)
11323 dtrace_enabling_t *enab, *next;
11325 ASSERT(MUTEX_HELD(&dtrace_lock));
11328 * Iterate over all retained enablings, destroying the enablings retained
11329 * for the specified state.
11331 for (enab = dtrace_retained; enab != NULL; enab = next) {
11332 next = enab->dten_next;
11335 * dtvs_state can only be NULL for helper enablings -- and
11336 * helper enablings can't be retained.
11338 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11340 if (enab->dten_vstate->dtvs_state == state) {
11341 ASSERT(state->dts_nretained > 0);
11342 dtrace_enabling_destroy(enab);
11346 ASSERT(state->dts_nretained == 0);
11350 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11355 ASSERT(MUTEX_HELD(&cpu_lock));
11356 ASSERT(MUTEX_HELD(&dtrace_lock));
11358 for (i = 0; i < enab->dten_ndesc; i++) {
11359 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11361 enab->dten_current = ep;
11362 enab->dten_error = 0;
11364 matched += dtrace_probe_enable(&ep->dted_probe, enab);
11366 if (enab->dten_error != 0) {
11368 * If we get an error half-way through enabling the
11369 * probes, we kick out -- perhaps with some number of
11370 * them enabled. Leaving enabled probes enabled may
11371 * be slightly confusing for user-level, but we expect
11372 * that no one will attempt to actually drive on in
11373 * the face of such errors. If this is an anonymous
11374 * enabling (indicated with a NULL nmatched pointer),
11375 * we cmn_err() a message. We aren't expecting to
11376 * get such an error -- to the extent it can exist at all,
11377 * it would be a result of corrupted DOF in the driver properties.
11380 if (nmatched == NULL) {
11381 cmn_err(CE_WARN, "dtrace_enabling_match() "
11382 "error on %p: %d", (void *)ep,
11386 return (enab->dten_error);
11390 enab->dten_probegen = dtrace_probegen;
11391 if (nmatched != NULL)
11392 *nmatched = matched;
11398 dtrace_enabling_matchall(void)
11400 dtrace_enabling_t *enab;
11402 mutex_enter(&cpu_lock);
11403 mutex_enter(&dtrace_lock);
11406 * Iterate over all retained enablings to see if any probes match
11407 * against them. We only perform this operation on enablings for which
11408 * we have sufficient permissions by virtue of being in the global zone
11409 * or in the same zone as the DTrace client. Because we can be called
11410 * after dtrace_detach() has been called, we cannot assert that there
11411 * are retained enablings. We can safely load from dtrace_retained,
11412 * however: the taskq_destroy() at the end of dtrace_detach() will
11413 * block pending our completion.
11415 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11417 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
11419 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr))
11421 (void) dtrace_enabling_match(enab, NULL);
11424 mutex_exit(&dtrace_lock);
11425 mutex_exit(&cpu_lock);
11429 * If an enabling is to be enabled without having matched probes (that is, if
11430 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11431 * enabling must be _primed_ by creating an ECB for every ECB description.
11432 * This must be done to assure that we know the number of speculations, the
11433 * number of aggregations, the minimum buffer size needed, etc. before we
11434 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
11435 * enabling any probes, we create ECBs for every ECB description, but with a
11436 * NULL probe -- which is exactly what this function does.
11439 dtrace_enabling_prime(dtrace_state_t *state)
11441 dtrace_enabling_t *enab;
11444 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11445 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11447 if (enab->dten_vstate->dtvs_state != state)
11451 * We don't want to prime an enabling more than once, lest
11452 * we allow a malicious user to induce resource exhaustion.
11453 * (The ECBs that result from priming an enabling aren't
11454 * leaked -- but they also aren't deallocated until the
11455 * consumer state is destroyed.)
11457 if (enab->dten_primed)
11460 for (i = 0; i < enab->dten_ndesc; i++) {
11461 enab->dten_current = enab->dten_desc[i];
11462 (void) dtrace_probe_enable(NULL, enab);
11465 enab->dten_primed = 1;
11470 * Called to indicate that probes should be provided due to retained
11471 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
11472 * must take an initial lap through the enabling calling the dtps_provide()
11473 * entry point explicitly to allow for autocreated probes.
11476 dtrace_enabling_provide(dtrace_provider_t *prv)
11479 dtrace_probedesc_t desc;
11481 ASSERT(MUTEX_HELD(&dtrace_lock));
11482 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
11486 prv = dtrace_provider;
11490 dtrace_enabling_t *enab = dtrace_retained;
11491 void *parg = prv->dtpv_arg;
11493 for (; enab != NULL; enab = enab->dten_next) {
11494 for (i = 0; i < enab->dten_ndesc; i++) {
11495 desc = enab->dten_desc[i]->dted_probe;
11496 mutex_exit(&dtrace_lock);
11497 prv->dtpv_pops.dtps_provide(parg, &desc);
11498 mutex_enter(&dtrace_lock);
11501 } while (all && (prv = prv->dtpv_next) != NULL);
11503 mutex_exit(&dtrace_lock);
11504 dtrace_probe_provide(NULL, all ? NULL : prv);
11505 mutex_enter(&dtrace_lock);
11509 * DTrace DOF Functions
11513 dtrace_dof_error(dof_hdr_t *dof, const char *str)
11515 if (dtrace_err_verbose)
11516 cmn_err(CE_WARN, "failed to process DOF: %s", str);
11518 #ifdef DTRACE_ERRDEBUG
11519 dtrace_errdebug(str);
11524 * Create DOF out of a currently enabled state. Right now, we only create
11525 * DOF containing the run-time options -- but this could be expanded to create
11526 * complete DOF representing the enabled state.
11529 dtrace_dof_create(dtrace_state_t *state)
11533 dof_optdesc_t *opt;
11534 int i, len = sizeof (dof_hdr_t) +
11535 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
11536 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11538 ASSERT(MUTEX_HELD(&dtrace_lock));
11540 dof = kmem_zalloc(len, KM_SLEEP);
11541 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
11542 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
11543 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
11544 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
11546 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
11547 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
11548 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
11549 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
11550 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
11551 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11553 dof->dofh_flags = 0;
11554 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11555 dof->dofh_secsize = sizeof (dof_sec_t);
11556 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11557 dof->dofh_secoff = sizeof (dof_hdr_t);
11558 dof->dofh_loadsz = len;
11559 dof->dofh_filesz = len;
11563 * Fill in the option section header...
11565 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11566 sec->dofs_type = DOF_SECT_OPTDESC;
11567 sec->dofs_align = sizeof (uint64_t);
11568 sec->dofs_flags = DOF_SECF_LOAD;
11569 sec->dofs_entsize = sizeof (dof_optdesc_t);
11571 opt = (dof_optdesc_t *)((uintptr_t)sec +
11572 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11574 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11575 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11577 for (i = 0; i < DTRACEOPT_MAX; i++) {
11578 opt[i].dofo_option = i;
11579 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11580 opt[i].dofo_value = state->dts_options[i];
11587 dtrace_dof_copyin(uintptr_t uarg, int *errp)
11589 dof_hdr_t hdr, *dof;
11591 ASSERT(!MUTEX_HELD(&dtrace_lock));
11594 * First, we're going to copyin() the sizeof (dof_hdr_t).
11596 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11597 dtrace_dof_error(NULL, "failed to copyin DOF header");
11603 * Now we'll allocate the entire DOF and copy it in -- provided
11604 * that the length isn't outrageous.
11606 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11607 dtrace_dof_error(&hdr, "load size exceeds maximum");
11612 if (hdr.dofh_loadsz < sizeof (hdr)) {
11613 dtrace_dof_error(&hdr, "invalid load size");
11618 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11620 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) {
11621 kmem_free(dof, hdr.dofh_loadsz);
11630 static __inline uchar_t
11631 dtrace_dof_char(char c)
{
11650 return (c - 'A' + 10);
11657 return (c - 'a' + 10);
11659 /* Should not reach here. */
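/*
 * A minimal sketch of how dtrace_dof_char() can be used to decode a DOF
 * blob that has been encoded as a hex string, mirroring the FreeBSD
 * environment-variable path in dtrace_dof_property() below (illustrative
 * only; assumes the input is exactly 2 * outlen valid hex digits).
 */
static void
dtrace_dof_hex_decode_sketch(const char *hex, uchar_t *out, size_t outlen)
{
	size_t i;

	for (i = 0; i < outlen; i++) {
		out[i] = (dtrace_dof_char(hex[2 * i]) << 4) |
		    dtrace_dof_char(hex[2 * i + 1]);
	}
}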
11665 dtrace_dof_property(const char *name)
11669 unsigned int len, i;
11674 * Unfortunately, arrays of values in .conf files are always (and
11675 * only) interpreted to be integer arrays. We must read our DOF
11676 * as an integer array, and then squeeze it into a byte array.
11678 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11679 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11682 for (i = 0; i < len; i++)
11683 buf[i] = (uchar_t)(((int *)buf)[i]);
11685 if (len < sizeof (dof_hdr_t)) {
11686 ddi_prop_free(buf);
11687 dtrace_dof_error(NULL, "truncated header");
11691 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11692 ddi_prop_free(buf);
11693 dtrace_dof_error(NULL, "truncated DOF");
11697 if (loadsz >= dtrace_dof_maxsize) {
11698 ddi_prop_free(buf);
11699 dtrace_dof_error(NULL, "oversized DOF");
11703 dof = kmem_alloc(loadsz, KM_SLEEP);
11704 bcopy(buf, dof, loadsz);
11705 ddi_prop_free(buf);
11710 if ((p_env = getenv(name)) == NULL)
11713 len = strlen(p_env) / 2;
11715 buf = kmem_alloc(len, KM_SLEEP);
11717 dof = (dof_hdr_t *) buf;
11721 for (i = 0; i < len; i++) {
11722 buf[i] = (dtrace_dof_char(p[0]) << 4) |
11723 dtrace_dof_char(p[1]);
11729 if (len < sizeof (dof_hdr_t)) {
11731 dtrace_dof_error(NULL, "truncated header");
11735 if (len < (loadsz = dof->dofh_loadsz)) {
11737 dtrace_dof_error(NULL, "truncated DOF");
11741 if (loadsz >= dtrace_dof_maxsize) {
11743 dtrace_dof_error(NULL, "oversized DOF");
11752 dtrace_dof_destroy(dof_hdr_t *dof)
11754 kmem_free(dof, dof->dofh_loadsz);
11758 * Return the dof_sec_t pointer corresponding to a given section index. If the
11759 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
11760 * a type other than DOF_SECT_NONE is specified, the header is checked against
11761 * this type and NULL is returned if the types do not match.
11764 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
11766 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
11767 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
11769 if (i >= dof->dofh_secnum) {
11770 dtrace_dof_error(dof, "referenced section index is invalid");
11774 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11775 dtrace_dof_error(dof, "referenced section is not loadable");
11779 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11780 dtrace_dof_error(dof, "referenced section is the wrong type");
11787 static dtrace_probedesc_t *
11788 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11790 dof_probedesc_t *probe;
11792 uintptr_t daddr = (uintptr_t)dof;
11796 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11797 dtrace_dof_error(dof, "invalid probe section");
11801 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11802 dtrace_dof_error(dof, "bad alignment in probe description");
11806 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11807 dtrace_dof_error(dof, "truncated probe description");
11811 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11812 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
11814 if (strtab == NULL)
11817 str = daddr + strtab->dofs_offset;
11818 size = strtab->dofs_size;
11820 if (probe->dofp_provider >= strtab->dofs_size) {
11821 dtrace_dof_error(dof, "corrupt probe provider");
11825 (void) strncpy(desc->dtpd_provider,
11826 (char *)(str + probe->dofp_provider),
11827 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
11829 if (probe->dofp_mod >= strtab->dofs_size) {
11830 dtrace_dof_error(dof, "corrupt probe module");
11834 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
11835 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
11837 if (probe->dofp_func >= strtab->dofs_size) {
11838 dtrace_dof_error(dof, "corrupt probe function");
11842 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
11843 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
11845 if (probe->dofp_name >= strtab->dofs_size) {
11846 dtrace_dof_error(dof, "corrupt probe name");
11850 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
11851 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
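/*
 * A minimal sketch of the bounds-checked string-table extraction pattern
 * used repeatedly above (illustrative only): an offset is valid only if
 * it falls within the string table, and the copy length is clamped so
 * that it can overrun neither the destination nor the end of the table.
 * The explicit NUL termination is added for the sake of the sketch; the
 * real code copies into probe descriptions that were zero-allocated.
 */
static int
dtrace_dof_strtab_copy_sketch(const char *strtab, size_t strsize,
    uint64_t offset, char *dst, size_t dstsize)
{
	if (offset >= strsize)
		return (-1);		/* corrupt offset */

	(void) strncpy(dst, strtab + offset,
	    MIN(dstsize - 1, strsize - offset));
	dst[dstsize - 1] = '\0';

	return (0);
}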
11856 static dtrace_difo_t *
11857 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11862 dof_difohdr_t *dofd;
11863 uintptr_t daddr = (uintptr_t)dof;
11864 size_t max = dtrace_difo_maxsize;
11867 static const struct {
11875 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
11876 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
11877 sizeof (dif_instr_t), "multiple DIF sections" },
11879 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
11880 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
11881 sizeof (uint64_t), "multiple integer tables" },
11883 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
11884 offsetof(dtrace_difo_t, dtdo_strlen), 0,
11885 sizeof (char), "multiple string tables" },
11887 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
11888 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
11889 sizeof (uint_t), "multiple variable tables" },
11891 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
11894 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
11895 dtrace_dof_error(dof, "invalid DIFO header section");
11899 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11900 dtrace_dof_error(dof, "bad alignment in DIFO header");
11904 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
11905 sec->dofs_size % sizeof (dof_secidx_t)) {
11906 dtrace_dof_error(dof, "bad size in DIFO header");
11910 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11911 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
11913 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
11914 dp->dtdo_rtype = dofd->dofd_rtype;
11916 for (l = 0; l < n; l++) {
11921 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
11922 dofd->dofd_links[l])) == NULL)
11923 goto err; /* invalid section link */
11925 if (ttl + subsec->dofs_size > max) {
11926 dtrace_dof_error(dof, "exceeds maximum size");
11930 ttl += subsec->dofs_size;
11932 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
11933 if (subsec->dofs_type != difo[i].section)
11936 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
11937 dtrace_dof_error(dof, "section not loaded");
11941 if (subsec->dofs_align != difo[i].align) {
11942 dtrace_dof_error(dof, "bad alignment");
11946 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
11947 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
11949 if (*bufp != NULL) {
11950 dtrace_dof_error(dof, difo[i].msg);
11954 if (difo[i].entsize != subsec->dofs_entsize) {
11955 dtrace_dof_error(dof, "entry size mismatch");
11959 if (subsec->dofs_entsize != 0 &&
11960 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
11961 dtrace_dof_error(dof, "corrupt entry size");
11965 *lenp = subsec->dofs_size;
11966 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
11967 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
11968 *bufp, subsec->dofs_size);
11970 if (subsec->dofs_entsize != 0)
11971 *lenp /= subsec->dofs_entsize;
11977 * If we encounter a loadable DIFO sub-section that is not
11978 * known to us, assume this is a broken program and fail.
11980 if (difo[i].section == DOF_SECT_NONE &&
11981 (subsec->dofs_flags & DOF_SECF_LOAD)) {
11982 dtrace_dof_error(dof, "unrecognized DIFO subsection");
11987 if (dp->dtdo_buf == NULL) {
11989 * We can't have a DIF object without DIF text.
11991 dtrace_dof_error(dof, "missing DIF text");
11996 * Before we validate the DIF object, run through the variable table
11997 * looking for the strings -- if any of their sizes are 0, we'll set
11998 * their size to be the system-wide default string size. Note that
11999 * this should _not_ happen if the "strsize" option has been set --
12000 * in this case, the compiler should have set the size to reflect the
12001 * setting of the option.
12003 for (i = 0; i < dp->dtdo_varlen; i++) {
12004 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12005 dtrace_diftype_t *t = &v->dtdv_type;
12007 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12010 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12011 t->dtdt_size = dtrace_strsize_default;
12014 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12017 dtrace_difo_init(dp, vstate);
12021 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12022 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12023 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12024 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12026 kmem_free(dp, sizeof (dtrace_difo_t));
12030 static dtrace_predicate_t *
12031 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12036 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12039 return (dtrace_predicate_create(dp));
12042 static dtrace_actdesc_t *
12043 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12046 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12047 dof_actdesc_t *desc;
12048 dof_sec_t *difosec;
12050 uintptr_t daddr = (uintptr_t)dof;
12052 dtrace_actkind_t kind;
12054 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12055 dtrace_dof_error(dof, "invalid action section");
12059 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12060 dtrace_dof_error(dof, "truncated action description");
12064 if (sec->dofs_align != sizeof (uint64_t)) {
12065 dtrace_dof_error(dof, "bad alignment in action description");
12069 if (sec->dofs_size < sec->dofs_entsize) {
12070 dtrace_dof_error(dof, "section entry size exceeds total size");
12074 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12075 dtrace_dof_error(dof, "bad entry size in action description");
12079 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12080 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12084 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12085 desc = (dof_actdesc_t *)(daddr +
12086 (uintptr_t)sec->dofs_offset + offs);
12087 kind = (dtrace_actkind_t)desc->dofa_kind;
12089 if (DTRACEACT_ISPRINTFLIKE(kind) &&
12090 (kind != DTRACEACT_PRINTA ||
12091 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12097 * printf()-like actions must have a format string.
12099 if ((strtab = dtrace_dof_sect(dof,
12100 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12103 str = (char *)((uintptr_t)dof +
12104 (uintptr_t)strtab->dofs_offset);
12106 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12107 if (str[i] == '\0')
12111 if (i >= strtab->dofs_size) {
12112 dtrace_dof_error(dof, "bogus format string");
12116 if (i == desc->dofa_arg) {
12117 dtrace_dof_error(dof, "empty format string");
12121 i -= desc->dofa_arg;
12122 fmt = kmem_alloc(i + 1, KM_SLEEP);
12123 bcopy(&str[desc->dofa_arg], fmt, i + 1);
12124 arg = (uint64_t)(uintptr_t)fmt;
12126 if (kind == DTRACEACT_PRINTA) {
12127 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
12130 arg = desc->dofa_arg;
12134 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
12135 desc->dofa_uarg, arg);
12137 if (last != NULL) {
12138 last->dtad_next = act;
12145 if (desc->dofa_difo == DOF_SECIDX_NONE)
12148 if ((difosec = dtrace_dof_sect(dof,
12149 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
12152 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
12154 if (act->dtad_difo == NULL)
12158 ASSERT(first != NULL);
12162 for (act = first; act != NULL; act = next) {
12163 next = act->dtad_next;
12164 dtrace_actdesc_release(act, vstate);
12170 static dtrace_ecbdesc_t *
12171 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12174 dtrace_ecbdesc_t *ep;
12175 dof_ecbdesc_t *ecb;
12176 dtrace_probedesc_t *desc;
12177 dtrace_predicate_t *pred = NULL;
12179 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
12180 dtrace_dof_error(dof, "truncated ECB description");
12184 if (sec->dofs_align != sizeof (uint64_t)) {
12185 dtrace_dof_error(dof, "bad alignment in ECB description");
12189 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
12190 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
12195 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12196 ep->dted_uarg = ecb->dofe_uarg;
12197 desc = &ep->dted_probe;
12199 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
12202 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
12203 if ((sec = dtrace_dof_sect(dof,
12204 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
12207 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
12210 ep->dted_pred.dtpdd_predicate = pred;
12213 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
12214 if ((sec = dtrace_dof_sect(dof,
12215 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
12218 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
12220 if (ep->dted_action == NULL)
12228 dtrace_predicate_release(pred, vstate);
12229 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12234 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
12235 * specified DOF. At present, this amounts to simply adding 'ubase' to the
12236 * site of any user SETX relocations to account for load object base address.
12237 * In the future, if we need other relocations, this function can be extended.
12240 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
12242 uintptr_t daddr = (uintptr_t)dof;
12243 dof_relohdr_t *dofr =
12244 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12245 dof_sec_t *ss, *rs, *ts;
12249 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12250 sec->dofs_align != sizeof (dof_secidx_t)) {
12251 dtrace_dof_error(dof, "invalid relocation header");
12255 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12256 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12257 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12259 if (ss == NULL || rs == NULL || ts == NULL)
12260 return (-1); /* dtrace_dof_error() has been called already */
12262 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12263 rs->dofs_align != sizeof (uint64_t)) {
12264 dtrace_dof_error(dof, "invalid relocation section");
12268 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12269 n = rs->dofs_size / rs->dofs_entsize;
12271 for (i = 0; i < n; i++) {
12272 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12274 switch (r->dofr_type) {
12275 case DOF_RELO_NONE:
12277 case DOF_RELO_SETX:
12278 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12279 sizeof (uint64_t) > ts->dofs_size) {
12280 dtrace_dof_error(dof, "bad relocation offset");
12284 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12285 dtrace_dof_error(dof, "misaligned setx relo");
12289 *(uint64_t *)taddr += ubase;
12292 dtrace_dof_error(dof, "invalid relocation type");
12296 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12303 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12304 * header: it should be at the front of a memory region that is at least
12305 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12306 * size. It need not be validated in any other way.
12309 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12310 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12312 uint64_t len = dof->dofh_loadsz, seclen;
12313 uintptr_t daddr = (uintptr_t)dof;
12314 dtrace_ecbdesc_t *ep;
12315 dtrace_enabling_t *enab;
12318 ASSERT(MUTEX_HELD(&dtrace_lock));
12319 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12322 * Check the DOF header identification bytes. In addition to checking
12323 * valid settings, we also verify that unused bits/bytes are zeroed so
12324 * we can use them later without fear of regressing existing binaries.
12326 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12327 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12328 dtrace_dof_error(dof, "DOF magic string mismatch");
12332 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12333 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12334 dtrace_dof_error(dof, "DOF has invalid data model");
12338 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12339 dtrace_dof_error(dof, "DOF encoding mismatch");
12343 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12344 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12345 dtrace_dof_error(dof, "DOF version mismatch");
12349 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12350 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12354 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12355 dtrace_dof_error(dof, "DOF uses too many integer registers");
12359 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12360 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12364 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12365 if (dof->dofh_ident[i] != 0) {
12366 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12371 if (dof->dofh_flags & ~DOF_FL_VALID) {
12372 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12376 if (dof->dofh_secsize == 0) {
12377 dtrace_dof_error(dof, "zero section header size");
12382 * Check that the section headers don't exceed the amount of DOF
12383 * data. Note that we cast the section size and number of sections
12384 * to uint64_t's to prevent possible overflow in the multiplication.
12386 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12388 if (dof->dofh_secoff > len || seclen > len ||
12389 dof->dofh_secoff + seclen > len) {
12390 dtrace_dof_error(dof, "truncated section headers");
12394 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
12395 dtrace_dof_error(dof, "misaligned section headers");
12399 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
12400 dtrace_dof_error(dof, "misaligned section size");
12405 * Take an initial pass through the section headers to be sure that
12406 * the headers don't have stray offsets. If the 'noprobes' flag is
12407 * set, do not permit sections relating to providers, probes, or args.
12409 for (i = 0; i < dof->dofh_secnum; i++) {
12410 dof_sec_t *sec = (dof_sec_t *)(daddr +
12411 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12414 switch (sec->dofs_type) {
12415 case DOF_SECT_PROVIDER:
12416 case DOF_SECT_PROBES:
12417 case DOF_SECT_PRARGS:
12418 case DOF_SECT_PROFFS:
12419 dtrace_dof_error(dof, "illegal sections "
12425 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12426 continue; /* just ignore non-loadable sections */
12428 if (sec->dofs_align & (sec->dofs_align - 1)) {
12429 dtrace_dof_error(dof, "bad section alignment");
12433 if (sec->dofs_offset & (sec->dofs_align - 1)) {
12434 dtrace_dof_error(dof, "misaligned section");
12438 if (sec->dofs_offset > len || sec->dofs_size > len ||
12439 sec->dofs_offset + sec->dofs_size > len) {
12440 dtrace_dof_error(dof, "corrupt section header");
12444 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
12445 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
12446 dtrace_dof_error(dof, "non-terminating string table");
12452 * Take a second pass through the sections and locate and perform any
12453 * relocations that are present. We do this after the first pass to
12454 * be sure that all sections have had their headers validated.
12456 for (i = 0; i < dof->dofh_secnum; i++) {
12457 dof_sec_t *sec = (dof_sec_t *)(daddr +
12458 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12460 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12461 continue; /* skip sections that are not loadable */
12463 switch (sec->dofs_type) {
12464 case DOF_SECT_URELHDR:
12465 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
12471 if ((enab = *enabp) == NULL)
12472 enab = *enabp = dtrace_enabling_create(vstate);
12474 for (i = 0; i < dof->dofh_secnum; i++) {
12475 dof_sec_t *sec = (dof_sec_t *)(daddr +
12476 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12478 if (sec->dofs_type != DOF_SECT_ECBDESC)
12481 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
12482 dtrace_enabling_destroy(enab);
12487 dtrace_enabling_add(enab, ep);
12494 * Process DOF for any options. This routine assumes that the DOF has been
12495 * at least processed by dtrace_dof_slurp().
12498 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
12503 dof_optdesc_t *desc;
12505 for (i = 0; i < dof->dofh_secnum; i++) {
12506 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
12507 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12509 if (sec->dofs_type != DOF_SECT_OPTDESC)
12512 if (sec->dofs_align != sizeof (uint64_t)) {
12513 dtrace_dof_error(dof, "bad alignment in "
12514 "option description");
12518 if ((entsize = sec->dofs_entsize) == 0) {
12519 dtrace_dof_error(dof, "zeroed option entry size");
12523 if (entsize < sizeof (dof_optdesc_t)) {
12524 dtrace_dof_error(dof, "bad option entry size");
12528 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
12529 desc = (dof_optdesc_t *)((uintptr_t)dof +
12530 (uintptr_t)sec->dofs_offset + offs);
12532 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
12533 dtrace_dof_error(dof, "non-zero option string");
12537 if (desc->dofo_value == DTRACEOPT_UNSET) {
12538 dtrace_dof_error(dof, "unset option");
12542 if ((rval = dtrace_state_option(state,
12543 desc->dofo_option, desc->dofo_value)) != 0) {
12544 dtrace_dof_error(dof, "rejected option");
12554 * DTrace Consumer State Functions
12557 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
12559 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
12562 dtrace_dynvar_t *dvar, *next, *start;
12565 ASSERT(MUTEX_HELD(&dtrace_lock));
12566 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
12568 bzero(dstate, sizeof (dtrace_dstate_t));
12570 if ((dstate->dtds_chunksize = chunksize) == 0)
12571 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
12573 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
12576 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12579 dstate->dtds_size = size;
12580 dstate->dtds_base = base;
12581 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
12582 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
12584 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
12586 if (hashsize != 1 && (hashsize & 1))
12589 dstate->dtds_hashsize = hashsize;
12590 dstate->dtds_hash = dstate->dtds_base;
12593 * Set all of our hash buckets to point to the single sink, and (if
12594 * it hasn't already been set), set the sink's hash value to be the
12595 * sink sentinel value. The sink is needed for dynamic variable
12596 * lookups to know that they have iterated over an entire, valid hash
12599 for (i = 0; i < hashsize; i++)
12600 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
12602 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
12603 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
12606 * Determine number of active CPUs. Divide free list evenly among
12609 start = (dtrace_dynvar_t *)
12610 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
12611 limit = (uintptr_t)base + size;
12613 maxper = (limit - (uintptr_t)start) / NCPU;
12614 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
12616 for (i = 0; i < NCPU; i++) {
12621 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
12624 * If we don't even have enough chunks to make it once through
12625 * NCPUs, we're just going to allocate everything to the first
12626 * CPU. And if we're on the last CPU, we're going to allocate
12627 * whatever is left over. In either case, we set the limit to
12628 * be the limit of the dynamic variable space.
12630 if (maxper == 0 || i == NCPU - 1) {
12631 limit = (uintptr_t)base + size;
12634 limit = (uintptr_t)start + maxper;
12635 start = (dtrace_dynvar_t *)limit;
12638 ASSERT(limit <= (uintptr_t)base + size);
12641 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
12642 dstate->dtds_chunksize);
12644 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
12647 dvar->dtdv_next = next;
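/*
 * A condensed sketch of the carving scheme above: the space left after
 * the hash table is split into roughly equal per-CPU runs (the last run
 * absorbing any remainder), and each run is chained into a singly-linked
 * free list of fixed-size chunks.  The names below are hypothetical and
 * the degenerate cases are simplified; it assumes a pointer-aligned base
 * and a chunk size of at least sizeof (struct chunk):
 */
#if 0	/* illustrative sketch only */
struct chunk {
	struct chunk *next;
};

static void
carve_freelists(char *base, size_t size, size_t chunksize, size_t ncpu,
    struct chunk **freelist)
{
	/* Bytes per CPU, rounded down to a whole number of chunks. */
	size_t maxper = (size / ncpu / chunksize) * chunksize;
	char *start = base;
	size_t i;

	for (i = 0; i < ncpu; i++) {
		char *limit = (i == ncpu - 1) ? base + size : start + maxper;
		struct chunk *c;

		freelist[i] = NULL;

		for (c = (struct chunk *)start;
		    (char *)c + chunksize <= limit;
		    c = (struct chunk *)((char *)c + chunksize)) {
			c->next = freelist[i];
			freelist[i] = c;
		}

		start = limit;
	}
}
#endif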
12659 dtrace_dstate_fini(dtrace_dstate_t *dstate)
12661 ASSERT(MUTEX_HELD(&cpu_lock));
12663 if (dstate->dtds_base == NULL)
12666 kmem_free(dstate->dtds_base, dstate->dtds_size);
12667 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
12671 dtrace_vstate_fini(dtrace_vstate_t *vstate)
12674 * Logical XOR, where are you?
12676 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
12678 if (vstate->dtvs_nglobals > 0) {
12679 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
12680 sizeof (dtrace_statvar_t *));
12683 if (vstate->dtvs_ntlocals > 0) {
12684 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
12685 sizeof (dtrace_difv_t));
12688 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
12690 if (vstate->dtvs_nlocals > 0) {
12691 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
12692 sizeof (dtrace_statvar_t *));
12698 dtrace_state_clean(dtrace_state_t *state)
12700 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12703 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12704 dtrace_speculation_clean(state);
12708 dtrace_state_deadman(dtrace_state_t *state)
12714 now = dtrace_gethrtime();
12716 if (state != dtrace_anon.dta_state &&
12717 now - state->dts_laststatus >= dtrace_deadman_user)
12721 * We must be sure that dts_alive never appears to be less than the
12722 * value upon entry to dtrace_state_deadman(), and because we lack a
12723 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12724 * store INT64_MAX to it, followed by a memory barrier, followed by
12725 * the new value. This assures that dts_alive never appears to be
12726 * less than its true value, regardless of the order in which the
12727 * stores to the underlying storage are issued.
12729 state->dts_alive = INT64_MAX;
12730 dtrace_membar_producer();
12731 state->dts_alive = now;
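/*
 * The INT64_MAX/barrier/store sequence above is a tiny lock-free
 * protocol: a reader racing with this update can observe only the prior
 * timestamp, INT64_MAX, or the new timestamp -- never anything smaller
 * than the value that was present on entry.  A minimal sketch of the
 * writer side, reusing dtrace_membar_producer() for the store-ordering
 * barrier (the function name below is hypothetical, not part of the
 * driver):
 */
#if 0	/* illustrative sketch only */
static void
alive_update(volatile int64_t *alive, int64_t now)
{
	*alive = INT64_MAX;		/* park at the maximum value... */
	dtrace_membar_producer();	/* ...order it before the update... */
	*alive = now;			/* ...then publish the new timestamp */
}
#endif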
12735 dtrace_state_clean(void *arg)
12737 dtrace_state_t *state = arg;
12738 dtrace_optval_t *opt = state->dts_options;
12740 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12743 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12744 dtrace_speculation_clean(state);
12746 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
12747 dtrace_state_clean, state);
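/*
 * The cleaning rate is kept as an interval in nanoseconds, while
 * callout_reset() takes ticks, hence the hz / NANOSEC scaling above.
 * For example (illustrative values only): with hz = 1000 and a 100ms
 * (100000000ns) interval, the delay works out to
 * 1000 * 100000000 / 1000000000 = 100 ticks, i.e. the callout refires
 * every 100ms.
 */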
12751 dtrace_state_deadman(void *arg)
12753 dtrace_state_t *state = arg;
12758 dtrace_debug_output();
12760 now = dtrace_gethrtime();
12762 if (state != dtrace_anon.dta_state &&
12763 now - state->dts_laststatus >= dtrace_deadman_user)
12767 * We must be sure that dts_alive never appears to be less than the
12768 * value upon entry to dtrace_state_deadman(), and because we lack a
12769 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12770 * store INT64_MAX to it, followed by a memory barrier, followed by
12771 * the new value. This assures that dts_alive never appears to be
12772 * less than its true value, regardless of the order in which the
12773 * stores to the underlying storage are issued.
12775 state->dts_alive = INT64_MAX;
12776 dtrace_membar_producer();
12777 state->dts_alive = now;
12779 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
12780 dtrace_state_deadman, state);
12784 static dtrace_state_t *
12786 dtrace_state_create(dev_t *devp, cred_t *cr)
12788 dtrace_state_create(struct cdev *dev)
12799 dtrace_state_t *state;
12800 dtrace_optval_t *opt;
12801 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12803 ASSERT(MUTEX_HELD(&dtrace_lock));
12804 ASSERT(MUTEX_HELD(&cpu_lock));
12807 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12808 VM_BESTFIT | VM_SLEEP);
12810 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
12811 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12815 state = ddi_get_soft_state(dtrace_softstate, minor);
12822 /* Allocate memory for the state. */
12823 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP);
12826 state->dts_epid = DTRACE_EPIDNONE + 1;
12828 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
12830 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
12831 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
12833 if (devp != NULL) {
12834 major = getemajor(*devp);
12836 major = ddi_driver_major(dtrace_devi);
12839 state->dts_dev = makedevice(major, minor);
12842 *devp = state->dts_dev;
12844 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);
12845 state->dts_dev = dev;
12849 * We allocate NCPU buffers. On the one hand, this can be quite
12850 * a bit of memory per instance (nearly 36K on a Starcat). On the
12851 * other hand, it saves an additional memory reference in the probe
12854 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
12855 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
12858 state->dts_cleaner = CYCLIC_NONE;
12859 state->dts_deadman = CYCLIC_NONE;
12861 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE);
12862 callout_init(&state->dts_deadman, CALLOUT_MPSAFE);
12864 state->dts_vstate.dtvs_state = state;
12866 for (i = 0; i < DTRACEOPT_MAX; i++)
12867 state->dts_options[i] = DTRACEOPT_UNSET;
12870 * Set the default options.
12872 opt = state->dts_options;
12873 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
12874 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
12875 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
12876 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
12877 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
12878 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
12879 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
12880 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
12881 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
12882 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
12883 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
12884 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
12885 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
12886 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
12888 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
12891 * Depending on the user credentials, we set flag bits which alter probe
12892 * visibility or the amount of destructiveness allowed. In the case of
12893 * actual anonymous tracing, or the possession of all privileges, all of
12894 * the normal checks are bypassed.
12896 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
12897 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
12898 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
12901 * Set up the credentials for this instantiation. We take a
12902 * hold on the credential to prevent it from disappearing on
12903 * us; this in turn prevents the zone_t referenced by this
12904 * credential from disappearing. This means that we can
12905 * examine the credential and the zone from probe context.
12908 state->dts_cred.dcr_cred = cr;
12911 * CRA_PROC means "we have *some* privilege for dtrace" and
12912 * unlocks the use of variables like pid, zonename, etc.
12914 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
12915 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12916 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
12920 * dtrace_user allows use of syscall and profile providers.
12921 * If the user also has proc_owner and/or proc_zone, we
12922 * extend the scope to include additional visibility and
12923 * destructive power.
12925 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
12926 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
12927 state->dts_cred.dcr_visible |=
12928 DTRACE_CRV_ALLPROC;
12930 state->dts_cred.dcr_action |=
12931 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12934 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
12935 state->dts_cred.dcr_visible |=
12936 DTRACE_CRV_ALLZONE;
12938 state->dts_cred.dcr_action |=
12939 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12943 * If we have all privs in whatever zone this is,
12944 * we can do destructive things to processes which
12945 * have altered credentials.
12948 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12949 cr->cr_zone->zone_privset)) {
12950 state->dts_cred.dcr_action |=
12951 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12957 * Holding the dtrace_kernel privilege also implies that
12958 * the user has the dtrace_user privilege from a visibility
12959 * perspective. But without further privileges, some
12960 * destructive actions are not available.
12962 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
12964 * Make all probes in all zones visible. However,
12965 * this doesn't mean that all actions become available
12968 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
12969 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
12971 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
12974 * Holding proc_owner means that destructive actions
12975 * for *this* zone are allowed.
12977 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12978 state->dts_cred.dcr_action |=
12979 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12982 * Holding proc_zone means that destructive actions
12983 * for this user/group ID in all zones are allowed.
12985 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12986 state->dts_cred.dcr_action |=
12987 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12991 * If we have all privs in whatever zone this is,
12992 * we can do destructive things to processes which
12993 * have altered credentials.
12995 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12996 cr->cr_zone->zone_privset)) {
12997 state->dts_cred.dcr_action |=
12998 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13004 * Holding the dtrace_proc privilege gives control over fasttrap
13005 * and pid providers. We need to grant wider destructive
13006 * privileges in the event that the user has proc_owner and/or
13009 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13010 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13011 state->dts_cred.dcr_action |=
13012 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13014 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13015 state->dts_cred.dcr_action |=
13016 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13024 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13026 dtrace_optval_t *opt = state->dts_options, size;
13027 processorid_t cpu = 0;
13028 int flags = 0, rval;
13030 ASSERT(MUTEX_HELD(&dtrace_lock));
13031 ASSERT(MUTEX_HELD(&cpu_lock));
13032 ASSERT(which < DTRACEOPT_MAX);
13033 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13034 (state == dtrace_anon.dta_state &&
13035 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13037 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13040 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13041 cpu = opt[DTRACEOPT_CPU];
13043 if (which == DTRACEOPT_SPECSIZE)
13044 flags |= DTRACEBUF_NOSWITCH;
13046 if (which == DTRACEOPT_BUFSIZE) {
13047 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13048 flags |= DTRACEBUF_RING;
13050 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13051 flags |= DTRACEBUF_FILL;
13053 if (state != dtrace_anon.dta_state ||
13054 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13055 flags |= DTRACEBUF_INACTIVE;
13058 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
13060 * The size must be 8-byte aligned. If the size is not 8-byte
13061 * aligned, drop it down by the difference.
13063 if (size & (sizeof (uint64_t) - 1))
13064 size -= size & (sizeof (uint64_t) - 1);
13066 if (size < state->dts_reserve) {
13068 * Buffers always must be large enough to accommodate
13069 * their prereserved space. We return E2BIG instead
13070 * of ENOMEM in this case to allow for user-level
13071 * software to differentiate the cases.
13076 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
13078 if (rval != ENOMEM) {
13083 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
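/*
 * The sizing loop above is a "halve until it fits" fallback: round the
 * requested size down to an 8-byte multiple, refuse to drop below the
 * prereserved minimum (E2BIG), and on allocation failure retry at half
 * the size unless the consumer asked for manual buffer resizing.  A
 * compact sketch of the strategy with a hypothetical allocator (returns
 * 0 on success) -- not the driver's own buffer code:
 */
#if 0	/* illustrative sketch only */
static int
alloc_halving(size_t want, size_t reserve, int manual,
    int (*try_alloc)(size_t), size_t *got)
{
	size_t size;

	for (size = want; size >= sizeof (uint64_t); size >>= 1) {
		size &= ~(sizeof (uint64_t) - 1);	/* 8-byte align */

		if (size < reserve)
			return (E2BIG);		/* can't honor the reserve */

		if (try_alloc(size) == 0) {
			*got = size;
			return (0);
		}

		if (manual)
			break;			/* no automatic downsizing */
	}

	return (ENOMEM);
}
#endif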
13091 dtrace_state_buffers(dtrace_state_t *state)
13093 dtrace_speculation_t *spec = state->dts_speculations;
13096 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13097 DTRACEOPT_BUFSIZE)) != 0)
13100 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13101 DTRACEOPT_AGGSIZE)) != 0)
13104 for (i = 0; i < state->dts_nspeculations; i++) {
13105 if ((rval = dtrace_state_buffer(state,
13106 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13114 dtrace_state_prereserve(dtrace_state_t *state)
13117 dtrace_probe_t *probe;
13119 state->dts_reserve = 0;
13121 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13125 * If our buffer policy is a "fill" buffer policy, we need to set the
13126 * prereserved space to be the space required by the END probes.
13128 probe = dtrace_probes[dtrace_probeid_end - 1];
13129 ASSERT(probe != NULL);
13131 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13132 if (ecb->dte_state != state)
13135 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13140 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13142 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13143 dtrace_speculation_t *spec;
13144 dtrace_buffer_t *buf;
13146 cyc_handler_t hdlr;
13149 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13150 dtrace_icookie_t cookie;
13152 mutex_enter(&cpu_lock);
13153 mutex_enter(&dtrace_lock);
13155 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13161 * Before we can perform any checks, we must prime all of the
13162 * retained enablings that correspond to this state.
13164 dtrace_enabling_prime(state);
13166 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13171 dtrace_state_prereserve(state);
13174 * Now all we want to do is try to allocate our speculations.
13175 * We do not automatically resize the number of speculations; if
13176 * this fails, we will fail the operation.
13178 nspec = opt[DTRACEOPT_NSPEC];
13179 ASSERT(nspec != DTRACEOPT_UNSET);
13181 if (nspec > INT_MAX) {
13186 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
13188 if (spec == NULL) {
13193 state->dts_speculations = spec;
13194 state->dts_nspeculations = (int)nspec;
13196 for (i = 0; i < nspec; i++) {
13197 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
13202 spec[i].dtsp_buffer = buf;
13205 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13206 if (dtrace_anon.dta_state == NULL) {
13211 if (state->dts_necbs != 0) {
13216 state->dts_anon = dtrace_anon_grab();
13217 ASSERT(state->dts_anon != NULL);
13218 state = state->dts_anon;
13221 * We want "grabanon" to be set in the grabbed state, so we'll
13222 * copy that option value from the grabbing state into the
13225 state->dts_options[DTRACEOPT_GRABANON] =
13226 opt[DTRACEOPT_GRABANON];
13228 *cpu = dtrace_anon.dta_beganon;
13231 * If the anonymous state is active (as it almost certainly
13232 * is if the anonymous enabling ultimately matched anything),
13233 * we don't allow any further option processing -- but we
13234 * don't return failure.
13236 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13240 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13241 opt[DTRACEOPT_AGGSIZE] != 0) {
13242 if (state->dts_aggregations == NULL) {
13244 * We're not going to create an aggregation buffer
13245 * because we don't have any ECBs that contain
13246 * aggregations -- set this option to 0.
13248 opt[DTRACEOPT_AGGSIZE] = 0;
13251 * If we have an aggregation buffer, we must also have
13252 * a buffer to use as scratch.
13254 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
13255 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
13256 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
13261 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
13262 opt[DTRACEOPT_SPECSIZE] != 0) {
13263 if (!state->dts_speculates) {
13265 * We're not going to create speculation buffers
13266 * because we don't have any ECBs that actually
13267 * speculate -- set the speculation size to 0.
13269 opt[DTRACEOPT_SPECSIZE] = 0;
13274 * The bare minimum size for any buffer that we're actually going to
13275 * do anything to is sizeof (uint64_t).
13277 sz = sizeof (uint64_t);
13279 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
13280 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
13281 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
13283 * A buffer size has been explicitly set to 0 (or to a size
13284 * that will be adjusted to 0) and we need the space -- we
13285 * need to return failure. We return ENOSPC to differentiate
13286 * it from failing to allocate a buffer due to failure to meet
13287 * the reserve (for which we return E2BIG).
13293 if ((rval = dtrace_state_buffers(state)) != 0)
13296 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13297 sz = dtrace_dstate_defsize;
13300 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13305 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13307 } while (sz >>= 1);
13309 opt[DTRACEOPT_DYNVARSIZE] = sz;
13314 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13315 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13317 if (opt[DTRACEOPT_CLEANRATE] == 0)
13318 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13320 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13321 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13323 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13324 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13326 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13328 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13329 hdlr.cyh_arg = state;
13330 hdlr.cyh_level = CY_LOW_LEVEL;
13333 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13335 state->dts_cleaner = cyclic_add(&hdlr, &when);
13337 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13338 hdlr.cyh_arg = state;
13339 hdlr.cyh_level = CY_LOW_LEVEL;
13342 when.cyt_interval = dtrace_deadman_interval;
13344 state->dts_deadman = cyclic_add(&hdlr, &when);
13346 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
13347 dtrace_state_clean, state);
13348 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
13349 dtrace_state_deadman, state);
13352 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13355 * Now it's time to actually fire the BEGIN probe. We need to disable
13356 * interrupts here both to record the CPU on which we fired the BEGIN
13357 * probe (the data from this CPU will be processed first at user
13358 * level) and to manually activate the buffer for this CPU.
13360 cookie = dtrace_interrupt_disable();
13362 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13363 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13365 dtrace_probe(dtrace_probeid_begin,
13366 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13367 dtrace_interrupt_enable(cookie);
13369 * We may have had an exit action from a BEGIN probe; only change our
13370 * state to ACTIVE if we're still in WARMUP.
13372 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13373 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13375 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13376 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13379 * Regardless of whether we're now in ACTIVE or DRAINING, we
13380 * want each CPU to transition its principal buffer out of the
13381 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13382 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13383 * atomically transition from processing none of a state's ECBs to
13384 * processing all of them.
13386 dtrace_xcall(DTRACE_CPUALL,
13387 (dtrace_xcall_t)dtrace_buffer_activate, state);
13391 dtrace_buffer_free(state->dts_buffer);
13392 dtrace_buffer_free(state->dts_aggbuffer);
13394 if ((nspec = state->dts_nspeculations) == 0) {
13395 ASSERT(state->dts_speculations == NULL);
13399 spec = state->dts_speculations;
13400 ASSERT(spec != NULL);
13402 for (i = 0; i < state->dts_nspeculations; i++) {
13403 if ((buf = spec[i].dtsp_buffer) == NULL)
13406 dtrace_buffer_free(buf);
13407 kmem_free(buf, bufsize);
13410 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13411 state->dts_nspeculations = 0;
13412 state->dts_speculations = NULL;
13415 mutex_exit(&dtrace_lock);
13416 mutex_exit(&cpu_lock);
13422 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13424 dtrace_icookie_t cookie;
13426 ASSERT(MUTEX_HELD(&dtrace_lock));
13428 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13429 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13433 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13434 * to be sure that every CPU has seen it. See below for the details
13435 * on why this is done.
13437 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13441 * By this point, it is impossible for any CPU to be still processing
13442 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13443 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13444 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13445 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13446 * iff we're in the END probe.
13448 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13450 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
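/*
 * The DRAINING / dtrace_sync() / COOLDOWN sequence above is the usual
 * "publish, synchronize, then rely on it" pattern: once every CPU has
 * passed through the sync, none of them can still be executing probe
 * context that observed the ACTIVE state.  A generic sketch of the
 * pattern -- hypothetical names, not part of the driver:
 */
#if 0	/* illustrative sketch only */
static void
drain_then_cool(volatile int *activity, int draining, int cooldown,
    void (*sync_all_cpus)(void))
{
	*activity = draining;	/* new arrivals see the drain flag... */
	sync_all_cpus();	/* ...in-flight work runs to completion... */
	*activity = cooldown;	/* ...so nobody is still acting on ACTIVE */
}
#endif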
13453 * Finally, we can release the reserve and call the END probe. We
13454 * disable interrupts across calling the END probe to allow us to
13455 * return the CPU on which we actually called the END probe. This
13456 * allows user-land to be sure that this CPU's principal buffer is
13459 state->dts_reserve = 0;
13461 cookie = dtrace_interrupt_disable();
13463 dtrace_probe(dtrace_probeid_end,
13464 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13465 dtrace_interrupt_enable(cookie);
13467 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13474 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13475 dtrace_optval_t val)
13477 ASSERT(MUTEX_HELD(&dtrace_lock));
13479 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13482 if (option >= DTRACEOPT_MAX)
13485 if (option != DTRACEOPT_CPU && val < 0)
13489 case DTRACEOPT_DESTRUCTIVE:
13490 if (dtrace_destructive_disallow)
13493 state->dts_cred.dcr_destructive = 1;
13496 case DTRACEOPT_BUFSIZE:
13497 case DTRACEOPT_DYNVARSIZE:
13498 case DTRACEOPT_AGGSIZE:
13499 case DTRACEOPT_SPECSIZE:
13500 case DTRACEOPT_STRSIZE:
13504 if (val >= LONG_MAX) {
13506 * If this is an otherwise negative value, set it to
13507 * the highest multiple of 128m less than LONG_MAX.
13508 * Technically, we're adjusting the size without
13509 * regard to the buffer resizing policy, but in fact,
13510 * this has no effect -- if we set the buffer size to
13511 * ~LONG_MAX and the buffer policy is ultimately set to
13512 * be "manual", the buffer allocation is guaranteed to
13513 * fail, if only because the allocation requires two
13514 * buffers. (We set the size to the highest
13515 * multiple of 128m because it ensures that the size
13516 * will remain a multiple of a megabyte when
13517 * repeatedly halved -- all the way down to 15m.)
13519 val = LONG_MAX - (1 << 27) + 1;
13523 state->dts_options[option] = val;
13529 dtrace_state_destroy(dtrace_state_t *state)
13532 dtrace_vstate_t *vstate = &state->dts_vstate;
13534 minor_t minor = getminor(state->dts_dev);
13536 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13537 dtrace_speculation_t *spec = state->dts_speculations;
13538 int nspec = state->dts_nspeculations;
13541 ASSERT(MUTEX_HELD(&dtrace_lock));
13542 ASSERT(MUTEX_HELD(&cpu_lock));
13545 * First, retract any retained enablings for this state.
13547 dtrace_enabling_retract(state);
13548 ASSERT(state->dts_nretained == 0);
13550 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13551 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13553 * We have managed to come into dtrace_state_destroy() on a
13554 * hot enabling -- almost certainly because of a disorderly
13555 * shutdown of a consumer. (That is, a consumer that is
13556 * exiting without having called dtrace_stop().) In this case,
13557 * we're going to set our activity to be KILLED, and then
13558 * issue a sync to be sure that everyone is out of probe
13559 * context before we start blowing away ECBs.
13561 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13566 * Release the credential hold we took in dtrace_state_create().
13568 if (state->dts_cred.dcr_cred != NULL)
13569 crfree(state->dts_cred.dcr_cred);
13572 * Now we can safely disable and destroy any enabled probes. Because
13573 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
13574 * (especially if they're all enabled), we take two passes through the
13575 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
13576 * in the second we disable whatever is left over.
13578 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
13579 for (i = 0; i < state->dts_necbs; i++) {
13580 if ((ecb = state->dts_ecbs[i]) == NULL)
13583 if (match && ecb->dte_probe != NULL) {
13584 dtrace_probe_t *probe = ecb->dte_probe;
13585 dtrace_provider_t *prov = probe->dtpr_provider;
13587 if (!(prov->dtpv_priv.dtpp_flags & match))
13591 dtrace_ecb_disable(ecb);
13592 dtrace_ecb_destroy(ecb);
13600 * Before we free the buffers, perform one more sync to assure that
13601 * every CPU is out of probe context.
13605 dtrace_buffer_free(state->dts_buffer);
13606 dtrace_buffer_free(state->dts_aggbuffer);
13608 for (i = 0; i < nspec; i++)
13609 dtrace_buffer_free(spec[i].dtsp_buffer);
13612 if (state->dts_cleaner != CYCLIC_NONE)
13613 cyclic_remove(state->dts_cleaner);
13615 if (state->dts_deadman != CYCLIC_NONE)
13616 cyclic_remove(state->dts_deadman);
13618 callout_stop(&state->dts_cleaner);
13619 callout_stop(&state->dts_deadman);
13622 dtrace_dstate_fini(&vstate->dtvs_dynvars);
13623 dtrace_vstate_fini(vstate);
13624 if (state->dts_ecbs != NULL)
13625 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
13627 if (state->dts_aggregations != NULL) {
13629 for (i = 0; i < state->dts_naggregations; i++)
13630 ASSERT(state->dts_aggregations[i] == NULL);
13632 ASSERT(state->dts_naggregations > 0);
13633 kmem_free(state->dts_aggregations,
13634 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
13637 kmem_free(state->dts_buffer, bufsize);
13638 kmem_free(state->dts_aggbuffer, bufsize);
13640 for (i = 0; i < nspec; i++)
13641 kmem_free(spec[i].dtsp_buffer, bufsize);
13644 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13646 dtrace_format_destroy(state);
13648 if (state->dts_aggid_arena != NULL) {
13650 vmem_destroy(state->dts_aggid_arena);
13652 delete_unrhdr(state->dts_aggid_arena);
13654 state->dts_aggid_arena = NULL;
13657 ddi_soft_state_free(dtrace_softstate, minor);
13658 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13663 * DTrace Anonymous Enabling Functions
13665 static dtrace_state_t *
13666 dtrace_anon_grab(void)
13668 dtrace_state_t *state;
13670 ASSERT(MUTEX_HELD(&dtrace_lock));
13672 if ((state = dtrace_anon.dta_state) == NULL) {
13673 ASSERT(dtrace_anon.dta_enabling == NULL);
13677 ASSERT(dtrace_anon.dta_enabling != NULL);
13678 ASSERT(dtrace_retained != NULL);
13680 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
13681 dtrace_anon.dta_enabling = NULL;
13682 dtrace_anon.dta_state = NULL;
13688 dtrace_anon_property(void)
13691 dtrace_state_t *state;
13693 char c[32]; /* enough for "dof-data-" + digits */
13695 ASSERT(MUTEX_HELD(&dtrace_lock));
13696 ASSERT(MUTEX_HELD(&cpu_lock));
13698 for (i = 0; ; i++) {
13699 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
13701 dtrace_err_verbose = 1;
13703 if ((dof = dtrace_dof_property(c)) == NULL) {
13704 dtrace_err_verbose = 0;
13710 * We want to create anonymous state, so we need to transition
13711 * the kernel debugger to indicate that DTrace is active. If
13712 * this fails (e.g. because the debugger has modified text in
13713 * some way), we won't continue with the processing.
13715 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
13716 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
13717 "enabling ignored.");
13718 dtrace_dof_destroy(dof);
13724 * If we haven't allocated an anonymous state, we'll do so now.
13726 if ((state = dtrace_anon.dta_state) == NULL) {
13728 state = dtrace_state_create(NULL, NULL);
13730 state = dtrace_state_create(NULL);
13732 dtrace_anon.dta_state = state;
13734 if (state == NULL) {
13736 * This basically shouldn't happen: the only
13737 * failure mode from dtrace_state_create() is a
13738 * failure of ddi_soft_state_zalloc() that
13739 * itself should never happen. Still, the
13740 * interface allows for a failure mode, and
13741 * we want to fail as gracefully as possible:
13742 * we'll emit an error message and cease
13743 * processing anonymous state in this case.
13745 cmn_err(CE_WARN, "failed to create "
13746 "anonymous state");
13747 dtrace_dof_destroy(dof);
13752 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
13753 &dtrace_anon.dta_enabling, 0, B_TRUE);
13756 rv = dtrace_dof_options(dof, state);
13758 dtrace_err_verbose = 0;
13759 dtrace_dof_destroy(dof);
13763 * This is malformed DOF; chuck any anonymous state
13766 ASSERT(dtrace_anon.dta_enabling == NULL);
13767 dtrace_state_destroy(state);
13768 dtrace_anon.dta_state = NULL;
13772 ASSERT(dtrace_anon.dta_enabling != NULL);
13775 if (dtrace_anon.dta_enabling != NULL) {
13779 * dtrace_enabling_retain() can only fail because we are
13780 * trying to retain more enablings than are allowed -- but
13781 * we only have one anonymous enabling, and we are guaranteed
13782 * to be allowed at least one retained enabling; we assert
13783 * that dtrace_enabling_retain() returns success.
13785 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
13788 dtrace_enabling_dump(dtrace_anon.dta_enabling);
13794 * DTrace Helper Functions
13797 dtrace_helper_trace(dtrace_helper_action_t *helper,
13798 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
13800 uint32_t size, next, nnext, i;
13801 dtrace_helptrace_t *ent;
13802 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags;
13804 if (!dtrace_helptrace_enabled)
13807 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
13810 * What would a tracing framework be without its own tracing
13811 * framework? (Well, a hell of a lot simpler, for starters...)
13813 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
13814 sizeof (uint64_t) - sizeof (uint64_t);
13817 * Iterate until we can allocate a slot in the trace buffer.
13820 next = dtrace_helptrace_next;
13822 if (next + size < dtrace_helptrace_bufsize) {
13823 nnext = next + size;
13827 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
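/*
 * The do/while above is a classic compare-and-swap reservation loop:
 * read the current write cursor, compute where it would land after this
 * record (wrapping to the start if it won't fit), and advance it only
 * if nobody else advanced it first; a loser simply retries.  A
 * stand-alone sketch of the same reservation, reusing dtrace_cas32()
 * (which returns the old value) -- otherwise hypothetical names, not
 * part of the driver:
 */
#if 0	/* illustrative sketch only */
static uint32_t
reserve_slot(uint32_t *cursor, uint32_t size, uint32_t bufsize)
{
	uint32_t next, nnext;

	do {
		next = *cursor;
		nnext = (next + size < bufsize) ? next + size : size;
	} while (dtrace_cas32(cursor, next, nnext) != next);

	/* A wrapped reservation starts back at offset 0. */
	return (nnext == size ? 0 : next);
}
#endif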
13830 * We have our slot; fill it in.
13835 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
13836 ent->dtht_helper = helper;
13837 ent->dtht_where = where;
13838 ent->dtht_nlocals = vstate->dtvs_nlocals;
13840 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
13841 mstate->dtms_fltoffs : -1;
13842 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
13843 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval;
13845 for (i = 0; i < vstate->dtvs_nlocals; i++) {
13846 dtrace_statvar_t *svar;
13848 if ((svar = vstate->dtvs_locals[i]) == NULL)
13851 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
13852 ent->dtht_locals[i] =
13853 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu];
13860 dtrace_helper(int which, dtrace_mstate_t *mstate,
13861 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
13863 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
13864 uint64_t sarg0 = mstate->dtms_arg[0];
13865 uint64_t sarg1 = mstate->dtms_arg[1];
13867 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
13868 dtrace_helper_action_t *helper;
13869 dtrace_vstate_t *vstate;
13870 dtrace_difo_t *pred;
13871 int i, trace = dtrace_helptrace_enabled;
13873 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
13875 if (helpers == NULL)
13878 if ((helper = helpers->dthps_actions[which]) == NULL)
13881 vstate = &helpers->dthps_vstate;
13882 mstate->dtms_arg[0] = arg0;
13883 mstate->dtms_arg[1] = arg1;
13886 * Now iterate over each helper. If its predicate evaluates to 'true',
13887 * we'll call the corresponding actions. Note that the below calls
13888 * to dtrace_dif_emulate() may set faults in machine state. This is
13889 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
13890 * the stored DIF offset with its own (which is the desired behavior).
13891 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
13892 * from machine state; this is okay, too.
13894 for (; helper != NULL; helper = helper->dtha_next) {
13895 if ((pred = helper->dtha_predicate) != NULL) {
13897 dtrace_helper_trace(helper, mstate, vstate, 0);
13899 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
13902 if (*flags & CPU_DTRACE_FAULT)
13906 for (i = 0; i < helper->dtha_nactions; i++) {
13908 dtrace_helper_trace(helper,
13909 mstate, vstate, i + 1);
13911 rval = dtrace_dif_emulate(helper->dtha_actions[i],
13912 mstate, vstate, state);
13914 if (*flags & CPU_DTRACE_FAULT)
13920 dtrace_helper_trace(helper, mstate, vstate,
13921 DTRACE_HELPTRACE_NEXT);
13925 dtrace_helper_trace(helper, mstate, vstate,
13926 DTRACE_HELPTRACE_DONE);
13929 * Restore the args that we saved upon entry.
13931 mstate->dtms_arg[0] = sarg0;
13932 mstate->dtms_arg[1] = sarg1;
13938 dtrace_helper_trace(helper, mstate, vstate,
13939 DTRACE_HELPTRACE_ERR);
13942 * Restore the args that we saved upon entry.
13944 mstate->dtms_arg[0] = sarg0;
13945 mstate->dtms_arg[1] = sarg1;
13951 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
13952 dtrace_vstate_t *vstate)
13956 if (helper->dtha_predicate != NULL)
13957 dtrace_difo_release(helper->dtha_predicate, vstate);
13959 for (i = 0; i < helper->dtha_nactions; i++) {
13960 ASSERT(helper->dtha_actions[i] != NULL);
13961 dtrace_difo_release(helper->dtha_actions[i], vstate);
13964 kmem_free(helper->dtha_actions,
13965 helper->dtha_nactions * sizeof (dtrace_difo_t *));
13966 kmem_free(helper, sizeof (dtrace_helper_action_t));
13970 dtrace_helper_destroygen(int gen)
13972 proc_t *p = curproc;
13973 dtrace_helpers_t *help = p->p_dtrace_helpers;
13974 dtrace_vstate_t *vstate;
13977 ASSERT(MUTEX_HELD(&dtrace_lock));
13979 if (help == NULL || gen > help->dthps_generation)
13982 vstate = &help->dthps_vstate;
13984 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13985 dtrace_helper_action_t *last = NULL, *h, *next;
13987 for (h = help->dthps_actions[i]; h != NULL; h = next) {
13988 next = h->dtha_next;
13990 if (h->dtha_generation == gen) {
13991 if (last != NULL) {
13992 last->dtha_next = next;
13994 help->dthps_actions[i] = next;
13997 dtrace_helper_action_destroy(h, vstate);
14005 * Iterate until we've cleared out all helper providers with the
14006 * given generation number.
14009 dtrace_helper_provider_t *prov;
14012 * Look for a helper provider with the right generation. We
14013 * have to start back at the beginning of the list each time
14014 * because we drop dtrace_lock. It's unlikely that we'll make
14015 * more than two passes.
14017 for (i = 0; i < help->dthps_nprovs; i++) {
14018 prov = help->dthps_provs[i];
14020 if (prov->dthp_generation == gen)
14025 * If there were no matches, we're done.
14027 if (i == help->dthps_nprovs)
14031 * Move the last helper provider into this slot.
14033 help->dthps_nprovs--;
14034 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14035 help->dthps_provs[help->dthps_nprovs] = NULL;
14037 mutex_exit(&dtrace_lock);
14040 * If we have a meta provider, remove this helper provider.
14042 mutex_enter(&dtrace_meta_lock);
14043 if (dtrace_meta_pid != NULL) {
14044 ASSERT(dtrace_deferred_pid == NULL);
14045 dtrace_helper_provider_remove(&prov->dthp_prov,
14048 mutex_exit(&dtrace_meta_lock);
14050 dtrace_helper_provider_destroy(prov);
14052 mutex_enter(&dtrace_lock);
14061 dtrace_helper_validate(dtrace_helper_action_t *helper)
14066 if ((dp = helper->dtha_predicate) != NULL)
14067 err += dtrace_difo_validate_helper(dp);
14069 for (i = 0; i < helper->dtha_nactions; i++)
14070 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14078 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14080 dtrace_helpers_t *help;
14081 dtrace_helper_action_t *helper, *last;
14082 dtrace_actdesc_t *act;
14083 dtrace_vstate_t *vstate;
14084 dtrace_predicate_t *pred;
14085 int count = 0, nactions = 0, i;
14087 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14090 help = curproc->p_dtrace_helpers;
14091 last = help->dthps_actions[which];
14092 vstate = &help->dthps_vstate;
14094 for (count = 0; last != NULL; last = last->dtha_next) {
14096 if (last->dtha_next == NULL)
14101 * If we already have dtrace_helper_actions_max helper actions for this
14102 * helper action type, we'll refuse to add a new one.
14104 if (count >= dtrace_helper_actions_max)
14107 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14108 helper->dtha_generation = help->dthps_generation;
14110 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14111 ASSERT(pred->dtp_difo != NULL);
14112 dtrace_difo_hold(pred->dtp_difo);
14113 helper->dtha_predicate = pred->dtp_difo;
14116 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14117 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14120 if (act->dtad_difo == NULL)
14126 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14127 (helper->dtha_nactions = nactions), KM_SLEEP);
14129 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14130 dtrace_difo_hold(act->dtad_difo);
14131 helper->dtha_actions[i++] = act->dtad_difo;
14134 if (!dtrace_helper_validate(helper))
14137 if (last == NULL) {
14138 help->dthps_actions[which] = helper;
14140 last->dtha_next = helper;
14143 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14144 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14145 dtrace_helptrace_next = 0;
14150 dtrace_helper_action_destroy(helper, vstate);
14155 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14156 dof_helper_t *dofhp)
14158 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14160 mutex_enter(&dtrace_meta_lock);
14161 mutex_enter(&dtrace_lock);
14163 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14165 * If the dtrace module is loaded but not attached, or if
14166 * there isn't a meta provider registered to deal with
14167 * these provider descriptions, we need to postpone creating
14168 * the actual providers until later.
14171 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14172 dtrace_deferred_pid != help) {
14173 help->dthps_deferred = 1;
14174 help->dthps_pid = p->p_pid;
14175 help->dthps_next = dtrace_deferred_pid;
14176 help->dthps_prev = NULL;
14177 if (dtrace_deferred_pid != NULL)
14178 dtrace_deferred_pid->dthps_prev = help;
14179 dtrace_deferred_pid = help;
14182 mutex_exit(&dtrace_lock);
14184 } else if (dofhp != NULL) {
14186 * If the dtrace module is loaded and we have a particular
14187 * helper provider description, pass that off to the
14191 mutex_exit(&dtrace_lock);
14193 dtrace_helper_provide(dofhp, p->p_pid);
14197 * Otherwise, just pass all the helper provider descriptions
14198 * off to the meta provider.
14202 mutex_exit(&dtrace_lock);
14204 for (i = 0; i < help->dthps_nprovs; i++) {
14205 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14210 mutex_exit(&dtrace_meta_lock);
14214 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14216 dtrace_helpers_t *help;
14217 dtrace_helper_provider_t *hprov, **tmp_provs;
14218 uint_t tmp_maxprovs, i;
14220 ASSERT(MUTEX_HELD(&dtrace_lock));
14222 help = curproc->p_dtrace_helpers;
14223 ASSERT(help != NULL);
14226 * If we already have dtrace_helper_providers_max helper providers,
14227 * we'll refuse to add a new one.
14229 if (help->dthps_nprovs >= dtrace_helper_providers_max)
14233 * Check to make sure this isn't a duplicate.
14235 for (i = 0; i < help->dthps_nprovs; i++) {
14236 if (dofhp->dofhp_addr ==
14237 help->dthps_provs[i]->dthp_prov.dofhp_addr)
14241 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
14242 hprov->dthp_prov = *dofhp;
14243 hprov->dthp_ref = 1;
14244 hprov->dthp_generation = gen;
14247 * Allocate a bigger table for helper providers if it's already full.
14249 if (help->dthps_maxprovs == help->dthps_nprovs) {
14250 tmp_maxprovs = help->dthps_maxprovs;
14251 tmp_provs = help->dthps_provs;
14253 if (help->dthps_maxprovs == 0)
14254 help->dthps_maxprovs = 2;
14256 help->dthps_maxprovs *= 2;
14257 if (help->dthps_maxprovs > dtrace_helper_providers_max)
14258 help->dthps_maxprovs = dtrace_helper_providers_max;
14260 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
14262 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
14263 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14265 if (tmp_provs != NULL) {
14266 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
14267 sizeof (dtrace_helper_provider_t *));
14268 kmem_free(tmp_provs, tmp_maxprovs *
14269 sizeof (dtrace_helper_provider_t *));
14273 help->dthps_provs[help->dthps_nprovs] = hprov;
14274 help->dthps_nprovs++;
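/*
 * The provider table above grows by the usual doubling strategy: when
 * it is full, allocate a table twice the size (starting at two and
 * clamped to the configured maximum), copy the old pointers across, and
 * free the old table.  A compact sketch of that growth step, using
 * kmem_zalloc()/kmem_free()/bcopy() as the driver does -- the function
 * itself is hypothetical:
 */
#if 0	/* illustrative sketch only */
static void
grow_table(void ***tabp, uint_t *maxp, uint_t limit)
{
	void **otab = *tabp;
	uint_t omax = *maxp;
	uint_t nmax = (omax == 0) ? 2 : omax * 2;

	if (nmax > limit)
		nmax = limit;

	*tabp = kmem_zalloc(nmax * sizeof (void *), KM_SLEEP);
	*maxp = nmax;

	if (otab != NULL) {
		bcopy(otab, *tabp, omax * sizeof (void *));
		kmem_free(otab, omax * sizeof (void *));
	}
}
#endif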
14280 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
14282 mutex_enter(&dtrace_lock);
14284 if (--hprov->dthp_ref == 0) {
14286 mutex_exit(&dtrace_lock);
14287 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
14288 dtrace_dof_destroy(dof);
14289 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
14291 mutex_exit(&dtrace_lock);
14296 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14298 uintptr_t daddr = (uintptr_t)dof;
14299 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14300 dof_provider_t *provider;
14301 dof_probe_t *probe;
14303 char *strtab, *typestr;
14304 dof_stridx_t typeidx;
14306 uint_t nprobes, j, k;
14308 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14310 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14311 dtrace_dof_error(dof, "misaligned section offset");
14316 * The section needs to be large enough to contain the DOF provider
14317 * structure appropriate for the given version.
14319 if (sec->dofs_size <
14320 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14321 offsetof(dof_provider_t, dofpv_prenoffs) :
14322 sizeof (dof_provider_t))) {
14323 dtrace_dof_error(dof, "provider section too small");
14327 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14328 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14329 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14330 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14331 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14333 if (str_sec == NULL || prb_sec == NULL ||
14334 arg_sec == NULL || off_sec == NULL)
14339 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14340 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14341 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14342 provider->dofpv_prenoffs)) == NULL)
14345 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14347 if (provider->dofpv_name >= str_sec->dofs_size ||
14348 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14349 dtrace_dof_error(dof, "invalid provider name");
14353 if (prb_sec->dofs_entsize == 0 ||
14354 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14355 dtrace_dof_error(dof, "invalid entry size");
14359 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14360 dtrace_dof_error(dof, "misaligned entry size");
14364 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14365 dtrace_dof_error(dof, "invalid entry size");
14369 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14370 dtrace_dof_error(dof, "misaligned section offset");
14374 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14375 dtrace_dof_error(dof, "invalid entry size");
14379 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14381 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14384 * Take a pass through the probes to check for errors.
14386 for (j = 0; j < nprobes; j++) {
14387 probe = (dof_probe_t *)(uintptr_t)(daddr +
14388 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14390 if (probe->dofpr_func >= str_sec->dofs_size) {
14391 dtrace_dof_error(dof, "invalid function name");
14395 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14396 dtrace_dof_error(dof, "function name too long");
14400 if (probe->dofpr_name >= str_sec->dofs_size ||
14401 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14402 dtrace_dof_error(dof, "invalid probe name");
14407 * The offset count must not wrap the index, and the offsets
14408 * must also not overflow the section's data.
14410 if (probe->dofpr_offidx + probe->dofpr_noffs <
14411 probe->dofpr_offidx ||
14412 (probe->dofpr_offidx + probe->dofpr_noffs) *
14413 off_sec->dofs_entsize > off_sec->dofs_size) {
14414 dtrace_dof_error(dof, "invalid probe offset");
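/*
 * The first clause of the test above is the standard unsigned-overflow
 * check: for unsigned a and b, the sum a + b wraps exactly when
 * a + b < a.  A tiny stand-alone illustration (hypothetical name, not
 * part of the driver):
 */
#if 0	/* illustrative sketch only */
static int
add_wraps(uint32_t a, uint32_t b)
{
	return (a + b < a);	/* true iff the 32-bit sum wrapped around */
}
#endif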
14418 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14420 * If there's no is-enabled offset section, make sure
14421 * there aren't any is-enabled offsets. Otherwise
14422 * perform the same checks as for probe offsets
14423 * (immediately above).
14425 if (enoff_sec == NULL) {
14426 if (probe->dofpr_enoffidx != 0 ||
14427 probe->dofpr_nenoffs != 0) {
14428 dtrace_dof_error(dof, "is-enabled "
14429 "offsets with null section");
14432 } else if (probe->dofpr_enoffidx +
14433 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14434 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14435 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14436 dtrace_dof_error(dof, "invalid is-enabled "
14441 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14442 dtrace_dof_error(dof, "zero probe and "
14443 "is-enabled offsets");
14446 } else if (probe->dofpr_noffs == 0) {
14447 dtrace_dof_error(dof, "zero probe offsets");
14451 if (probe->dofpr_argidx + probe->dofpr_xargc <
14452 probe->dofpr_argidx ||
14453 (probe->dofpr_argidx + probe->dofpr_xargc) *
14454 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14455 dtrace_dof_error(dof, "invalid args");
14459 typeidx = probe->dofpr_nargv;
14460 typestr = strtab + probe->dofpr_nargv;
14461 for (k = 0; k < probe->dofpr_nargc; k++) {
14462 if (typeidx >= str_sec->dofs_size) {
14463 dtrace_dof_error(dof, "bad "
14464 "native argument type");
14468 typesz = strlen(typestr) + 1;
14469 if (typesz > DTRACE_ARGTYPELEN) {
14470 dtrace_dof_error(dof, "native "
14471 "argument type too long");
14478 typeidx = probe->dofpr_xargv;
14479 typestr = strtab + probe->dofpr_xargv;
14480 for (k = 0; k < probe->dofpr_xargc; k++) {
14481 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
14482 dtrace_dof_error(dof, "bad "
14483 "native argument index");
14487 if (typeidx >= str_sec->dofs_size) {
14488 dtrace_dof_error(dof, "bad "
14489 "translated argument type");
14493 typesz = strlen(typestr) + 1;
14494 if (typesz > DTRACE_ARGTYPELEN) {
14495 dtrace_dof_error(dof, "translated argument "
14509 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
14511 dtrace_helpers_t *help;
14512 dtrace_vstate_t *vstate;
14513 dtrace_enabling_t *enab = NULL;
14514 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
14515 uintptr_t daddr = (uintptr_t)dof;
14517 ASSERT(MUTEX_HELD(&dtrace_lock));
14519 if ((help = curproc->p_dtrace_helpers) == NULL)
14520 help = dtrace_helpers_create(curproc);
14522 vstate = &help->dthps_vstate;
14524 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
14525 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
14526 dtrace_dof_destroy(dof);
14531 * Look for helper providers and validate their descriptions.
14534 for (i = 0; i < dof->dofh_secnum; i++) {
14535 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14536 dof->dofh_secoff + i * dof->dofh_secsize);
14538 if (sec->dofs_type != DOF_SECT_PROVIDER)
14541 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14542 dtrace_enabling_destroy(enab);
14543 dtrace_dof_destroy(dof);
14552 * Now we need to walk through the ECB descriptions in the enabling.
14554 for (i = 0; i < enab->dten_ndesc; i++) {
14555 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14556 dtrace_probedesc_t *desc = &ep->dted_probe;
14558 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14561 if (strcmp(desc->dtpd_mod, "helper") != 0)
14564 if (strcmp(desc->dtpd_func, "ustack") != 0)
14567 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14570 * Adding this helper action failed -- we are now going
14571 * to rip out the entire generation and return failure.
14573 (void) dtrace_helper_destroygen(help->dthps_generation);
14574 dtrace_enabling_destroy(enab);
14575 dtrace_dof_destroy(dof);
14582 if (nhelpers < enab->dten_ndesc)
14583 dtrace_dof_error(dof, "unmatched helpers");
14585 gen = help->dthps_generation++;
14586 dtrace_enabling_destroy(enab);
14588 if (dhp != NULL && nprovs > 0) {
14589 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14590 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14591 mutex_exit(&dtrace_lock);
14592 dtrace_helper_provider_register(curproc, help, dhp);
14593 mutex_enter(&dtrace_lock);
14600 dtrace_dof_destroy(dof);
14605 static dtrace_helpers_t *
14606 dtrace_helpers_create(proc_t *p)
14608 dtrace_helpers_t *help;
14610 ASSERT(MUTEX_HELD(&dtrace_lock));
14611 ASSERT(p->p_dtrace_helpers == NULL);
14613 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14614 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14615 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14617 p->p_dtrace_helpers = help;
14624 dtrace_helpers_destroy(void)
14626 dtrace_helpers_t *help;
14627 dtrace_vstate_t *vstate;
14628 proc_t *p = curproc;
14631 mutex_enter(&dtrace_lock);
14633 ASSERT(p->p_dtrace_helpers != NULL);
14634 ASSERT(dtrace_helpers > 0);
14636 help = p->p_dtrace_helpers;
14637 vstate = &help->dthps_vstate;
14640 * We're now going to lose the help from this process.
14642 p->p_dtrace_helpers = NULL;
14646 * Destroy the helper actions.
14648 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14649 dtrace_helper_action_t *h, *next;
14651 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14652 next = h->dtha_next;
14653 dtrace_helper_action_destroy(h, vstate);
14658 mutex_exit(&dtrace_lock);
14661 * Destroy the helper providers.
14663 if (help->dthps_maxprovs > 0) {
14664 mutex_enter(&dtrace_meta_lock);
14665 if (dtrace_meta_pid != NULL) {
14666 ASSERT(dtrace_deferred_pid == NULL);
14668 for (i = 0; i < help->dthps_nprovs; i++) {
14669 dtrace_helper_provider_remove(
14670 &help->dthps_provs[i]->dthp_prov, p->p_pid);
14673 mutex_enter(&dtrace_lock);
14674 ASSERT(help->dthps_deferred == 0 ||
14675 help->dthps_next != NULL ||
14676 help->dthps_prev != NULL ||
14677 help == dtrace_deferred_pid);
14680 * Remove the helper from the deferred list.
14682 if (help->dthps_next != NULL)
14683 help->dthps_next->dthps_prev = help->dthps_prev;
14684 if (help->dthps_prev != NULL)
14685 help->dthps_prev->dthps_next = help->dthps_next;
14686 if (dtrace_deferred_pid == help) {
14687 dtrace_deferred_pid = help->dthps_next;
14688 ASSERT(help->dthps_prev == NULL);
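/*
 * The fixups above are the standard removal from a NULL-terminated
 * doubly-linked list with an external head pointer.  A generic sketch --
 * hypothetical node type, not the helper structures themselves:
 */
#if 0	/* illustrative sketch only */
struct node {
	struct node *next, *prev;
};

static void
unlink_node(struct node **headp, struct node *n)
{
	if (n->next != NULL)
		n->next->prev = n->prev;
	if (n->prev != NULL)
		n->prev->next = n->next;
	if (*headp == n)
		*headp = n->next;
}
#endif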
14691 mutex_exit(&dtrace_lock);
14694 mutex_exit(&dtrace_meta_lock);
14696 for (i = 0; i < help->dthps_nprovs; i++) {
14697 dtrace_helper_provider_destroy(help->dthps_provs[i]);
14700 kmem_free(help->dthps_provs, help->dthps_maxprovs *
14701 sizeof (dtrace_helper_provider_t *));
14704 mutex_enter(&dtrace_lock);
14706 dtrace_vstate_fini(&help->dthps_vstate);
14707 kmem_free(help->dthps_actions,
14708 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
14709 kmem_free(help, sizeof (dtrace_helpers_t));
14712 mutex_exit(&dtrace_lock);
14716 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
14718 dtrace_helpers_t *help, *newhelp;
14719 dtrace_helper_action_t *helper, *new, *last;
14721 dtrace_vstate_t *vstate;
14722 int i, j, sz, hasprovs = 0;
14724 mutex_enter(&dtrace_lock);
14725 ASSERT(from->p_dtrace_helpers != NULL);
14726 ASSERT(dtrace_helpers > 0);
14728 help = from->p_dtrace_helpers;
14729 newhelp = dtrace_helpers_create(to);
14730 ASSERT(to->p_dtrace_helpers != NULL);
14732 newhelp->dthps_generation = help->dthps_generation;
14733 vstate = &newhelp->dthps_vstate;
14736 * Duplicate the helper actions.
14738 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14739 if ((helper = help->dthps_actions[i]) == NULL)
14742 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
14743 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
14745 new->dtha_generation = helper->dtha_generation;
14747 if ((dp = helper->dtha_predicate) != NULL) {
14748 dp = dtrace_difo_duplicate(dp, vstate);
14749 new->dtha_predicate = dp;
14752 new->dtha_nactions = helper->dtha_nactions;
14753 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
14754 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
14756 for (j = 0; j < new->dtha_nactions; j++) {
14757 dtrace_difo_t *dp = helper->dtha_actions[j];
14759 ASSERT(dp != NULL);
14760 dp = dtrace_difo_duplicate(dp, vstate);
14761 new->dtha_actions[j] = dp;
14764 if (last != NULL) {
14765 last->dtha_next = new;
14767 newhelp->dthps_actions[i] = new;
14775 * Duplicate the helper providers and register them with the
14776 * DTrace framework.
14778 if (help->dthps_nprovs > 0) {
14779 newhelp->dthps_nprovs = help->dthps_nprovs;
14780 newhelp->dthps_maxprovs = help->dthps_nprovs;
14781 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14782 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14783 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14784 newhelp->dthps_provs[i] = help->dthps_provs[i];
14785 newhelp->dthps_provs[i]->dthp_ref++;
14791 mutex_exit(&dtrace_lock);
14794 dtrace_helper_provider_register(to, newhelp, NULL);
14800 * DTrace Hook Functions
14803 dtrace_module_loaded(modctl_t *ctl)
14805 dtrace_provider_t *prv;
14807 mutex_enter(&dtrace_provider_lock);
14808 mutex_enter(&mod_lock);
14810 ASSERT(ctl->mod_busy);
14813 * We're going to call each provider's per-module provide operation
14814 * specifying only this module.
14816 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14817 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14819 mutex_exit(&mod_lock);
14820 mutex_exit(&dtrace_provider_lock);
14823 * If we have any retained enablings, we need to match against them.
14824 * Enabling probes requires that cpu_lock be held, and we cannot hold
14825 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14826 * module. (In particular, this happens when loading scheduling
14827 * classes.) So if we have any retained enablings, we need to dispatch
14828 * our task queue to do the match for us.
14830 mutex_enter(&dtrace_lock);
14832 if (dtrace_retained == NULL) {
14833 mutex_exit(&dtrace_lock);
14837 (void) taskq_dispatch(dtrace_taskq,
14838 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14840 mutex_exit(&dtrace_lock);
14843 * And now, for a little heuristic sleaze: in general, we want to
14844 * match modules as soon as they load. However, we cannot guarantee
14845 * this, because it would lead us to the lock ordering violation
14846 * outlined above. The common case, of course, is that cpu_lock is
14847 * _not_ held -- so we delay here for a clock tick, hoping that that's
14848 * long enough for the task queue to do its work. If it's not, it's
14849 * not a serious problem -- it just means that the module that we
14850 * just loaded may not be immediately instrumentable.
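/*
 * The clock-tick delay that the comment above describes was elided from
 * this extract; a minimal sketch of the tail of dtrace_module_loaded()
 * follows, assuming the Solaris delay(9F) interface (one tick):
 */
delay(1);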
14856 dtrace_module_unloaded(modctl_t *ctl)
14858 dtrace_probe_t template, *probe, *first, *next;
14859 dtrace_provider_t *prov;
14861 template.dtpr_mod = ctl->mod_modname;
14863 mutex_enter(&dtrace_provider_lock);
14864 mutex_enter(&mod_lock);
14865 mutex_enter(&dtrace_lock);
14867 if (dtrace_bymod == NULL) {
14869 * The DTrace module is loaded (obviously) but not attached;
14870 * we don't have any work to do.
14872 mutex_exit(&dtrace_provider_lock);
14873 mutex_exit(&mod_lock);
14874 mutex_exit(&dtrace_lock);
14878 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14879 probe != NULL; probe = probe->dtpr_nextmod) {
14880 if (probe->dtpr_ecb != NULL) {
14881 mutex_exit(&dtrace_provider_lock);
14882 mutex_exit(&mod_lock);
14883 mutex_exit(&dtrace_lock);
14886 * This shouldn't _actually_ be possible -- we're
14887 * unloading a module that has an enabled probe in it.
14888 * (It's normally up to the provider to make sure that
14889 * this can't happen.) However, because dtps_enable()
14890 * doesn't have a failure mode, there can be an
14891 * enable/unload race. Upshot: we don't want to
14892 * assert, but we're not going to disable the probe, either.
14895 if (dtrace_err_verbose) {
14896 cmn_err(CE_WARN, "unloaded module '%s' had "
14897 "enabled probes", ctl->mod_modname);
14906 for (first = NULL; probe != NULL; probe = next) {
14907 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
14909 dtrace_probes[probe->dtpr_id - 1] = NULL;
14911 next = probe->dtpr_nextmod;
14912 dtrace_hash_remove(dtrace_bymod, probe);
14913 dtrace_hash_remove(dtrace_byfunc, probe);
14914 dtrace_hash_remove(dtrace_byname, probe);
14916 if (first == NULL) {
14918 probe->dtpr_nextmod = NULL;
14920 probe->dtpr_nextmod = first;
14926 * We've removed all of the module's probes from the hash chains and
14927 * from the probe array. Now issue a dtrace_sync() to be sure that
14928 * everyone has cleared out from any probe array processing.
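/*
 * Sketch of the synchronization step named in the comment above; the call
 * itself was elided from this extract. dtrace_sync() is the framework's own
 * cross-call barrier, so this is a completion rather than new logic:
 */
dtrace_sync();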
14932 for (probe = first; probe != NULL; probe = first) {
14933 first = probe->dtpr_nextmod;
14934 prov = probe->dtpr_provider;
14935 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
14937 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
14938 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
14939 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
14940 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
14941 kmem_free(probe, sizeof (dtrace_probe_t));
14944 mutex_exit(&dtrace_lock);
14945 mutex_exit(&mod_lock);
14946 mutex_exit(&dtrace_provider_lock);
14950 dtrace_suspend(void)
14952 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
14956 dtrace_resume(void)
14958 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
14963 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
14965 ASSERT(MUTEX_HELD(&cpu_lock));
14966 mutex_enter(&dtrace_lock);
14970 dtrace_state_t *state;
14971 dtrace_optval_t *opt, rs, c;
14974 * For now, we only allocate a new buffer for anonymous state.
14976 if ((state = dtrace_anon.dta_state) == NULL)
14979 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14982 opt = state->dts_options;
14983 c = opt[DTRACEOPT_CPU];
14985 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
14989 * Regardless of what the actual policy is, we're going to
14990 * temporarily set our resize policy to be manual. We're
14991 * also going to temporarily set our CPU option to denote
14992 * the newly configured CPU.
14994 rs = opt[DTRACEOPT_BUFRESIZE];
14995 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
14996 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
14998 (void) dtrace_state_buffers(state);
15000 opt[DTRACEOPT_BUFRESIZE] = rs;
15001 opt[DTRACEOPT_CPU] = c;
15008 * We don't free the buffer in the CPU_UNCONFIG case. (The
15009 * buffer will be freed when the consumer exits.)
15017 mutex_exit(&dtrace_lock);
15023 dtrace_cpu_setup_initial(processorid_t cpu)
15025 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15030 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15032 if (dtrace_toxranges >= dtrace_toxranges_max) {
15034 dtrace_toxrange_t *range;
15036 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15039 ASSERT(dtrace_toxrange == NULL);
15040 ASSERT(dtrace_toxranges_max == 0);
15041 dtrace_toxranges_max = 1;
15043 dtrace_toxranges_max <<= 1;
15046 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15047 range = kmem_zalloc(nsize, KM_SLEEP);
15049 if (dtrace_toxrange != NULL) {
15050 ASSERT(osize != 0);
15051 bcopy(dtrace_toxrange, range, osize);
15052 kmem_free(dtrace_toxrange, osize);
15055 dtrace_toxrange = range;
15058 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
15059 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
15061 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15062 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15063 dtrace_toxranges++;
15067 * DTrace Driver Cookbook Functions
15072 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15074 dtrace_provider_id_t id;
15075 dtrace_state_t *state = NULL;
15076 dtrace_enabling_t *enab;
15078 mutex_enter(&cpu_lock);
15079 mutex_enter(&dtrace_provider_lock);
15080 mutex_enter(&dtrace_lock);
15082 if (ddi_soft_state_init(&dtrace_softstate,
15083 sizeof (dtrace_state_t), 0) != 0) {
15084 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15085 mutex_exit(&cpu_lock);
15086 mutex_exit(&dtrace_provider_lock);
15087 mutex_exit(&dtrace_lock);
15088 return (DDI_FAILURE);
15091 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15092 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15093 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15094 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15095 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15096 ddi_remove_minor_node(devi, NULL);
15097 ddi_soft_state_fini(&dtrace_softstate);
15098 mutex_exit(&cpu_lock);
15099 mutex_exit(&dtrace_provider_lock);
15100 mutex_exit(&dtrace_lock);
15101 return (DDI_FAILURE);
15104 ddi_report_dev(devi);
15105 dtrace_devi = devi;
15107 dtrace_modload = dtrace_module_loaded;
15108 dtrace_modunload = dtrace_module_unloaded;
15109 dtrace_cpu_init = dtrace_cpu_setup_initial;
15110 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15111 dtrace_helpers_fork = dtrace_helpers_duplicate;
15112 dtrace_cpustart_init = dtrace_suspend;
15113 dtrace_cpustart_fini = dtrace_resume;
15114 dtrace_debugger_init = dtrace_suspend;
15115 dtrace_debugger_fini = dtrace_resume;
15117 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15119 ASSERT(MUTEX_HELD(&cpu_lock));
15121 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15122 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15123 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15124 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15125 VM_SLEEP | VMC_IDENTIFIER);
15126 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15129 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15130 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15131 NULL, NULL, NULL, NULL, NULL, 0);
15133 ASSERT(MUTEX_HELD(&cpu_lock));
15134 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15135 offsetof(dtrace_probe_t, dtpr_nextmod),
15136 offsetof(dtrace_probe_t, dtpr_prevmod));
15138 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15139 offsetof(dtrace_probe_t, dtpr_nextfunc),
15140 offsetof(dtrace_probe_t, dtpr_prevfunc));
15142 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15143 offsetof(dtrace_probe_t, dtpr_nextname),
15144 offsetof(dtrace_probe_t, dtpr_prevname));
15146 if (dtrace_retain_max < 1) {
15147 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15148 "setting to 1", dtrace_retain_max);
15149 dtrace_retain_max = 1;
15153 * Now discover our toxic ranges.
15155 dtrace_toxic_ranges(dtrace_toxrange_add);
15158 * Before we register ourselves as a provider to our own framework,
15159 * we would like to assert that dtrace_provider is NULL -- but that's
15160 * not true if we were loaded as a dependency of a DTrace provider.
15161 * Once we've registered, we can assert that dtrace_provider is our pseudo provider.
15164 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15165 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15167 ASSERT(dtrace_provider != NULL);
15168 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15170 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15171 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15172 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15173 dtrace_provider, NULL, NULL, "END", 0, NULL);
15174 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15175 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15177 dtrace_anon_property();
15178 mutex_exit(&cpu_lock);
15181 * If DTrace helper tracing is enabled, we need to allocate the
15182 * trace buffer and initialize the values.
15184 if (dtrace_helptrace_enabled) {
15185 ASSERT(dtrace_helptrace_buffer == NULL);
15186 dtrace_helptrace_buffer =
15187 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15188 dtrace_helptrace_next = 0;
15192 * If there are already providers, we must ask them to provide their
15193 * probes, and then match any anonymous enabling against them. Note
15194 * that there should be no other retained enablings at this time:
15195 * the only retained enablings at this time should be the anonymous enabling.
15198 if (dtrace_anon.dta_enabling != NULL) {
15199 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15201 dtrace_enabling_provide(NULL);
15202 state = dtrace_anon.dta_state;
15205 * We couldn't hold cpu_lock across the above call to
15206 * dtrace_enabling_provide(), but we must hold it to actually
15207 * enable the probes. We have to drop all of our locks, pick
15208 * up cpu_lock, and regain our locks before matching the
15209 * retained anonymous enabling.
15211 mutex_exit(&dtrace_lock);
15212 mutex_exit(&dtrace_provider_lock);
15214 mutex_enter(&cpu_lock);
15215 mutex_enter(&dtrace_provider_lock);
15216 mutex_enter(&dtrace_lock);
15218 if ((enab = dtrace_anon.dta_enabling) != NULL)
15219 (void) dtrace_enabling_match(enab, NULL);
15221 mutex_exit(&cpu_lock);
15224 mutex_exit(&dtrace_lock);
15225 mutex_exit(&dtrace_provider_lock);
15227 if (state != NULL) {
15229 * If we created any anonymous state, set it going now.
15231 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15234 return (DDI_SUCCESS);
15241 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15243 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
15246 dtrace_state_t *state;
15252 if (getminor(*devp) == DTRACEMNRN_HELPER)
15256 * If this wasn't an open with the "helper" minor, then it must be
15257 * the "dtrace" minor.
15259 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE);
15261 cred_t *cred_p = NULL;
15264 * The first minor device is the one that is cloned so there is
15265 * nothing more to do here.
15267 if (minor(dev) == 0)
15271 * Devices are cloned, so if the DTrace state has already
15272 * been allocated, that means this device belongs to a
15273 * different client. Each client should open '/dev/dtrace'
15274 * to get a cloned device.
15276 if (dev->si_drv1 != NULL)
15279 cred_p = dev->si_cred;
15283 * If no DTRACE_PRIV_* bits are set in the credential, then the
15284 * caller lacks sufficient permission to do anything with DTrace.
15286 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15287 if (priv == DTRACE_PRIV_NONE) {
15289 /* Destroy the cloned device. */
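/*
 * A hedged sketch of the unprivileged-open path elided here: tear down the
 * cloned device and reject the open. destroy_dev_sched() is the same
 * interface used in dtrace_close() below; returning EACCES mirrors the
 * Solaris behavior and is an assumption for the FreeBSD port.
 */
destroy_dev_sched(dev);
return (EACCES);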
15297 * Ask all providers to provide all their probes.
15299 mutex_enter(&dtrace_provider_lock);
15300 dtrace_probe_provide(NULL, NULL);
15301 mutex_exit(&dtrace_provider_lock);
15303 mutex_enter(&cpu_lock);
15304 mutex_enter(&dtrace_lock);
15306 dtrace_membar_producer();
15310 * If the kernel debugger is active (that is, if the kernel debugger
15311 * modified text in some way), we won't allow the open.
15313 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15315 mutex_exit(&cpu_lock);
15316 mutex_exit(&dtrace_lock);
15320 state = dtrace_state_create(devp, cred_p);
15322 state = dtrace_state_create(dev);
15323 dev->si_drv1 = state;
15326 mutex_exit(&cpu_lock);
15328 if (state == NULL) {
15330 if (--dtrace_opens == 0)
15331 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15335 mutex_exit(&dtrace_lock);
15337 /* Destroy the cloned device. */
15343 mutex_exit(&dtrace_lock);
15351 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15353 dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
15357 minor_t minor = getminor(dev);
15358 dtrace_state_t *state;
15360 if (minor == DTRACEMNRN_HELPER)
15363 state = ddi_get_soft_state(dtrace_softstate, minor);
15365 dtrace_state_t *state = dev->si_drv1;
15367 /* Check if this is not a cloned device. */
15368 if (minor(dev) == 0)
15373 mutex_enter(&cpu_lock);
15374 mutex_enter(&dtrace_lock);
15376 if (state != NULL) {
15377 if (state->dts_anon) {
15379 * There is anonymous state. Destroy that first.
15381 ASSERT(dtrace_anon.dta_state == NULL);
15382 dtrace_state_destroy(state->dts_anon);
15385 dtrace_state_destroy(state);
15388 kmem_free(state, 0);
15389 dev->si_drv1 = NULL;
15393 ASSERT(dtrace_opens > 0);
15395 if (--dtrace_opens == 0)
15396 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15401 mutex_exit(&dtrace_lock);
15402 mutex_exit(&cpu_lock);
15404 /* Schedule this cloned device to be destroyed. */
15405 destroy_dev_sched(dev);
15413 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15416 dof_helper_t help, *dhp = NULL;
15419 case DTRACEHIOC_ADDDOF:
15420 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15421 dtrace_dof_error(NULL, "failed to copyin DOF helper");
15426 arg = (intptr_t)help.dofhp_dof;
15429 case DTRACEHIOC_ADD: {
15430 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
15435 mutex_enter(&dtrace_lock);
15438 * dtrace_helper_slurp() takes responsibility for the dof --
15439 * it may free it now or it may save it and free it later.
15441 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
15448 mutex_exit(&dtrace_lock);
15452 case DTRACEHIOC_REMOVE: {
15453 mutex_enter(&dtrace_lock);
15454 rval = dtrace_helper_destroygen(arg);
15455 mutex_exit(&dtrace_lock);
15469 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15471 minor_t minor = getminor(dev);
15472 dtrace_state_t *state;
15475 if (minor == DTRACEMNRN_HELPER)
15476 return (dtrace_ioctl_helper(cmd, arg, rv));
15478 state = ddi_get_soft_state(dtrace_softstate, minor);
15480 if (state->dts_anon) {
15481 ASSERT(dtrace_anon.dta_state == NULL);
15482 state = state->dts_anon;
15486 case DTRACEIOC_PROVIDER: {
15487 dtrace_providerdesc_t pvd;
15488 dtrace_provider_t *pvp;
15490 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15493 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15494 mutex_enter(&dtrace_provider_lock);
15496 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15497 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15501 mutex_exit(&dtrace_provider_lock);
15506 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15507 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15509 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15515 case DTRACEIOC_EPROBE: {
15516 dtrace_eprobedesc_t epdesc;
15518 dtrace_action_t *act;
15524 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
15527 mutex_enter(&dtrace_lock);
15529 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
15530 mutex_exit(&dtrace_lock);
15534 if (ecb->dte_probe == NULL) {
15535 mutex_exit(&dtrace_lock);
15539 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
15540 epdesc.dtepd_uarg = ecb->dte_uarg;
15541 epdesc.dtepd_size = ecb->dte_size;
15543 nrecs = epdesc.dtepd_nrecs;
15544 epdesc.dtepd_nrecs = 0;
15545 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15546 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15549 epdesc.dtepd_nrecs++;
15553 * Now that we have the size, we need to allocate a temporary
15554 * buffer in which to store the complete description. We need
15555 * the temporary buffer to be able to drop dtrace_lock()
15556 * across the copyout(), below.
15558 size = sizeof (dtrace_eprobedesc_t) +
15559 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
15561 buf = kmem_alloc(size, KM_SLEEP);
15562 dest = (uintptr_t)buf;
15564 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
15565 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
15567 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15568 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15574 bcopy(&act->dta_rec, (void *)dest,
15575 sizeof (dtrace_recdesc_t));
15576 dest += sizeof (dtrace_recdesc_t);
15579 mutex_exit(&dtrace_lock);
15581 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15582 kmem_free(buf, size);
15586 kmem_free(buf, size);
15590 case DTRACEIOC_AGGDESC: {
15591 dtrace_aggdesc_t aggdesc;
15592 dtrace_action_t *act;
15593 dtrace_aggregation_t *agg;
15596 dtrace_recdesc_t *lrec;
15601 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
15604 mutex_enter(&dtrace_lock);
15606 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
15607 mutex_exit(&dtrace_lock);
15611 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
15613 nrecs = aggdesc.dtagd_nrecs;
15614 aggdesc.dtagd_nrecs = 0;
15616 offs = agg->dtag_base;
15617 lrec = &agg->dtag_action.dta_rec;
15618 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
15620 for (act = agg->dtag_first; ; act = act->dta_next) {
15621 ASSERT(act->dta_intuple ||
15622 DTRACEACT_ISAGG(act->dta_kind));
15625 * If this action has a record size of zero, it
15626 * denotes an argument to the aggregating action.
15627 * Because the presence of this record doesn't (or
15628 * shouldn't) affect the way the data is interpreted,
15629 * we don't copy it out to save user-level the
15630 * confusion of dealing with a zero-length record.
15632 if (act->dta_rec.dtrd_size == 0) {
15633 ASSERT(agg->dtag_hasarg);
15637 aggdesc.dtagd_nrecs++;
15639 if (act == &agg->dtag_action)
15644 * Now that we have the size, we need to allocate a temporary
15645 * buffer in which to store the complete description. We need
15646 * the temporary buffer to be able to drop dtrace_lock()
15647 * across the copyout(), below.
15649 size = sizeof (dtrace_aggdesc_t) +
15650 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
15652 buf = kmem_alloc(size, KM_SLEEP);
15653 dest = (uintptr_t)buf;
15655 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
15656 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
15658 for (act = agg->dtag_first; ; act = act->dta_next) {
15659 dtrace_recdesc_t rec = act->dta_rec;
15662 * See the comment in the above loop for why we pass
15663 * over zero-length records.
15665 if (rec.dtrd_size == 0) {
15666 ASSERT(agg->dtag_hasarg);
15673 rec.dtrd_offset -= offs;
15674 bcopy(&rec, (void *)dest, sizeof (rec));
15675 dest += sizeof (dtrace_recdesc_t);
15677 if (act == &agg->dtag_action)
15681 mutex_exit(&dtrace_lock);
15683 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15684 kmem_free(buf, size);
15688 kmem_free(buf, size);
15692 case DTRACEIOC_ENABLE: {
15694 dtrace_enabling_t *enab = NULL;
15695 dtrace_vstate_t *vstate;
15701 * If a NULL argument has been passed, we take this as our
15702 * cue to reevaluate our enablings.
15705 dtrace_enabling_matchall();
15710 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
15713 mutex_enter(&cpu_lock);
15714 mutex_enter(&dtrace_lock);
15715 vstate = &state->dts_vstate;
15717 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
15718 mutex_exit(&dtrace_lock);
15719 mutex_exit(&cpu_lock);
15720 dtrace_dof_destroy(dof);
15724 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
15725 mutex_exit(&dtrace_lock);
15726 mutex_exit(&cpu_lock);
15727 dtrace_dof_destroy(dof);
15731 if ((rval = dtrace_dof_options(dof, state)) != 0) {
15732 dtrace_enabling_destroy(enab);
15733 mutex_exit(&dtrace_lock);
15734 mutex_exit(&cpu_lock);
15735 dtrace_dof_destroy(dof);
15739 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
15740 err = dtrace_enabling_retain(enab);
15742 dtrace_enabling_destroy(enab);
15745 mutex_exit(&cpu_lock);
15746 mutex_exit(&dtrace_lock);
15747 dtrace_dof_destroy(dof);
15752 case DTRACEIOC_REPLICATE: {
15753 dtrace_repldesc_t desc;
15754 dtrace_probedesc_t *match = &desc.dtrpd_match;
15755 dtrace_probedesc_t *create = &desc.dtrpd_create;
15758 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15761 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15762 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15763 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15764 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15766 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15767 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15768 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15769 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15771 mutex_enter(&dtrace_lock);
15772 err = dtrace_enabling_replicate(state, match, create);
15773 mutex_exit(&dtrace_lock);
15778 case DTRACEIOC_PROBEMATCH:
15779 case DTRACEIOC_PROBES: {
15780 dtrace_probe_t *probe = NULL;
15781 dtrace_probedesc_t desc;
15782 dtrace_probekey_t pkey;
15789 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15792 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15793 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15794 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15795 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15798 * Before we attempt to match this probe, we want to give
15799 * all providers the opportunity to provide it.
15801 if (desc.dtpd_id == DTRACE_IDNONE) {
15802 mutex_enter(&dtrace_provider_lock);
15803 dtrace_probe_provide(&desc, NULL);
15804 mutex_exit(&dtrace_provider_lock);
15808 if (cmd == DTRACEIOC_PROBEMATCH) {
15809 dtrace_probekey(&desc, &pkey);
15810 pkey.dtpk_id = DTRACE_IDNONE;
15813 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
15815 mutex_enter(&dtrace_lock);
15817 if (cmd == DTRACEIOC_PROBEMATCH) {
15818 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15819 if ((probe = dtrace_probes[i - 1]) != NULL &&
15820 (m = dtrace_match_probe(probe, &pkey,
15821 priv, uid, zoneid)) != 0)
15826 mutex_exit(&dtrace_lock);
15831 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15832 if ((probe = dtrace_probes[i - 1]) != NULL &&
15833 dtrace_match_priv(probe, priv, uid, zoneid))
15838 if (probe == NULL) {
15839 mutex_exit(&dtrace_lock);
15843 dtrace_probe_description(probe, &desc);
15844 mutex_exit(&dtrace_lock);
15846 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15852 case DTRACEIOC_PROBEARG: {
15853 dtrace_argdesc_t desc;
15854 dtrace_probe_t *probe;
15855 dtrace_provider_t *prov;
15857 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15860 if (desc.dtargd_id == DTRACE_IDNONE)
15863 if (desc.dtargd_ndx == DTRACE_ARGNONE)
15866 mutex_enter(&dtrace_provider_lock);
15867 mutex_enter(&mod_lock);
15868 mutex_enter(&dtrace_lock);
15870 if (desc.dtargd_id > dtrace_nprobes) {
15871 mutex_exit(&dtrace_lock);
15872 mutex_exit(&mod_lock);
15873 mutex_exit(&dtrace_provider_lock);
15877 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
15878 mutex_exit(&dtrace_lock);
15879 mutex_exit(&mod_lock);
15880 mutex_exit(&dtrace_provider_lock);
15884 mutex_exit(&dtrace_lock);
15886 prov = probe->dtpr_provider;
15888 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
15890 * There isn't any typed information for this probe.
15891 * Set the argument number to DTRACE_ARGNONE.
15893 desc.dtargd_ndx = DTRACE_ARGNONE;
15895 desc.dtargd_native[0] = '\0';
15896 desc.dtargd_xlate[0] = '\0';
15897 desc.dtargd_mapping = desc.dtargd_ndx;
15899 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
15900 probe->dtpr_id, probe->dtpr_arg, &desc);
15903 mutex_exit(&mod_lock);
15904 mutex_exit(&dtrace_provider_lock);
15906 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15912 case DTRACEIOC_GO: {
15913 processorid_t cpuid;
15914 rval = dtrace_state_go(state, &cpuid);
15919 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15925 case DTRACEIOC_STOP: {
15926 processorid_t cpuid;
15928 mutex_enter(&dtrace_lock);
15929 rval = dtrace_state_stop(state, &cpuid);
15930 mutex_exit(&dtrace_lock);
15935 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15941 case DTRACEIOC_DOFGET: {
15942 dof_hdr_t hdr, *dof;
15945 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
15948 mutex_enter(&dtrace_lock);
15949 dof = dtrace_dof_create(state);
15950 mutex_exit(&dtrace_lock);
15952 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
15953 rval = copyout(dof, (void *)arg, len);
15954 dtrace_dof_destroy(dof);
15956 return (rval == 0 ? 0 : EFAULT);
15959 case DTRACEIOC_AGGSNAP:
15960 case DTRACEIOC_BUFSNAP: {
15961 dtrace_bufdesc_t desc;
15963 dtrace_buffer_t *buf;
15965 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15968 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
15971 mutex_enter(&dtrace_lock);
15973 if (cmd == DTRACEIOC_BUFSNAP) {
15974 buf = &state->dts_buffer[desc.dtbd_cpu];
15976 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
15979 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
15980 size_t sz = buf->dtb_offset;
15982 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
15983 mutex_exit(&dtrace_lock);
15988 * If this buffer has already been consumed, we're
15989 * going to indicate that there's nothing left here to consume.
15992 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
15993 mutex_exit(&dtrace_lock);
15995 desc.dtbd_size = 0;
15996 desc.dtbd_drops = 0;
15997 desc.dtbd_errors = 0;
15998 desc.dtbd_oldest = 0;
15999 sz = sizeof (desc);
16001 if (copyout(&desc, (void *)arg, sz) != 0)
16008 * If this is a ring buffer that has wrapped, we want
16009 * to copy the whole thing out.
16011 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16012 dtrace_buffer_polish(buf);
16013 sz = buf->dtb_size;
16016 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16017 mutex_exit(&dtrace_lock);
16021 desc.dtbd_size = sz;
16022 desc.dtbd_drops = buf->dtb_drops;
16023 desc.dtbd_errors = buf->dtb_errors;
16024 desc.dtbd_oldest = buf->dtb_xamot_offset;
16026 mutex_exit(&dtrace_lock);
16028 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16031 buf->dtb_flags |= DTRACEBUF_CONSUMED;
16036 if (buf->dtb_tomax == NULL) {
16037 ASSERT(buf->dtb_xamot == NULL);
16038 mutex_exit(&dtrace_lock);
16042 cached = buf->dtb_tomax;
16043 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16045 dtrace_xcall(desc.dtbd_cpu,
16046 (dtrace_xcall_t)dtrace_buffer_switch, buf);
16048 state->dts_errors += buf->dtb_xamot_errors;
16051 * If the buffers did not actually switch, then the cross call
16052 * did not take place -- presumably because the given CPU is
16053 * not in the ready set. If this is the case, we'll return ENOENT.
16056 if (buf->dtb_tomax == cached) {
16057 ASSERT(buf->dtb_xamot != cached);
16058 mutex_exit(&dtrace_lock);
16062 ASSERT(cached == buf->dtb_xamot);
16065 * We have our snapshot; now copy it out.
16067 if (copyout(buf->dtb_xamot, desc.dtbd_data,
16068 buf->dtb_xamot_offset) != 0) {
16069 mutex_exit(&dtrace_lock);
16073 desc.dtbd_size = buf->dtb_xamot_offset;
16074 desc.dtbd_drops = buf->dtb_xamot_drops;
16075 desc.dtbd_errors = buf->dtb_xamot_errors;
16076 desc.dtbd_oldest = 0;
16078 mutex_exit(&dtrace_lock);
16081 * Finally, copy out the buffer description.
16083 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16089 case DTRACEIOC_CONF: {
16090 dtrace_conf_t conf;
16092 bzero(&conf, sizeof (conf));
16093 conf.dtc_difversion = DIF_VERSION;
16094 conf.dtc_difintregs = DIF_DIR_NREGS;
16095 conf.dtc_diftupregs = DIF_DTR_NREGS;
16096 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16098 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16104 case DTRACEIOC_STATUS: {
16105 dtrace_status_t stat;
16106 dtrace_dstate_t *dstate;
16111 * See the comment in dtrace_state_deadman() for the reason
16112 * for setting dts_laststatus to INT64_MAX before setting
16113 * it to the correct value.
16115 state->dts_laststatus = INT64_MAX;
16116 dtrace_membar_producer();
16117 state->dts_laststatus = dtrace_gethrtime();
16119 bzero(&stat, sizeof (stat));
16121 mutex_enter(&dtrace_lock);
16123 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16124 mutex_exit(&dtrace_lock);
16128 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16129 stat.dtst_exiting = 1;
16131 nerrs = state->dts_errors;
16132 dstate = &state->dts_vstate.dtvs_dynvars;
16134 for (i = 0; i < NCPU; i++) {
16135 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16137 stat.dtst_dyndrops += dcpu->dtdsc_drops;
16138 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16139 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16141 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16142 stat.dtst_filled++;
16144 nerrs += state->dts_buffer[i].dtb_errors;
16146 for (j = 0; j < state->dts_nspeculations; j++) {
16147 dtrace_speculation_t *spec;
16148 dtrace_buffer_t *buf;
16150 spec = &state->dts_speculations[j];
16151 buf = &spec->dtsp_buffer[i];
16152 stat.dtst_specdrops += buf->dtb_xamot_drops;
16156 stat.dtst_specdrops_busy = state->dts_speculations_busy;
16157 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
16158 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
16159 stat.dtst_dblerrors = state->dts_dblerrors;
16161 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
16162 stat.dtst_errors = nerrs;
16164 mutex_exit(&dtrace_lock);
16166 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
16172 case DTRACEIOC_FORMAT: {
16173 dtrace_fmtdesc_t fmt;
16177 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
16180 mutex_enter(&dtrace_lock);
16182 if (fmt.dtfd_format == 0 ||
16183 fmt.dtfd_format > state->dts_nformats) {
16184 mutex_exit(&dtrace_lock);
16189 * Format strings are allocated contiguously and they are
16190 * never freed; if a format index is less than the number
16191 * of formats, we can assert that the format map is non-NULL
16192 * and that the format for the specified index is non-NULL.
16194 ASSERT(state->dts_formats != NULL);
16195 str = state->dts_formats[fmt.dtfd_format - 1];
16196 ASSERT(str != NULL);
16198 len = strlen(str) + 1;
16200 if (len > fmt.dtfd_length) {
16201 fmt.dtfd_length = len;
16203 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
16204 mutex_exit(&dtrace_lock);
16208 if (copyout(str, fmt.dtfd_string, len) != 0) {
16209 mutex_exit(&dtrace_lock);
16214 mutex_exit(&dtrace_lock);
16227 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
16229 dtrace_state_t *state;
16236 return (DDI_SUCCESS);
16239 return (DDI_FAILURE);
16242 mutex_enter(&cpu_lock);
16243 mutex_enter(&dtrace_provider_lock);
16244 mutex_enter(&dtrace_lock);
16246 ASSERT(dtrace_opens == 0);
16248 if (dtrace_helpers > 0) {
16249 mutex_exit(&dtrace_provider_lock);
16250 mutex_exit(&dtrace_lock);
16251 mutex_exit(&cpu_lock);
16252 return (DDI_FAILURE);
16255 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
16256 mutex_exit(&dtrace_provider_lock);
16257 mutex_exit(&dtrace_lock);
16258 mutex_exit(&cpu_lock);
16259 return (DDI_FAILURE);
16262 dtrace_provider = NULL;
16264 if ((state = dtrace_anon_grab()) != NULL) {
16266 * If there were ECBs on this state, the provider should
16267 * not have been allowed to detach; assert that there is none.
16270 ASSERT(state->dts_necbs == 0);
16271 dtrace_state_destroy(state);
16274 * If we're being detached with anonymous state, we need to
16275 * indicate to the kernel debugger that DTrace is now inactive.
16277 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16280 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16281 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16282 dtrace_cpu_init = NULL;
16283 dtrace_helpers_cleanup = NULL;
16284 dtrace_helpers_fork = NULL;
16285 dtrace_cpustart_init = NULL;
16286 dtrace_cpustart_fini = NULL;
16287 dtrace_debugger_init = NULL;
16288 dtrace_debugger_fini = NULL;
16289 dtrace_modload = NULL;
16290 dtrace_modunload = NULL;
16292 mutex_exit(&cpu_lock);
16294 if (dtrace_helptrace_enabled) {
16295 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
16296 dtrace_helptrace_buffer = NULL;
16299 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16300 dtrace_probes = NULL;
16301 dtrace_nprobes = 0;
16303 dtrace_hash_destroy(dtrace_bymod);
16304 dtrace_hash_destroy(dtrace_byfunc);
16305 dtrace_hash_destroy(dtrace_byname);
16306 dtrace_bymod = NULL;
16307 dtrace_byfunc = NULL;
16308 dtrace_byname = NULL;
16310 kmem_cache_destroy(dtrace_state_cache);
16311 vmem_destroy(dtrace_minor);
16312 vmem_destroy(dtrace_arena);
16314 if (dtrace_toxrange != NULL) {
16315 kmem_free(dtrace_toxrange,
16316 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16317 dtrace_toxrange = NULL;
16318 dtrace_toxranges = 0;
16319 dtrace_toxranges_max = 0;
16322 ddi_remove_minor_node(dtrace_devi, NULL);
16323 dtrace_devi = NULL;
16325 ddi_soft_state_fini(&dtrace_softstate);
16327 ASSERT(dtrace_vtime_references == 0);
16328 ASSERT(dtrace_opens == 0);
16329 ASSERT(dtrace_retained == NULL);
16331 mutex_exit(&dtrace_lock);
16332 mutex_exit(&dtrace_provider_lock);
16335 * We don't destroy the task queue until after we have dropped our
16336 * locks (taskq_destroy() may block on running tasks). To prevent
16337 * attempting to do work after we have effectively detached but before
16338 * the task queue has been destroyed, all tasks dispatched via the
16339 * task queue must check that DTrace is still attached before
16340 * performing any operation.
16342 taskq_destroy(dtrace_taskq);
16343 dtrace_taskq = NULL;
16345 return (DDI_SUCCESS);
16352 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
16357 case DDI_INFO_DEVT2DEVINFO:
16358 *result = (void *)dtrace_devi;
16359 error = DDI_SUCCESS;
16361 case DDI_INFO_DEVT2INSTANCE:
16362 *result = (void *)0;
16363 error = DDI_SUCCESS;
16366 error = DDI_FAILURE;
16373 static struct cb_ops dtrace_cb_ops = {
16374 dtrace_open, /* open */
16375 dtrace_close, /* close */
16376 nulldev, /* strategy */
16377 nulldev, /* print */
16381 dtrace_ioctl, /* ioctl */
16382 nodev, /* devmap */
16384 nodev, /* segmap */
16385 nochpoll, /* poll */
16386 ddi_prop_op, /* cb_prop_op */
16388 D_NEW | D_MP /* Driver compatibility flag */
16391 static struct dev_ops dtrace_ops = {
16392 DEVO_REV, /* devo_rev */
16394 dtrace_info, /* get_dev_info */
16395 nulldev, /* identify */
16396 nulldev, /* probe */
16397 dtrace_attach, /* attach */
16398 dtrace_detach, /* detach */
16400 &dtrace_cb_ops, /* driver operations */
16401 NULL, /* bus operations */
16402 nodev /* dev power */
16405 static struct modldrv modldrv = {
16406 &mod_driverops, /* module type (this is a pseudo driver) */
16407 "Dynamic Tracing", /* name of module */
16408 &dtrace_ops, /* driver ops */
16411 static struct modlinkage modlinkage = {
16420 return (mod_install(&modlinkage));
16424 _info(struct modinfo *modinfop)
16426 return (mod_info(&modlinkage, modinfop));
16432 return (mod_remove(&modlinkage));
16436 static d_ioctl_t dtrace_ioctl;
16437 static void dtrace_load(void *);
16438 static int dtrace_unload(void);
16439 static void dtrace_clone(void *, struct ucred *, char *, int , struct cdev **);
16440 static struct clonedevs *dtrace_clones; /* Ptr to the array of cloned devices. */
16441 static eventhandler_tag eh_tag; /* Event handler tag. */
16443 void dtrace_invop_init(void);
16444 void dtrace_invop_uninit(void);
16446 static struct cdevsw dtrace_cdevsw = {
16447 .d_version = D_VERSION,
16448 .d_flags = D_NEEDMINOR,
16449 .d_close = dtrace_close,
16450 .d_ioctl = dtrace_ioctl,
16451 .d_open = dtrace_open,
16452 .d_name = "dtrace",
16455 #include <dtrace_anon.c>
16456 #include <dtrace_clone.c>
16457 #include <dtrace_ioctl.c>
16458 #include <dtrace_load.c>
16459 #include <dtrace_modevent.c>
16460 #include <dtrace_sysctl.c>
16461 #include <dtrace_unload.c>
16462 #include <dtrace_vtime.c>
16463 #include <dtrace_hacks.c>
16464 #include <dtrace_isa.c>
16466 SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
16467 SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
16468 SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);
16470 DEV_MODULE(dtrace, dtrace_modevent, NULL);
16471 MODULE_VERSION(dtrace, 1);
16472 MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
16473 MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);