4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
27 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
28 * Copyright (c) 2012 by Delphix. All rights reserved.
44 #include <libproc_compat.h>
47 #define DT_MASK_LO 0x00000000FFFFFFFFULL
/*
 * Absolute value of a long double.  We declare this here because (1) we
 * need it and (2) we want to avoid a dependency on libm in libdtrace.
 */
static long double
dt_fabsl(long double x)
{
	if (x < 0)
		return (-x);

	return (x);
}
63 * 128-bit arithmetic functions needed to support the stddev() aggregating
/*
 * 128-bit unsigned compare: non-zero iff a > b.  Values are uint64_t
 * pairs with the low word in [0] and the high word in [1].
 */
static int
dt_gt_128(uint64_t *a, uint64_t *b)
{
	return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0]));
}
/*
 * 128-bit unsigned compare: non-zero iff a >= b.
 */
static int
dt_ge_128(uint64_t *a, uint64_t *b)
{
	return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0]));
}
/*
 * 128-bit unsigned compare: non-zero iff a <= b.
 */
static int
dt_le_128(uint64_t *a, uint64_t *b)
{
	return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0]));
}
/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.  Bits shifted across the 64-bit word
 * boundary are carried between a[1] (high) and a[0] (low).
 */
static void
dt_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			/* carry the low b bits of the high word into a[0] */
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			/* carry the high b bits of the low word into a[1] */
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}
/*
 * Return the number of significant bits in the 128-bit value a
 * (i.e. the position of the highest set bit, 1-based; 1 for zero).
 */
static int
dt_nbits_128(uint64_t *a)
{
	int nbits = 0;
	uint64_t tmp[2];
	uint64_t zero[2] = { 0, 0 };

	/* Operate on a copy so the caller's value is untouched. */
	tmp[0] = a[0];
	tmp[1] = a[1];

	dt_shift_128(tmp, -1);
	while (dt_gt_128(tmp, zero)) {
		dt_shift_128(tmp, -1);
		nbits++;
	}

	return (nbits + 1);
}
/*
 * 128-bit subtraction: difference = minuend - subtrahend, with borrow
 * from the high word when the low-word subtraction wraps.  The output
 * may alias either input (a temporary is used).
 */
static void
dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference)
{
	uint64_t result[2];

	result[0] = minuend[0] - subtrahend[0];
	result[1] = minuend[1] - subtrahend[1] -
	    (minuend[0] < subtrahend[0] ? 1 : 0);

	difference[0] = result[0];
	difference[1] = result[1];
}
/*
 * 128-bit addition: sum = addend1 + addend2, carrying into the high
 * word when the low-word addition wraps.  The output may alias either
 * input (a temporary is used).
 */
static void
dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}
167 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
168 * use native multiplication on those, and then re-combine into the
169 * resulting 128-bit value.
171 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
178 dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
180 uint64_t hi1, hi2, lo1, lo2;
186 lo1 = factor1 & DT_MASK_LO;
187 lo2 = factor2 & DT_MASK_LO;
189 product[0] = lo1 * lo2;
190 product[1] = hi1 * hi2;
194 dt_shift_128(tmp, 32);
195 dt_add_128(product, tmp, product);
199 dt_shift_128(tmp, 32);
200 dt_add_128(product, tmp, product);
/*
 * 128-bit / 64-bit division.  This is long-hand division.
 *
 * We initialize subtrahend by shifting divisor left as far as possible.
 * We loop, comparing subtrahend to dividend:  if subtrahend is smaller,
 * we subtract and set the appropriate bit in the result.  We then shift
 * subtrahend right by one bit for the next comparison.
 */
static void
dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient)
{
	uint64_t result[2] = { 0, 0 };
	uint64_t remainder[2];
	uint64_t subtrahend[2];
	uint64_t divisor_128[2];
	uint64_t mask[2] = { 1, 0 };
	int log = 0;

	assert(divisor != 0);

	divisor_128[0] = divisor;
	divisor_128[1] = 0;

	remainder[0] = dividend[0];
	remainder[1] = dividend[1];

	subtrahend[0] = divisor;
	subtrahend[1] = 0;

	/* Count the significant bits of the divisor. */
	while (divisor > 0) {
		log++;
		divisor >>= 1;
	}

	/* Align the divisor's top bit with bit 127. */
	dt_shift_128(subtrahend, 128 - log);
	dt_shift_128(mask, 128 - log);

	while (dt_ge_128(remainder, divisor_128)) {
		if (dt_ge_128(remainder, subtrahend)) {
			dt_subtract_128(remainder, subtrahend, remainder);
			result[0] |= mask[0];
			result[1] |= mask[1];
		}

		dt_shift_128(subtrahend, -1);
		dt_shift_128(mask, -1);
	}

	quotient[0] = result[0];
	quotient[1] = result[1];
}
256 * This is the long-hand method of calculating a square root.
257 * The algorithm is as follows:
259 * 1. Group the digits by 2 from the right.
260 * 2. Over the leftmost group, find the largest single-digit number
261 * whose square is less than that group.
262 * 3. Subtract the result of the previous step (2 or 4, depending) and
263 * bring down the next two-digit group.
264 * 4. For the result R we have so far, find the largest single-digit number
265 * x such that 2 * R * 10 * x + x^2 is less than the result from step 3.
266 * (Note that this is doubling R and performing a decimal left-shift by 1
267 * and searching for the appropriate decimal to fill the one's place.)
268 * The value x is the next digit in the square root.
269 * Repeat steps 3 and 4 until the desired precision is reached. (We're
270 * dealing with integers, so the above is sufficient.)
272 * In decimal, the square root of 582,734 would be calculated as so:
276 * -49 (7^2 == 49 => 7 is the first digit in the square root)
278 * 9 27 (Subtract and bring down the next group.)
279 * 146 8 76 (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in
280 * ----- the square root)
281 * 51 34 (Subtract and bring down the next group.)
282 * 1523 45 69 (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in
283 * ----- the square root)
286 * The above algorithm applies similarly in binary, but note that the
287 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a
288 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the
289 * preceding difference?
291 * In binary, the square root of 11011011 would be calculated as so:
295 * 01 (0 << 2 + 1 == 1 < 11 => this bit is 1)
298 * 101 1 01 (1 << 2 + 1 == 101 < 1001 => next bit is 1)
301 * 1101 11 01 (11 << 2 + 1 == 1101 < 10010 => next bit is 1)
304 * 11101 1 11 01 (111 << 2 + 1 == 11101 > 10111 => last bit is 0)
/*
 * Integer square root of a 128-bit value, using the long-hand binary
 * method described in the comment above: two radicand bits are brought
 * down per iteration and a trial subtraction of (R << 2) + 1 decides
 * each result bit.  Asserts the root fits in 64 bits before returning.
 * NOTE(review): the embedded original line numbers jump (308, 310, ...),
 * so interior lines (braces, some declarations, the pair-masking and
 * pair_shift update steps) are missing from this extraction; do not
 * reflow this fragment.
 */
308 dt_sqrt_128(uint64_t *square)
310 	uint64_t result[2] = { 0, 0 };
311 	uint64_t diff[2] = { 0, 0 };
312 	uint64_t one[2] = { 1, 0 };
313 	uint64_t next_pair[2];
314 	uint64_t next_try[2];
315 	uint64_t bit_pairs, pair_shift;
318 	bit_pairs = dt_nbits_128(square) / 2;
319 	pair_shift = bit_pairs * 2;
321 	for (i = 0; i <= bit_pairs; i++) {
323 		 * Bring down the next pair of bits.
325 		next_pair[0] = square[0];
326 		next_pair[1] = square[1];
327 		dt_shift_128(next_pair, -pair_shift);
331 		dt_shift_128(diff, 2);
332 		dt_add_128(diff, next_pair, diff);
335 		 * next_try = R << 2 + 1
337 		next_try[0] = result[0];
338 		next_try[1] = result[1];
339 		dt_shift_128(next_try, 2);
340 		dt_add_128(next_try, one, next_try);
342 		if (dt_le_128(next_try, diff)) {
343 			dt_subtract_128(diff, next_try, diff);
344 			dt_shift_128(result, 1);
345 			dt_add_128(result, one, result);
347 			dt_shift_128(result, 1);
/* The root of a 128-bit value must fit in 64 bits. */
353 	assert(result[1] == 0);
/*
 * Compute the standard deviation for a stddev() aggregation.  From the
 * visible uses: data[0] is the count, data[1] the (signed) sum, and
 * data + 2 a 128-bit sum of squares — TODO confirm against the kernel
 * aggregation layout.  Uses the textbook approximation
 * sqrt(avg(x^2) - avg(x)^2) built on the 128-bit helpers above.
 * NOTE(review): embedded original line numbers jump (359, 361, ...);
 * interior lines (declarations of norm_avg and diff, braces, the
 * negative-average check) are missing from this extraction.
 */
359 dt_stddev(uint64_t *data, uint64_t normal)
361 	uint64_t avg_of_squares[2];
362 	uint64_t square_of_avg[2];
367 	 * The standard approximation for standard deviation is
368 	 * sqrt(average(x**2) - average(x)**2), i.e. the square root
369 	 * of the average of the squares minus the square of the average.
371 	dt_divide_128(data + 2, normal, avg_of_squares);
372 	dt_divide_128(avg_of_squares, data[0], avg_of_squares);
/* Normalized average of the raw values; squared below. */
374 	norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0];
377 	norm_avg = -norm_avg;
379 	dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg);
381 	dt_subtract_128(avg_of_squares, square_of_avg, diff);
383 	return (dt_sqrt_128(diff));
/*
 * Decide the flow-indent treatment (entry / return / none) for the probe
 * in *data, setting dtpda_prefix, dtpda_flow and adjusting dtpda_indent.
 * Entry probes use " -> " (" => " for syscall), returns " <- "/" <= ".
 * NOTE(review): embedded original line numbers jump (387, 388, 390, ...);
 * interior lines (braces, the do/while scan loop structure, indent
 * increment for entries) are missing from this extraction.
 */
387 dt_flowindent(dtrace_hdl_t *dtp, dtrace_probedata_t *data, dtrace_epid_t last,
388     dtrace_bufdesc_t *buf, size_t offs)
390 	dtrace_probedesc_t *pd = data->dtpda_pdesc, *npd;
391 	dtrace_eprobedesc_t *epd = data->dtpda_edesc, *nepd;
392 	char *p = pd->dtpd_provider, *n = pd->dtpd_name, *sub;
393 	dtrace_flowkind_t flow = DTRACEFLOW_NONE;
394 	const char *str = NULL;
395 	static const char *e_str[2] = { " -> ", " => " };
396 	static const char *r_str[2] = { " <- ", " <= " };
397 	static const char *ent = "entry", *ret = "return";
398 	static int entlen = 0, retlen = 0;
399 	dtrace_epid_t next, id = epd->dtepd_epid;
404 		entlen = strlen(ent);
405 		retlen = strlen(ret);
409 	 * If the name of the probe is "entry" or ends with "-entry", we
410 	 * treat it as an entry; if it is "return" or ends with "-return",
411 	 * we treat it as a return.  (This allows application-provided probes
412 	 * like "method-entry" or "function-entry" to participate in flow
413 	 * indentation -- without accidentally misinterpreting popular probe
414 	 * names like "carpentry", "gentry" or "Coventry".)
416 	if ((sub = strstr(n, ent)) != NULL && sub[entlen] == '\0' &&
417 	    (sub == n || sub[-1] == '-')) {
418 		flow = DTRACEFLOW_ENTRY;
419 		str = e_str[strcmp(p, "syscall") == 0];
420 	} else if ((sub = strstr(n, ret)) != NULL && sub[retlen] == '\0' &&
421 	    (sub == n || sub[-1] == '-')) {
422 		flow = DTRACEFLOW_RETURN;
423 		str = r_str[strcmp(p, "syscall") == 0];
427 	 * If we're going to indent this, we need to check the ID of our last
428 	 * call.  If we're looking at the same probe ID but a different EPID,
429 	 * we _don't_ want to indent.  (Yes, there are some minor holes in
430 	 * this scheme -- it's a heuristic.)
432 	if (flow == DTRACEFLOW_ENTRY) {
433 		if ((last != DTRACE_EPIDNONE && id != last &&
434 		    pd->dtpd_id == dtp->dt_pdesc[last]->dtpd_id))
435 			flow = DTRACEFLOW_NONE;
439 	 * If we're going to unindent this, it's more difficult to see if
440 	 * we don't actually want to unindent it -- we need to look at the
443 	if (flow == DTRACEFLOW_RETURN) {
444 		offs += epd->dtepd_size;
/* Scan forward in the buffer for the next non-empty EPID. */
447 			if (offs >= buf->dtbd_size)
450 			next = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs);
452 			if (next == DTRACE_EPIDNONE)
454 		} while (next == DTRACE_EPIDNONE);
456 		if ((rval = dt_epid_lookup(dtp, next, &nepd, &npd)) != 0)
459 		if (next != id && npd->dtpd_id == pd->dtpd_id)
460 			flow = DTRACEFLOW_NONE;
464 	if (flow == DTRACEFLOW_ENTRY || flow == DTRACEFLOW_RETURN) {
465 		data->dtpda_prefix = str;
467 		data->dtpda_prefix = "| ";
470 	if (flow == DTRACEFLOW_RETURN && data->dtpda_indent > 0)
471 		data->dtpda_indent -= 2;
473 	data->dtpda_flow = flow;
481 	return (DTRACE_CONSUME_THIS);
487 	return (DTRACE_CONSUME_NEXT);
/*
 * Print one row of a quantize()/lquantize() distribution: a bar of '@'
 * characters scaled to val/total over a 40-column field, followed by the
 * normalized count.  Three layouts: all-negative (bar left of '|'),
 * all-positive (bar right of '|'), and mixed (half-width bars on either
 * side of a centerline).
 * NOTE(review): embedded original line numbers jump (491, 492, 495, ...);
 * interior lines (braces, the branch conditions selecting the three
 * layouts, the len /= 2 halving) are missing from this extraction.
 */
491 dt_print_quantline(dtrace_hdl_t *dtp, FILE *fp, int64_t val,
492     uint64_t normal, long double total, char positives, char negatives)
495 	uint_t depth, len = 40;
497 	const char *ats = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@";
498 	const char *spaces = " ";
500 	assert(strlen(ats) == len && strlen(spaces) == len);
501 	assert(!(total == 0 && (positives || negatives)));
502 	assert(!(val < 0 && !negatives));
503 	assert(!(val > 0 && !positives));
504 	assert(!(val != 0 && total == 0));
508 	f = (dt_fabsl((long double)val) * len) / total;
509 	depth = (uint_t)(f + 0.5);
514 	return (dt_printf(dtp, fp, "|%s%s %-9lld\n", ats + len - depth,
515 	    spaces + depth, (long long)val / normal));
519 	f = (dt_fabsl((long double)val) * len) / total;
520 	depth = (uint_t)(f + 0.5);
522 	return (dt_printf(dtp, fp, "%s%s| %-9lld\n", spaces + depth,
523 	    ats + len - depth, (long long)val / normal));
527 	 * If we're here, we have both positive and negative bucket values.
528 	 * To express this graphically, we're going to generate both positive
529 	 * and negative bars separated by a centerline.  These bars are half
530 	 * the size of normal quantize()/lquantize() bars, so we divide the
531 	 * length in half before calculating the bar length.
535 	spaces = &spaces[len];
537 	f = (dt_fabsl((long double)val) * len) / total;
538 	depth = (uint_t)(f + 0.5);
541 	return (dt_printf(dtp, fp, "%s%s|%*s %-9lld\n", spaces + depth,
542 	    ats + len - depth, len, "", (long long)val / normal));
544 	return (dt_printf(dtp, fp, "%20s|%s%s %-9lld\n", "",
545 	    ats + len - depth, spaces + depth,
546 	    (long long)val / normal));
/*
 * Print a quantize() aggregation: trim leading/trailing all-zero bins
 * (keeping one bin of margin), total the absolute values for bar
 * scaling, then emit a header and one dt_print_quantline() row per bin.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, the first_bin++/last_bin-- increments, error returns) are
 * missing from this extraction.
 */
551 dt_print_quantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
552     size_t size, uint64_t normal)
554 	const int64_t *data = addr;
555 	int i, first_bin = 0, last_bin = DTRACE_QUANTIZE_NBUCKETS - 1;
556 	long double total = 0;
557 	char positives = 0, negatives = 0;
559 	if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
560 		return (dt_set_errno(dtp, EDT_DMISMATCH));
562 	while (first_bin < DTRACE_QUANTIZE_NBUCKETS - 1 && data[first_bin] == 0)
565 	if (first_bin == DTRACE_QUANTIZE_NBUCKETS - 1) {
567 		 * There isn't any data.  This is possible if (and only if)
568 		 * negative increment values have been used.  In this case,
569 		 * we'll print the buckets around 0.
571 		first_bin = DTRACE_QUANTIZE_ZEROBUCKET - 1;
572 		last_bin = DTRACE_QUANTIZE_ZEROBUCKET + 1;
577 		while (last_bin > 0 && data[last_bin] == 0)
580 		if (last_bin < DTRACE_QUANTIZE_NBUCKETS - 1)
584 	for (i = first_bin; i <= last_bin; i++) {
585 		positives |= (data[i] > 0);
586 		negatives |= (data[i] < 0);
587 		total += dt_fabsl((long double)data[i]);
590 	if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
591 	    "------------- Distribution -------------", "count") < 0)
594 	for (i = first_bin; i <= last_bin; i++) {
595 		if (dt_printf(dtp, fp, "%16lld ",
596 		    (long long)DTRACE_QUANTIZE_BUCKETVAL(i)) < 0)
599 		if (dt_print_quantline(dtp, fp, data[i], normal, total,
600 		    positives, negatives) < 0)
/*
 * Print an lquantize() aggregation.  The leading uint64_t of the record
 * encodes base/step/levels; the remaining levels+2 bins are trimmed and
 * printed like quantize(), with "< base" and ">= base+levels*step"
 * labels on the two overflow bins.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (the arg extraction, braces, bin increments, label buffer declaration)
 * are missing from this extraction.
 */
608 dt_print_lquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
609     size_t size, uint64_t normal)
611 	const int64_t *data = addr;
612 	int i, first_bin, last_bin, base;
614 	long double total = 0;
615 	uint16_t step, levels;
616 	char positives = 0, negatives = 0;
618 	if (size < sizeof (uint64_t))
619 		return (dt_set_errno(dtp, EDT_DMISMATCH));
622 	size -= sizeof (uint64_t);
624 	base = DTRACE_LQUANTIZE_BASE(arg);
625 	step = DTRACE_LQUANTIZE_STEP(arg);
626 	levels = DTRACE_LQUANTIZE_LEVELS(arg);
629 	last_bin = levels + 1;
631 	if (size != sizeof (uint64_t) * (levels + 2))
632 		return (dt_set_errno(dtp, EDT_DMISMATCH));
634 	while (first_bin <= levels + 1 && data[first_bin] == 0)
637 	if (first_bin > levels + 1) {
644 		while (last_bin > 0 && data[last_bin] == 0)
647 		if (last_bin < levels + 1)
651 	for (i = first_bin; i <= last_bin; i++) {
652 		positives |= (data[i] > 0);
653 		negatives |= (data[i] < 0);
654 		total += dt_fabsl((long double)data[i]);
657 	if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
658 	    "------------- Distribution -------------", "count") < 0)
661 	for (i = first_bin; i <= last_bin; i++) {
/* Bin 0 is the "< base" underflow bin; bin levels+1 the overflow bin. */
666 			(void) snprintf(c, sizeof (c), "< %d",
667 			    base / (uint32_t)normal);
668 			err = dt_printf(dtp, fp, "%16s ", c);
669 		} else if (i == levels + 1) {
670 			(void) snprintf(c, sizeof (c), ">= %d",
671 			    base + (levels * step));
672 			err = dt_printf(dtp, fp, "%16s ", c);
674 			err = dt_printf(dtp, fp, "%16d ",
675 			    base + (i - 1) * step);
678 		if (err < 0 || dt_print_quantline(dtp, fp, data[i], normal,
679 		    total, positives, negatives) < 0)
/*
 * Print an llquantize() (log-linear) aggregation.  The leading uint64_t
 * encodes factor/low/high/nsteps; bucket values grow by `step` within an
 * order of magnitude and by `factor` between orders.  Bins are trimmed
 * like the other quantize printers.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, the order/value advancement inside the loops, the label
 * buffer declaration) are missing from this extraction.
 */
687 dt_print_llquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
688     size_t size, uint64_t normal)
690 	int i, first_bin, last_bin, bin = 1, order, levels;
691 	uint16_t factor, low, high, nsteps;
692 	const int64_t *data = addr;
693 	int64_t value = 1, next, step;
694 	char positives = 0, negatives = 0;
695 	long double total = 0;
699 	if (size < sizeof (uint64_t))
700 		return (dt_set_errno(dtp, EDT_DMISMATCH));
703 	size -= sizeof (uint64_t);
705 	factor = DTRACE_LLQUANTIZE_FACTOR(arg);
706 	low = DTRACE_LLQUANTIZE_LOW(arg);
707 	high = DTRACE_LLQUANTIZE_HIGH(arg);
708 	nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
711 	 * We don't expect to be handed invalid llquantize() parameters here,
712 	 * but sanity check them (to a degree) nonetheless.
714 	if (size > INT32_MAX || factor < 2 || low >= high ||
715 	    nsteps == 0 || factor > nsteps)
716 		return (dt_set_errno(dtp, EDT_DMISMATCH));
718 	levels = (int)size / sizeof (uint64_t);
721 	last_bin = levels - 1;
723 	while (first_bin < levels && data[first_bin] == 0)
726 	if (first_bin == levels) {
733 		while (last_bin > 0 && data[last_bin] == 0)
736 		if (last_bin < levels - 1)
740 	for (i = first_bin; i <= last_bin; i++) {
741 		positives |= (data[i] > 0);
742 		negatives |= (data[i] < 0);
743 		total += dt_fabsl((long double)data[i]);
746 	if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value",
747 	    "------------- Distribution -------------", "count") < 0)
/* Advance value to the first magnitude of interest (factor^low). */
750 	for (order = 0; order < low; order++)
753 	next = value * factor;
754 	step = next > nsteps ? next / nsteps : 1;
756 	if (first_bin == 0) {
757 		(void) snprintf(c, sizeof (c), "< %lld", (long long)value);
759 		if (dt_printf(dtp, fp, "%16s ", c) < 0)
762 		if (dt_print_quantline(dtp, fp, data[0], normal,
763 		    total, positives, negatives) < 0)
767 	while (order <= high) {
768 		if (bin >= first_bin && bin <= last_bin) {
769 			if (dt_printf(dtp, fp, "%16lld ", (long long)value) < 0)
772 			if (dt_print_quantline(dtp, fp, data[bin],
773 			    normal, total, positives, negatives) < 0)
777 		assert(value < next);
780 		if ((value += step) != next)
783 		next = value * factor;
784 		step = next > nsteps ? next / nsteps : 1;
791 	assert(last_bin == bin);
792 	(void) snprintf(c, sizeof (c), ">= %lld", (long long)value);
794 	if (dt_printf(dtp, fp, "%16s ", c) < 0)
797 	return (dt_print_quantline(dtp, fp, data[bin], normal,
798 	    total, positives, negatives));
803 dt_print_average(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
804 size_t size, uint64_t normal)
806 /* LINTED - alignment */
807 int64_t *data = (int64_t *)addr;
809 return (dt_printf(dtp, fp, " %16lld", data[0] ?
810 (long long)(data[1] / (int64_t)normal / data[0]) : 0));
815 dt_print_stddev(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
816 size_t size, uint64_t normal)
818 /* LINTED - alignment */
819 uint64_t *data = (uint64_t *)addr;
821 return (dt_printf(dtp, fp, " %16llu", data[0] ?
822 (unsigned long long) dt_stddev(data, normal) : 0));
/*
 * Print a byte range either as a string (when it looks like printable,
 * nul-terminated text and raw output was not forced) or as a canonical
 * hex + ASCII dump: 16 bytes per row with offsets, non-printable bytes
 * shown as '.'.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, the forceraw/rawbytes goto, loop bodies, the alloca copy
 * termination) are missing from this extraction.
 */
827 dt_print_bytes(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr,
828     size_t nbytes, int width, int quiet, int forceraw)
831 	 * If the byte stream is a series of printable characters, followed by
832 	 * a terminating byte, we print it out as a string.  Otherwise, we
833 	 * assume that it's something else and just print the bytes.
835 	int i, j, margin = 5;
836 	char *c = (char *)addr;
844 	if (dtp->dt_options[DTRACEOPT_RAWBYTES] != DTRACEOPT_UNSET)
847 	for (i = 0; i < nbytes; i++) {
849 		 * We define a "printable character" to be one for which
850 		 * isprint(3C) returns non-zero, isspace(3C) returns non-zero,
851 		 * or a character which is either backspace or the bell.
852 		 * Backspace and the bell are regrettably special because
853 		 * they fail the first two tests -- and yet they are entirely
854 		 * printable.  These are the only two control characters that
855 		 * have meaning for the terminal and for which isprint(3C) and
856 		 * isspace(3C) return 0.
858 		if (isprint(c[i]) || isspace(c[i]) ||
859 		    c[i] == '\b' || c[i] == '\a')
862 		if (c[i] == '\0' && i > 0) {
864 			 * This looks like it might be a string.  Before we
865 			 * assume that it is indeed a string, check the
866 			 * remainder of the byte range; if it contains
867 			 * additional non-nul characters, we'll assume that
868 			 * it's a binary stream that just happens to look like
869 			 * a string, and we'll print out the individual bytes.
871 			for (j = i + 1; j < nbytes; j++) {
880 				return (dt_printf(dtp, fp, "%s", c));
882 			return (dt_printf(dtp, fp, " %-*s", width, c));
890 		 * The byte range is all printable characters, but there is
891 		 * no trailing nul byte.  We'll assume that it's a string and
894 		char *s = alloca(nbytes + 1);
897 		return (dt_printf(dtp, fp, " %-*s", width, s));
/* Raw mode: emit the column-header row of nibble labels. */
901 		if (dt_printf(dtp, fp, "\n%*s    ", margin, "") < 0)
904 		for (i = 0; i < 16; i++)
905 			if (dt_printf(dtp, fp, " %c", "0123456789abcdef"[i]) < 0)
908 		if (dt_printf(dtp, fp, "  0123456789abcdef\n") < 0)
912 	for (i = 0; i < nbytes; i += 16) {
913 		if (dt_printf(dtp, fp, "%*s%5x:", margin, "", i) < 0)
916 		for (j = i; j < i + 16 && j < nbytes; j++) {
917 			if (dt_printf(dtp, fp, " %02x", (uchar_t)c[j]) < 0)
922 			if (dt_printf(dtp, fp, "   ") < 0)
926 		if (dt_printf(dtp, fp, "  ") < 0)
929 		for (j = i; j < i + 16 && j < nbytes; j++) {
930 			if (dt_printf(dtp, fp, "%c",
931 			    c[j] < ' ' || c[j] > '~' ? '.' : c[j]) < 0)
935 		if (dt_printf(dtp, fp, "\n") < 0)
/*
 * Print a kernel stack() record: `depth` program counters of `size`
 * bytes each, one per line, resolved to module`symbol+offset via
 * dtrace_lookup_by_addr() with graceful fallback to module`address or
 * a raw address.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, switch/case structure, the addr advance per frame) are
 * missing from this extraction.
 */
943 dt_print_stack(dtrace_hdl_t *dtp, FILE *fp, const char *format,
944     caddr_t addr, int depth, int size)
946 	dtrace_syminfo_t dts;
949 	char c[PATH_MAX * 2];
952 	if (dt_printf(dtp, fp, "\n") < 0)
958 	if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET)
959 		indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT];
961 		indent = _dtrace_stkindent;
963 	for (i = 0; i < depth; i++) {
/* Frame width depends on the traced kernel's data model. */
965 		case sizeof (uint32_t):
966 			/* LINTED - alignment */
967 			pc = *((uint32_t *)addr);
970 		case sizeof (uint64_t):
971 			/* LINTED - alignment */
972 			pc = *((uint64_t *)addr);
976 			return (dt_set_errno(dtp, EDT_BADSTACKPC));
984 		if (dt_printf(dtp, fp, "%*s", indent, "") < 0)
987 		if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) {
988 			if (pc > sym.st_value) {
989 				(void) snprintf(c, sizeof (c), "%s`%s+0x%llx",
990 				    dts.dts_object, dts.dts_name,
991 				    (u_longlong_t)(pc - sym.st_value));
993 				(void) snprintf(c, sizeof (c), "%s`%s",
994 				    dts.dts_object, dts.dts_name);
998 			 * We'll repeat the lookup, but this time we'll specify
999 			 * a NULL GElf_Sym -- indicating that we're only
1000 			 * interested in the containing module.
1002 			if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1003 				(void) snprintf(c, sizeof (c), "%s`0x%llx",
1004 				    dts.dts_object, (u_longlong_t)pc);
1006 				(void) snprintf(c, sizeof (c), "0x%llx",
1011 		if (dt_printf(dtp, fp, format, c) < 0)
1014 		if (dt_printf(dtp, fp, "\n") < 0)
/*
 * Print a ustack() record: up to `depth` user-level PCs followed by an
 * optional string table of helper-provided frame annotations.  Frames
 * are resolved via libproc (Plookup_by_addr/Pobjname) when the target
 * process can be grabbed; otherwise raw addresses are printed.  Strings
 * beginning with '@' are printed as annotations beneath the frame.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, declarations of pid/err/map, the string-table advance for
 * non-annotation entries) are missing from this extraction.
 */
1022 dt_print_ustack(dtrace_hdl_t *dtp, FILE *fp, const char *format,
1023     caddr_t addr, uint64_t arg)
1025 	/* LINTED - alignment */
1026 	uint64_t *pc = (uint64_t *)addr;
1027 	uint32_t depth = DTRACE_USTACK_NFRAMES(arg);
1028 	uint32_t strsize = DTRACE_USTACK_STRSIZE(arg);
1029 	const char *strbase = addr + (depth + 1) * sizeof (uint64_t);
1030 	const char *str = strsize ? strbase : NULL;
1033 	char name[PATH_MAX], objname[PATH_MAX], c[PATH_MAX * 2];
1034 	struct ps_prochandle *P;
1044 	if (dt_printf(dtp, fp, "\n") < 0)
1050 	if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET)
1051 		indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT];
1053 		indent = _dtrace_stkindent;
1056 	 * Ultimately, we need to add an entry point in the library vector for
1057 	 * determining <symbol, offset> from <pid, address>.  For now, if
1058 	 * this is a vector open, we just print the raw address or string.
1060 	if (dtp->dt_vector == NULL)
1061 		P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0);
1066 		dt_proc_lock(dtp, P); /* lock handle while we perform lookups */
1068 	for (i = 0; i < depth && pc[i] != 0; i++) {
1071 		if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0)
1074 		if (P != NULL && Plookup_by_addr(P, pc[i],
1075 		    name, sizeof (name), &sym) == 0) {
1076 			(void) Pobjname(P, pc[i], objname, sizeof (objname));
1078 			if (pc[i] > sym.st_value) {
1079 				(void) snprintf(c, sizeof (c),
1080 				    "%s`%s+0x%llx", dt_basename(objname), name,
1081 				    (u_longlong_t)(pc[i] - sym.st_value));
1083 				(void) snprintf(c, sizeof (c),
1084 				    "%s`%s", dt_basename(objname), name);
1086 		} else if (str != NULL && str[0] != '\0' && str[0] != '@' &&
1087 		    (P != NULL && ((map = Paddr_to_map(P, pc[i])) == NULL ||
1088 		    (map->pr_mflags & MA_WRITE)))) {
1090 			 * If the current string pointer in the string table
1091 			 * does not point to an empty string _and_ the program
1092 			 * counter falls in a writable region, we'll use the
1093 			 * string from the string table instead of the raw
1094 			 * address.  This last condition is necessary because
1095 			 * some (broken) ustack helpers will return a string
1096 			 * even for a program counter that they can't
1097 			 * identify.  If we have a string for a program
1098 			 * counter that falls in a segment that isn't
1099 			 * writable, we assume that we have fallen into this
1100 			 * case and we refuse to use the string.
1102 			(void) snprintf(c, sizeof (c), "%s", str);
1104 			if (P != NULL && Pobjname(P, pc[i], objname,
1105 			    sizeof (objname)) != 0) {
1106 				(void) snprintf(c, sizeof (c), "%s`0x%llx",
1107 				    dt_basename(objname), (u_longlong_t)pc[i]);
1109 				(void) snprintf(c, sizeof (c), "0x%llx",
1110 				    (u_longlong_t)pc[i]);
1114 		if ((err = dt_printf(dtp, fp, format, c)) < 0)
1117 		if ((err = dt_printf(dtp, fp, "\n")) < 0)
1120 		if (str != NULL && str[0] == '@') {
1122 			 * If the first character of the string is an "at" sign,
1123 			 * then the string is inferred to be an annotation --
1124 			 * and it is printed out beneath the frame and offset
1127 			if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0)
1130 			(void) snprintf(c, sizeof (c), "  [ %s ]", &str[1]);
1132 			if ((err = dt_printf(dtp, fp, format, c)) < 0)
1135 			if ((err = dt_printf(dtp, fp, "\n")) < 0)
/* Advance to the next entry in the helper string table, if any. */
1140 			str += strlen(str) + 1;
1141 			if (str - strbase >= strsize)
1147 		dt_proc_unlock(dtp, P);
1148 		dt_proc_release(dtp, P);
/*
 * Print a usym()/uaddr() record (a <pid, address> pair).  For usym()
 * with a non-vectored open, the symbol's base address is substituted
 * via libproc before formatting with dtrace_uaddr2str().
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, declarations of s/n/len, the pc = sym.st_value assignment,
 * the buffer-sizing do/while setup) are missing from this extraction.
 */
1155 dt_print_usym(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, dtrace_actkind_t act)
1157 	/* LINTED - alignment */
1158 	uint64_t pid = ((uint64_t *)addr)[0];
1159 	/* LINTED - alignment */
1160 	uint64_t pc = ((uint64_t *)addr)[1];
1161 	const char *format = "  %-50s";
1165 	if (act == DTRACEACT_USYM && dtp->dt_vector == NULL) {
1166 		struct ps_prochandle *P;
1168 		if ((P = dt_proc_grab(dtp, pid,
1169 		    PGRAB_RDONLY | PGRAB_FORCE, 0)) != NULL) {
1172 			dt_proc_lock(dtp, P);
1174 			if (Plookup_by_addr(P, pc, NULL, 0, &sym) == 0)
1177 			dt_proc_unlock(dtp, P);
1178 			dt_proc_release(dtp, P);
/* Grow the buffer until dtrace_uaddr2str() output fits. */
1185 	} while ((len = dtrace_uaddr2str(dtp, pid, pc, s, n)) > n);
1187 	return (dt_printf(dtp, fp, format, s));
/*
 * Print a umod() record (a <pid, address> pair) as the basename of the
 * containing object, falling back to the raw address when the process
 * cannot be grabbed or the mapping is unknown.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, the err declaration, the vectored-open P = NULL path) are
 * missing from this extraction.
 */
1191 dt_print_umod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1193 	/* LINTED - alignment */
1194 	uint64_t pid = ((uint64_t *)addr)[0];
1195 	/* LINTED - alignment */
1196 	uint64_t pc = ((uint64_t *)addr)[1];
1199 	char objname[PATH_MAX], c[PATH_MAX * 2];
1200 	struct ps_prochandle *P;
1206 	 * See the comment in dt_print_ustack() for the rationale for
1207 	 * printing raw addresses in the vectored case.
1209 	if (dtp->dt_vector == NULL)
1210 		P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0);
1215 		dt_proc_lock(dtp, P); /* lock handle while we perform lookups */
1217 	if (P != NULL && Pobjname(P, pc, objname, sizeof (objname)) != 0) {
1218 		(void) snprintf(c, sizeof (c), "%s", dt_basename(objname));
1220 		(void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc);
1223 	err = dt_printf(dtp, fp, format, c);
1226 		dt_proc_unlock(dtp, P);
1227 		dt_proc_release(dtp, P);
1234 dt_print_memory(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr)
1236 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET);
1237 size_t nbytes = *((uintptr_t *) addr);
1239 return (dt_print_bytes(dtp, fp, addr + sizeof(uintptr_t),
1240 nbytes, 50, quiet, 1));
/*
 * Callback state threaded through the recursive print_type helpers
 * below: CTF type info plus output handle, cursor, and formatting
 * widths.  NOTE(review): embedded original line numbers jump
 * (1243, 1245, 1256); most structure members are missing from this
 * extraction.
 */
1243 typedef struct dt_type_cbdata {
1245 	dtrace_typeinfo_t	dtt;
1256 static int dt_print_type_data(dt_type_cbdata_t *, ctf_id_t);
/*
 * ctf_member_iter() callback: print one struct/union member by
 * recursing into dt_print_type_data() with a per-member copy of the
 * callback state spanning just that member's bytes.
 * NOTE(review): embedded original line numbers jump; the cbdata field
 * initialization between lines 1265 and 1273 is missing from this
 * extraction.
 */
1259 dt_print_type_member(const char *name, ctf_id_t type, ulong_t off, void *arg)
1261 	dt_type_cbdata_t cbdata;
1262 	dt_type_cbdata_t *cbdatap = arg;
1265 	if ((ssz = ctf_type_size(cbdatap->dtt.dtt_ctfp, type)) <= 0)
1273 	cbdata.addrend = cbdata.addr + ssz;
1275 	return (dt_print_type_data(&cbdata, type));
/*
 * ctf_member_iter() callback: compute the maximum member-name and
 * type-name widths so dt_print_type_data() can align its output.
 * An array suffix ('[...]') in the type name is not counted toward
 * the type width.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, the strlen(buf) recomputation) are missing from this
 * extraction.
 */
1279 dt_print_type_width(const char *name, ctf_id_t type, ulong_t off, void *arg)
1281 	char buf[DT_TYPE_NAMELEN];
1283 	dt_type_cbdata_t *cbdatap = arg;
1284 	size_t sz = strlen(name);
1286 	ctf_type_name(cbdatap->dtt.dtt_ctfp, type, buf, sizeof (buf));
1288 	if ((p = strchr(buf, '[')) != NULL)
1295 	if (sz > cbdatap->name_width)
1296 		cbdatap->name_width = sz;
1300 	if (sz > cbdatap->type_width)
1301 		cbdatap->type_width = sz;
/*
 * Recursively pretty-print buffered data of CTF type `type`, one
 * element per iteration over [addr, addrend).  Integers and floats are
 * formatted by width/signedness, pointers as %p, arrays and
 * struct/union members by recursion, and qualifiers (volatile/const/
 * restrict) and typedefs by recursing on the referenced type.
 * NOTE(review): embedded original line numbers jump; interior lines
 * (braces, the switch heads, several case labels, vp and cte
 * declarations, the addr advance) are missing from this extraction.
 */
1307 dt_print_type_data(dt_type_cbdata_t *cbdatap, ctf_id_t type)
1309 	caddr_t addr = cbdatap->addr;
1310 	caddr_t addrend = cbdatap->addrend;
1311 	char buf[DT_TYPE_NAMELEN];
1314 	uint_t kind = ctf_type_kind(cbdatap->dtt.dtt_ctfp, type);
1315 	ssize_t ssz = ctf_type_size(cbdatap->dtt.dtt_ctfp, type);
1317 	ctf_type_name(cbdatap->dtt.dtt_ctfp, type, buf, sizeof (buf));
1319 	if ((p = strchr(buf, '[')) != NULL)
1324 	if (cbdatap->f_type) {
1325 		int type_width = roundup(cbdatap->type_width + 1, 4);
1326 		int name_width = roundup(cbdatap->name_width + 1, 4);
1328 		name_width -= strlen(cbdatap->name);
1330 		dt_printf(cbdatap->dtp, cbdatap->fp, "%*s%-*s%s%-*s = ",cbdatap->indent * 4,"",type_width,buf,cbdatap->name,name_width,p);
1333 	while (addr < addrend) {
1334 		dt_type_cbdata_t cbdata;
1335 		ctf_arinfo_t arinfo;
1342 		cbdata.addrend = addr + ssz;
1345 		cbdata.type_width = 0;
1346 		cbdata.name_width = 0;
1349 			dt_printf(cbdatap->dtp, cbdatap->fp, "%*s", cbdatap->indent * 4,"");
1353 			if (ctf_type_encoding(cbdatap->dtt.dtt_ctfp, type, &cte) != 0)
1355 			if ((cte.cte_format & CTF_INT_SIGNED) != 0)
1356 				switch (cte.cte_bits) {
1358 					if (isprint(*((char *) vp)))
1359 						dt_printf(cbdatap->dtp, cbdatap->fp, "'%c', ", *((char *) vp));
1360 					dt_printf(cbdatap->dtp, cbdatap->fp, "%d (0x%x);\n", *((char *) vp), *((char *) vp));
1363 					dt_printf(cbdatap->dtp, cbdatap->fp, "%hd (0x%hx);\n", *((short *) vp), *((u_short *) vp));
1366 					dt_printf(cbdatap->dtp, cbdatap->fp, "%d (0x%x);\n", *((int *) vp), *((u_int *) vp));
1369 					dt_printf(cbdatap->dtp, cbdatap->fp, "%jd (0x%jx);\n", *((long long *) vp), *((unsigned long long *) vp));
1372 					dt_printf(cbdatap->dtp, cbdatap->fp, "CTF_K_INTEGER: format %x offset %u bits %u\n",cte.cte_format,cte.cte_offset,cte.cte_bits);
1376 				switch (cte.cte_bits) {
1378 					dt_printf(cbdatap->dtp, cbdatap->fp, "%u (0x%x);\n", *((uint8_t *) vp) & 0xff, *((uint8_t *) vp) & 0xff);
1381 					dt_printf(cbdatap->dtp, cbdatap->fp, "%hu (0x%hx);\n", *((u_short *) vp), *((u_short *) vp));
1384 					dt_printf(cbdatap->dtp, cbdatap->fp, "%u (0x%x);\n", *((u_int *) vp), *((u_int *) vp));
1387 					dt_printf(cbdatap->dtp, cbdatap->fp, "%ju (0x%jx);\n", *((unsigned long long *) vp), *((unsigned long long *) vp));
1390 					dt_printf(cbdatap->dtp, cbdatap->fp, "CTF_K_INTEGER: format %x offset %u bits %u\n",cte.cte_format,cte.cte_offset,cte.cte_bits);
1395 			dt_printf(cbdatap->dtp, cbdatap->fp, "CTF_K_FLOAT: format %x offset %u bits %u\n",cte.cte_format,cte.cte_offset,cte.cte_bits);
1398 			dt_printf(cbdatap->dtp, cbdatap->fp, "%p;\n", *((void **) addr));
1401 			if (ctf_array_info(cbdatap->dtt.dtt_ctfp, type, &arinfo) != 0)
1403 			dt_printf(cbdatap->dtp, cbdatap->fp, "{\n%*s",cbdata.indent * 4,"");
1404 			dt_print_type_data(&cbdata, arinfo.ctr_contents);
1405 			dt_printf(cbdatap->dtp, cbdatap->fp, "%*s};\n",cbdatap->indent * 4,"");
1407 		case CTF_K_FUNCTION:
1408 			dt_printf(cbdatap->dtp, cbdatap->fp, "CTF_K_FUNCTION:\n");
1412 			if (ctf_member_iter(cbdatap->dtt.dtt_ctfp, type,
1413 			    dt_print_type_width, &cbdata) != 0)
1415 			dt_printf(cbdatap->dtp, cbdatap->fp, "{\n");
1416 			if (ctf_member_iter(cbdatap->dtt.dtt_ctfp, type,
1417 			    dt_print_type_member, &cbdata) != 0)
1419 			dt_printf(cbdatap->dtp, cbdatap->fp, "%*s};\n",cbdatap->indent * 4,"");
1423 			if (ctf_member_iter(cbdatap->dtt.dtt_ctfp, type,
1424 			    dt_print_type_width, &cbdata) != 0)
1426 			dt_printf(cbdatap->dtp, cbdatap->fp, "{\n");
1427 			if (ctf_member_iter(cbdatap->dtt.dtt_ctfp, type,
1428 			    dt_print_type_member, &cbdata) != 0)
1430 			dt_printf(cbdatap->dtp, cbdatap->fp, "%*s};\n",cbdatap->indent * 4,"");
1433 			dt_printf(cbdatap->dtp, cbdatap->fp, "%s;\n", ctf_enum_name(cbdatap->dtt.dtt_ctfp, type, *((int *) vp)));
1436 			dt_print_type_data(&cbdata, ctf_type_reference(cbdatap->dtt.dtt_ctfp,type));
1438 		case CTF_K_VOLATILE:
1439 			if (cbdatap->f_type)
1440 				dt_printf(cbdatap->dtp, cbdatap->fp, "volatile ");
1441 			dt_print_type_data(&cbdata, ctf_type_reference(cbdatap->dtt.dtt_ctfp,type));
1444 			if (cbdatap->f_type)
1445 				dt_printf(cbdatap->dtp, cbdatap->fp, "const ");
1446 			dt_print_type_data(&cbdata, ctf_type_reference(cbdatap->dtt.dtt_ctfp,type));
1448 		case CTF_K_RESTRICT:
1449 			if (cbdatap->f_type)
1450 				dt_printf(cbdatap->dtp, cbdatap->fp, "restrict ");
1451 			dt_print_type_data(&cbdata, ctf_type_reference(cbdatap->dtt.dtt_ctfp,type));
/*
 * Print a print() action record.  The buffer layout (from the visible
 * reads): a uintptr_t total byte count, a uintptr_t per-element type
 * size, a nul-terminated type-name string (padded to uintptr_t
 * alignment), then the raw data.  The CTF type is looked up by name and
 * the data handed to dt_print_type_data().
 * NOTE(review): embedded original line numbers jump; interior lines
 * (declarations of p/num/ssz, braces, the cbdata initialization between
 * 1523 and 1531, error returns) are missing from this extraction.
 */
1465 dt_print_type(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr)
1469 	dtrace_typeinfo_t dtt;
1470 	dt_type_cbdata_t cbdata;
1472 	int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET);
1476 		dt_printf(dtp, fp, "\n");
1478 	/* Get the total number of bytes of data buffered. */
1479 	size_t nbytes = *((uintptr_t *) addr);
1480 	addr += sizeof(uintptr_t);
1483 	 * Get the size of the type so that we can check that it matches
1484 	 * the CTF data we look up and so that we can figure out how many
1485 	 * type elements are buffered.
1487 	size_t typs = *((uintptr_t *) addr);
1488 	addr += sizeof(uintptr_t);
1491 	 * Point to the type string in the buffer. Get its string
1492 	 * length and round it up to become the offset to the start
1493 	 * of the buffered type data which we would like to be aligned
1496 	char *strp = (char *) addr;
1497 	int offset = roundup(strlen(strp) + 1, sizeof(uintptr_t));
1500 	 * The type string might have a format such as 'int [20]'.
1501 	 * Check if there is an array dimension present.
1503 	if ((p = strchr(strp, '[')) != NULL) {
1504 		/* Strip off the array dimension. */
1507 		for (; *p != '\0' && *p != ']'; p++)
1508 			num = num * 10 + *p - '0';
1510 		/* No array dimension, so default. */
1513 	/* Lookup the CTF type from the type string. */
1514 	if (dtrace_lookup_by_type(dtp, DTRACE_OBJ_EVERY, strp, &dtt) < 0)
1517 	/* Offset the buffer address to the start of the data... */
1520 	ssz = ctf_type_size(dtt.dtt_ctfp, dtt.dtt_type);
1523 		printf("Expected type size from buffer (%lu) to match type size looked up now (%ld)\n", (u_long) typs, (long) ssz);
1531 	cbdata.addrend = addr + nbytes;
1534 	cbdata.type_width = 0;
1535 	cbdata.name_width = 0;
1538 	return (dt_print_type_data(&cbdata, dtt.dtt_type));
/*
 * dt_print_sym: format the kernel address stored at `addr` as
 * "object`symbol", falling back to "object`0xADDR" when only the module
 * resolves, and finally to a bare "0xADDR", then print it via dt_printf().
 * NOTE(review): partial extract — the GElf_Sym declaration, braces and
 * the trailing return lines are missing.
 */
1542 dt_print_sym(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1544 /* LINTED - alignment */
1545 uint64_t pc = *((uint64_t *)addr);
1546 dtrace_syminfo_t dts;
1548 char c[PATH_MAX * 2];
1553 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) {
1554 (void) snprintf(c, sizeof (c), "%s`%s",
1555 dts.dts_object, dts.dts_name);
1558 * We'll repeat the lookup, but this time we'll specify a
1559 * NULL GElf_Sym -- indicating that we're only interested in
1560 * the containing module.
1562 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1563 (void) snprintf(c, sizeof (c), "%s`0x%llx",
1564 dts.dts_object, (u_longlong_t)pc);
1566 (void) snprintf(c, sizeof (c), "0x%llx",
1571 if (dt_printf(dtp, fp, format, c) < 0)
/*
 * dt_print_mod: like dt_print_sym() but prints only the containing module
 * name for the address at `addr` (hex fallback if the lookup fails).
 */
1578 dt_print_mod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr)
1580 /* LINTED - alignment */
1581 uint64_t pc = *((uint64_t *)addr);
1582 dtrace_syminfo_t dts;
1583 char c[PATH_MAX * 2];
1588 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) {
1589 (void) snprintf(c, sizeof (c), "%s", dts.dts_object);
1591 (void) snprintf(c, sizeof (c), "0x%llx", (u_longlong_t)pc);
1594 if (dt_printf(dtp, fp, format, c) < 0)
/*
 * State passed to dt_normalize_agg(): the target aggregation variable ID
 * and the normalization divisor to apply.
 * NOTE(review): the closing "} dt_normal_t;" line is missing from this
 * extract.
 */
1600 typedef struct dt_normal {
1601 dtrace_aggvarid_t dtnd_id;
1602 uint64_t dtnd_normal;
/*
 * dt_normalize_agg: dtrace_aggregate_walk() callback. Skips empty or
 * non-matching aggregations; for the aggregation whose varid matches
 * dtnd_id, stores the normalization value and requests normalization.
 */
1606 dt_normalize_agg(const dtrace_aggdata_t *aggdata, void *arg)
1608 dt_normal_t *normal = arg;
1609 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1610 dtrace_aggvarid_t id = normal->dtnd_id;
1612 if (agg->dtagd_nrecs == 0)
1613 return (DTRACE_AGGWALK_NEXT);
1615 if (agg->dtagd_varid != id)
1616 return (DTRACE_AGGWALK_NEXT);
/* Cast away const to record the divisor on the matching aggregation. */
1618 ((dtrace_aggdata_t *)aggdata)->dtada_normal = normal->dtnd_normal;
1619 return (DTRACE_AGGWALK_NORMALIZE);
/*
 * dt_normalize: decode a normalize() library action. Reads the aggregation
 * ID from the first record and the normalization value from the following
 * record (whose width may be 1/2/4/8 bytes), then walks the aggregate
 * applying dt_normalize_agg.
 * NOTE(review): partial extract — the rec++ advance between the two
 * records, the `break;` lines of the switch, and braces are missing.
 */
1623 dt_normalize(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec)
1629 * We (should) have two records: the aggregation ID followed by the
1630 * normalization value.
1632 addr = base + rec->dtrd_offset;
1634 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
1635 return (dt_set_errno(dtp, EDT_BADNORMAL));
1637 /* LINTED - alignment */
1638 normal.dtnd_id = *((dtrace_aggvarid_t *)addr);
/* The second record must itself be a LIBACT normalize record. */
1641 if (rec->dtrd_action != DTRACEACT_LIBACT)
1642 return (dt_set_errno(dtp, EDT_BADNORMAL));
1644 if (rec->dtrd_arg != DT_ACT_NORMALIZE)
1645 return (dt_set_errno(dtp, EDT_BADNORMAL));
1647 addr = base + rec->dtrd_offset;
1649 switch (rec->dtrd_size) {
1650 case sizeof (uint64_t):
1651 /* LINTED - alignment */
1652 normal.dtnd_normal = *((uint64_t *)addr);
1654 case sizeof (uint32_t):
1655 /* LINTED - alignment */
1656 normal.dtnd_normal = *((uint32_t *)addr);
1658 case sizeof (uint16_t):
1659 /* LINTED - alignment */
1660 normal.dtnd_normal = *((uint16_t *)addr);
1662 case sizeof (uint8_t):
1663 normal.dtnd_normal = *((uint8_t *)addr);
1666 return (dt_set_errno(dtp, EDT_BADNORMAL));
1669 (void) dtrace_aggregate_walk(dtp, dt_normalize_agg, &normal);
/*
 * dt_denormalize_agg: aggregate-walk callback that requests denormalization
 * for the aggregation matching the ID pointed to by `arg`.
 */
1675 dt_denormalize_agg(const dtrace_aggdata_t *aggdata, void *arg)
1677 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1678 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg);
1680 if (agg->dtagd_nrecs == 0)
1681 return (DTRACE_AGGWALK_NEXT);
1683 if (agg->dtagd_varid != id)
1684 return (DTRACE_AGGWALK_NEXT);
1686 return (DTRACE_AGGWALK_DENORMALIZE);
/*
 * dt_clear_agg: aggregate-walk callback that requests clearing of the
 * aggregation matching the ID pointed to by `arg`.
 */
1690 dt_clear_agg(const dtrace_aggdata_t *aggdata, void *arg)
1692 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1693 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg);
1695 if (agg->dtagd_nrecs == 0)
1696 return (DTRACE_AGGWALK_NEXT);
1698 if (agg->dtagd_varid != id)
1699 return (DTRACE_AGGWALK_NEXT);
1701 return (DTRACE_AGGWALK_CLEAR);
/*
 * State passed to dt_trunc_agg(): the target aggregation variable ID and
 * the number of entries still to be kept before removal begins.
 * NOTE(review): the closing "} dt_trunc_t;" line is missing from this
 * extract.
 */
1704 typedef struct dt_trunc {
1705 dtrace_aggvarid_t dttd_id;
1706 uint64_t dttd_remaining;
/*
 * dt_trunc_agg: aggregate-walk callback implementing trunc(). Keeps the
 * first dttd_remaining entries of the matching aggregation and removes
 * the rest.
 */
1710 dt_trunc_agg(const dtrace_aggdata_t *aggdata, void *arg)
1712 dt_trunc_t *trunc = arg;
1713 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1714 dtrace_aggvarid_t id = trunc->dttd_id;
1716 if (agg->dtagd_nrecs == 0)
1717 return (DTRACE_AGGWALK_NEXT);
1719 if (agg->dtagd_varid != id)
1720 return (DTRACE_AGGWALK_NEXT);
/* Quota exhausted: drop every remaining entry of this aggregation. */
1722 if (trunc->dttd_remaining == 0)
1723 return (DTRACE_AGGWALK_REMOVE);
1725 trunc->dttd_remaining--;
1726 return (DTRACE_AGGWALK_NEXT);
/*
 * dt_trunc: decode a trunc() library action. The first record carries the
 * aggregation ID; the second carries a signed count whose sign selects the
 * walk order (negative = value-sorted, positive = reverse value-sorted)
 * before dt_trunc_agg removes the excess entries.
 * NOTE(review): partial extract — the rec++ advance, switch `break;` lines
 * and braces are missing.
 */
1730 dt_trunc(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec)
1735 int (*func)(dtrace_hdl_t *, dtrace_aggregate_f *, void *);
1738 * We (should) have two records: the aggregation ID followed by the
1739 * number of aggregation entries after which the aggregation is to be
1742 addr = base + rec->dtrd_offset;
1744 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
1745 return (dt_set_errno(dtp, EDT_BADTRUNC));
1747 /* LINTED - alignment */
1748 trunc.dttd_id = *((dtrace_aggvarid_t *)addr);
1751 if (rec->dtrd_action != DTRACEACT_LIBACT)
1752 return (dt_set_errno(dtp, EDT_BADTRUNC));
1754 if (rec->dtrd_arg != DT_ACT_TRUNC)
1755 return (dt_set_errno(dtp, EDT_BADTRUNC));
1757 addr = base + rec->dtrd_offset;
/* The count record may be 1, 2, 4 or 8 bytes wide; read it signed. */
1759 switch (rec->dtrd_size) {
1760 case sizeof (uint64_t):
1761 /* LINTED - alignment */
1762 remaining = *((int64_t *)addr);
1764 case sizeof (uint32_t):
1765 /* LINTED - alignment */
1766 remaining = *((int32_t *)addr);
1768 case sizeof (uint16_t):
1769 /* LINTED - alignment */
1770 remaining = *((int16_t *)addr);
1772 case sizeof (uint8_t):
1773 remaining = *((int8_t *)addr);
1776 return (dt_set_errno(dtp, EDT_BADNORMAL));
/* Sign of the count selects sort direction for the truncation walk. */
1779 if (remaining < 0) {
1780 func = dtrace_aggregate_walk_valsorted;
1781 remaining = -remaining;
1783 func = dtrace_aggregate_walk_valrevsorted;
1786 assert(remaining >= 0);
1787 trunc.dttd_remaining = remaining;
1789 (void) func(dtp, dt_trunc_agg, &trunc);
/*
 * dt_print_datum: print a single traced datum, dispatching on the record's
 * action kind — stacks, symbols/modules, the various aggregating actions
 * (quantize, lquantize, llquantize, avg, stddev) — and falling back to a
 * width-based integer print or a raw byte dump.
 * NOTE(review): partial extract — the switch braces, `break;` lines and
 * several case labels (e.g. DTRACEACT_SYM/MOD before 1818/1821) are
 * missing; the divisor continuation lines at 1850/1855/1859 are truncated.
 */
1795 dt_print_datum(dtrace_hdl_t *dtp, FILE *fp, dtrace_recdesc_t *rec,
1796 caddr_t addr, size_t size, uint64_t normal)
1799 dtrace_actkind_t act = rec->dtrd_action;
1802 case DTRACEACT_STACK:
1803 return (dt_print_stack(dtp, fp, NULL, addr,
1804 rec->dtrd_arg, rec->dtrd_size / rec->dtrd_arg));
1806 case DTRACEACT_USTACK:
1807 case DTRACEACT_JSTACK:
1808 return (dt_print_ustack(dtp, fp, NULL, addr, rec->dtrd_arg));
1810 case DTRACEACT_USYM:
1811 case DTRACEACT_UADDR:
1812 return (dt_print_usym(dtp, fp, addr, act));
1814 case DTRACEACT_UMOD:
1815 return (dt_print_umod(dtp, fp, NULL, addr));
1818 return (dt_print_sym(dtp, fp, NULL, addr));
1821 return (dt_print_mod(dtp, fp, NULL, addr));
1823 case DTRACEAGG_QUANTIZE:
1824 return (dt_print_quantize(dtp, fp, addr, size, normal));
1826 case DTRACEAGG_LQUANTIZE:
1827 return (dt_print_lquantize(dtp, fp, addr, size, normal));
1829 case DTRACEAGG_LLQUANTIZE:
1830 return (dt_print_llquantize(dtp, fp, addr, size, normal));
1833 return (dt_print_average(dtp, fp, addr, size, normal));
1835 case DTRACEAGG_STDDEV:
1836 return (dt_print_stddev(dtp, fp, addr, size, normal));
/* Default: print as an integer of the record's width, scaled by `normal`. */
1843 case sizeof (uint64_t):
1844 err = dt_printf(dtp, fp, " %16lld",
1845 /* LINTED - alignment */
1846 (long long)*((uint64_t *)addr) / normal);
1848 case sizeof (uint32_t):
1849 /* LINTED - alignment */
1850 err = dt_printf(dtp, fp, " %8d", *((uint32_t *)addr) /
1853 case sizeof (uint16_t):
1854 /* LINTED - alignment */
1855 err = dt_printf(dtp, fp, " %5d", *((uint16_t *)addr) /
1858 case sizeof (uint8_t):
1859 err = dt_printf(dtp, fp, " %3d", *((uint8_t *)addr) /
1863 err = dt_print_bytes(dtp, fp, addr, size, 50, 0, 0);
/*
 * dt_print_aggs: print one row of (possibly joined) aggregations — first
 * the tuple key records from the first aggregation, then the value record
 * of each aggregation variable, flushing buffered output with the
 * appropriate AGGKEY/AGGVAL flags and marking the aggregations printed.
 * NOTE(review): partial extract — local declarations (addr, size, aggact,
 * normal, i), the loop that finds `aggact`, and braces are missing.
 */
1871 dt_print_aggs(const dtrace_aggdata_t **aggsdata, int naggvars, void *arg)
1874 dt_print_aggdata_t *pd = arg;
1875 const dtrace_aggdata_t *aggdata = aggsdata[0];
1876 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1877 FILE *fp = pd->dtpa_fp;
1878 dtrace_hdl_t *dtp = pd->dtpa_dtp;
1879 dtrace_recdesc_t *rec;
1880 dtrace_actkind_t act;
1885 * Iterate over each record description in the key, printing the traced
1886 * data, skipping the first datum (the tuple member created by the
1889 for (i = 1; i < agg->dtagd_nrecs; i++) {
1890 rec = &agg->dtagd_rec[i];
1891 act = rec->dtrd_action;
1892 addr = aggdata->dtada_data + rec->dtrd_offset;
1893 size = rec->dtrd_size;
1895 if (DTRACEACT_ISAGG(act)) {
/* Key records are printed unnormalized (normal == 1). */
1900 if (dt_print_datum(dtp, fp, rec, addr, size, 1) < 0)
1903 if (dt_buffered_flush(dtp, NULL, rec, aggdata,
1904 DTRACE_BUFDATA_AGGKEY) < 0)
1908 assert(aggact != 0);
/* Now print the aggregated value for each aggregation variable. */
1910 for (i = (naggvars == 1 ? 0 : 1); i < naggvars; i++) {
1913 aggdata = aggsdata[i];
1914 agg = aggdata->dtada_desc;
1915 rec = &agg->dtagd_rec[aggact];
1916 act = rec->dtrd_action;
1917 addr = aggdata->dtada_data + rec->dtrd_offset;
1918 size = rec->dtrd_size;
1920 assert(DTRACEACT_ISAGG(act));
1921 normal = aggdata->dtada_normal;
1923 if (dt_print_datum(dtp, fp, rec, addr, size, normal) < 0)
1926 if (dt_buffered_flush(dtp, NULL, rec, aggdata,
1927 DTRACE_BUFDATA_AGGVAL) < 0)
1930 if (!pd->dtpa_allunprint)
1931 agg->dtagd_flags |= DTRACE_AGD_PRINTED;
1934 if (dt_printf(dtp, fp, "\n") < 0)
1937 if (dt_buffered_flush(dtp, NULL, NULL, aggdata,
1938 DTRACE_BUFDATA_AGGFORMAT | DTRACE_BUFDATA_AGGLAST) < 0)
/*
 * dt_print_agg: single-aggregation wrapper around dt_print_aggs(). In
 * "all unprinted" mode it skips aggregations already marked printed;
 * otherwise it prints only the aggregation matching dtpa_id.
 */
1945 dt_print_agg(const dtrace_aggdata_t *aggdata, void *arg)
1947 dt_print_aggdata_t *pd = arg;
1948 dtrace_aggdesc_t *agg = aggdata->dtada_desc;
1949 dtrace_aggvarid_t aggvarid = pd->dtpa_id;
1951 if (pd->dtpa_allunprint) {
1952 if (agg->dtagd_flags & DTRACE_AGD_PRINTED)
1956 * If we're not printing all unprinted aggregations, then the
1957 * aggregation variable ID denotes a specific aggregation
1958 * variable that we should print -- skip any other aggregations
1959 * that we encounter.
1961 if (agg->dtagd_nrecs == 0)
1964 if (aggvarid != agg->dtagd_varid)
1968 return (dt_print_aggs(&aggdata, 1, arg));
/*
 * dt_setopt: apply a setopt() action. Records the old value, attempts the
 * set, and on success invokes the setopt handler with before/after values;
 * on failure formats an error message and routes it through the library
 * error handler.
 * NOTE(review): partial extract — the msg allocation between 1996 and 1999
 * and the final returns are missing.
 */
1972 dt_setopt(dtrace_hdl_t *dtp, const dtrace_probedata_t *data,
1973 const char *option, const char *value)
1978 dtrace_setoptdata_t optdata;
1980 bzero(&optdata, sizeof (optdata));
1981 (void) dtrace_getopt(dtp, option, &optdata.dtsda_oldval);
1983 if (dtrace_setopt(dtp, option, value) == 0) {
1984 (void) dtrace_getopt(dtp, option, &optdata.dtsda_newval);
1985 optdata.dtsda_probe = data;
1986 optdata.dtsda_option = option;
1987 optdata.dtsda_handle = dtp;
1989 if ((rval = dt_handle_setopt(dtp, &optdata)) != 0)
1995 errstr = dtrace_errmsg(dtp, dtrace_errno(dtp));
1996 len = strlen(option) + strlen(value) + strlen(errstr) + 80;
1999 (void) snprintf(msg, len, "couldn't set option \"%s\" to \"%s\": %s\n",
2000 option, value, errstr);
2002 if ((rval = dt_handle_liberr(dtp, data, msg)) == 0)
/*
 * dt_consume_cpu: the core per-CPU consumer loop. Walks every enabled-probe
 * record in `buf` starting at dtbd_oldest, invoking the probe callback
 * (`efunc`) once per EPID and the record callback (`rfunc`) once per record,
 * handling library actions (normalize/denormalize/trunc/clear/setopt/
 * ftruncate), printf-like actions, printa(), stacks, symbols, tracemem and
 * plain integer/byte output along the way. Finally reports any drops.
 * NOTE(review): this span is a line-sampled extract — braces, `break;`
 * and `continue;` lines, `switch (arg)` headers, `goto`/label lines and
 * several declarations are missing; statement grouping below should be
 * confirmed against the full dt_consume.c before any behavioral change.
 */
2009 dt_consume_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu,
2010 dtrace_bufdesc_t *buf, boolean_t just_one,
2011 dtrace_consume_probe_f *efunc, dtrace_consume_rec_f *rfunc, void *arg)
2015 int flow = (dtp->dt_options[DTRACEOPT_FLOWINDENT] != DTRACEOPT_UNSET);
2016 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET);
2018 uint64_t tracememsize = 0;
2019 dtrace_probedata_t data;
2022 bzero(&data, sizeof (data));
2023 data.dtpda_handle = dtp;
2024 data.dtpda_cpu = cpu;
2025 data.dtpda_flow = dtp->dt_flow;
2026 data.dtpda_indent = dtp->dt_indent;
2027 data.dtpda_prefix = dtp->dt_prefix;
/* Main record loop: one iteration per EPID chunk in the buffer. */
2029 for (offs = buf->dtbd_oldest; offs < buf->dtbd_size; ) {
2030 dtrace_eprobedesc_t *epd;
2033 * We're guaranteed to have an ID.
2035 id = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs);
2037 if (id == DTRACE_EPIDNONE) {
2039 * This is filler to assure proper alignment of the
2040 * next record; we simply ignore it.
2042 offs += sizeof (id);
2046 if ((rval = dt_epid_lookup(dtp, id, &data.dtpda_edesc,
2047 &data.dtpda_pdesc)) != 0)
2050 epd = data.dtpda_edesc;
2051 data.dtpda_data = buf->dtbd_data + offs;
2053 if (data.dtpda_edesc->dtepd_uarg != DT_ECB_DEFAULT) {
2054 rval = dt_handle(dtp, &data);
2056 if (rval == DTRACE_CONSUME_NEXT)
2059 if (rval == DTRACE_CONSUME_ERROR)
2064 (void) dt_flowindent(dtp, &data, dtp->dt_last_epid,
2067 rval = (*efunc)(&data, arg);
2070 if (data.dtpda_flow == DTRACEFLOW_ENTRY)
2071 data.dtpda_indent += 2;
2074 if (rval == DTRACE_CONSUME_NEXT)
2077 if (rval == DTRACE_CONSUME_ABORT)
2078 return (dt_set_errno(dtp, EDT_DIRABORT));
2080 if (rval != DTRACE_CONSUME_THIS)
2081 return (dt_set_errno(dtp, EDT_BADRVAL));
/* Per-record loop within this EPID. */
2083 for (i = 0; i < epd->dtepd_nrecs; i++) {
2085 dtrace_recdesc_t *rec = &epd->dtepd_rec[i];
2086 dtrace_actkind_t act = rec->dtrd_action;
2088 data.dtpda_data = buf->dtbd_data + offs +
2090 addr = data.dtpda_data;
/* Library actions dispatch on rec->dtrd_arg (switch header missing here). */
2092 if (act == DTRACEACT_LIBACT) {
2093 uint64_t arg = rec->dtrd_arg;
2094 dtrace_aggvarid_t id;
2098 /* LINTED - alignment */
2099 id = *((dtrace_aggvarid_t *)addr);
2100 (void) dtrace_aggregate_walk(dtp,
2104 case DT_ACT_DENORMALIZE:
2105 /* LINTED - alignment */
2106 id = *((dtrace_aggvarid_t *)addr);
2107 (void) dtrace_aggregate_walk(dtp,
2108 dt_denormalize_agg, &id);
2111 case DT_ACT_FTRUNCATE:
2116 (void) ftruncate(fileno(fp), 0);
2117 (void) fseeko(fp, 0, SEEK_SET);
2120 case DT_ACT_NORMALIZE:
2121 if (i == epd->dtepd_nrecs - 1)
2122 return (dt_set_errno(dtp,
2125 if (dt_normalize(dtp,
2126 buf->dtbd_data + offs, rec) != 0)
2132 case DT_ACT_SETOPT: {
2133 uint64_t *opts = dtp->dt_options;
2134 dtrace_recdesc_t *valrec;
2139 if (i == epd->dtepd_nrecs - 1) {
2140 return (dt_set_errno(dtp,
2144 valrec = &epd->dtepd_rec[++i];
2145 valsize = valrec->dtrd_size;
2147 if (valrec->dtrd_action != act ||
2148 valrec->dtrd_arg != arg) {
2149 return (dt_set_errno(dtp,
2153 if (valsize > sizeof (uint64_t)) {
2154 val = buf->dtbd_data + offs +
2155 valrec->dtrd_offset;
2160 rv = dt_setopt(dtp, &data, addr, val);
/* Re-read options that affect this loop after a successful setopt. */
2165 flow = (opts[DTRACEOPT_FLOWINDENT] !=
2167 quiet = (opts[DTRACEOPT_QUIET] !=
2174 if (i == epd->dtepd_nrecs - 1)
2175 return (dt_set_errno(dtp,
2179 buf->dtbd_data + offs, rec) != 0)
2190 if (act == DTRACEACT_TRACEMEM_DYNSIZE &&
2191 rec->dtrd_size == sizeof (uint64_t)) {
2192 /* LINTED - alignment */
2193 tracememsize = *((unsigned long long *)addr);
2197 rval = (*rfunc)(&data, rec, arg);
2199 if (rval == DTRACE_CONSUME_NEXT)
2202 if (rval == DTRACE_CONSUME_ABORT)
2203 return (dt_set_errno(dtp, EDT_DIRABORT));
2205 if (rval != DTRACE_CONSUME_THIS)
2206 return (dt_set_errno(dtp, EDT_BADRVAL));
2208 if (act == DTRACEACT_STACK) {
2209 int depth = rec->dtrd_arg;
2211 if (dt_print_stack(dtp, fp, NULL, addr, depth,
2212 rec->dtrd_size / depth) < 0)
2217 if (act == DTRACEACT_USTACK ||
2218 act == DTRACEACT_JSTACK) {
2219 if (dt_print_ustack(dtp, fp, NULL,
2220 addr, rec->dtrd_arg) < 0)
2225 if (act == DTRACEACT_SYM) {
2226 if (dt_print_sym(dtp, fp, NULL, addr) < 0)
2231 if (act == DTRACEACT_MOD) {
2232 if (dt_print_mod(dtp, fp, NULL, addr) < 0)
2237 if (act == DTRACEACT_USYM || act == DTRACEACT_UADDR) {
2238 if (dt_print_usym(dtp, fp, addr, act) < 0)
2243 if (act == DTRACEACT_UMOD) {
2244 if (dt_print_umod(dtp, fp, NULL, addr) < 0)
2249 if (act == DTRACEACT_PRINTM) {
2250 if (dt_print_memory(dtp, fp, addr) < 0)
2255 if (act == DTRACEACT_PRINTT) {
2256 if (dt_print_type(dtp, fp, addr) < 0)
2261 if (DTRACEACT_ISPRINTFLIKE(act)) {
2263 int (*func)(dtrace_hdl_t *, FILE *, void *,
2264 const dtrace_probedata_t *,
2265 const dtrace_recdesc_t *, uint_t,
2266 const void *buf, size_t);
2268 if ((fmtdata = dt_format_lookup(dtp,
2269 rec->dtrd_format)) == NULL)
2273 case DTRACEACT_PRINTF:
2274 func = dtrace_fprintf;
2276 case DTRACEACT_PRINTA:
2277 func = dtrace_fprinta;
2279 case DTRACEACT_SYSTEM:
2280 func = dtrace_system;
2282 case DTRACEACT_FREOPEN:
2283 func = dtrace_freopen;
2287 n = (*func)(dtp, fp, fmtdata, &data,
2288 rec, epd->dtepd_nrecs - i,
2289 (uchar_t *)buf->dtbd_data + offs,
2290 buf->dtbd_size - offs);
2293 return (-1); /* errno is set for us */
2301 * If this is a DIF expression, and the record has a
2302 * format set, this indicates we have a CTF type name
2303 * associated with the data and we should try to print
2306 if (act == DTRACEACT_DIFEXPR) {
2307 const char *strdata = dt_strdata_lookup(dtp,
2309 if (strdata != NULL) {
2310 n = dtrace_print(dtp, fp, strdata,
2311 addr, rec->dtrd_size);
2314 * dtrace_print() will return -1 on
2315 * error, or return the number of bytes
2316 * consumed. It will return 0 if the
2317 * type couldn't be determined, and we
2318 * should fall through to the normal
2330 if (act == DTRACEACT_PRINTA) {
2331 dt_print_aggdata_t pd;
2332 dtrace_aggvarid_t *aggvars;
2333 int j, naggvars = 0;
2334 size_t size = ((epd->dtepd_nrecs - i) *
2335 sizeof (dtrace_aggvarid_t));
2337 if ((aggvars = dt_alloc(dtp, size)) == NULL)
2341 * This might be a printa() with multiple
2342 * aggregation variables. We need to scan
2343 * forward through the records until we find
2344 * a record from a different statement.
2346 for (j = i; j < epd->dtepd_nrecs; j++) {
2347 dtrace_recdesc_t *nrec;
2350 nrec = &epd->dtepd_rec[j];
2352 if (nrec->dtrd_uarg != rec->dtrd_uarg)
2355 if (nrec->dtrd_action != act) {
2356 return (dt_set_errno(dtp,
2360 naddr = buf->dtbd_data + offs +
2363 aggvars[naggvars++] =
2364 /* LINTED - alignment */
2365 *((dtrace_aggvarid_t *)naddr);
2369 bzero(&pd, sizeof (pd));
2373 assert(naggvars >= 1);
2375 if (naggvars == 1) {
2376 pd.dtpa_id = aggvars[0];
2377 dt_free(dtp, aggvars);
2379 if (dt_printf(dtp, fp, "\n") < 0 ||
2380 dtrace_aggregate_walk_sorted(dtp,
2381 dt_print_agg, &pd) < 0)
2386 if (dt_printf(dtp, fp, "\n") < 0 ||
2387 dtrace_aggregate_walk_joined(dtp, aggvars,
2388 naggvars, dt_print_aggs, &pd) < 0) {
2389 dt_free(dtp, aggvars);
2393 dt_free(dtp, aggvars);
2397 if (act == DTRACEACT_TRACEMEM) {
2398 if (tracememsize == 0 ||
2399 tracememsize > rec->dtrd_size) {
2400 tracememsize = rec->dtrd_size;
2403 n = dt_print_bytes(dtp, fp, addr,
2404 tracememsize, 33, quiet, 1);
/* Fallback: plain integer print sized to the record, or a byte dump. */
2414 switch (rec->dtrd_size) {
2415 case sizeof (uint64_t):
2416 n = dt_printf(dtp, fp,
2417 quiet ? "%lld" : " %16lld",
2418 /* LINTED - alignment */
2419 *((unsigned long long *)addr));
2421 case sizeof (uint32_t):
2422 n = dt_printf(dtp, fp, quiet ? "%d" : " %8d",
2423 /* LINTED - alignment */
2424 *((uint32_t *)addr));
2426 case sizeof (uint16_t):
2427 n = dt_printf(dtp, fp, quiet ? "%d" : " %5d",
2428 /* LINTED - alignment */
2429 *((uint16_t *)addr));
2431 case sizeof (uint8_t):
2432 n = dt_printf(dtp, fp, quiet ? "%d" : " %3d",
2433 *((uint8_t *)addr));
2436 n = dt_print_bytes(dtp, fp, addr,
2437 rec->dtrd_size, 33, quiet, 0);
2442 return (-1); /* errno is set for us */
2445 if (dt_buffered_flush(dtp, &data, rec, NULL, 0) < 0)
2446 return (-1); /* errno is set for us */
2450 * Call the record callback with a NULL record to indicate
2451 * that we're done processing this EPID.
2453 rval = (*rfunc)(&data, NULL, arg);
2455 offs += epd->dtepd_size;
2456 dtp->dt_last_epid = id;
2458 buf->dtbd_oldest = offs;
/* Persist flow/indent/prefix state back to the handle for the next call. */
2463 dtp->dt_flow = data.dtpda_flow;
2464 dtp->dt_indent = data.dtpda_indent;
2465 dtp->dt_prefix = data.dtpda_prefix;
2467 if ((drops = buf->dtbd_drops) == 0)
2471 * Explicitly zero the drops to prevent us from processing them again.
2473 buf->dtbd_drops = 0;
2475 return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops));
Reduce memory usage by shrinking the buffer if it's no more than half full.
Note, we need to preserve the alignment of the data at dtbd_oldest, which is
only 4-byte aligned.
/*
 * dt_realloc_buf: copies the unconsumed tail into a freshly allocated,
 * smaller buffer, zero-filling a small prefix so dtbd_oldest keeps its
 * original misalignment relative to 8 bytes.
 */
2484 dt_realloc_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf, int cursize)
2486 uint64_t used = buf->dtbd_size - buf->dtbd_oldest;
2487 if (used < cursize / 2) {
2488 int misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
2489 char *newdata = dt_alloc(dtp, used + misalign);
2490 if (newdata == NULL)
2492 bzero(newdata, misalign);
2493 bcopy(buf->dtbd_data + buf->dtbd_oldest,
2494 newdata + misalign, used);
2495 dt_free(dtp, buf->dtbd_data);
2496 buf->dtbd_oldest = misalign;
2497 buf->dtbd_size = used + misalign;
2498 buf->dtbd_data = newdata;
If the ring buffer has wrapped, the data is not in order. Rearrange it
so that it is. Note, we need to preserve the alignment of the data at
dtbd_oldest, which is only 4-byte aligned.
/*
 * dt_unring_buf: linearize a wrapped ring buffer into a new allocation:
 * [misalign padding][oldest..end][start..oldest), then reset dtbd_oldest.
 * NOTE(review): partial extract — return statements and braces are missing.
 */
2508 dt_unring_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
2511 char *newdata, *ndp;
2513 if (buf->dtbd_oldest == 0)
2516 misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
2517 newdata = ndp = dt_alloc(dtp, buf->dtbd_size + misalign);
2519 if (newdata == NULL)
2522 assert(0 == (buf->dtbd_size & (sizeof (uint64_t) - 1)));
2524 bzero(ndp, misalign);
/* First the tail (oldest..end), then the wrapped head (start..oldest). */
2527 bcopy(buf->dtbd_data + buf->dtbd_oldest, ndp,
2528 buf->dtbd_size - buf->dtbd_oldest);
2529 ndp += buf->dtbd_size - buf->dtbd_oldest;
2531 bcopy(buf->dtbd_data, ndp, buf->dtbd_oldest);
2533 dt_free(dtp, buf->dtbd_data);
2534 buf->dtbd_oldest = 0;
2535 buf->dtbd_data = newdata;
2536 buf->dtbd_size += misalign;
/*
 * dt_put_buf: release a buffer descriptor's data (the descriptor free
 * itself is on a line missing from this extract).
 */
2542 dt_put_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
2544 dt_free(dtp, buf->dtbd_data);
Returns 0 on success, in which case *cbp will be filled in if we retrieved
data, or NULL if there is no data for this CPU.
Returns -1 on failure and sets dt_errno.
/*
 * dt_get_buf: allocate a descriptor+data sized by the "bufsize" option,
 * snapshot the CPU's principal buffer via DTRACEIOC_BUFSNAP, linearize it
 * with dt_unring_buf() and shrink it with dt_realloc_buf().
 * NOTE(review): lines 2573 and 2575 are two variants of the same ioctl
 * call (by-value vs by-address descriptor); the selecting #ifdef lines
 * appear to be missing from this extract — confirm which platform branch
 * applies before touching this code.
 */
2554 dt_get_buf(dtrace_hdl_t *dtp, int cpu, dtrace_bufdesc_t **bufp)
2556 dtrace_optval_t size;
2557 dtrace_bufdesc_t *buf = dt_zalloc(dtp, sizeof (*buf));
2563 (void) dtrace_getopt(dtp, "bufsize", &size);
2564 buf->dtbd_data = dt_alloc(dtp, size);
2565 if (buf->dtbd_data == NULL) {
2569 buf->dtbd_size = size;
2570 buf->dtbd_cpu = cpu;
2573 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
2575 if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, &buf) == -1) {
2578 * If we failed with ENOENT, it may be because the
2579 * CPU was unconfigured -- this is okay. Any other
2580 * error, however, is unexpected.
2582 if (errno == ENOENT) {
2586 rval = dt_set_errno(dtp, errno);
2588 dt_put_buf(dtp, buf);
2592 error = dt_unring_buf(dtp, buf);
2594 dt_put_buf(dtp, buf);
2597 dt_realloc_buf(dtp, buf, size);
/*
 * State used by dt_consume_begin() to interpose on the probe, record and
 * error callbacks: the saved user callbacks plus a flag selecting whether
 * only BEGIN (or only non-BEGIN) enablings are processed on this pass.
 * NOTE(review): the dtbgn_arg/dtbgn_errarg member lines and the closing
 * "} dt_begin_t;" are missing from this extract.
 */
2603 typedef struct dt_begin {
2604 dtrace_consume_probe_f *dtbgn_probefunc;
2605 dtrace_consume_rec_f *dtbgn_recfunc;
2607 dtrace_handle_err_f *dtbgn_errhdlr;
2609 int dtbgn_beginonly;
/*
 * dt_consume_begin_probe: probe-callback interposer. Depending on
 * dtbgn_beginonly, passes through only dtrace:::BEGIN records (first pass)
 * or only non-BEGIN records (second pass) to the saved user callback.
 */
2613 dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg)
2615 dt_begin_t *begin = arg;
2616 dtrace_probedesc_t *pd = data->dtpda_pdesc;
2618 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
2619 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);
2621 if (begin->dtbgn_beginonly) {
2623 return (DTRACE_CONSUME_NEXT);
2626 return (DTRACE_CONSUME_NEXT);
2630 * We have a record that we're interested in. Now call the underlying
2633 return (begin->dtbgn_probefunc(data, begin->dtbgn_arg));
/*
 * dt_consume_begin_record: record-callback interposer — simply forwards
 * to the saved user record callback with the saved argument.
 */
2637 dt_consume_begin_record(const dtrace_probedata_t *data,
2638 const dtrace_recdesc_t *rec, void *arg)
2640 dt_begin_t *begin = arg;
2642 return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg));
/*
 * dt_consume_begin_error: error-handler interposer mirroring
 * dt_consume_begin_probe(): only ERRORs induced by BEGIN (first pass) or
 * not induced by BEGIN (second pass) reach the saved user error handler.
 */
2646 dt_consume_begin_error(const dtrace_errdata_t *data, void *arg)
2648 dt_begin_t *begin = (dt_begin_t *)arg;
2649 dtrace_probedesc_t *pd = data->dteda_pdesc;
2651 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
2652 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);
2654 if (begin->dtbgn_beginonly) {
2656 return (DTRACE_HANDLE_OK);
2659 return (DTRACE_HANDLE_OK);
2662 return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg));
/*
 * dt_consume_begin: consume the BEGIN CPU's buffer in two passes (BEGIN
 * records first, everything else after all other CPUs) using the
 * dt_begin_t interposers declared above.
 * NOTE(review): partial extract — the dt_begin_t local declaration,
 * early returns, braces and several control-flow lines are missing.
 */
2666 dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp,
2667 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
2670 * There's this idea that the BEGIN probe should be processed before
2671 * everything else, and that the END probe should be processed after
2672 * anything else. In the common case, this is pretty easy to deal
2673 * with. However, a situation may arise where the BEGIN enabling and
2674 * END enabling are on the same CPU, and some enabling in the middle
2675 * occurred on a different CPU. To deal with this (blech!) we need to
2676 * consume the BEGIN buffer up until the end of the BEGIN probe, and
2677 * then set it aside. We will then process every other CPU, and then
2678 * we'll return to the BEGIN CPU and process the rest of the data
2679 * (which will inevitably include the END probe, if any). Making this
2680 * even more complicated (!) is the library's ERROR enabling. Because
2681 * this enabling is processed before we even get into the consume call
2682 * back, any ERROR firing would result in the library's ERROR enabling
2683 * being processed twice -- once in our first pass (for BEGIN probes),
2684 * and again in our second pass (for everything but BEGIN probes). To
2685 * deal with this, we interpose on the ERROR handler to assure that we
2686 * only process ERROR enablings induced by BEGIN enablings in the
2687 * first pass, and that we only process ERROR enablings _not_ induced
2688 * by BEGIN enablings in the second pass.
2692 processorid_t cpu = dtp->dt_beganon;
2694 static int max_ncpus;
2695 dtrace_bufdesc_t *buf;
2697 dtp->dt_beganon = -1;
2699 if (dt_get_buf(dtp, cpu, &buf) != 0)
2704 if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) {
2706 * This is the simple case. We're either not stopped, or if
2707 * we are, we actually processed any END probes on another
2708 * CPU. We can simply consume this buffer and return.
2710 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2712 dt_put_buf(dtp, buf);
/* First pass: BEGIN-only, via the interposed callbacks. */
2716 begin.dtbgn_probefunc = pf;
2717 begin.dtbgn_recfunc = rf;
2718 begin.dtbgn_arg = arg;
2719 begin.dtbgn_beginonly = 1;
2722 * We need to interpose on the ERROR handler to be sure that we
2723 * only process ERRORs induced by BEGIN.
2725 begin.dtbgn_errhdlr = dtp->dt_errhdlr;
2726 begin.dtbgn_errarg = dtp->dt_errarg;
2727 dtp->dt_errhdlr = dt_consume_begin_error;
2728 dtp->dt_errarg = &begin;
2730 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2731 dt_consume_begin_probe, dt_consume_begin_record, &begin);
2733 dtp->dt_errhdlr = begin.dtbgn_errhdlr;
2734 dtp->dt_errarg = begin.dtbgn_errarg;
2737 dt_put_buf(dtp, buf);
2742 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
/* Consume every other CPU's buffer between the two BEGIN-CPU passes. */
2744 for (i = 0; i < max_ncpus; i++) {
2745 dtrace_bufdesc_t *nbuf;
2749 if (dt_get_buf(dtp, i, &nbuf) != 0) {
2750 dt_put_buf(dtp, buf);
2756 rval = dt_consume_cpu(dtp, fp, i, nbuf, B_FALSE,
2758 dt_put_buf(dtp, nbuf);
2760 dt_put_buf(dtp, buf);
2766 * Okay -- we're done with the other buffers. Now we want to
2767 * reconsume the first buffer -- but this time we're looking for
2768 * everything _but_ BEGIN. And of course, in order to only consume
2769 * those ERRORs _not_ associated with BEGIN, we need to reinstall our
2770 * ERROR interposition function...
2772 begin.dtbgn_beginonly = 0;
2774 assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr);
2775 assert(begin.dtbgn_errarg == dtp->dt_errarg);
2776 dtp->dt_errhdlr = dt_consume_begin_error;
2777 dtp->dt_errarg = &begin;
2779 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE,
2780 dt_consume_begin_probe, dt_consume_begin_record, &begin);
2782 dtp->dt_errhdlr = begin.dtbgn_errhdlr;
2783 dtp->dt_errarg = begin.dtbgn_errarg;
/*
 * dt_buf_oldest: priority-queue comparator helper — returns the timestamp
 * of the first real record in the buffer (skipping EPIDNONE filler), or
 * the buffer snapshot time if the buffer holds no records.
 */
2790 dt_buf_oldest(void *elem, void *arg)
2792 dtrace_bufdesc_t *buf = elem;
2793 size_t offs = buf->dtbd_oldest;
2795 while (offs < buf->dtbd_size) {
2796 dtrace_rechdr_t *dtrh =
2797 /* LINTED - alignment */
2798 (dtrace_rechdr_t *)(buf->dtbd_data + offs);
2799 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2800 offs += sizeof (dtrace_epid_t);
2802 return (DTRACE_RECORD_LOAD_TIMESTAMP(dtrh));
2806 /* There are no records left; use the time the buffer was retrieved. */
2807 return (buf->dtbd_timestamp);
2811 dtrace_consume(dtrace_hdl_t *dtp, FILE *fp,
2812 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
2814 dtrace_optval_t size;
2815 static int max_ncpus;
2817 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE];
2818 hrtime_t now = gethrtime();
2820 if (dtp->dt_lastswitch != 0) {
2821 if (now - dtp->dt_lastswitch < interval)
2824 dtp->dt_lastswitch += interval;
2826 dtp->dt_lastswitch = now;
2829 if (!dtp->dt_active)
2830 return (dt_set_errno(dtp, EINVAL));
2833 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
2836 pf = (dtrace_consume_probe_f *)dt_nullprobe;
2839 rf = (dtrace_consume_rec_f *)dt_nullrec;
2841 if (dtp->dt_options[DTRACEOPT_TEMPORAL] == DTRACEOPT_UNSET) {
2843 * The output will not be in the order it was traced. Rather,
2844 * we will consume all of the data from each CPU's buffer in
2845 * turn. We apply special handling for the records from BEGIN
2846 * and END probes so that they are consumed first and last,
2849 * If we have just begun, we want to first process the CPU that
2850 * executed the BEGIN probe (if any).
2852 if (dtp->dt_active && dtp->dt_beganon != -1 &&
2853 (rval = dt_consume_begin(dtp, fp, pf, rf, arg)) != 0)
2856 for (i = 0; i < max_ncpus; i++) {
2857 dtrace_bufdesc_t *buf;
2860 * If we have stopped, we want to process the CPU on
2861 * which the END probe was processed only _after_ we
2862 * have processed everything else.
2864 if (dtp->dt_stopped && (i == dtp->dt_endedon))
2867 if (dt_get_buf(dtp, i, &buf) != 0)
2874 dtp->dt_prefix = NULL;
2875 rval = dt_consume_cpu(dtp, fp, i,
2876 buf, B_FALSE, pf, rf, arg);
2877 dt_put_buf(dtp, buf);
2881 if (dtp->dt_stopped) {
2882 dtrace_bufdesc_t *buf;
2884 if (dt_get_buf(dtp, dtp->dt_endedon, &buf) != 0)
2889 rval = dt_consume_cpu(dtp, fp, dtp->dt_endedon,
2890 buf, B_FALSE, pf, rf, arg);
2891 dt_put_buf(dtp, buf);
2896 * The output will be in the order it was traced (or for
2897 * speculations, when it was committed). We retrieve a buffer
2898 * from each CPU and put it into a priority queue, which sorts
2899 * based on the first entry in the buffer. This is sufficient
2900 * because entries within a buffer are already sorted.
2902 * We then consume records one at a time, always consuming the
2903 * oldest record, as determined by the priority queue. When
2904 * we reach the end of the time covered by these buffers,
2905 * we need to stop and retrieve more records on the next pass.
2906 * The kernel tells us the time covered by each buffer, in
2907 * dtbd_timestamp. The first buffer's timestamp tells us the
2908 * time covered by all buffers, as subsequently retrieved
2909 * buffers will cover to a more recent time.
2912 uint64_t *drops = alloca(max_ncpus * sizeof (uint64_t));
2913 uint64_t first_timestamp = 0;
2915 dtrace_bufdesc_t *buf;
2917 bzero(drops, max_ncpus * sizeof (uint64_t));
2919 if (dtp->dt_bufq == NULL) {
2920 dtp->dt_bufq = dt_pq_init(dtp, max_ncpus * 2,
2921 dt_buf_oldest, NULL);
2922 if (dtp->dt_bufq == NULL) /* ENOMEM */
2926 /* Retrieve data from each CPU. */
2927 (void) dtrace_getopt(dtp, "bufsize", &size);
2928 for (i = 0; i < max_ncpus; i++) {
2929 dtrace_bufdesc_t *buf;
2931 if (dt_get_buf(dtp, i, &buf) != 0)
2934 if (first_timestamp == 0)
2935 first_timestamp = buf->dtbd_timestamp;
2936 assert(buf->dtbd_timestamp >= first_timestamp);
2938 dt_pq_insert(dtp->dt_bufq, buf);
2939 drops[i] = buf->dtbd_drops;
2940 buf->dtbd_drops = 0;
2944 /* Consume records. */
2946 dtrace_bufdesc_t *buf = dt_pq_pop(dtp->dt_bufq);
2952 timestamp = dt_buf_oldest(buf, dtp);
2953 assert(timestamp >= dtp->dt_last_timestamp);
2954 dtp->dt_last_timestamp = timestamp;
2956 if (timestamp == buf->dtbd_timestamp) {
2958 * We've reached the end of the time covered
2959 * by this buffer. If this is the oldest
2960 * buffer, we must do another pass
2961 * to retrieve more data.
2963 dt_put_buf(dtp, buf);
2964 if (timestamp == first_timestamp &&
2970 if ((rval = dt_consume_cpu(dtp, fp,
2971 buf->dtbd_cpu, buf, B_TRUE, pf, rf, arg)) != 0)
2973 dt_pq_insert(dtp->dt_bufq, buf);
2976 /* Consume drops. */
2977 for (i = 0; i < max_ncpus; i++) {
2978 if (drops[i] != 0) {
2979 int error = dt_handle_cpudrop(dtp, i,
2980 DTRACEDROP_PRINCIPAL, drops[i]);
2987 * Reduce memory usage by re-allocating smaller buffers
2988 * for the "remnants".
2990 while (buf = dt_pq_walk(dtp->dt_bufq, &cookie))
2991 dt_realloc_buf(dtp, buf, buf->dtbd_size);