4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
38 #include <sys/sysctl.h>
/* Size of the aggregation hash table; a prime, to spread hash values. */
42 #define DTRACE_AHASHSIZE 32779 /* big 'ol prime */
45 * Because qsort(3C) does not allow an argument to be passed to a comparison
46 * function, the variables that affect comparison must regrettably be global;
47 * they are protected by a global static lock, dt_qsort_lock.
49 static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER;
/* Non-zero when sorting in reverse order (driven by the "aggsortrev" option). */
51 static int dt_revsort;
/* Non-zero when sorting by key rather than by value ("aggsortkey" option). */
52 static int dt_keysort;
/*
 * Comparison results that honor dt_revsort: when reverse sorting is in
 * effect, "less than" and "greater than" swap signs.
 */
55 #define DT_LESSTHAN (dt_revsort == 0 ? -1 : 1)
56 #define DT_GREATERTHAN (dt_revsort == 0 ? 1 : -1)
/*
 * Aggregating action: element-wise addition of the new int64_t values into
 * the existing buffer (size is in bytes).  Installed as the aggregator for
 * count(), sum()-style, stddev() and quantize() actions in
 * dt_aggregate_snap_cpu().
 */
59 dt_aggregate_count(int64_t *existing, int64_t *new, size_t size)
63 for (i = 0; i < size / sizeof (int64_t); i++)
64 existing[i] = existing[i] + new[i];
/*
 * Compare two scalar aggregation values; the DT_LESSTHAN/DT_GREATERTHAN
 * macros make the result honor dt_revsort.
 */
68 dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs)
77 return (DT_GREATERTHAN);
/*
 * min() aggregating action; installed for DTRACEAGG_MIN in
 * dt_aggregate_snap_cpu().
 */
84 dt_aggregate_min(int64_t *existing, int64_t *new, size_t size)
/*
 * max() aggregating action; installed for DTRACEAGG_MAX in
 * dt_aggregate_snap_cpu().
 */
92 dt_aggregate_max(int64_t *existing, int64_t *new, size_t size)
/*
 * Compare two avg() aggregations.  Data layout: [0] is the count and [1] is
 * the running total; the ternary guard avoids dividing by a zero count.
 */
99 dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs)
101 int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0;
102 int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0;
105 return (DT_LESSTHAN);
108 return (DT_GREATERTHAN);
/*
 * Compare two stddev() aggregations by their computed standard deviations
 * (dt_stddev() derives the deviation from the raw aggregation data).
 */
114 dt_aggregate_stddevcmp(int64_t *lhs, int64_t *rhs)
116 uint64_t lsd = dt_stddev((uint64_t *)lhs, 1);
117 uint64_t rsd = dt_stddev((uint64_t *)rhs, 1);
120 return (DT_LESSTHAN);
123 return (DT_GREATERTHAN);
/*
 * Aggregating action for lquantize(): the first 64-bit word of the data
 * encodes the lquantize() parameters; the remaining levels + 2 words are
 * bucket counts (including underflow and overflow buckets), which are added
 * element-wise.  The new data is read at an offset of one word to skip its
 * own parameter word.
 */
130 dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size)
132 int64_t arg = *existing++;
133 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
136 for (i = 0; i <= levels + 1; i++)
137 existing[i] = existing[i] + new[i + 1];
/*
 * Compute a weighted sum of an lquantize() aggregation: each bucket count
 * is weighted by a representative value -- (base - 1) for the underflow
 * bucket, the bucket's base value for each level, and (base + 1) for the
 * overflow bucket (note base has been advanced past the last level when the
 * loop exits).  Used by dt_aggregate_lquantizedcmp() to order aggregations.
 */
141 dt_aggregate_lquantizedsum(int64_t *lquanta)
143 int64_t arg = *lquanta++;
144 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
145 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
146 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;
147 long double total = (long double)lquanta[0] * (long double)(base - 1);
149 for (i = 0; i < levels; base += step, i++)
150 total += (long double)lquanta[i + 1] * (long double)base;
152 return (total + (long double)lquanta[levels + 1] *
153 (long double)(base + 1));
/*
 * Return the bucket count corresponding to the value zero in an lquantize()
 * aggregation, falling through to the overflow bucket when zero lies beyond
 * the quantized range (see the tie-breaking comment in
 * dt_aggregate_lquantizedcmp()).
 */
157 dt_aggregate_lquantizedzero(int64_t *lquanta)
159 int64_t arg = *lquanta++;
160 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
161 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
162 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;
167 for (i = 0; i < levels; base += step, i++) {
171 return (lquanta[i + 1]);
175 return (lquanta[levels + 1]);
/*
 * Compare two lquantize() aggregations: first by their weighted sums, then
 * -- on a tie -- by the weights at value zero.
 */
181 dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs)
183 long double lsum = dt_aggregate_lquantizedsum(lhs);
184 long double rsum = dt_aggregate_lquantizedsum(rhs);
185 int64_t lzero, rzero;
188 return (DT_LESSTHAN);
191 return (DT_GREATERTHAN);
194 * If they're both equal, then we will compare based on the weights at
195 * zero. If the weights at zero are equal (or if zero is not within
196 * the range of the linear quantization), then this will be judged a
197 * tie and will be resolved based on the key comparison.
199 lzero = dt_aggregate_lquantizedzero(lhs);
200 rzero = dt_aggregate_lquantizedzero(rhs);
203 return (DT_LESSTHAN);
206 return (DT_GREATERTHAN);
/*
 * Compare two quantize() aggregations: total each side's bucket counts
 * weighted by the power-of-two bucket value (the zero-valued bucket is
 * treated specially and excluded from the totals), compare the totals, and
 * break ties on the zero-bucket weights.
 */
212 dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs)
214 int nbuckets = DTRACE_QUANTIZE_NBUCKETS;
215 long double ltotal = 0, rtotal = 0;
216 int64_t lzero, rzero;
219 for (i = 0; i < nbuckets; i++) {
220 int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i);
222 if (bucketval == 0) {
227 ltotal += (long double)bucketval * (long double)lhs[i];
228 rtotal += (long double)bucketval * (long double)rhs[i];
232 return (DT_LESSTHAN);
235 return (DT_GREATERTHAN);
238 * If they're both equal, then we will compare based on the weights at
239 * zero. If the weights at zero are equal, then this will be judged a
240 * tie and will be resolved based on the key comparison.
243 return (DT_LESSTHAN);
246 return (DT_GREATERTHAN);
/*
 * Normalize a user-symbol record in place: data[0] is the pid, data[1] the
 * user address.  The target process is grabbed read-only and its symbol
 * table consulted (Plookup_by_addr/proc_addr2sym are the two platform
 * variants).  Vectored opens are bypassed, since no live process can be
 * grabbed.  NOTE(review): on a successful lookup the address is presumably
 * rewritten so that addresses within one function aggregate together; the
 * rewrite itself is not shown in this excerpt -- confirm against the full
 * source.
 */
252 dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data)
254 uint64_t pid = data[0];
255 uint64_t *pc = &data[1];
256 struct ps_prochandle *P;
259 if (dtp->dt_vector != NULL)
262 if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
265 dt_proc_lock(dtp, P);
268 if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0)
270 if (proc_addr2sym(P, *pc, NULL, 0, &sym) == 0)
274 dt_proc_unlock(dtp, P);
275 dt_proc_release(dtp, P);
/*
 * Normalize a user-module record in place: data[0] is the pid, data[1] the
 * user address.  The target process is grabbed read-only and the address
 * mapped to its containing mapping (Paddr_to_map/proc_addr2map are the two
 * platform variants).  Vectored opens are bypassed.  NOTE(review): on a
 * successful mapping the address is presumably rewritten so that addresses
 * within one module aggregate together; the rewrite itself is not shown in
 * this excerpt -- confirm against the full source.
 */
279 dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data)
281 uint64_t pid = data[0];
282 uint64_t *pc = &data[1];
283 struct ps_prochandle *P;
286 if (dtp->dt_vector != NULL)
289 if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
292 dt_proc_lock(dtp, P);
295 if ((map = Paddr_to_map(P, *pc)) != NULL)
297 if ((map = proc_addr2map(P, *pc)) != NULL)
301 dt_proc_unlock(dtp, P);
302 dt_proc_release(dtp, P);
/*
 * Normalize a kernel symbol record via dtrace_lookup_by_addr().
 * NOTE(review): the rewrite of the address on a successful lookup is not
 * visible in this excerpt -- confirm against the full source.
 */
306 dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data)
311 if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0)
/*
 * Normalize a kernel module record: walk the module list and, if the PC
 * falls within a module's text segment, rewrite it to the module's text
 * base so that all addresses within one module aggregate together.
 */
316 dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data)
321 if (dtp->dt_vector != NULL) {
323 * We don't have a way of just getting the module for a
324 * vectored open, and it doesn't seem to be worth defining
325 * one. This means that use of mod() won't get true
326 * aggregation in the postmortem case (some modules may
327 * appear more than once in aggregation output). It seems
328 * unlikely that anyone will ever notice or care...
333 for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL;
334 dmp = dt_list_next(dmp)) {
335 if (*pc - dmp->dm_text_va < dmp->dm_text_size) {
336 *pc = dmp->dm_text_va;
/*
 * Return the aggregation variable ID for a hash entry, caching it in the
 * aggdesc when it was only available via the compiler-generated first
 * record in the data.
 */
342 static dtrace_aggvarid_t
343 dt_aggregate_aggvarid(dt_ahashent_t *ent)
345 dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc;
346 caddr_t data = ent->dtahe_data.dtada_data;
347 dtrace_recdesc_t *rec = agg->dtagd_rec;
350 * First, we'll check the variable ID in the aggdesc. If it's valid,
351 * we'll return it. If not, we'll use the compiler-generated ID
352 * present as the first record.
354 if (agg->dtagd_varid != DTRACE_AGGVARIDNONE)
355 return (agg->dtagd_varid);
357 agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data +
360 return (agg->dtagd_varid);
/*
 * Snapshot the aggregation buffer for a single CPU (DTRACEIOC_AGGSNAP) and
 * fold each record into the aggregation hash:
 *
 *   - ENOENT from the ioctl means the CPU was unconfigured; this is treated
 *     as success.
 *   - Drops are reported through dt_handle_cpudrop().
 *   - The hash table itself is allocated lazily on first use.
 *   - usym()/umod()/sym()/mod() key records are normalized in place before
 *     hashing so that equivalent addresses aggregate together.
 *   - An entry matching on hash value, size and key bytes has the entry's
 *     aggregating action applied to its data (and to the per-CPU copy when
 *     DTRACE_A_PERCPU is in effect); otherwise a new entry is created,
 *     its aggregator chosen by the final record's action, and the entry is
 *     linked into both its hash chain and the list of all entries.
 */
365 dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu)
369 size_t offs, roffs, size, ndx;
372 dtrace_recdesc_t *rec;
373 dt_aggregate_t *agp = &dtp->dt_aggregate;
374 dtrace_aggdesc_t *agg;
375 dt_ahash_t *hash = &agp->dtat_hash;
377 dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b;
378 dtrace_aggdata_t *aggdata;
379 int flags = agp->dtat_flags;
384 if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) {
386 if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, &buf) == -1) {
388 if (errno == ENOENT) {
390 * If that failed with ENOENT, it may be because the
391 * CPU was unconfigured. This is okay; we'll just
392 * do nothing but return success.
397 return (dt_set_errno(dtp, errno));
400 if (buf->dtbd_drops != 0) {
401 if (dt_handle_cpudrop(dtp, cpu,
402 DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1)
406 if (buf->dtbd_size == 0)
409 if (hash->dtah_hash == NULL) {
412 hash->dtah_size = DTRACE_AHASHSIZE;
413 size = hash->dtah_size * sizeof (dt_ahashent_t *);
415 if ((hash->dtah_hash = malloc(size)) == NULL)
416 return (dt_set_errno(dtp, EDT_NOMEM));
418 bzero(hash->dtah_hash, size);
421 for (offs = 0; offs < buf->dtbd_size; ) {
423 * We're guaranteed to have an ID.
425 id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data +
428 if (id == DTRACE_AGGIDNONE) {
430 * This is filler to assure proper alignment of the
431 * next record; we simply ignore it.
437 if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0)
440 addr = buf->dtbd_data + offs;
441 size = agg->dtagd_size;
444 for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
445 rec = &agg->dtagd_rec[j];
446 roffs = rec->dtrd_offset;
448 switch (rec->dtrd_action) {
450 dt_aggregate_usym(dtp,
451 /* LINTED - alignment */
452 (uint64_t *)&addr[roffs]);
456 dt_aggregate_umod(dtp,
457 /* LINTED - alignment */
458 (uint64_t *)&addr[roffs]);
462 /* LINTED - alignment */
463 dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]);
467 /* LINTED - alignment */
468 dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]);
475 for (i = 0; i < rec->dtrd_size; i++)
476 hashval += addr[roffs + i];
479 ndx = hashval % hash->dtah_size;
481 for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) {
482 if (h->dtahe_hashval != hashval)
485 if (h->dtahe_size != size)
488 aggdata = &h->dtahe_data;
489 data = aggdata->dtada_data;
491 for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
492 rec = &agg->dtagd_rec[j];
493 roffs = rec->dtrd_offset;
495 for (i = 0; i < rec->dtrd_size; i++)
496 if (addr[roffs + i] != data[roffs + i])
501 * We found it. Now we need to apply the aggregating
502 * action on the data here.
504 rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
505 roffs = rec->dtrd_offset;
506 /* LINTED - alignment */
507 h->dtahe_aggregate((int64_t *)&data[roffs],
508 /* LINTED - alignment */
509 (int64_t *)&addr[roffs], rec->dtrd_size);
512 * If we're keeping per CPU data, apply the aggregating
513 * action there as well.
515 if (aggdata->dtada_percpu != NULL) {
516 data = aggdata->dtada_percpu[cpu];
518 /* LINTED - alignment */
519 h->dtahe_aggregate((int64_t *)data,
520 /* LINTED - alignment */
521 (int64_t *)&addr[roffs], rec->dtrd_size);
530 * If we're here, we couldn't find an entry for this record.
532 if ((h = malloc(sizeof (dt_ahashent_t))) == NULL)
533 return (dt_set_errno(dtp, EDT_NOMEM));
534 bzero(h, sizeof (dt_ahashent_t));
535 aggdata = &h->dtahe_data;
537 if ((aggdata->dtada_data = malloc(size)) == NULL) {
539 return (dt_set_errno(dtp, EDT_NOMEM));
542 bcopy(addr, aggdata->dtada_data, size);
543 aggdata->dtada_size = size;
544 aggdata->dtada_desc = agg;
545 aggdata->dtada_handle = dtp;
546 (void) dt_epid_lookup(dtp, agg->dtagd_epid,
547 &aggdata->dtada_edesc, &aggdata->dtada_pdesc);
548 aggdata->dtada_normal = 1;
550 h->dtahe_hashval = hashval;
551 h->dtahe_size = size;
552 (void) dt_aggregate_aggvarid(h);
554 rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
556 if (flags & DTRACE_A_PERCPU) {
557 int max_cpus = agp->dtat_maxcpu;
558 caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t));
560 if (percpu == NULL) {
561 free(aggdata->dtada_data);
563 return (dt_set_errno(dtp, EDT_NOMEM));
566 for (j = 0; j < max_cpus; j++) {
567 percpu[j] = malloc(rec->dtrd_size);
569 if (percpu[j] == NULL) {
573 free(aggdata->dtada_data);
575 return (dt_set_errno(dtp, EDT_NOMEM));
579 bcopy(&addr[rec->dtrd_offset],
580 percpu[j], rec->dtrd_size);
582 bzero(percpu[j], rec->dtrd_size);
586 aggdata->dtada_percpu = percpu;
589 switch (rec->dtrd_action) {
591 h->dtahe_aggregate = dt_aggregate_min;
595 h->dtahe_aggregate = dt_aggregate_max;
598 case DTRACEAGG_LQUANTIZE:
599 h->dtahe_aggregate = dt_aggregate_lquantize;
602 case DTRACEAGG_COUNT:
605 case DTRACEAGG_STDDEV:
606 case DTRACEAGG_QUANTIZE:
607 h->dtahe_aggregate = dt_aggregate_count;
611 return (dt_set_errno(dtp, EDT_BADAGG));
614 if (hash->dtah_hash[ndx] != NULL)
615 hash->dtah_hash[ndx]->dtahe_prev = h;
617 h->dtahe_next = hash->dtah_hash[ndx];
618 hash->dtah_hash[ndx] = h;
620 if (hash->dtah_all != NULL)
621 hash->dtah_all->dtahe_prevall = h;
623 h->dtahe_nextall = hash->dtah_all;
626 offs += agg->dtagd_size;
/*
 * Public snapshot entry point: rate-limited by the "aggrate" option
 * (dt_lastagg tracks the last snapshot time), then snapshots each enabled
 * CPU via dt_aggregate_snap_cpu().
 */
633 dtrace_aggregate_snap(dtrace_hdl_t *dtp)
636 dt_aggregate_t *agp = &dtp->dt_aggregate;
637 hrtime_t now = gethrtime();
638 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE];
640 if (dtp->dt_lastagg != 0) {
641 if (now - dtp->dt_lastagg < interval)
644 dtp->dt_lastagg += interval;
646 dtp->dt_lastagg = now;
650 return (dt_set_errno(dtp, EINVAL));
652 if (agp->dtat_buf.dtbd_size == 0)
655 for (i = 0; i < agp->dtat_ncpus; i++) {
656 if ((rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i])))
/*
 * qsort(3C)-style comparison of two hash entries by their record counts;
 * used as a cheap first-pass ordering before key/value comparison.
 */
664 dt_aggregate_hashcmp(const void *lhs, const void *rhs)
666 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
667 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
668 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
669 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
671 if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
672 return (DT_LESSTHAN);
674 if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
675 return (DT_GREATERTHAN);
/*
 * qsort(3C)-style comparison of two hash entries by their aggregation
 * variable IDs (as returned by dt_aggregate_aggvarid()).
 */
681 dt_aggregate_varcmp(const void *lhs, const void *rhs)
683 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
684 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
685 dtrace_aggvarid_t lid, rid;
687 lid = dt_aggregate_aggvarid(lh);
688 rid = dt_aggregate_aggvarid(rh);
691 return (DT_LESSTHAN);
694 return (DT_GREATERTHAN);
/*
 * Compare the key records of two aggregation entries.  The comparison
 * starts at the record selected by the "aggsortkeypos" option (dt_keypos),
 * wrapping around the record array.  Each record pair is compared first by
 * size, then by value: as a native integer for 1/2/4/8-byte records, as a
 * pair of 64-bit words for uaddr()-style records, and byte-wise otherwise.
 */
700 dt_aggregate_keycmp(const void *lhs, const void *rhs)
702 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
703 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
704 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
705 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
706 dtrace_recdesc_t *lrec, *rrec;
708 int rval, i, j, keypos, nrecs;
710 if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
713 nrecs = lagg->dtagd_nrecs - 1;
714 assert(nrecs == ragg->dtagd_nrecs - 1);
716 keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos;
718 for (i = 1; i < nrecs; i++) {
720 int ndx = i + keypos;
723 ndx = ndx - nrecs + 1;
725 lrec = &lagg->dtagd_rec[ndx];
726 rrec = &ragg->dtagd_rec[ndx];
728 ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset;
729 rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset;
731 if (lrec->dtrd_size < rrec->dtrd_size)
732 return (DT_LESSTHAN);
734 if (lrec->dtrd_size > rrec->dtrd_size)
735 return (DT_GREATERTHAN);
737 switch (lrec->dtrd_size) {
738 case sizeof (uint64_t):
739 /* LINTED - alignment */
740 lval = *((uint64_t *)ldata);
741 /* LINTED - alignment */
742 rval = *((uint64_t *)rdata);
745 case sizeof (uint32_t):
746 /* LINTED - alignment */
747 lval = *((uint32_t *)ldata);
748 /* LINTED - alignment */
749 rval = *((uint32_t *)rdata);
752 case sizeof (uint16_t):
753 /* LINTED - alignment */
754 lval = *((uint16_t *)ldata);
755 /* LINTED - alignment */
756 rval = *((uint16_t *)rdata);
759 case sizeof (uint8_t):
760 lval = *((uint8_t *)ldata);
761 rval = *((uint8_t *)rdata);
765 switch (lrec->dtrd_action) {
767 case DTRACEACT_UADDR:
769 for (j = 0; j < 2; j++) {
770 /* LINTED - alignment */
771 lval = ((uint64_t *)ldata)[j];
772 /* LINTED - alignment */
773 rval = ((uint64_t *)rdata)[j];
776 return (DT_LESSTHAN);
779 return (DT_GREATERTHAN);
785 for (j = 0; j < lrec->dtrd_size; j++) {
786 lval = ((uint8_t *)ldata)[j];
787 rval = ((uint8_t *)rdata)[j];
790 return (DT_LESSTHAN);
793 return (DT_GREATERTHAN);
801 return (DT_LESSTHAN);
804 return (DT_GREATERTHAN);
/*
 * Compare the value records of two aggregation entries.  After the cheap
 * record-count and layout checks, the final comparison dispatches on the
 * aggregating action: avg(), stddev(), quantize() and lquantize() each use
 * their specialized comparator; count() and the remaining scalar actions
 * fall through to dt_aggregate_countcmp().
 */
811 dt_aggregate_valcmp(const void *lhs, const void *rhs)
813 dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
814 dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
815 dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
816 dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
817 caddr_t ldata = lh->dtahe_data.dtada_data;
818 caddr_t rdata = rh->dtahe_data.dtada_data;
819 dtrace_recdesc_t *lrec, *rrec;
820 int64_t *laddr, *raddr;
823 if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
826 if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
827 return (DT_GREATERTHAN);
829 if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
830 return (DT_LESSTHAN);
832 for (i = 0; i < lagg->dtagd_nrecs; i++) {
833 lrec = &lagg->dtagd_rec[i];
834 rrec = &ragg->dtagd_rec[i];
836 if (lrec->dtrd_offset < rrec->dtrd_offset)
837 return (DT_LESSTHAN);
839 if (lrec->dtrd_offset > rrec->dtrd_offset)
840 return (DT_GREATERTHAN);
842 if (lrec->dtrd_action < rrec->dtrd_action)
843 return (DT_LESSTHAN);
845 if (lrec->dtrd_action > rrec->dtrd_action)
846 return (DT_GREATERTHAN);
849 laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset);
850 raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset);
852 switch (lrec->dtrd_action) {
854 rval = dt_aggregate_averagecmp(laddr, raddr);
857 case DTRACEAGG_STDDEV:
858 rval = dt_aggregate_stddevcmp(laddr, raddr);
861 case DTRACEAGG_QUANTIZE:
862 rval = dt_aggregate_quantizedcmp(laddr, raddr);
865 case DTRACEAGG_LQUANTIZE:
866 rval = dt_aggregate_lquantizedcmp(laddr, raddr);
869 case DTRACEAGG_COUNT:
873 rval = dt_aggregate_countcmp(laddr, raddr);
/*
 * Compare by value, falling back to the key comparison to break ties.
 */
884 dt_aggregate_valkeycmp(const void *lhs, const void *rhs)
888 if ((rval = dt_aggregate_valcmp(lhs, rhs)) != 0)
892 * If we're here, the values for the two aggregation elements are
893 * equal. We already know that the key layout is the same for the two
894 * elements; we must now compare the keys themselves as a tie-breaker.
896 return (dt_aggregate_keycmp(lhs, rhs));
/*
 * Compare by key, breaking ties on the aggregation variable ID.
 */
900 dt_aggregate_keyvarcmp(const void *lhs, const void *rhs)
904 if ((rval = dt_aggregate_keycmp(lhs, rhs)) != 0)
907 return (dt_aggregate_varcmp(lhs, rhs));
/*
 * Compare by aggregation variable ID, breaking ties on the key.
 */
911 dt_aggregate_varkeycmp(const void *lhs, const void *rhs)
915 if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0)
918 return (dt_aggregate_keycmp(lhs, rhs));
/*
 * Compare by value (with key tie-break), then by aggregation variable ID.
 */
922 dt_aggregate_valvarcmp(const void *lhs, const void *rhs)
926 if ((rval = dt_aggregate_valkeycmp(lhs, rhs)) != 0)
929 return (dt_aggregate_varcmp(lhs, rhs));
/*
 * Compare by aggregation variable ID, then by value (with key tie-break).
 */
933 dt_aggregate_varvalcmp(const void *lhs, const void *rhs)
937 if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0)
940 return (dt_aggregate_valkeycmp(lhs, rhs));
/* Reverse-order variant of dt_aggregate_keyvarcmp(): operands swapped. */
944 dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs)
946 return (dt_aggregate_keyvarcmp(rhs, lhs));
/* Reverse-order variant of dt_aggregate_varkeycmp(): operands swapped. */
950 dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs)
952 return (dt_aggregate_varkeycmp(rhs, lhs));
/* Reverse-order variant of dt_aggregate_valvarcmp(): operands swapped. */
956 dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs)
958 return (dt_aggregate_valvarcmp(rhs, lhs));
/* Reverse-order variant of dt_aggregate_varvalcmp(): operands swapped. */
962 dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs)
964 return (dt_aggregate_varvalcmp(rhs, lhs));
/*
 * Compare two bundles of hash entries (NULL-terminated arrays laid out as
 * values followed by the representative key).  When sorting on keys
 * (dt_keysort), the representative key -- the last element -- is compared
 * first; otherwise, or on a key tie, the values are compared pairwise with
 * the key comparison serving as the final tie-breaker.
 */
968 dt_aggregate_bundlecmp(const void *lhs, const void *rhs)
970 dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs);
971 dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs);
976 * If we're sorting on keys, we need to scan until we find the
977 * last entry -- that's the representative key. (The order of
978 * the bundle is values followed by key to accommodate the
979 * default behavior of sorting by value.) If the keys are
980 * equal, we'll fall into the value comparison loop, below.
982 for (i = 0; lh[i + 1] != NULL; i++)
986 assert(rh[i + 1] == NULL);
988 if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0)
993 if (lh[i + 1] == NULL) {
995 * All of the values are equal; if we're sorting on
996 * keys, then we're only here because the keys were
997 * found to be equal and these records are therefore
998 * equal. If we're not sorting on keys, we'll use the
999 * key comparison from the representative key as the
1006 assert(rh[i + 1] == NULL);
1007 return (dt_aggregate_keycmp(&lh[i], &rh[i]));
1009 if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0)
/*
 * Prepare the aggregate for snapshots: size and allocate the CPU list,
 * allocate the snapshot buffer using the "aggsize" option as reloaded from
 * the kernel, and record the set of CPUs to snapshot -- the single CPU
 * named by the "cpu" option, or every online CPU when it is DTRACE_CPUALL.
 */
1016 dt_aggregate_go(dtrace_hdl_t *dtp)
1018 dt_aggregate_t *agp = &dtp->dt_aggregate;
1019 dtrace_optval_t size, cpu;
1020 dtrace_bufdesc_t *buf = &agp->dtat_buf;
1023 assert(agp->dtat_maxcpu == 0);
1024 assert(agp->dtat_ncpu == 0);
1025 assert(agp->dtat_cpus == NULL);
1027 agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
1028 agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX);
1029 agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t));
1031 if (agp->dtat_cpus == NULL)
1032 return (dt_set_errno(dtp, EDT_NOMEM));
1035 * Use the aggregation buffer size as reloaded from the kernel.
1037 size = dtp->dt_options[DTRACEOPT_AGGSIZE];
1039 rval = dtrace_getopt(dtp, "aggsize", &size);
1042 if (size == 0 || size == DTRACEOPT_UNSET)
1045 buf = &agp->dtat_buf;
1046 buf->dtbd_size = size;
1048 if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL)
1049 return (dt_set_errno(dtp, EDT_NOMEM));
1052 * Now query for the CPUs enabled.
1054 rval = dtrace_getopt(dtp, "cpu", &cpu);
1055 assert(rval == 0 && cpu != DTRACEOPT_UNSET);
1057 if (cpu != DTRACE_CPUALL) {
1058 assert(cpu < agp->dtat_ncpu);
1059 agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu;
1064 agp->dtat_ncpus = 0;
1065 for (i = 0; i < agp->dtat_maxcpu; i++) {
1066 if (dt_status(dtp, i) == -1)
1069 agp->dtat_cpus[agp->dtat_ncpus++] = i;
/*
 * Act on the value returned by an aggregation walker callback:
 *
 *   DTRACE_AGGWALK_NEXT        continue to the next entry
 *   DTRACE_AGGWALK_CLEAR       zero the entry's value data (preserving the
 *                              lquantize() parameter word) and any per-CPU
 *                              copies
 *   DTRACE_AGGWALK_ERROR       fail with the errno already set
 *   DTRACE_AGGWALK_ABORT       fail with EDT_DIRABORT
 *   DTRACE_AGGWALK_DENORMALIZE /
 *   DTRACE_AGGWALK_NORMALIZE   adjust the entry's normalization state
 *   DTRACE_AGGWALK_REMOVE      unlink the entry from its hash chain and
 *                              the all-entries list, then free its data
 *
 * Any other value fails with EDT_BADRVAL.
 */
1076 dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval)
1078 dt_aggregate_t *agp = &dtp->dt_aggregate;
1079 dtrace_aggdata_t *data;
1080 dtrace_aggdesc_t *aggdesc;
1081 dtrace_recdesc_t *rec;
1085 case DTRACE_AGGWALK_NEXT:
1088 case DTRACE_AGGWALK_CLEAR: {
1089 uint32_t size, offs = 0;
1091 aggdesc = h->dtahe_data.dtada_desc;
1092 rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
1093 size = rec->dtrd_size;
1094 data = &h->dtahe_data;
1096 if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
1097 offs = sizeof (uint64_t);
1098 size -= sizeof (uint64_t);
1101 bzero(&data->dtada_data[rec->dtrd_offset] + offs, size);
1103 if (data->dtada_percpu == NULL)
1106 for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++)
1107 bzero(data->dtada_percpu[i] + offs, size);
1111 case DTRACE_AGGWALK_ERROR:
1113 * We assume that errno is already set in this case.
1115 return (dt_set_errno(dtp, errno));
1117 case DTRACE_AGGWALK_ABORT:
1118 return (dt_set_errno(dtp, EDT_DIRABORT));
1120 case DTRACE_AGGWALK_DENORMALIZE:
1121 h->dtahe_data.dtada_normal = 1;
1124 case DTRACE_AGGWALK_NORMALIZE:
1125 if (h->dtahe_data.dtada_normal == 0) {
1126 h->dtahe_data.dtada_normal = 1;
1127 return (dt_set_errno(dtp, EDT_BADRVAL));
1132 case DTRACE_AGGWALK_REMOVE: {
1133 dtrace_aggdata_t *aggdata = &h->dtahe_data;
1134 int max_cpus = agp->dtat_maxcpu;
1137 * First, remove this hash entry from its hash chain.
1139 if (h->dtahe_prev != NULL) {
1140 h->dtahe_prev->dtahe_next = h->dtahe_next;
1142 dt_ahash_t *hash = &agp->dtat_hash;
1143 size_t ndx = h->dtahe_hashval % hash->dtah_size;
1145 assert(hash->dtah_hash[ndx] == h);
1146 hash->dtah_hash[ndx] = h->dtahe_next;
1149 if (h->dtahe_next != NULL)
1150 h->dtahe_next->dtahe_prev = h->dtahe_prev;
1153 * Now remove it from the list of all hash entries.
1155 if (h->dtahe_prevall != NULL) {
1156 h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
1158 dt_ahash_t *hash = &agp->dtat_hash;
1160 assert(hash->dtah_all == h);
1161 hash->dtah_all = h->dtahe_nextall;
1164 if (h->dtahe_nextall != NULL)
1165 h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;
1168 * We're unlinked. We can safely destroy the data.
1170 if (aggdata->dtada_percpu != NULL) {
1171 for (i = 0; i < max_cpus; i++)
1172 free(aggdata->dtada_percpu[i]);
1173 free(aggdata->dtada_percpu);
1176 free(aggdata->dtada_data);
1183 return (dt_set_errno(dtp, EDT_BADRVAL));
/*
 * qsort(3C) wrapper that installs the sort-affecting globals (dt_revsort,
 * dt_keysort, dt_keypos) from the "aggsortrev"/"aggsortkey"/"aggsortkeypos"
 * options, choosing the option-appropriate default comparison function when
 * none is supplied.  The previous global values are saved on entry; callers
 * serialize access through dt_qsort_lock (see dt_aggregate_walk_sorted()).
 * NOTE(review): the restore of the saved values is not visible in this
 * excerpt -- confirm against the full source.
 */
1190 dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
1191 int (*compar)(const void *, const void *))
1193 int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
1194 dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];
1196 dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
1197 dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);
1199 if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
1200 dt_keypos = (int)keyposopt;
1205 if (compar == NULL) {
1207 compar = dt_aggregate_varvalcmp;
1209 compar = dt_aggregate_varkeycmp;
1213 qsort(base, nel, width, compar);
/*
 * Walk every aggregation entry in hash order, applying func to each;
 * dt_aggwalk_rval() interprets the callback's return value (and may remove
 * the current entry, hence the next pointer is loaded first).
 */
1221 dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
1223 dt_ahashent_t *h, *next;
1224 dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;
1226 for (h = hash->dtah_all; h != NULL; h = next) {
1228 * dt_aggwalk_rval() can potentially remove the current hash
1229 * entry; we need to load the next hash entry before calling
1232 next = h->dtahe_nextall;
1234 if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
/*
 * Walk aggregation entries in sorted order: gather all entries into a
 * temporary array, sort it while holding dt_qsort_lock (using the
 * option-driven default sort when sfunc is NULL, or the caller's comparison
 * function otherwise), then apply func to each entry in order.
 */
1242 dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
1243 dtrace_aggregate_f *func, void *arg,
1244 int (*sfunc)(const void *, const void *))
1246 dt_aggregate_t *agp = &dtp->dt_aggregate;
1247 dt_ahashent_t *h, **sorted;
1248 dt_ahash_t *hash = &agp->dtat_hash;
1249 size_t i, nentries = 0;
1251 for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
1254 sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));
1259 for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
1262 (void) pthread_mutex_lock(&dt_qsort_lock);
1264 if (sfunc == NULL) {
1265 dt_aggregate_qsort(dtp, sorted, nentries,
1266 sizeof (dt_ahashent_t *), NULL);
1269 * If we've been explicitly passed a sorting function,
1270 * we'll use that -- ignoring the values of the "aggsortrev",
1271 * "aggsortkey" and "aggsortkeypos" options.
1273 qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
1276 (void) pthread_mutex_unlock(&dt_qsort_lock);
1278 for (i = 0; i < nentries; i++) {
1281 if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1) {
1282 dt_free(dtp, sorted);
1287 dt_free(dtp, sorted);
/* Public sorted walk using the option-driven default sort order. */
1292 dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
1293 dtrace_aggregate_f *func, void *arg)
1295 return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
/* Public sorted walk: by aggregation variable, then by key. */
1299 dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
1300 dtrace_aggregate_f *func, void *arg)
1302 return (dt_aggregate_walk_sorted(dtp, func,
1303 arg, dt_aggregate_varkeycmp));
/* Public sorted walk: by aggregation variable, then by value. */
1307 dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
1308 dtrace_aggregate_f *func, void *arg)
1310 return (dt_aggregate_walk_sorted(dtp, func,
1311 arg, dt_aggregate_varvalcmp));
/* Public sorted walk: by key, then by aggregation variable. */
1315 dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
1316 dtrace_aggregate_f *func, void *arg)
1318 return (dt_aggregate_walk_sorted(dtp, func,
1319 arg, dt_aggregate_keyvarcmp));
/* Public sorted walk: by value, then by aggregation variable. */
1323 dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
1324 dtrace_aggregate_f *func, void *arg)
1326 return (dt_aggregate_walk_sorted(dtp, func,
1327 arg, dt_aggregate_valvarcmp));
/* Public sorted walk: reverse of the variable-then-key order. */
1331 dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
1332 dtrace_aggregate_f *func, void *arg)
1334 return (dt_aggregate_walk_sorted(dtp, func,
1335 arg, dt_aggregate_varkeyrevcmp));
/* Public sorted walk: reverse of the variable-then-value order. */
1339 dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
1340 dtrace_aggregate_f *func, void *arg)
1342 return (dt_aggregate_walk_sorted(dtp, func,
1343 arg, dt_aggregate_varvalrevcmp));
/* Public sorted walk: reverse of the key-then-variable order. */
1347 dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
1348 dtrace_aggregate_f *func, void *arg)
1350 return (dt_aggregate_walk_sorted(dtp, func,
1351 arg, dt_aggregate_keyvarrevcmp));
/* Public sorted walk: reverse of the value-then-variable order. */
1355 dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
1356 dtrace_aggregate_f *func, void *arg)
1358 return (dt_aggregate_walk_sorted(dtp, func,
1359 arg, dt_aggregate_valvarrevcmp));
1363 dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
1364 int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
1366 dt_aggregate_t *agp = &dtp->dt_aggregate;
1367 dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
1368 const dtrace_aggdata_t **data;
1369 dt_ahashent_t *zaggdata = NULL;
1370 dt_ahash_t *hash = &agp->dtat_hash;
1371 size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
1372 dtrace_aggvarid_t max = 0, aggvar;
1373 int rval = -1, *map, *remap = NULL;
1375 dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];
1378 * If the sorting position is greater than the number of aggregation
1379 * variable IDs, we silently set it to 0.
1381 if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
1385 * First we need to translate the specified aggregation variable IDs
1386 * into a linear map that will allow us to translate an aggregation
1387 * variable ID into its position in the specified aggvars.
1389 for (i = 0; i < naggvars; i++) {
1390 if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
1391 return (dt_set_errno(dtp, EDT_BADAGGVAR));
1393 if (aggvars[i] > max)
1397 if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
1400 zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));
1402 if (zaggdata == NULL)
1405 for (i = 0; i < naggvars; i++) {
1406 int ndx = i + sortpos;
1408 if (ndx >= naggvars)
1411 aggvar = aggvars[ndx];
1412 assert(aggvar <= max);
1416 * We have an aggregation variable that is present
1417 * more than once in the array of aggregation
1418 * variables. While it's unclear why one might want
1419 * to do this, it's legal. To support this construct,
1420 * we will allocate a remap that will indicate the
1421 * position from which this aggregation variable
1422 * should be pulled. (That is, where the remap will
1423 * map from one position to another.)
1425 if (remap == NULL) {
1426 remap = dt_zalloc(dtp, naggvars * sizeof (int));
1433 * Given that the variable is already present, assert
1434 * that following through the mapping and adjusting
1435 * for the sort position yields the same aggregation
1438 assert(aggvars[(map[aggvar] - 1 + sortpos) %
1439 naggvars] == aggvars[ndx]);
1441 remap[i] = map[aggvar];
1445 map[aggvar] = i + 1;
1449 * We need to take two passes over the data to size our allocation, so
1450 * we'll use the first pass to also fill in the zero-filled data to be
1451 * used to properly format a zero-valued aggregation.
1453 for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
1454 dtrace_aggvarid_t id;
1457 if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
1460 if (zaggdata[ndx - 1].dtahe_size == 0) {
1461 zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
1462 zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
1468 if (nentries == 0) {
1470 * We couldn't find any entries; there is nothing else to do.
1477 * Before we sort the data, we're going to look for any holes in our
1478 * zero-filled data. This will occur if an aggregation variable that
1479 * we are being asked to print has not yet been assigned the result of
1480 * any aggregating action for _any_ tuple. The issue becomes that we
1481 * would like a zero value to be printed for all columns for this
1482 * aggregation, but without any record description, we don't know the
1483 * aggregating action that corresponds to the aggregation variable. To
1484 * try to find a match, we're simply going to lookup aggregation IDs
1485 * (which are guaranteed to be contiguous and to start from 1), looking
1486 * for the specified aggregation variable ID. If we find a match,
1487 * we'll use that. If we iterate over all aggregation IDs and don't
1488 * find a match, then we must be an anonymous enabling. (Anonymous
1489 * enablings can't currently derive either aggregation variable IDs or
1490 * aggregation variable names given only an aggregation ID.) In this
1491 * obscure case (anonymous enabling, multiple aggregation printa() with
1492 * some aggregations not represented for any tuple), our defined
1493 * behavior is that the zero will be printed in the format of the first
1494 * aggregation variable that contains any non-zero value.
1496 for (i = 0; i < naggvars; i++) {
1497 if (zaggdata[i].dtahe_size == 0) {
1498 dtrace_aggvarid_t aggvar;
1500 aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
1501 assert(zaggdata[i].dtahe_data.dtada_data == NULL);
1503 for (j = DTRACE_AGGIDNONE + 1; ; j++) {
1504 dtrace_aggdesc_t *agg;
1505 dtrace_aggdata_t *aggdata;
1507 if (dt_aggid_lookup(dtp, j, &agg) != 0)
1510 if (agg->dtagd_varid != aggvar)
1514 * We have our description -- now we need to
1515 * cons up the zaggdata entry for it.
1517 aggdata = &zaggdata[i].dtahe_data;
1518 aggdata->dtada_size = agg->dtagd_size;
1519 aggdata->dtada_desc = agg;
1520 aggdata->dtada_handle = dtp;
1521 (void) dt_epid_lookup(dtp, agg->dtagd_epid,
1522 &aggdata->dtada_edesc,
1523 &aggdata->dtada_pdesc);
1524 aggdata->dtada_normal = 1;
1525 zaggdata[i].dtahe_hashval = 0;
1526 zaggdata[i].dtahe_size = agg->dtagd_size;
1530 if (zaggdata[i].dtahe_size == 0) {
1534 * We couldn't find this aggregation, meaning
1535 * that we have never seen it before for any
1536 * tuple _and_ this is an anonymous enabling.
1537 * That is, we're in the obscure case outlined
1538 * above. In this case, our defined behavior
1539 * is to format the data in the format of the
1540 * first non-zero aggregation -- of which, of
1541 * course, we know there to be at least one
1542 * (or nentries would have been zero).
1544 for (j = 0; j < naggvars; j++) {
1545 if (zaggdata[j].dtahe_size != 0)
1549 assert(j < naggvars);
1550 zaggdata[i] = zaggdata[j];
1552 data = zaggdata[i].dtahe_data.dtada_data;
1553 assert(data != NULL);
1559 * Now we need to allocate our zero-filled data for use for
1560 * aggregations that don't have a value corresponding to a given key.
1562 for (i = 0; i < naggvars; i++) {
1563 dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
1564 dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
1565 dtrace_recdesc_t *rec;
1569 zsize = zaggdata[i].dtahe_size;
1572 if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
1574 * If we failed to allocated some zero-filled data, we
1575 * need to zero out the remaining dtada_data pointers
1576 * to prevent the wrong data from being freed below.
1578 for (j = i; j < naggvars; j++)
1579 zaggdata[j].dtahe_data.dtada_data = NULL;
1583 aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
1586 * First, the easy bit. To maintain compatibility with
1587 * consumers that pull the compiler-generated ID out of the
1588 * data, we put that ID at the top of the zero-filled data.
1590 rec = &aggdesc->dtagd_rec[0];
1591 /* LINTED - alignment */
1592 *((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;
1594 rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
1597 * Now for the more complicated part. If (and only if) this
1598 * is an lquantize() aggregating action, zero-filled data is
1599 * not equivalent to an empty record: we must also get the
1600 * parameters for the lquantize().
1602 if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
1603 if (aggdata->dtada_data != NULL) {
1605 * The easier case here is if we actually have
1606 * some prototype data -- in which case we
1607 * manually dig it out of the aggregation
1610 /* LINTED - alignment */
1611 larg = *((uint64_t *)(aggdata->dtada_data +
1615 * We don't have any prototype data. As a
1616 * result, we know that we _do_ have the
1617 * compiler-generated information. (If this
1618 * were an anonymous enabling, all of our
1619 * zero-filled data would have prototype data
1620 * -- either directly or indirectly.) So as
1621 * gross as it is, we'll grovel around in the
1622 * compiler-generated information to find the
1623 * lquantize() parameters.
1625 dtrace_stmtdesc_t *sdp;
1629 sdp = (dtrace_stmtdesc_t *)(uintptr_t)
1630 aggdesc->dtagd_rec[0].dtrd_uarg;
1631 aid = sdp->dtsd_aggdata;
1632 isp = (dt_idsig_t *)aid->di_data;
1633 assert(isp->dis_auxinfo != 0);
1634 larg = isp->dis_auxinfo;
1637 /* LINTED - alignment */
1638 *((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
1641 aggdata->dtada_data = zdata;
1645 * Now that we've dealt with setting up our zero-filled data, we can
1646 * allocate our sorted array, and take another pass over the data to
1649 sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));
1654 for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
1655 dtrace_aggvarid_t id;
1657 if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
1663 assert(i == nentries);
1666 * We've loaded our array; now we need to sort by value to allow us
1667 * to create bundles of like value. We're going to acquire the
1668 * dt_qsort_lock here, and hold it across all of our subsequent
1669 * comparison and sorting.
1671 (void) pthread_mutex_lock(&dt_qsort_lock);
1673 qsort(sorted, nentries, sizeof (dt_ahashent_t *),
1674 dt_aggregate_keyvarcmp);
1677 * Now we need to go through and create bundles. Because the number
1678 * of bundles is bounded by the size of the sorted array, we're going
1679 * to reuse the underlying storage. And note that "bundle" is an
1680 * array of pointers to arrays of pointers to dt_ahashent_t -- making
1681 * its type (regrettably) "dt_ahashent_t ***". (Regrettable because
1682 * '*' -- like '_' and 'X' -- should never appear in triplicate in
1685 bundle = (dt_ahashent_t ***)sorted;
1687 for (i = 1, start = 0; i <= nentries; i++) {
1689 dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
1693 * We have a bundle boundary. Everything from start to
1694 * (i - 1) belongs in one bundle.
1696 assert(i - start <= naggvars);
1697 bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);
1699 if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
1700 (void) pthread_mutex_unlock(&dt_qsort_lock);
1704 for (j = start; j < i; j++) {
1705 dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);
1708 assert(map[id] != 0);
1709 assert(map[id] - 1 < naggvars);
1710 assert(nbundle[map[id] - 1] == NULL);
1711 nbundle[map[id] - 1] = sorted[j];
1713 if (nbundle[naggvars] == NULL)
1714 nbundle[naggvars] = sorted[j];
1717 for (j = 0; j < naggvars; j++) {
1718 if (nbundle[j] != NULL)
1722 * Before we assume that this aggregation variable
1723 * isn't present (and fall back to using the
1724 * zero-filled data allocated earlier), check the
1725 * remap. If we have a remapping, we'll drop it in
1726 * here. Note that we might be remapping an
1727 * aggregation variable that isn't present for this
1728 * key; in this case, the aggregation data that we
1729 * copy will point to the zeroed data.
1731 if (remap != NULL && remap[j]) {
1732 assert(remap[j] - 1 < j);
1733 assert(nbundle[remap[j] - 1] != NULL);
1734 nbundle[j] = nbundle[remap[j] - 1];
1736 nbundle[j] = &zaggdata[j];
1740 bundle[nbundles++] = nbundle;
1745 * Now we need to re-sort based on the first value.
1747 dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
1748 dt_aggregate_bundlecmp);
1750 (void) pthread_mutex_unlock(&dt_qsort_lock);
1753 * We're done! Now we just need to go back over the sorted bundles,
1754 * calling the function.
1756 data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));
1758 for (i = 0; i < nbundles; i++) {
1759 for (j = 0; j < naggvars; j++)
1762 for (j = 0; j < naggvars; j++) {
1763 int ndx = j - sortpos;
1768 assert(bundle[i][ndx] != NULL);
1769 data[j + 1] = &bundle[i][ndx]->dtahe_data;
1772 for (j = 0; j < naggvars; j++)
1773 assert(data[j + 1] != NULL);
1776 * The representative key is the last element in the bundle.
1777 * Assert that we have one, and then set it to be the first
1780 assert(bundle[i][j] != NULL);
1781 data[0] = &bundle[i][j]->dtahe_data;
1783 if ((rval = func(data, naggvars + 1, arg)) == -1)
1789 for (i = 0; i < nbundles; i++)
1790 dt_free(dtp, bundle[i]);
1792 if (zaggdata != NULL) {
1793 for (i = 0; i < naggvars; i++)
1794 dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
1797 dt_free(dtp, zaggdata);
1798 dt_free(dtp, sorted);
1799 dt_free(dtp, remap);
/*
 * Walk the aggregation data in the handle, printing each aggregation via
 * the dt_print_agg callback.  A caller-supplied walker may be used;
 * otherwise the value-sorted walker is selected.  Returns -1 with the
 * handle's errno set if the walk fails.
 */
dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
    dtrace_aggregate_walk_f *func)
{
	dt_print_aggdata_t pd;	/* print state threaded through the walker */

	/*
	 * Request that all aggregations be printed, not merely those that
	 * have not yet been printed.
	 */
	pd.dtpa_allunprint = 1;

	/*
	 * NOTE(review): this assignment defaults the walker to
	 * dtrace_aggregate_walk_sorted; presumably it is guarded by a
	 * "func == NULL" check in the full source -- confirm, since an
	 * unconditional assignment would discard a caller-provided walker.
	 */
	func = dtrace_aggregate_walk_sorted;

	/*
	 * Run the walk; dt_print_agg consumes pd (which presumably also
	 * carries dtp and fp -- their initialization is not visible here).
	 * On walker failure, propagate the error through dt_set_errno().
	 */
	if ((*func)(dtp, dt_print_agg, &pd) == -1)
		return (dt_set_errno(dtp, dtp->dt_errno));
/*
 * Reset every aggregation in the handle's aggregate hash to zeroed
 * values.  For each entry on the all-entries list, the data for the
 * entry's final record -- which holds the aggregating action's value --
 * is zeroed in the consolidated buffer, as is any per-CPU copy of that
 * value.  The keys and the hash structure itself are left intact, so
 * subsequent snapshots re-aggregate from empty values.
 */
dtrace_aggregate_clear(dtrace_hdl_t *dtp)
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i, max_cpus = agp->dtat_maxcpu;	/* bound for per-CPU loop */

	/* Walk every hash entry via the all-entries chain, bucket-agnostic. */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		aggdesc = h->dtahe_data.dtada_desc;
		/* The last record describes the aggregated value to clear. */
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		data = &h->dtahe_data;

		/* Zero the aggregated value in the consolidated data. */
		bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);

		/*
		 * NOTE(review): entries without per-CPU data are presumably
		 * skipped here (a "continue" appears to be elided from this
		 * view) before the per-CPU copies are zeroed below.
		 */
		if (data->dtada_percpu == NULL)

		/* Zero each per-CPU copy of the aggregated value as well. */
		for (i = 0; i < max_cpus; i++)
			bzero(data->dtada_percpu[i], rec->dtrd_size);
1851 dt_aggregate_destroy(dtrace_hdl_t *dtp)
1853 dt_aggregate_t *agp = &dtp->dt_aggregate;
1854 dt_ahash_t *hash = &agp->dtat_hash;
1855 dt_ahashent_t *h, *next;
1856 dtrace_aggdata_t *aggdata;
1857 int i, max_cpus = agp->dtat_maxcpu;
1859 if (hash->dtah_hash == NULL) {
1860 assert(hash->dtah_all == NULL);
1862 free(hash->dtah_hash);
1864 for (h = hash->dtah_all; h != NULL; h = next) {
1865 next = h->dtahe_nextall;
1867 aggdata = &h->dtahe_data;
1869 if (aggdata->dtada_percpu != NULL) {
1870 for (i = 0; i < max_cpus; i++)
1871 free(aggdata->dtada_percpu[i]);
1872 free(aggdata->dtada_percpu);
1875 free(aggdata->dtada_data);
1879 hash->dtah_hash = NULL;
1880 hash->dtah_all = NULL;
1881 hash->dtah_size = 0;
1884 free(agp->dtat_buf.dtbd_data);
1885 free(agp->dtat_cpus);