/*
 * Copyright (C) 2009  Internet Systems Consortium, Inc. ("ISC")
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* $Id: stats.c,v 1.3.6.2 2009/01/29 23:47:44 tbox Exp $ */

/*! \file */
#include <config.h>

#include <string.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/mutex.h>
#include <isc/platform.h>
#include <isc/print.h>
#include <isc/rwlock.h>
#include <isc/stats.h>
#include <isc/util.h>
#define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)

/*
 * Use two 32-bit counter fields per statistic when the platform has an
 * atomic 32-bit add (XADD) but no atomic 64-bit add (XADDQ), and an
 * efficient rwlock is available to make the split update safe to read.
 */
#ifndef ISC_STATS_USEMULTIFIELDS
#if defined(ISC_RWLOCK_USEATOMIC) && defined(ISC_PLATFORM_HAVEXADD) && !defined(ISC_PLATFORM_HAVEXADDQ)
#define ISC_STATS_USEMULTIFIELDS 1
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#endif	/* ISC_STATS_USEMULTIFIELDS */
46 #if ISC_STATS_USEMULTIFIELDS
52 typedef isc_uint64_t isc_stat_t;
62 unsigned int references; /* locked by lock */
65 * Locked by counterlock or unlocked if efficient rwlock is not
68 #ifdef ISC_RWLOCK_USEATOMIC
69 isc_rwlock_t counterlock;
74 * We don't want to lock the counters while we are dumping, so we first
75 * copy the current counter values into a local array. This buffer
76 * will be used as the copy destination. It's allocated on creation
77 * of the stats structure so that the dump operation won't fail due
78 * to memory allocation failure.
79 * XXX: this approach is weird for non-threaded build because the
80 * additional memory and the copy overhead could be avoided. We prefer
81 * simplicity here, however, under the assumption that this function
82 * should be only rarely called.
84 isc_uint64_t *copiedcounters;
88 create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
90 isc_result_t result = ISC_R_SUCCESS;
92 REQUIRE(statsp != NULL && *statsp == NULL);
94 stats = isc_mem_get(mctx, sizeof(*stats));
96 return (ISC_R_NOMEMORY);
98 result = isc_mutex_init(&stats->lock);
99 if (result != ISC_R_SUCCESS)
102 stats->counters = isc_mem_get(mctx, sizeof(isc_stat_t) * ncounters);
103 if (stats->counters == NULL) {
104 result = ISC_R_NOMEMORY;
107 stats->copiedcounters = isc_mem_get(mctx,
108 sizeof(isc_uint64_t) * ncounters);
109 if (stats->copiedcounters == NULL) {
110 result = ISC_R_NOMEMORY;
114 #ifdef ISC_RWLOCK_USEATOMIC
115 result = isc_rwlock_init(&stats->counterlock, 0, 0);
116 if (result != ISC_R_SUCCESS)
117 goto clean_copiedcounters;
120 stats->references = 1;
121 memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
123 isc_mem_attach(mctx, &stats->mctx);
124 stats->ncounters = ncounters;
125 stats->magic = ISC_STATS_MAGIC;
132 isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);
134 #ifdef ISC_RWLOCK_USEATOMIC
135 clean_copiedcounters:
136 isc_mem_put(mctx, stats->copiedcounters,
137 sizeof(isc_stat_t) * ncounters);
141 DESTROYLOCK(&stats->lock);
144 isc_mem_put(mctx, stats, sizeof(*stats));
150 isc_stats_attach(isc_stats_t *stats, isc_stats_t **statsp) {
151 REQUIRE(ISC_STATS_VALID(stats));
152 REQUIRE(statsp != NULL && *statsp == NULL);
156 UNLOCK(&stats->lock);
162 isc_stats_detach(isc_stats_t **statsp) {
165 REQUIRE(statsp != NULL && ISC_STATS_VALID(*statsp));
172 UNLOCK(&stats->lock);
174 if (stats->references == 0) {
175 isc_mem_put(stats->mctx, stats->copiedcounters,
176 sizeof(isc_stat_t) * stats->ncounters);
177 isc_mem_put(stats->mctx, stats->counters,
178 sizeof(isc_stat_t) * stats->ncounters);
179 DESTROYLOCK(&stats->lock);
180 #ifdef ISC_RWLOCK_USEATOMIC
181 isc_rwlock_destroy(&stats->counterlock);
183 isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
188 isc_stats_ncounters(isc_stats_t *stats) {
189 REQUIRE(ISC_STATS_VALID(stats));
191 return (stats->ncounters);
195 incrementcounter(isc_stats_t *stats, int counter) {
198 #ifdef ISC_RWLOCK_USEATOMIC
200 * We use a "read" lock to prevent other threads from reading the
201 * counter while we "writing" a counter field. The write access itself
202 * is protected by the atomic operation.
204 isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
207 #if ISC_STATS_USEMULTIFIELDS
208 prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
210 * If the lower 32-bit field overflows, increment the higher field.
211 * Note that it's *theoretically* possible that the lower field
212 * overlaps again before the higher field is incremented. It doesn't
213 * matter, however, because we don't read the value until
214 * isc_stats_copy() is called where the whole process is protected
215 * by the write (exclusive) lock.
217 if (prev == (isc_int32_t)0xffffffff)
218 isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
219 #elif defined(ISC_PLATFORM_HAVEXADDQ)
221 isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
224 stats->counters[counter]++;
227 #ifdef ISC_RWLOCK_USEATOMIC
228 isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
233 decrementcounter(isc_stats_t *stats, int counter) {
236 #ifdef ISC_RWLOCK_USEATOMIC
237 isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
240 #if ISC_STATS_USEMULTIFIELDS
241 prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, -1);
243 isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
245 #elif defined(ISC_PLATFORM_HAVEXADDQ)
247 isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
250 stats->counters[counter]--;
253 #ifdef ISC_RWLOCK_USEATOMIC
254 isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
259 copy_counters(isc_stats_t *stats) {
262 #ifdef ISC_RWLOCK_USEATOMIC
264 * We use a "write" lock before "reading" the statistics counters as
267 isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
270 #if ISC_STATS_USEMULTIFIELDS
271 for (i = 0; i < stats->ncounters; i++) {
272 stats->copiedcounters[i] =
273 (isc_uint64_t)(stats->counters[i].hi) << 32 |
274 stats->counters[i].lo;
278 memcpy(stats->copiedcounters, stats->counters,
279 stats->ncounters * sizeof(isc_stat_t));
282 #ifdef ISC_RWLOCK_USEATOMIC
283 isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
288 isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
289 REQUIRE(statsp != NULL && *statsp == NULL);
291 return (create_stats(mctx, ncounters, statsp));
295 isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
296 REQUIRE(ISC_STATS_VALID(stats));
297 REQUIRE(counter < stats->ncounters);
299 incrementcounter(stats, (int)counter);
303 isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
304 REQUIRE(ISC_STATS_VALID(stats));
305 REQUIRE(counter < stats->ncounters);
307 decrementcounter(stats, (int)counter);
311 isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
312 void *arg, unsigned int options)
316 REQUIRE(ISC_STATS_VALID(stats));
318 copy_counters(stats);
320 for (i = 0; i < stats->ncounters; i++) {
321 if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
322 stats->copiedcounters[i] == 0)
324 dump_fn((isc_statscounter_t)i, stats->copiedcounters[i], arg);