2 * Copyright (c) 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if defined(LIBC_SCCS) && !defined(lint)
34 static char sccsid[] = "@(#)hash.c 8.9 (Berkeley) 6/16/94";
35 #endif /* LIBC_SCCS and not lint */
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include "namespace.h"
40 #include <sys/param.h>
52 #include "un-namespace.h"
/*
 * Forward declarations of the static routines implementing the hash
 * package, plus small helper macros.
 * NOTE(review): this view of the file is a sampled extract -- original
 * line numbers are embedded in the text and many lines are missing;
 * confirm all details against the full source.
 */
59 static int alloc_segs(HTAB *, int);
60 static int flush_meta(HTAB *);
61 static int hash_access(HTAB *, ACTION, DBT *, DBT *);
62 static int hash_close(DB *);
63 static int hash_delete(const DB *, const DBT *, u_int32_t);
64 static int hash_fd(const DB *);
65 static int hash_get(const DB *, const DBT *, DBT *, u_int32_t);
66 static int hash_put(const DB *, DBT *, const DBT *, u_int32_t);
67 static void *hash_realloc(SEGMENT **, int, int);
68 static int hash_seq(const DB *, DBT *, DBT *, u_int32_t);
69 static int hash_sync(const DB *, u_int32_t);
70 static int hdestroy(HTAB *);
71 static HTAB *init_hash(HTAB *, const char *, const HASHINFO *);
72 static int init_htab(HTAB *, int);
/* Header byte-swapping helpers are only needed on little-endian hosts. */
73 #if BYTE_ORDER == LITTLE_ENDIAN
74 static void swap_header(HTAB *);
75 static void swap_header_copy(HASHHDR *, HASHHDR *);
/* MOD(x, y): x % y, valid only when y is a power of two. */
78 /* Fast arithmetic, relying on powers of 2, */
79 #define MOD(x, y) ((x) & ((y) - 1))
/* Record the errno to report and jump to the given cleanup label. */
81 #define RETURN_ERROR(ERR, LOC) { save_errno = ERR; goto LOC; }
88 #ifdef HASH_STATISTICS
89 int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
92 /************************** INTERFACE ROUTINES ***************************/
/*
 * __hash_open --
 *	Open (or create, when the file is empty and writable) a hash-format
 *	database and return a DB handle whose method table points at the
 *	static hash_* routines in this file.  On failure, errno is recorded
 *	and cleanup is reached via the RETURN_ERROR/goto labels.
 *	NOTE(review): lines are missing from this extract; control flow is
 *	only partially visible.
 */
97 __hash_open(const char *file, int flags, int mode,
98 const HASHINFO *info, /* Special directives for create */
104 int bpages, hdrsize, new_table, nsegs, save_errno;
/* Write-only requests are special-cased (body not visible here). */
106 if ((flags & O_ACCMODE) == O_WRONLY) {
111 if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB))))
116 * Even if user wants write only, we need to be able to read
117 * the actual file, so we need to open it read/write. But, the
118 * field in the hashp structure needs to be accurate so that
119 * we can check accesses.
121 hashp->flags = flags;
124 if ((hashp->fp = _open(file, flags | O_CLOEXEC, mode)) == -1)
125 RETURN_ERROR(errno, error0);
/* A zero-length file opened for writing means a brand-new table. */
126 new_table = _fstat(hashp->fp, &statbuf) == 0 &&
127 statbuf.st_size == 0 && (flags & O_ACCMODE) != O_RDONLY;
132 if (!(hashp = init_hash(hashp, file, info)))
133 RETURN_ERROR(errno, error1);
135 /* Table already exists */
136 if (info && info->hash)
137 hashp->hash = info->hash;
139 hashp->hash = __default_hash;
/* Read the on-disk header; little-endian hosts byte-swap it below. */
141 hdrsize = _read(hashp->fp, &hashp->hdr, sizeof(HASHHDR));
142 #if BYTE_ORDER == LITTLE_ENDIAN
146 RETURN_ERROR(errno, error1);
147 if (hdrsize != sizeof(HASHHDR))
148 RETURN_ERROR(EFTYPE, error1);
149 /* Verify file type, versions and hash function */
150 if (hashp->MAGIC != HASHMAGIC)
151 RETURN_ERROR(EFTYPE, error1);
152 #define OLDHASHVERSION 1
153 if (hashp->VERSION != HASHVERSION &&
154 hashp->VERSION != OLDHASHVERSION)
155 RETURN_ERROR(EFTYPE, error1);
/* The stored hash of CHARKEY must match, i.e. the same hash function
 * used at create time must be supplied on re-open. */
156 if ((int32_t)hashp->hash(CHARKEY, sizeof(CHARKEY)) != hashp->H_CHARKEY)
157 RETURN_ERROR(EFTYPE, error1);
159 * Figure out how many segments we need. Max_Bucket is the
160 * maximum bucket number, so the number of buckets is
163 nsegs = howmany(hashp->MAX_BUCKET + 1, hashp->SGSIZE);
164 if (alloc_segs(hashp, nsegs))
166 * If alloc_segs fails, table will have been destroyed
167 * and errno will have been set.
170 /* Read in bitmaps */
171 bpages = (hashp->SPARES[hashp->OVFL_POINT] +
172 (hashp->BSIZE << BYTE_SHIFT) - 1) >>
173 (hashp->BSHIFT + BYTE_SHIFT);
175 hashp->nmaps = bpages;
176 (void)memset(&hashp->mapp[0], 0, bpages * sizeof(u_int32_t *));
179 /* Initialize Buffer Manager */
180 if (info && info->cachesize)
181 __buf_init(hashp, info->cachesize);
183 __buf_init(hashp, DEF_BUFSIZE);
185 hashp->new_file = new_table;
/* Only mark the table for saving when actually opened read/write. */
186 hashp->save_file = file && (hashp->flags & O_RDWR);
188 if (!(dbp = (DB *)malloc(sizeof(DB)))) {
/* Wire the generic DB method table to this package's routines. */
194 dbp->internal = hashp;
195 dbp->close = hash_close;
196 dbp->del = hash_delete;
201 dbp->sync = hash_sync;
/* Debug dump of the header fields (conditional block; #ifdef lines
 * around it are not visible in this extract). */
205 (void)fprintf(stderr,
206 "%s\n%s%p\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
208 "TABLE POINTER ", hashp,
209 "BUCKET SIZE ", hashp->BSIZE,
210 "BUCKET SHIFT ", hashp->BSHIFT,
211 "DIRECTORY SIZE ", hashp->DSIZE,
212 "SEGMENT SIZE ", hashp->SGSIZE,
213 "SEGMENT SHIFT ", hashp->SSHIFT,
214 "FILL FACTOR ", hashp->FFACTOR,
215 "MAX BUCKET ", hashp->MAX_BUCKET,
216 "OVFL POINT ", hashp->OVFL_POINT,
217 "LAST FREED ", hashp->LAST_FREED,
218 "HIGH MASK ", hashp->HIGH_MASK,
219 "LOW MASK ", hashp->LOW_MASK,
220 "NSEGS ", hashp->nsegs,
221 "NKEYS ", hashp->NKEYS);
223 #ifdef HASH_STATISTICS
224 hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0;
/* Error path: close the descriptor before returning NULL. */
230 (void)_close(hashp->fp);
/*
 * Fragments of hash_close (header not visible in this extract) and
 * hash_fd.  hash_close tears the table down via hdestroy(); hash_fd
 * exposes the underlying file descriptor.
 */
247 hashp = (HTAB *)dbp->internal;
248 retval = hdestroy(hashp);
254 hash_fd(const DB *dbp)
261 hashp = (HTAB *)dbp->internal;
/* fp == -1 means no backing file -- presumably this branch reports an
 * error (body not visible; verify against full source). */
262 if (hashp->fp == -1) {
269 /************************** LOCAL CREATION ROUTINES **********************/
/*
 * init_hash --
 *	Fill a freshly-allocated HTAB with compile-time defaults, tune the
 *	bucket size to the file system's block size, then apply any caller
 *	overrides from *info before sizing the table via init_htab().
 */
271 init_hash(HTAB *hashp, const char *file, const HASHINFO *info)
278 hashp->LORDER = BYTE_ORDER;
279 hashp->BSIZE = DEF_BUCKET_SIZE;
280 hashp->BSHIFT = DEF_BUCKET_SHIFT;
281 hashp->SGSIZE = DEF_SEGSIZE;
282 hashp->SSHIFT = DEF_SEGSIZE_SHIFT;
283 hashp->DSIZE = DEF_DIRSIZE;
284 hashp->FFACTOR = DEF_FFACTOR;
285 hashp->hash = __default_hash;
286 memset(hashp->SPARES, 0, sizeof(hashp->SPARES));
287 memset(hashp->BITMAPS, 0, sizeof (hashp->BITMAPS));
289 /* Fix bucket size to be optimal for file system */
291 if (stat(file, &statbuf))
293 hashp->BSIZE = statbuf.st_blksize;
/* Clamp to the package maximum; BSHIFT is kept in sync as log2(BSIZE). */
294 if (hashp->BSIZE > MAX_BSIZE)
295 hashp->BSIZE = MAX_BSIZE;
296 hashp->BSHIFT = __log2(hashp->BSIZE);
301 /* Round pagesize up to power of 2 */
302 hashp->BSHIFT = __log2(info->bsize);
303 hashp->BSIZE = 1 << hashp->BSHIFT;
304 if (hashp->BSIZE > MAX_BSIZE) {
310 hashp->FFACTOR = info->ffactor;
312 hashp->hash = info->hash;
/* Only big- or little-endian byte orders are accepted. */
316 if (info->lorder != BIG_ENDIAN &&
317 info->lorder != LITTLE_ENDIAN) {
321 hashp->LORDER = info->lorder;
324 /* init_htab should destroy the table and set errno if it fails */
325 if (init_htab(hashp, nelem))
331 * This calls alloc_segs which may run out of memory. Alloc_segs will destroy
332 * the table and set errno, so we just pass the error information along.
334 * Returns 0 on No Error
/*
 * init_htab --
 *	Size the table for an expected nelem elements: derive a
 *	power-of-two bucket count from nelem / FFACTOR, set up the
 *	spare-page/overflow bookkeeping and split masks, then allocate
 *	the segment directory (see comment above: errors propagate from
 *	alloc_segs, which destroys the table and sets errno).
 */
337 init_htab(HTAB *hashp, int nelem)
339 int nbuckets, nsegs, l2;
342 * Divide number of elements by the fill factor and determine a
343 * desired number of buckets. Allocate space for the next greater
344 * power of two number of buckets.
/* Ceiling division by the fill factor. */
346 nelem = (nelem - 1) / hashp->FFACTOR + 1;
348 l2 = __log2(MAX(nelem, 2));
351 hashp->SPARES[l2] = l2 + 1;
352 hashp->SPARES[l2 + 1] = l2 + 1;
353 hashp->OVFL_POINT = l2;
354 hashp->LAST_FREED = 2;
356 /* First bitmap page is at: splitpoint l2 page offset 1 */
357 if (__ibitmap(hashp, OADDR_OF(l2, 1), l2 + 1, 0))
/* The masks drive MOD()'s linear-hashing address arithmetic. */
360 hashp->MAX_BUCKET = hashp->LOW_MASK = nbuckets - 1;
361 hashp->HIGH_MASK = (nbuckets << 1) - 1;
362 hashp->HDRPAGES = ((MAX(sizeof(HASHHDR), MINHDRSIZE) - 1) >>
/* Segment count rounded to a power of two, never below one segment. */
365 nsegs = (nbuckets - 1) / hashp->SGSIZE + 1;
366 nsegs = 1 << __log2(nsegs);
368 if (nsegs > hashp->DSIZE)
369 hashp->DSIZE = nsegs;
370 return (alloc_segs(hashp, nsegs));
373 /********************** DESTROY/CLOSE ROUTINES ************************/
376 * Flushes any changes to the file if necessary and destroys the hashp
377 * structure, freeing all allocated space.
/*
 * hdestroy --
 *	Flush dirty pages (when the file is being saved), then free every
 *	piece of memory owned by the HTAB -- segments, bitmap pages,
 *	temporary key/data buffers -- and close the backing descriptor.
 */
380 hdestroy(HTAB *hashp)
386 #ifdef HASH_STATISTICS
387 (void)fprintf(stderr, "hdestroy: accesses %ld collisions %ld\n",
388 hash_accesses, hash_collisions);
389 (void)fprintf(stderr, "hdestroy: expansions %ld\n",
391 (void)fprintf(stderr, "hdestroy: overflows %ld\n",
393 (void)fprintf(stderr, "keys %ld maxp %d segmentcount %d\n",
394 hashp->NKEYS, hashp->MAX_BUCKET, hashp->nsegs);
396 for (i = 0; i < NCACHED; i++)
397 (void)fprintf(stderr,
398 "spares[%d] = %d\n", i, hashp->SPARES[i]);
401 * Call on buffer manager to free buffers, and if required,
402 * write them to disk.
404 if (__buf_free(hashp, 1, hashp->save_file))
407 free(*hashp->dir); /* Free initial segments */
408 /* Free extra segments */
409 while (hashp->exsegs--)
410 free(hashp->dir[--hashp->nsegs]);
/* Write the meta-data header back before closing the file. */
413 if (flush_meta(hashp) && !save_errno)
416 for (i = 0; i < hashp->nmaps; i++)
418 free(hashp->mapp[i]);
420 free(hashp->tmp_key);
422 free(hashp->tmp_buf);
424 if (hashp->fp != -1) {
425 if (hashp->save_file)
426 (void)_fsync(hashp->fp);
427 (void)_close(hashp->fp);
439 * Write modified pages to disk
/*
 * hash_sync --
 *	DB sync method: flush dirty buffer pages and the meta-data header
 *	to disk, then fsync the file.  A no-op when there is nothing to
 *	save (save_file not set).
 */
446 hash_sync(const DB *dbp, u_int32_t flags)
458 hashp = (HTAB *)dbp->internal;
459 if (!hashp->save_file)
461 if (__buf_free(hashp, 0, 1) || flush_meta(hashp))
463 if (hashp->fp != -1 && _fsync(hashp->fp) != 0)
472 * -1 indicates that errno should be set
/*
 * flush_meta --
 *	Write the in-core HASHHDR back to offset 0 of the file (swapping
 *	bytes first on little-endian hosts so the on-disk format stays
 *	canonical), then write out any cached bitmap pages.  Returns -1
 *	to indicate errno should be set (see comment above).
 */
475 flush_meta(HTAB *hashp)
478 #if BYTE_ORDER == LITTLE_ENDIAN
483 if (!hashp->save_file)
485 hashp->MAGIC = HASHMAGIC;
486 hashp->VERSION = HASHVERSION;
/* Stamp the header with this hash function's value for CHARKEY so a
 * later open can detect a mismatched hash function. */
487 hashp->H_CHARKEY = hashp->hash(CHARKEY, sizeof(CHARKEY));
491 #if BYTE_ORDER == LITTLE_ENDIAN
493 swap_header_copy(&hashp->hdr, whdrp);
495 if ((wsize = pwrite(fp, whdrp, sizeof(HASHHDR), (off_t)0)) == -1)
/* A short write is also an error; record errno for the caller. */
498 if (wsize != sizeof(HASHHDR)) {
500 hashp->error = errno;
503 for (i = 0; i < NCACHED; i++)
505 if (__put_page(hashp, (char *)hashp->mapp[i],
506 hashp->BITMAPS[i], 0, 1))
511 /*******************************SEARCH ROUTINES *****************************/
513 * All the access routines return
517 * 1 to indicate an external ERROR (i.e. key not found, etc)
518 * -1 to indicate an internal ERROR (i.e. out of memory, etc)
/*
 * hash_get --
 *	DB get method: thin wrapper that validates the flag argument and
 *	forwards to hash_access(HASH_GET).  Per the comment above, the
 *	access routines return 0/1/-1 for success/not-found/error.
 */
521 hash_get(const DB *dbp, const DBT *key, DBT *data, u_int32_t flag)
525 hashp = (HTAB *)dbp->internal;
527 hashp->error = errno = EINVAL;
530 return (hash_access(hashp, HASH_GET, (DBT *)key, data));
/*
 * hash_put --
 *	DB put method: rejects any flag other than R_NOOVERWRITE (EINVAL)
 *	and writes to read-only tables (EPERM), then forwards to
 *	hash_access with HASH_PUTNEW or HASH_PUT accordingly.
 */
534 hash_put(const DB *dbp, DBT *key, const DBT *data, u_int32_t flag)
538 hashp = (HTAB *)dbp->internal;
539 if (flag && flag != R_NOOVERWRITE) {
540 hashp->error = errno = EINVAL;
543 if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
544 hashp->error = errno = EPERM;
547 return (hash_access(hashp, flag == R_NOOVERWRITE ?
548 HASH_PUTNEW : HASH_PUT, (DBT *)key, (DBT *)data));
/*
 * hash_delete --
 *	DB del method: accepts only R_CURSOR (which is then ignored --
 *	see parameter comment), refuses read-only tables, and forwards
 *	to hash_access(HASH_DELETE).
 */
552 hash_delete(const DB *dbp, const DBT *key,
553 u_int32_t flag) /* Ignored */
557 hashp = (HTAB *)dbp->internal;
558 if (flag && flag != R_CURSOR) {
559 hashp->error = errno = EINVAL;
562 if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
563 hashp->error = errno = EPERM;
566 return (hash_access(hashp, HASH_DELETE, (DBT *)key, NULL));
570 * Assume that hashp has been set in wrapper routine.
/*
 * hash_access --
 *	Common worker for get/put/delete: hashes the key, pins the bucket
 *	chain (BUF_PIN keeps the buffer manager from evicting it), scans
 *	the chain for the key, then performs the requested ACTION.  The
 *	pin is dropped on every exit path.
 *	NOTE(review): many lines are missing from this extract; the
 *	page-scan details below are only partially visible.
 */
573 hash_access(HTAB *hashp, ACTION action, DBT *key, DBT *val)
576 BUFHEAD *bufp, *save_bufp;
578 int n, ndx, off, size;
582 #ifdef HASH_STATISTICS
588 kp = (char *)key->data;
589 rbufp = __get_buf(hashp, __call_hash(hashp, kp, size), NULL, 0);
594 /* Pin the bucket chain */
595 rbufp->flags |= BUF_PIN;
/* Walk the page's u_int16_t index array; entries below REAL_KEY are
 * special markers (overflow page / big key-data pair). */
596 for (bp = (u_int16_t *)rbufp->page, n = *bp++, ndx = 1; ndx < n;)
597 if (bp[1] >= REAL_KEY) {
598 /* Real key/data pair */
599 if (size == off - *bp &&
600 memcmp(kp, rbufp->page + *bp, size) == 0)
603 #ifdef HASH_STATISTICS
608 } else if (bp[1] == OVFLPAGE) {
/* Follow the chain onto the overflow page; unpin on failure. */
609 rbufp = __get_buf(hashp, *bp, rbufp, 0);
611 save_bufp->flags &= ~BUF_PIN;
615 bp = (u_int16_t *)rbufp->page;
619 } else if (bp[1] < REAL_KEY) {
621 __find_bigpair(hashp, rbufp, ndx, kp, size)) > 0)
626 __find_last_page(hashp, &bufp))) {
631 rbufp = __get_buf(hashp, pageno, bufp, 0);
633 save_bufp->flags &= ~BUF_PIN;
637 bp = (u_int16_t *)rbufp->page;
/* Key not found: puts add the new pair, other actions report it. */
642 save_bufp->flags &= ~BUF_PIN;
651 if (__addel(hashp, rbufp, key, val)) {
652 save_bufp->flags &= ~BUF_PIN;
655 save_bufp->flags &= ~BUF_PIN;
661 save_bufp->flags &= ~BUF_PIN;
668 save_bufp->flags &= ~BUF_PIN;
/* Key found: return, replace, or delete the pair as requested. */
671 bp = (u_int16_t *)rbufp->page;
672 if (bp[ndx + 1] < REAL_KEY) {
673 if (__big_return(hashp, rbufp, ndx, val, 0))
676 val->data = (u_char *)rbufp->page + (int)bp[ndx + 1];
677 val->size = bp[ndx] - bp[ndx + 1];
/* Replace is implemented as delete-then-add. */
681 if ((__delpair(hashp, rbufp, ndx)) ||
682 (__addel(hashp, rbufp, key, val))) {
683 save_bufp->flags &= ~BUF_PIN;
688 if (__delpair(hashp, rbufp, ndx))
694 save_bufp->flags &= ~BUF_PIN;
/*
 * hash_seq --
 *	DB seq method: sequential scan over the table.  R_FIRST restarts
 *	the cursor; otherwise the cursor state kept in the HTAB
 *	(cbucket/cpage/cndx) is advanced, following overflow pages, and
 *	the pair under the cursor is returned through key/data.
 *	NOTE(review): lines are missing from this extract.
 */
699 hash_seq(const DB *dbp, DBT *key, DBT *data, u_int32_t flag)
706 hashp = (HTAB *)dbp->internal;
707 if (flag && flag != R_FIRST && flag != R_NEXT) {
708 hashp->error = errno = EINVAL;
711 #ifdef HASH_STATISTICS
714 if ((hashp->cbucket < 0) || (flag == R_FIRST)) {
/* Find the next bucket whose page has at least one entry. */
720 for (bp = NULL; !bp || !bp[0]; ) {
721 if (!(bufp = hashp->cpage)) {
722 for (bucket = hashp->cbucket;
723 bucket <= hashp->MAX_BUCKET;
724 bucket++, hashp->cndx = 1) {
725 bufp = __get_buf(hashp, bucket, NULL, 0);
729 bp = (u_int16_t *)bufp->page;
733 hashp->cbucket = bucket;
/* Ran past the last bucket: end of scan. */
734 if ((u_int32_t)hashp->cbucket > hashp->MAX_BUCKET) {
739 bp = (u_int16_t *)hashp->cpage->page;
740 if (flag == R_NEXT || flag == 0) {
742 if (hashp->cndx > bp[0]) {
/* Skip overflow-page markers to reach the actual pair. */
755 while (bp[hashp->cndx + 1] == OVFLPAGE) {
756 bufp = hashp->cpage =
757 __get_buf(hashp, bp[hashp->cndx], bufp, 0);
760 bp = (u_int16_t *)(bufp->page);
/* Index entries below REAL_KEY mark big key/data pairs. */
769 if (bp[ndx + 1] < REAL_KEY) {
770 if (__big_keydata(hashp, bufp, key, data, 1))
773 if (hashp->cpage == NULL)
775 key->data = (u_char *)hashp->cpage->page + bp[ndx];
776 key->size = (ndx > 1 ? bp[ndx - 1] : hashp->BSIZE) - bp[ndx];
777 data->data = (u_char *)hashp->cpage->page + bp[ndx + 1];
778 data->size = bp[ndx] - bp[ndx + 1];
783 /********************************* UTILITIES ************************/
/*
 * __expand_table --
 *	Linear-hashing expansion: add one bucket (growing the segment
 *	directory if needed), advance the split point and masks when the
 *	table finishes a doubling, then split the old bucket's records
 *	between it and the new bucket.
 */
791 __expand_table(HTAB *hashp)
793 u_int32_t old_bucket, new_bucket;
794 int dirsize, new_segnum, spare_ndx;
796 #ifdef HASH_STATISTICS
799 new_bucket = ++hashp->MAX_BUCKET;
800 old_bucket = (hashp->MAX_BUCKET & hashp->LOW_MASK);
802 new_segnum = new_bucket >> hashp->SSHIFT;
804 /* Check if we need a new segment */
805 if (new_segnum >= hashp->nsegs) {
806 /* Check if we need to expand directory */
807 if (new_segnum >= hashp->DSIZE) {
808 /* Reallocate directory */
809 dirsize = hashp->DSIZE * sizeof(SEGMENT *);
810 if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1))
/* NOTE(review): DSIZE is used elsewhere as an entry count (e.g.
 * calloc(hashp->DSIZE, sizeof(SEGMENT *)) in alloc_segs), but here it
 * is assigned a byte size (dirsize = DSIZE * sizeof(SEGMENT *), then
 * doubled).  Looks suspicious -- verify against upstream history. */
812 hashp->DSIZE = dirsize << 1;
814 if ((hashp->dir[new_segnum] =
815 calloc(hashp->SGSIZE, sizeof(SEGMENT))) == NULL)
821 * If the split point is increasing (MAX_BUCKET's log base 2
822 * * increases), we need to copy the current contents of the spare
823 * split bucket to the next bucket.
825 spare_ndx = __log2(hashp->MAX_BUCKET + 1);
826 if (spare_ndx > hashp->OVFL_POINT) {
827 hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT];
828 hashp->OVFL_POINT = spare_ndx;
831 if (new_bucket > hashp->HIGH_MASK) {
832 /* Starting a new doubling */
833 hashp->LOW_MASK = hashp->HIGH_MASK;
834 hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK;
836 /* Relocate records to the new bucket */
837 return (__split_page(hashp, old_bucket, new_bucket));
841 * If realloc guarantees that the pointer is not destroyed if the realloc
842 * fails, then this routine can go away.
/*
 * hash_realloc --
 *	Grow the segment directory: allocate a larger block, copy the old
 *	contents, and zero the new tail.  Exists because realloc() does
 *	not preserve the old block on failure (see comment above).
 *	NOTE(review): the free/assign/return tail of this function is
 *	missing from this extract.
 */
845 hash_realloc(SEGMENT **p_ptr, int oldsize, int newsize)
849 if ( (p = malloc(newsize)) ) {
850 memmove(p, *p_ptr, oldsize);
851 memset((char *)p + oldsize, 0, newsize - oldsize);
/*
 * __call_hash --
 *	Map a key to its current bucket: hash the key, mask with
 *	HIGH_MASK, and fall back to LOW_MASK when the candidate bucket
 *	has not yet been created -- the standard linear-hashing address
 *	calculation.
 */
859 __call_hash(HTAB *hashp, char *k, int len)
861 unsigned int n, bucket;
863 n = hashp->hash(k, len);
864 bucket = n & hashp->HIGH_MASK;
865 if (bucket > hashp->MAX_BUCKET)
866 bucket = bucket & hashp->LOW_MASK;
871 * Allocate segment table. On error, destroy the table and set errno.
873 * Returns 0 on success
/*
 * alloc_segs --
 *	Allocate the segment directory plus the initial nsegs segments
 *	(one contiguous calloc, parceled out in SSHIFT-sized chunks).
 *	On allocation failure the whole table is destroyed via hdestroy()
 *	and errno is set (see comment above).
 */
876 alloc_segs(HTAB *hashp, int nsegs)
884 calloc(hashp->DSIZE, sizeof(SEGMENT *))) == NULL) {
886 (void)hdestroy(hashp);
890 hashp->nsegs = nsegs;
893 /* Allocate segments */
894 if ((store = calloc(nsegs << hashp->SSHIFT, sizeof(SEGMENT))) == NULL) {
896 (void)hdestroy(hashp);
/* Point each directory slot at its slice of the contiguous store. */
900 for (i = 0; i < nsegs; i++)
901 hashp->dir[i] = &store[i << hashp->SSHIFT];
905 #if BYTE_ORDER == LITTLE_ENDIAN
907 * Hashp->hdr needs to be byteswapped.
/*
 * swap_header_copy --
 *	Byte-swap *srcp field-by-field into *destp (P_32_COPY/P_16_COPY),
 *	converting between host order and the canonical on-disk header on
 *	little-endian machines (see comment above).
 */
910 swap_header_copy(HASHHDR *srcp, HASHHDR *destp)
914 P_32_COPY(srcp->magic, destp->magic);
915 P_32_COPY(srcp->version, destp->version);
916 P_32_COPY(srcp->lorder, destp->lorder);
917 P_32_COPY(srcp->bsize, destp->bsize);
918 P_32_COPY(srcp->bshift, destp->bshift);
919 P_32_COPY(srcp->dsize, destp->dsize);
920 P_32_COPY(srcp->ssize, destp->ssize);
921 P_32_COPY(srcp->sshift, destp->sshift);
922 P_32_COPY(srcp->ovfl_point, destp->ovfl_point);
923 P_32_COPY(srcp->last_freed, destp->last_freed);
924 P_32_COPY(srcp->max_bucket, destp->max_bucket);
925 P_32_COPY(srcp->high_mask, destp->high_mask);
926 P_32_COPY(srcp->low_mask, destp->low_mask);
927 P_32_COPY(srcp->ffactor, destp->ffactor);
928 P_32_COPY(srcp->nkeys, destp->nkeys);
929 P_32_COPY(srcp->hdrpages, destp->hdrpages);
930 P_32_COPY(srcp->h_charkey, destp->h_charkey);
/* The spare and bitmap arrays are swapped element-by-element. */
931 for (i = 0; i < NCACHED; i++) {
932 P_32_COPY(srcp->spares[i], destp->spares[i]);
933 P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]);
/*
 * swap_header --
 *	Byte-swap hashp->hdr in place (M_32_SWAP/M_16_SWAP), e.g. after
 *	reading the on-disk header on a little-endian host.
 *	NOTE(review): the tail of this function lies past the end of this
 *	extract.
 */
938 swap_header(HTAB *hashp)
945 M_32_SWAP(hdrp->magic);
946 M_32_SWAP(hdrp->version);
947 M_32_SWAP(hdrp->lorder);
948 M_32_SWAP(hdrp->bsize);
949 M_32_SWAP(hdrp->bshift);
950 M_32_SWAP(hdrp->dsize);
951 M_32_SWAP(hdrp->ssize);
952 M_32_SWAP(hdrp->sshift);
953 M_32_SWAP(hdrp->ovfl_point);
954 M_32_SWAP(hdrp->last_freed);
955 M_32_SWAP(hdrp->max_bucket);
956 M_32_SWAP(hdrp->high_mask);
957 M_32_SWAP(hdrp->low_mask);
958 M_32_SWAP(hdrp->ffactor);
959 M_32_SWAP(hdrp->nkeys);
960 M_32_SWAP(hdrp->hdrpages);
961 M_32_SWAP(hdrp->h_charkey);
/* Swap the spare and bitmap arrays element-by-element. */
962 for (i = 0; i < NCACHED; i++) {
963 M_32_SWAP(hdrp->spares[i]);
964 M_16_SWAP(hdrp->bitmaps[i]);