2 * Copyright (c) 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if defined(LIBC_SCCS) && !defined(lint)
34 static char sccsid[] = "@(#)hash.c 8.9 (Berkeley) 6/16/94";
35 #endif /* LIBC_SCCS and not lint */
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include "namespace.h"
40 #include <sys/param.h>
52 #include "un-namespace.h"
/* Forward declarations for the static helpers that implement the DB(3)
 * hash access method; the hash_* functions below are installed as the
 * DB method table entries by __hash_open. */
59 static int alloc_segs(HTAB *, int);
60 static int flush_meta(HTAB *);
61 static int hash_access(HTAB *, ACTION, DBT *, DBT *);
62 static int hash_close(DB *);
63 static int hash_delete(const DB *, const DBT *, u_int32_t);
64 static int hash_fd(const DB *);
65 static int hash_get(const DB *, const DBT *, DBT *, u_int32_t);
66 static int hash_put(const DB *, DBT *, const DBT *, u_int32_t);
67 static void *hash_realloc(SEGMENT **, int, int);
68 static int hash_seq(const DB *, DBT *, DBT *, u_int32_t);
69 static int hash_sync(const DB *, u_int32_t);
70 static int hdestroy(HTAB *);
71 static HTAB *init_hash(HTAB *, const char *, const HASHINFO *);
72 static int init_htab(HTAB *, int);
/* The on-disk header is stored little-endian relative to HASHMAGIC;
 * big-endian hosts do not need the swap helpers. */
73 #if BYTE_ORDER == LITTLE_ENDIAN
74 static void swap_header(HTAB *);
75 static void swap_header_copy(HASHHDR *, HASHHDR *);
78 /* Fast arithmetic, relying on powers of 2, */
/* MOD(x, y) is valid only when y is a power of two. */
79 #define MOD(x, y) ((x) & ((y) - 1))
/* Stashes ERR in the caller's local `save_errno` and jumps to the
 * caller's cleanup label LOC; callers must declare both. */
81 #define RETURN_ERROR(ERR, LOC) { save_errno = ERR; goto LOC; }
88 #ifdef HASH_STATISTICS
/* Global instrumentation counters, compiled in only with HASH_STATISTICS. */
89 int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
92 /************************** INTERFACE ROUTINES ***************************/
/*
 * __hash_open -- open (or create) a hash-table database file and build
 * the in-core HTAB plus the DB method table returned to the caller.
 * NOTE(review): this listing is elided; comments describe only the
 * statements that are visible here.
 */
97 __hash_open(const char *file, int flags, int mode,
98     const HASHINFO *info, /* Special directives for create */
104 	int bpages, hdrsize, new_table, nsegs, save_errno;
/* Write-only access is rejected up front: the header must be readable. */
106 	if ((flags & O_ACCMODE) == O_WRONLY) {
111 	if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB))))
116 	 * Even if user wants write only, we need to be able to read
117 	 * the actual file, so we need to open it read/write. But, the
118 	 * field in the hashp structure needs to be accurate so that
119 	 * we can check accesses.
121 	hashp->flags = flags;
124 	if ((hashp->fp = _open(file, flags | O_CLOEXEC, mode)) == -1)
125 		RETURN_ERROR(errno, error0);
/* A brand-new table: file exists but is empty and was opened writable. */
126 	new_table = _fstat(hashp->fp, &statbuf) == 0 &&
127 	    statbuf.st_size == 0 && (flags & O_ACCMODE) != O_RDONLY;
132 		if (!(hashp = init_hash(hashp, file, info)))
133 			RETURN_ERROR(errno, error1);
135 		/* Table already exists */
/* A user-supplied hash function overrides the default for verification. */
136 		if (info && info->hash)
137 			hashp->hash = info->hash;
139 			hashp->hash = __default_hash;
/* Read the on-disk header directly into hashp->hdr. */
141 		hdrsize = _read(hashp->fp, &hashp->hdr, sizeof(HASHHDR));
142 #if BYTE_ORDER == LITTLE_ENDIAN
146 			RETURN_ERROR(errno, error1);
147 		if (hdrsize != sizeof(HASHHDR))
148 			RETURN_ERROR(EFTYPE, error1);
149 		/* Verify file type, versions and hash function */
150 		if (hashp->MAGIC != HASHMAGIC)
151 			RETURN_ERROR(EFTYPE, error1);
152 #define OLDHASHVERSION	1
153 		if (hashp->VERSION != HASHVERSION &&
154 		    hashp->VERSION != OLDHASHVERSION)
155 			RETURN_ERROR(EFTYPE, error1);
/* The stored hash of CHARKEY must match: detects a mismatched hash function. */
156 		if ((int32_t)hashp->hash(CHARKEY, sizeof(CHARKEY)) != hashp->H_CHARKEY)
157 			RETURN_ERROR(EFTYPE, error1);
159 		 * Figure out how many segments we need.  Max_Bucket is the
160 		 * maximum bucket number, so the number of buckets is
/* Round (MAX_BUCKET + 1) buckets up to whole segments. */
163 		nsegs = (hashp->MAX_BUCKET + 1 + hashp->SGSIZE - 1) /
165 		if (alloc_segs(hashp, nsegs))
167 			 * If alloc_segs fails, table will have been destroyed
168 			 * and errno will have been set.
171 		/* Read in bitmaps */
/* Overflow-page bitmap count, rounded up to whole bucket-sized pages. */
172 		bpages = (hashp->SPARES[hashp->OVFL_POINT] +
173 		    (hashp->BSIZE << BYTE_SHIFT) - 1) >>
174 		    (hashp->BSHIFT + BYTE_SHIFT);
176 		hashp->nmaps = bpages;
177 		(void)memset(&hashp->mapp[0], 0, bpages * sizeof(u_int32_t *));
180 	/* Initialize Buffer Manager */
181 	if (info && info->cachesize)
182 		__buf_init(hashp, info->cachesize);
184 		__buf_init(hashp, DEF_BUFSIZE);
186 	hashp->new_file = new_table;
/* Only persist changes when the file was opened read/write. */
187 	hashp->save_file = file && (hashp->flags & O_RDWR);
189 	if (!(dbp = (DB *)malloc(sizeof(DB)))) {
/* Populate the DB method table handed back to the caller. */
195 	dbp->internal = hashp;
196 	dbp->close = hash_close;
197 	dbp->del = hash_delete;
202 	dbp->sync = hash_sync;
/* Debug dump of the freshly-opened table's header fields. */
206 	(void)fprintf(stderr,
207 "%s\n%s%p\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
209 	    "TABLE POINTER   ", hashp,
210 	    "BUCKET SIZE     ", hashp->BSIZE,
211 	    "BUCKET SHIFT    ", hashp->BSHIFT,
212 	    "DIRECTORY SIZE  ", hashp->DSIZE,
213 	    "SEGMENT SIZE    ", hashp->SGSIZE,
214 	    "SEGMENT SHIFT   ", hashp->SSHIFT,
215 	    "FILL FACTOR     ", hashp->FFACTOR,
216 	    "MAX BUCKET      ", hashp->MAX_BUCKET,
217 	    "OVFL POINT	     ", hashp->OVFL_POINT,
218 	    "LAST FREED      ", hashp->LAST_FREED,
219 	    "HIGH MASK       ", hashp->HIGH_MASK,
220 	    "LOW  MASK       ", hashp->LOW_MASK,
221 	    "NSEGS           ", hashp->nsegs,
222 	    "NKEYS           ", hashp->NKEYS);
224 #ifdef HASH_STATISTICS
225 	hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0;
/* Error path: close the file descriptor before returning failure. */
231 	(void)_close(hashp->fp);
/* Body fragment of hash_close(): all table teardown is delegated to
 * hdestroy(), whose result becomes the method's return value. */
248 	hashp = (HTAB *)dbp->internal;
249 	retval = hdestroy(hashp);
/* hash_fd -- return the table's underlying file descriptor; a cached
 * fp of -1 is the "no backing file" case (fragment — error handling for
 * that case is elided here). */
255 hash_fd(const DB *dbp)
262 	hashp = (HTAB *)dbp->internal;
263 	if (hashp->fp == -1) {
270 /************************** LOCAL CREATION ROUTINES **********************/
/*
 * init_hash -- fill a freshly-allocated HTAB with defaults, then apply
 * any overrides from the optional HASHINFO and size the table via
 * init_htab.  Returns the HTAB on success (per the visible call site in
 * __hash_open, NULL signals failure).
 */
272 init_hash(HTAB *hashp, const char *file, const HASHINFO *info)
/* Compiled-in defaults; may be overridden below. */
279 	hashp->LORDER = BYTE_ORDER;
280 	hashp->BSIZE = DEF_BUCKET_SIZE;
281 	hashp->BSHIFT = DEF_BUCKET_SHIFT;
282 	hashp->SGSIZE = DEF_SEGSIZE;
283 	hashp->SSHIFT = DEF_SEGSIZE_SHIFT;
284 	hashp->DSIZE = DEF_DIRSIZE;
285 	hashp->FFACTOR = DEF_FFACTOR;
286 	hashp->hash = __default_hash;
287 	memset(hashp->SPARES, 0, sizeof(hashp->SPARES));
288 	memset(hashp->BITMAPS, 0, sizeof (hashp->BITMAPS));
290 	/* Fix bucket size to be optimal for file system */
292 		if (stat(file, &statbuf))
/* Use the filesystem's preferred block size, capped at MAX_BSIZE. */
294 		hashp->BSIZE = statbuf.st_blksize;
295 		if (hashp->BSIZE > MAX_BSIZE)
296 			hashp->BSIZE = MAX_BSIZE;
297 		hashp->BSHIFT = __log2(hashp->BSIZE);
302 			/* Round pagesize up to power of 2 */
303 			hashp->BSHIFT = __log2(info->bsize);
304 			hashp->BSIZE = 1 << hashp->BSHIFT;
305 			if (hashp->BSIZE > MAX_BSIZE) {
311 			hashp->FFACTOR = info->ffactor;
313 			hashp->hash = info->hash;
/* Only the two standard byte orders are accepted for lorder. */
317 			if (info->lorder != BIG_ENDIAN &&
318 			    info->lorder != LITTLE_ENDIAN) {
322 			hashp->LORDER = info->lorder;
325 	/* init_htab should destroy the table and set errno if it fails */
326 	if (init_htab(hashp, nelem))
/*
 * init_htab -- size a new table for an expected nelem elements:
 * compute the initial bucket count, split point, masks and segment
 * directory, then allocate the segments.
 *
 * 332 * This calls alloc_segs which may run out of memory.  Alloc_segs will destroy
 * (original note preserved below)
 */
332  * This calls alloc_segs which may run out of memory.  Alloc_segs will destroy
333  * the table and set errno, so we just pass the error information along.
335  * Returns 0 on No Error
338 init_htab(HTAB *hashp, int nelem)
340 	int nbuckets, nsegs, l2;
343 	 * Divide number of elements by the fill factor and determine a
344 	 * desired number of buckets.  Allocate space for the next greater
345 	 * power of two number of buckets.
/* Ceiling division by the fill factor; at least 2 buckets (l2 >= 1). */
347 	nelem = (nelem - 1) / hashp->FFACTOR + 1;
349 	l2 = __log2(MAX(nelem, 2));
/* Reserve spare (overflow) page slots around the initial split point l2. */
352 	hashp->SPARES[l2] = l2 + 1;
353 	hashp->SPARES[l2 + 1] = l2 + 1;
354 	hashp->OVFL_POINT = l2;
355 	hashp->LAST_FREED = 2;
357 	/* First bitmap page is at: splitpoint l2 page offset 1 */
358 	if (__ibitmap(hashp, OADDR_OF(l2, 1), l2 + 1, 0))
/* nbuckets is a power of two, so these masks are all-ones bit masks. */
361 	hashp->MAX_BUCKET = hashp->LOW_MASK = nbuckets - 1;
362 	hashp->HIGH_MASK = (nbuckets << 1) - 1;
363 	hashp->HDRPAGES = ((MAX(sizeof(HASHHDR), MINHDRSIZE) - 1) >>
/* Segments needed for nbuckets, rounded up to a power of two. */
366 	nsegs = (nbuckets - 1) / hashp->SGSIZE + 1;
367 	nsegs = 1 << __log2(nsegs);
369 	if (nsegs > hashp->DSIZE)
370 		hashp->DSIZE = nsegs;
371 	return (alloc_segs(hashp, nsegs));
374 /********************** DESTROY/CLOSE ROUTINES ************************/
/*
 * hdestroy -- flush and free the entire in-core table: buffers, segment
 * directory, overflow-page bitmaps, scratch buffers, and finally the
 * file descriptor (fsync'd first when save_file is set).
 */
377  * Flushes any changes to the file if necessary and destroys the hashp
378  * structure, freeing all allocated space.
381 hdestroy(HTAB *hashp)
387 #ifdef HASH_STATISTICS
/* Statistics dump, only under HASH_STATISTICS. */
388 	(void)fprintf(stderr, "hdestroy: accesses %ld collisions %ld\n",
389 	    hash_accesses, hash_collisions);
390 	(void)fprintf(stderr, "hdestroy: expansions %ld\n",
392 	(void)fprintf(stderr, "hdestroy: overflows %ld\n",
394 	(void)fprintf(stderr, "keys %ld maxp %d segmentcount %d\n",
395 	    hashp->NKEYS, hashp->MAX_BUCKET, hashp->nsegs);
397 	for (i = 0; i < NCACHED; i++)
398 		(void)fprintf(stderr,
399 		    "spares[%d] = %d\n", i, hashp->SPARES[i]);
402 	 * Call on buffer manager to free buffers, and if required,
403 	 * write them to disk.
405 	if (__buf_free(hashp, 1, hashp->save_file))
408 		free(*hashp->dir);	/* Free initial segments */
409 		/* Free extra segments */
410 		while (hashp->exsegs--)
411 			free(hashp->dir[--hashp->nsegs]);
/* Write the header back; keep the first error seen in save_errno. */
414 	if (flush_meta(hashp) && !save_errno)
417 	for (i = 0; i < hashp->nmaps; i++)
419 			free(hashp->mapp[i]);
421 		free(hashp->tmp_key);
423 		free(hashp->tmp_buf);
425 	if (hashp->fp != -1) {
426 		if (hashp->save_file)
427 			(void)_fsync(hashp->fp);
428 		(void)_close(hashp->fp);
440  * Write modified pages to disk
/*
 * hash_sync -- flush dirty buffer pages and the metadata header, then
 * fsync the file.  A table that is not persistable (save_file clear)
 * has nothing to do.
 */
447 hash_sync(const DB *dbp, u_int32_t flags)
459 	hashp = (HTAB *)dbp->internal;
460 	if (!hashp->save_file)
462 	if (__buf_free(hashp, 0, 1) || flush_meta(hashp))
464 	if (hashp->fp != -1 && _fsync(hashp->fp) != 0)
/*
 * flush_meta -- write the HASHHDR (byteswapped on little-endian hosts)
 * to offset 0 of the file, then write out the cached overflow bitmaps.
 */
473  * -1 indicates that errno should be set
476 flush_meta(HTAB *hashp)
479 #if BYTE_ORDER == LITTLE_ENDIAN
/* Read-only tables never write metadata back. */
484 	if (!hashp->save_file)
/* Refresh the identification fields before writing. */
486 	hashp->MAGIC = HASHMAGIC;
487 	hashp->VERSION = HASHVERSION;
488 	hashp->H_CHARKEY = hashp->hash(CHARKEY, sizeof(CHARKEY));
492 #if BYTE_ORDER == LITTLE_ENDIAN
/* Swap into a scratch header so the live one stays host-order. */
494 	swap_header_copy(&hashp->hdr, whdrp);
/* pwrite at offset 0: the header always occupies the start of the file. */
496 	if ((wsize = pwrite(fp, whdrp, sizeof(HASHHDR), (off_t)0)) == -1)
499 	if (wsize != sizeof(HASHHDR)) {
501 		hashp->error = errno;
/* Persist each cached overflow-page bitmap. */
504 	for (i = 0; i < NCACHED; i++)
506 			if (__put_page(hashp, (char *)hashp->mapp[i],
507 			    hashp->BITMAPS[i], 0, 1))
512 /*******************************SEARCH ROUTINES *****************************/
514  * All the access routines return
518  *	 1 to indicate an external ERROR (i.e. key not found, etc)
519  *	-1 to indicate an internal ERROR (i.e. out of memory, etc)
/*
 * hash_get -- DB get method: validate the flag, then look the key up
 * through the common hash_access path.
 */
522 hash_get(const DB *dbp, const DBT *key, DBT *data, u_int32_t flag)
526 	hashp = (HTAB *)dbp->internal;
/* Invalid flag: record EINVAL in both errno and the table's error slot. */
528 		hashp->error = errno = EINVAL;
531 	return (hash_access(hashp, HASH_GET, (DBT *)key, data));
/*
 * hash_put -- DB put method: only R_NOOVERWRITE (or 0) is a legal flag,
 * writes are refused on read-only tables (EPERM), and the store is
 * dispatched as HASH_PUTNEW or HASH_PUT accordingly.
 */
535 hash_put(const DB *dbp, DBT *key, const DBT *data, u_int32_t flag)
539 	hashp = (HTAB *)dbp->internal;
540 	if (flag && flag != R_NOOVERWRITE) {
541 		hashp->error = errno = EINVAL;
544 	if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
545 		hashp->error = errno = EPERM;
548 	return (hash_access(hashp, flag == R_NOOVERWRITE ?
549 	    HASH_PUTNEW : HASH_PUT, (DBT *)key, (DBT *)data));
/*
 * hash_delete -- DB del method: only R_CURSOR (or 0) is a legal flag,
 * deletes are refused on read-only tables (EPERM); the actual removal
 * goes through hash_access with a NULL value.
 */
553 hash_delete(const DB *dbp, const DBT *key,
554     u_int32_t flag)		/* Ignored */
558 	hashp = (HTAB *)dbp->internal;
559 	if (flag && flag != R_CURSOR) {
560 		hashp->error = errno = EINVAL;
563 	if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
564 		hashp->error = errno = EPERM;
567 	return (hash_access(hashp, HASH_DELETE, (DBT *)key, NULL));
/*
 * hash_access -- the shared engine behind get/put/delete: hash the key
 * to a bucket, walk that bucket's page chain (following OVFLPAGE links
 * and big-key pages) looking for the key, then perform the requested
 * ACTION.  The head buffer of the chain is pinned (BUF_PIN) for the
 * duration and unpinned on every exit path.
 * NOTE(review): this listing is elided; the switch/return structure
 * between the visible statements is not shown.
 */
571  * Assume that hashp has been set in wrapper routine.
574 hash_access(HTAB *hashp, ACTION action, DBT *key, DBT *val)
577 	BUFHEAD *bufp, *save_bufp;
579 	int n, ndx, off, size;
583 #ifdef HASH_STATISTICS
589 	kp = (char *)key->data;
/* Fetch the primary page of the bucket this key hashes to. */
590 	rbufp = __get_buf(hashp, __call_hash(hashp, kp, size), NULL, 0);
595 	/* Pin the bucket chain */
596 	rbufp->flags |= BUF_PIN;
/* Page layout: bp[0] = entry count, then (offset, data-offset) pairs. */
597 	for (bp = (u_int16_t *)rbufp->page, n = *bp++, ndx = 1; ndx < n;)
598 		if (bp[1] >= REAL_KEY) {
599 			/* Real key/data pair */
/* Match on both stored length (off - *bp) and bytes. */
600 			if (size == off - *bp &&
601 			    memcmp(kp, rbufp->page + *bp, size) == 0)
604 #ifdef HASH_STATISTICS
609 		} else if (bp[1] == OVFLPAGE) {
/* Follow the overflow-page link; unpin on lookup failure. */
610 			rbufp = __get_buf(hashp, *bp, rbufp, 0);
612 				save_bufp->flags &= ~BUF_PIN;
616 			bp = (u_int16_t *)rbufp->page;
620 		} else if (bp[1] < REAL_KEY) {
/* Key too large for a page: compare via the big-pair machinery. */
622 			    __find_bigpair(hashp, rbufp, ndx, kp, size)) > 0)
627 			    __find_last_page(hashp, &bufp))) {
632 				rbufp = __get_buf(hashp, pageno, bufp, 0);
634 					save_bufp->flags &= ~BUF_PIN;
638 				bp = (u_int16_t *)rbufp->page;
/* Key not found: unpin and take the not-found action. */
643 		save_bufp->flags &= ~BUF_PIN;
/* HASH_PUT of a new key: append, unpinning on both outcomes. */
652 		if (__addel(hashp, rbufp, key, val)) {
653 			save_bufp->flags &= ~BUF_PIN;
656 		save_bufp->flags &= ~BUF_PIN;
662 		save_bufp->flags &= ~BUF_PIN;
669 		save_bufp->flags &= ~BUF_PIN;
/* Key found: hand back the stored value (big values go through
 * __big_return; inline values are returned by pointer into the page). */
672 	bp = (u_int16_t *)rbufp->page;
673 	if (bp[ndx + 1] < REAL_KEY) {
674 		if (__big_return(hashp, rbufp, ndx, val, 0))
677 		val->data = (u_char *)rbufp->page + (int)bp[ndx + 1];
678 		val->size = bp[ndx] - bp[ndx + 1];
/* Overwrite = delete old pair then add the new one. */
682 		if ((__delpair(hashp, rbufp, ndx)) ||
683 		    (__addel(hashp, rbufp, key, val))) {
684 			save_bufp->flags &= ~BUF_PIN;
689 		if (__delpair(hashp, rbufp, ndx))
695 	save_bufp->flags &= ~BUF_PIN;
/*
 * hash_seq -- DB seq method: sequential scan over every key/data pair.
 * Cursor state lives in the HTAB (cbucket = current bucket, cpage =
 * current page, cndx = current entry index).  R_FIRST rewinds; R_NEXT
 * (or 0) continues from the stored cursor.
 * NOTE(review): elided listing; loop/exit structure between visible
 * statements is not shown.
 */
700 hash_seq(const DB *dbp, DBT *key, DBT *data, u_int32_t flag)
707 	hashp = (HTAB *)dbp->internal;
708 	if (flag && flag != R_FIRST && flag != R_NEXT) {
709 		hashp->error = errno = EINVAL;
712 #ifdef HASH_STATISTICS
/* A negative cbucket means "scan not started"; R_FIRST also resets. */
715 	if ((hashp->cbucket < 0) || (flag == R_FIRST)) {
/* Keep advancing until a page with at least one entry is found. */
721 	for (bp = NULL; !bp || !bp[0]; ) {
722 		if (!(bufp = hashp->cpage)) {
/* No current page: walk buckets from the cursor position. */
723 			for (bucket = hashp->cbucket;
724 			    bucket <= hashp->MAX_BUCKET;
725 			    bucket++, hashp->cndx = 1) {
726 				bufp = __get_buf(hashp, bucket, NULL, 0);
730 				bp = (u_int16_t *)bufp->page;
734 			hashp->cbucket = bucket;
735 			if ((u_int32_t)hashp->cbucket > hashp->MAX_BUCKET) {
740 			bp = (u_int16_t *)hashp->cpage->page;
741 			if (flag == R_NEXT || flag == 0) {
/* Current page exhausted (cndx past entry count bp[0]). */
743 				if (hashp->cndx > bp[0]) {
/* Skip chained overflow-page link entries. */
756 		while (bp[hashp->cndx + 1] == OVFLPAGE) {
757 			bufp = hashp->cpage =
758 			    __get_buf(hashp, bp[hashp->cndx], bufp, 0);
761 			bp = (u_int16_t *)(bufp->page);
/* Big key/data pairs are materialized via __big_keydata. */
770 	if (bp[ndx + 1] < REAL_KEY) {
771 		if (__big_keydata(hashp, bufp, key, data, 1))
774 		if (hashp->cpage == 0)
/* Inline pair: return pointers into the page; sizes derived from the
 * neighboring offsets (first key ends at the page's BSIZE boundary). */
776 		key->data = (u_char *)hashp->cpage->page + bp[ndx];
777 		key->size = (ndx > 1 ? bp[ndx - 1] : hashp->BSIZE) - bp[ndx];
778 		data->data = (u_char *)hashp->cpage->page + bp[ndx + 1];
779 		data->size = bp[ndx] - bp[ndx + 1];
784 /********************************* UTILITIES ************************/
/*
 * __expand_table -- linear-hashing expansion: add one bucket
 * (incrementing MAX_BUCKET), growing the segment directory and
 * allocating a new segment when the new bucket spills past the current
 * ones, then split the old bucket's records into the new bucket.
 */
792 __expand_table(HTAB *hashp)
794 	u_int32_t old_bucket, new_bucket;
795 	int dirsize, new_segnum, spare_ndx;
797 #ifdef HASH_STATISTICS
800 	new_bucket = ++hashp->MAX_BUCKET;
/* The bucket being split is the new bucket's image under LOW_MASK. */
801 	old_bucket = (hashp->MAX_BUCKET & hashp->LOW_MASK);
803 	new_segnum = new_bucket >> hashp->SSHIFT;
805 	/* Check if we need a new segment */
806 	if (new_segnum >= hashp->nsegs) {
807 		/* Check if we need to expand directory */
808 		if (new_segnum >= hashp->DSIZE) {
809 			/* Reallocate directory */
810 			dirsize = hashp->DSIZE * sizeof(SEGMENT *);
811 			if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1))
/* NOTE(review): DSIZE is assigned the doubled *byte* size here, while
 * its other visible uses treat it as an entry count — behavior depends
 * on elided lines; confirm against the full source before changing. */
813 			hashp->DSIZE = dirsize << 1;
815 		if ((hashp->dir[new_segnum] =
816 		    (SEGMENT)calloc(hashp->SGSIZE, sizeof(SEGMENT))) == NULL)
822 	 * If the split point is increasing (MAX_BUCKET's log base 2
823 	 * * increases), we need to copy the current contents of the spare
824 	 * split bucket to the next bucket.
826 	spare_ndx = __log2(hashp->MAX_BUCKET + 1);
827 	if (spare_ndx > hashp->OVFL_POINT) {
828 		hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT];
829 		hashp->OVFL_POINT = spare_ndx;
832 	if (new_bucket > hashp->HIGH_MASK) {
833 		/* Starting a new doubling */
834 		hashp->LOW_MASK = hashp->HIGH_MASK;
835 		hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK;
837 	/* Relocate records to the new bucket */
838 	return (__split_page(hashp, old_bucket, new_bucket));
/*
 * hash_realloc -- grow *p_ptr to newsize by allocating a fresh buffer,
 * copying the old contents, and zero-filling the tail.  Exists so the
 * old pointer survives when allocation fails (see original note).
 */
842  * If realloc guarantees that the pointer is not destroyed if the realloc
843  * fails, then this routine can go away.
846 hash_realloc(SEGMENT **p_ptr, int oldsize, int newsize)
850 	if ( (p = malloc(newsize)) ) {
851 		memmove(p, *p_ptr, oldsize);
/* Zero the newly-added region so callers see initialized slots. */
852 		memset((char *)p + oldsize, 0, newsize - oldsize);
/*
 * __call_hash -- map a key to its current bucket number using the
 * linear-hashing two-mask scheme: try HIGH_MASK first; if that lands
 * past MAX_BUCKET (bucket not yet split into), fall back to LOW_MASK.
 */
860 __call_hash(HTAB *hashp, char *k, int len)
862 	unsigned int n, bucket;
864 	n = hashp->hash(k, len);
865 	bucket = n & hashp->HIGH_MASK;
866 	if (bucket > hashp->MAX_BUCKET)
867 		bucket = bucket & hashp->LOW_MASK;
/*
 * alloc_segs -- allocate the segment directory (DSIZE pointers) and one
 * contiguous store of nsegs segments, wiring each directory slot to its
 * slice of the store.  On allocation failure the whole table is torn
 * down via hdestroy() (see original note).
 */
872  * Allocate segment table.  On error, destroy the table and set errno.
874  * Returns 0 on success
877 alloc_segs(HTAB *hashp, int nsegs)
885 	    (SEGMENT *)calloc(hashp->DSIZE, sizeof(SEGMENT *))) == NULL) {
887 		(void)hdestroy(hashp);
891 	hashp->nsegs = nsegs;
894 	/* Allocate segments */
/* One calloc for all segments: nsegs << SSHIFT bucket slots total. */
895 	if ((store = (SEGMENT)calloc(nsegs << hashp->SSHIFT,
896 	    sizeof(SEGMENT))) == NULL) {
898 		(void)hdestroy(hashp);
902 	for (i = 0; i < nsegs; i++)
903 		hashp->dir[i] = &store[i << hashp->SSHIFT];
907 #if BYTE_ORDER == LITTLE_ENDIAN
/*
 * swap_header_copy -- byteswap every field of *srcp into *destp
 * (P_32_COPY / P_16_COPY swap while copying), leaving the source
 * header untouched.  Used when writing the header on little-endian
 * hosts; compiled only there.
 */
909  * Hashp->hdr needs to be byteswapped.
912 swap_header_copy(HASHHDR *srcp, HASHHDR *destp)
916 	P_32_COPY(srcp->magic, destp->magic);
917 	P_32_COPY(srcp->version, destp->version);
918 	P_32_COPY(srcp->lorder, destp->lorder);
919 	P_32_COPY(srcp->bsize, destp->bsize);
920 	P_32_COPY(srcp->bshift, destp->bshift);
921 	P_32_COPY(srcp->dsize, destp->dsize);
922 	P_32_COPY(srcp->ssize, destp->ssize);
923 	P_32_COPY(srcp->sshift, destp->sshift);
924 	P_32_COPY(srcp->ovfl_point, destp->ovfl_point);
925 	P_32_COPY(srcp->last_freed, destp->last_freed);
926 	P_32_COPY(srcp->max_bucket, destp->max_bucket);
927 	P_32_COPY(srcp->high_mask, destp->high_mask);
928 	P_32_COPY(srcp->low_mask, destp->low_mask);
929 	P_32_COPY(srcp->ffactor, destp->ffactor);
930 	P_32_COPY(srcp->nkeys, destp->nkeys);
931 	P_32_COPY(srcp->hdrpages, destp->hdrpages);
932 	P_32_COPY(srcp->h_charkey, destp->h_charkey);
/* The cached spares (32-bit) and bitmap addresses (16-bit) arrays. */
933 	for (i = 0; i < NCACHED; i++) {
934 		P_32_COPY(srcp->spares[i], destp->spares[i]);
935 		P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]);
/*
 * swap_header -- byteswap hashp->hdr in place (M_32_SWAP / M_16_SWAP
 * mutate their operand).  Counterpart of swap_header_copy, used after
 * reading the header on little-endian hosts.
 */
940 swap_header(HTAB *hashp)
947 	M_32_SWAP(hdrp->magic);
948 	M_32_SWAP(hdrp->version);
949 	M_32_SWAP(hdrp->lorder);
950 	M_32_SWAP(hdrp->bsize);
951 	M_32_SWAP(hdrp->bshift);
952 	M_32_SWAP(hdrp->dsize);
953 	M_32_SWAP(hdrp->ssize);
954 	M_32_SWAP(hdrp->sshift);
955 	M_32_SWAP(hdrp->ovfl_point);
956 	M_32_SWAP(hdrp->last_freed);
957 	M_32_SWAP(hdrp->max_bucket);
958 	M_32_SWAP(hdrp->high_mask);
959 	M_32_SWAP(hdrp->low_mask);
960 	M_32_SWAP(hdrp->ffactor);
961 	M_32_SWAP(hdrp->nkeys);
962 	M_32_SWAP(hdrp->hdrpages);
963 	M_32_SWAP(hdrp->h_charkey);
/* Swap the cached spares and bitmap-address arrays as well. */
964 	for (i = 0; i < NCACHED; i++) {
965 		M_32_SWAP(hdrp->spares[i]);
966 		M_16_SWAP(hdrp->bitmaps[i]);