2 * Copyright (c) 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if defined(LIBC_SCCS) && !defined(lint)
34 static char sccsid[] = "@(#)hash.c 8.9 (Berkeley) 6/16/94";
35 #endif /* LIBC_SCCS and not lint */
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include "namespace.h"
40 #include <sys/param.h>
52 #include "un-namespace.h"
/*
 * Forward declarations for the file-local helpers and for the DB
 * method implementations that __hash_open installs into the DB ops
 * vector (close/del/get/put/seq/sync/fd).
 * NOTE(review): this file is an incomplete extraction -- original
 * source lines are missing between many of the lines shown.
 */
59 static int alloc_segs(HTAB *, int);
60 static int flush_meta(HTAB *);
61 static int hash_access(HTAB *, ACTION, DBT *, DBT *);
62 static int hash_close(DB *);
63 static int hash_delete(const DB *, const DBT *, u_int32_t);
64 static int hash_fd(const DB *);
65 static int hash_get(const DB *, const DBT *, DBT *, u_int32_t);
66 static int hash_put(const DB *, DBT *, const DBT *, u_int32_t);
67 static void *hash_realloc(SEGMENT **, int, int);
68 static int hash_seq(const DB *, DBT *, DBT *, u_int32_t);
69 static int hash_sync(const DB *, u_int32_t);
70 static int hdestroy(HTAB *);
71 static HTAB *init_hash(HTAB *, const char *, const HASHINFO *);
72 static int init_htab(HTAB *, int);
/* Header byte-swapping is only needed on little-endian hosts; the
 * on-disk header is stored in the creator's byte order (hdr.lorder). */
73 #if BYTE_ORDER == LITTLE_ENDIAN
74 static void swap_header(HTAB *);
75 static void swap_header_copy(HASHHDR *, HASHHDR *);
/* MOD(x, y) == x % y, valid only when y is a power of two. */
78 /* Fast arithmetic, relying on powers of 2, */
79 #define MOD(x, y) ((x) & ((y) - 1))
/* Save the error code into the local save_errno and jump to the
 * function's cleanup label. */
81 #define RETURN_ERROR(ERR, LOC) { save_errno = ERR; goto LOC; }
/* Global instrumentation counters, compiled in only for profiling. */
88 #ifdef HASH_STATISTICS
89 int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
92 /************************** INTERFACE ROUTINES ***************************/
/*
 * __hash_open --
 *	Open (or create) a hash-table database file and return a DB
 *	handle wrapping a freshly allocated HTAB; on failure the
 *	RETURN_ERROR paths set errno and clean up.
 *	NOTE(review): interior lines are missing from this extraction;
 *	the visible fragments are kept byte-identical.
 */
97 __hash_open(const char *file, int flags, int mode,
98 const HASHINFO *info, /* Special directives for create */
104 int bpages, hdrsize, new_table, nsegs, save_errno;
/* Write-only access is rejected: lookups must be able to read. */
106 if ((flags & O_ACCMODE) == O_WRONLY) {
111 if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB))))
116 * Even if user wants write only, we need to be able to read
117 * the actual file, so we need to open it read/write. But, the
118 * field in the hashp structure needs to be accurate so that
119 * we can check accesses.
121 hashp->flags = flags;
/* O_CLOEXEC keeps the descriptor from leaking across exec. */
124 if ((hashp->fp = _open(file, flags | O_CLOEXEC, mode)) == -1)
125 RETURN_ERROR(errno, error0);
/* An empty file opened writable means we are creating a new table. */
126 new_table = _fstat(hashp->fp, &statbuf) == 0 &&
127 statbuf.st_size == 0 && (flags & O_ACCMODE) != O_RDONLY;
/* New table: build the in-core header from defaults + info. */
132 if (!(hashp = init_hash(hashp, file, info)))
133 RETURN_ERROR(errno, error1);
135 /* Table already exists */
/* A caller-supplied hash function overrides the default; it must
 * match the one used at creation (checked via H_CHARKEY below). */
136 if (info && info->hash)
137 hashp->hash = info->hash;
139 hashp->hash = __default_hash;
/* Read the on-disk header; swapped on little-endian hosts. */
141 hdrsize = _read(hashp->fp, &hashp->hdr, sizeof(HASHHDR));
142 #if BYTE_ORDER == LITTLE_ENDIAN
146 RETURN_ERROR(errno, error1);
/* A short header read means the file is not a hash database. */
147 if (hdrsize != sizeof(HASHHDR))
148 RETURN_ERROR(EFTYPE, error1);
149 /* Verify file type, versions and hash function */
150 if (hashp->MAGIC != HASHMAGIC)
151 RETURN_ERROR(EFTYPE, error1);
152 #define OLDHASHVERSION 1
153 if (hashp->VERSION != HASHVERSION &&
154 hashp->VERSION != OLDHASHVERSION)
155 RETURN_ERROR(EFTYPE, error1);
/* Stored hash of CHARKEY must match: same hash function required. */
156 if ((int32_t)hashp->hash(CHARKEY, sizeof(CHARKEY)) != hashp->H_CHARKEY)
157 RETURN_ERROR(EFTYPE, error1);
159 * Figure out how many segments we need. Max_Bucket is the
160 * maximum bucket number, so the number of buckets is
163 nsegs = (hashp->MAX_BUCKET + 1 + hashp->SGSIZE - 1) /
165 if (alloc_segs(hashp, nsegs))
167 * If alloc_segs fails, table will have been destroyed
168 * and errno will have been set.
171 /* Read in bitmaps */
/* Number of bitmap pages covering all overflow addresses in use. */
172 bpages = (hashp->SPARES[hashp->OVFL_POINT] +
173 (hashp->BSIZE << BYTE_SHIFT) - 1) >>
174 (hashp->BSHIFT + BYTE_SHIFT);
176 hashp->nmaps = bpages;
177 (void)memset(&hashp->mapp[0], 0, bpages * sizeof(u_int32_t *));
180 /* Initialize Buffer Manager */
181 if (info && info->cachesize)
182 __buf_init(hashp, info->cachesize);
184 __buf_init(hashp, DEF_BUFSIZE);
186 hashp->new_file = new_table;
/* Metadata is flushed back to disk only for writable opens. */
187 hashp->save_file = file && (hashp->flags & O_RDWR);
189 if (!(dbp = (DB *)malloc(sizeof(DB)))) {
/* Install the access-method vector callers invoke through dbp. */
195 dbp->internal = hashp;
196 dbp->close = hash_close;
197 dbp->del = hash_delete;
202 dbp->sync = hash_sync;
/* DEBUG dump of the header fields, presumably under #ifdef DEBUG
 * (the conditional itself is missing from this extraction). */
206 (void)fprintf(stderr,
207 "%s\n%s%p\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
209 "TABLE POINTER ", hashp,
210 "BUCKET SIZE ", hashp->BSIZE,
211 "BUCKET SHIFT ", hashp->BSHIFT,
212 "DIRECTORY SIZE ", hashp->DSIZE,
213 "SEGMENT SIZE ", hashp->SGSIZE,
214 "SEGMENT SHIFT ", hashp->SSHIFT,
215 "FILL FACTOR ", hashp->FFACTOR,
216 "MAX BUCKET ", hashp->MAX_BUCKET,
217 "OVFL POINT ", hashp->OVFL_POINT,
218 "LAST FREED ", hashp->LAST_FREED,
219 "HIGH MASK ", hashp->HIGH_MASK,
220 "LOW MASK ", hashp->LOW_MASK,
221 "NSEGS ", hashp->nsegs,
222 "NKEYS ", hashp->NKEYS);
224 #ifdef HASH_STATISTICS
225 hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0;
/* error cleanup path: close the descriptor opened above. */
231 (void)_close(hashp->fp);
/* hash_close body fragment: tear down the whole table via hdestroy
 * and return its result.  NOTE(review): the function signature and
 * surrounding lines are missing from this extraction. */
248 hashp = (HTAB *)dbp->internal;
249 retval = hdestroy(hashp);
/*
 * hash_fd --
 *	Return the file descriptor backing the table; fp == -1 is the
 *	no-backing-file case (error handling lines missing here).
 */
255 hash_fd(const DB *dbp)
262 hashp = (HTAB *)dbp->internal;
263 if (hashp->fp == -1) {
270 /************************** LOCAL CREATION ROUTINES **********************/
/*
 * init_hash --
 *	Fill in default header parameters for a newly created table,
 *	tune the bucket size to the filesystem block size, then apply
 *	caller overrides from *info before calling init_htab.
 *	Returns NULL on failure (per the caller's NULL check).
 */
272 init_hash(HTAB *hashp, const char *file, const HASHINFO *info)
/* Start from compile-time defaults; info may override below. */
279 hashp->LORDER = BYTE_ORDER;
280 hashp->BSIZE = DEF_BUCKET_SIZE;
281 hashp->BSHIFT = DEF_BUCKET_SHIFT;
282 hashp->SGSIZE = DEF_SEGSIZE;
283 hashp->SSHIFT = DEF_SEGSIZE_SHIFT;
284 hashp->DSIZE = DEF_DIRSIZE;
285 hashp->FFACTOR = DEF_FFACTOR;
286 hashp->hash = __default_hash;
287 memset(hashp->SPARES, 0, sizeof(hashp->SPARES));
288 memset(hashp->BITMAPS, 0, sizeof (hashp->BITMAPS));
290 /* Fix bucket size to be optimal for file system */
292 if (stat(file, &statbuf))
/* Use the filesystem's preferred I/O size, capped at MAX_BSIZE. */
294 hashp->BSIZE = statbuf.st_blksize;
295 if (hashp->BSIZE > MAX_BSIZE)
296 hashp->BSIZE = MAX_BSIZE;
297 hashp->BSHIFT = __log2(hashp->BSIZE);
302 /* Round pagesize up to power of 2 */
303 hashp->BSHIFT = __log2(info->bsize);
304 hashp->BSIZE = 1 << hashp->BSHIFT;
305 if (hashp->BSIZE > MAX_BSIZE) {
311 hashp->FFACTOR = info->ffactor;
313 hashp->hash = info->hash;
/* Only the two canonical byte orders are accepted. */
317 if (info->lorder != BIG_ENDIAN &&
318 info->lorder != LITTLE_ENDIAN) {
322 hashp->LORDER = info->lorder;
325 /* init_htab should destroy the table and set errno if it fails */
326 if (init_htab(hashp, nelem))
332 * This calls alloc_segs which may run out of memory. Alloc_segs will destroy
333 * the table and set errno, so we just pass the error information along.
335 * Returns 0 on No Error
/*
 * init_htab --
 *	Size the new table for nelem elements: pick a power-of-two
 *	bucket count from nelem/FFACTOR, initialize the split-point
 *	bookkeeping (SPARES/OVFL_POINT) and masks, then allocate the
 *	segment directory via alloc_segs.  Returns alloc_segs's result
 *	(0 on success; on failure the table is destroyed, errno set).
 */
338 init_htab(HTAB *hashp, int nelem)
340 int nbuckets, nsegs, l2;
343 * Divide number of elements by the fill factor and determine a
344 * desired number of buckets. Allocate space for the next greater
345 * power of two number of buckets.
/* Ceiling division by the fill factor; at least 2 buckets. */
347 nelem = (nelem - 1) / hashp->FFACTOR + 1;
349 l2 = __log2(MAX(nelem, 2));
352 hashp->SPARES[l2] = l2 + 1;
353 hashp->SPARES[l2 + 1] = l2 + 1;
354 hashp->OVFL_POINT = l2;
355 hashp->LAST_FREED = 2;
357 /* First bitmap page is at: splitpoint l2 page offset 1 */
358 if (__ibitmap(hashp, OADDR_OF(l2, 1), l2 + 1, 0))
/* Masks for linear hashing: LOW covers the current doubling,
 * HIGH covers the next. */
361 hashp->MAX_BUCKET = hashp->LOW_MASK = nbuckets - 1;
362 hashp->HIGH_MASK = (nbuckets << 1) - 1;
363 hashp->HDRPAGES = ((MAX(sizeof(HASHHDR), MINHDRSIZE) - 1) >>
/* Round the segment count up to a power of two. */
366 nsegs = (nbuckets - 1) / hashp->SGSIZE + 1;
367 nsegs = 1 << __log2(nsegs);
369 if (nsegs > hashp->DSIZE)
370 hashp->DSIZE = nsegs;
371 return (alloc_segs(hashp, nsegs));
374 /********************** DESTROY/CLOSE ROUTINES ************************/
377 * Flushes any changes to the file if necessary and destroys the hashp
378 * structure, freeing all allocated space.
/*
 * hdestroy --
 *	Flush dirty state (buffers via __buf_free, header via
 *	flush_meta), free all allocated memory (segments, bitmaps,
 *	temp buffers), fsync and close the file descriptor.
 */
381 hdestroy(HTAB *hashp)
387 #ifdef HASH_STATISTICS
388 (void)fprintf(stderr, "hdestroy: accesses %ld collisions %ld\n",
389 hash_accesses, hash_collisions);
390 (void)fprintf(stderr, "hdestroy: expansions %ld\n",
392 (void)fprintf(stderr, "hdestroy: overflows %ld\n",
394 (void)fprintf(stderr, "keys %ld maxp %d segmentcount %d\n",
395 hashp->NKEYS, hashp->MAX_BUCKET, hashp->nsegs);
397 for (i = 0; i < NCACHED; i++)
398 (void)fprintf(stderr,
399 "spares[%d] = %d\n", i, hashp->SPARES[i]);
402 * Call on buffer manager to free buffers, and if required,
403 * write them to disk.
405 if (__buf_free(hashp, 1, hashp->save_file))
/* The initial segments were one calloc'd chunk (see alloc_segs);
 * extra segments allocated during expansion are freed one by one. */
408 free(*hashp->dir); /* Free initial segments */
409 /* Free extra segments */
410 while (hashp->exsegs--)
411 free(hashp->dir[--hashp->nsegs])
414 if (flush_meta(hashp) && !save_errno)
/* Free the overflow-page bitmaps. */
417 for (i = 0; i < hashp->nmaps; i++)
419 free(hashp->mapp[i]);
421 free(hashp->tmp_key);
423 free(hashp->tmp_buf);
/* fsync before close so a writable table reaches stable storage. */
425 if (hashp->fp != -1) {
426 (void)_fsync(hashp->fp);
427 (void)_close(hashp->fp);
439 * Write modified pages to disk
/*
 * hash_sync --
 *	Write modified pages and the metadata header to disk, then
 *	fsync.  A read-only table (!save_file) has nothing to flush.
 */
446 hash_sync(const DB *dbp, u_int32_t flags)
458 hashp = (HTAB *)dbp->internal;
459 if (!hashp->save_file)
/* __buf_free(…, 0, 1): keep buffers but force dirty ones out. */
461 if (__buf_free(hashp, 0, 1) || flush_meta(hashp))
463 if (hashp->fp != -1 && _fsync(hashp->fp) != 0)
472 * -1 indicates that errno should be set
/*
 * flush_meta --
 *	Write the in-core header (byte-swapped into a copy on
 *	little-endian hosts) to offset 0 of the file with pwrite, then
 *	push out the cached overflow bitmaps via __put_page.
 *	Per the comment above, -1 indicates errno should be consulted.
 */
475 flush_meta(HTAB *hashp)
478 #if BYTE_ORDER == LITTLE_ENDIAN
/* Nothing to flush for a read-only table. */
483 if (!hashp->save_file)
/* Stamp identity fields so a fresh table gets a valid header. */
485 hashp->MAGIC = HASHMAGIC;
486 hashp->VERSION = HASHVERSION;
487 hashp->H_CHARKEY = hashp->hash(CHARKEY, sizeof(CHARKEY));
/* Swap into whdrp (a copy) so the in-core header stays native. */
491 #if BYTE_ORDER == LITTLE_ENDIAN
493 swap_header_copy(&hashp->hdr, whdrp);
495 if ((wsize = pwrite(fp, whdrp, sizeof(HASHHDR), (off_t)0)) == -1)
/* A short write is an error too. */
498 if (wsize != sizeof(HASHHDR)) {
500 hashp->error = errno;
/* Write back every cached bitmap page. */
503 for (i = 0; i < NCACHED; i++)
505 if (__put_page(hashp, (char *)hashp->mapp[i],
506 hashp->BITMAPS[i], 0, 1))
511 /*******************************SEARCH ROUTINES *****************************/
513 * All the access routines return
517 * 1 to indicate an external ERROR (i.e. key not found, etc)
518 * -1 to indicate an internal ERROR (i.e. out of memory, etc)
/*
 * hash_get --
 *	DB get method: validate the flag (EINVAL path visible below)
 *	and delegate to hash_access with HASH_GET.
 *	Returns 0 on success, 1 if the key is absent, -1 on error.
 */
521 hash_get(const DB *dbp, const DBT *key, DBT *data, u_int32_t flag)
525 hashp = (HTAB *)dbp->internal;
527 hashp->error = errno = EINVAL;
530 return (hash_access(hashp, HASH_GET, (DBT *)key, data));
/*
 * hash_put --
 *	DB put method: only R_NOOVERWRITE (or 0) is a legal flag, and
 *	a read-only table refuses writes with EPERM.  Delegates to
 *	hash_access with HASH_PUTNEW or HASH_PUT accordingly.
 */
534 hash_put(const DB *dbp, DBT *key, const DBT *data, u_int32_t flag)
538 hashp = (HTAB *)dbp->internal;
539 if (flag && flag != R_NOOVERWRITE) {
540 hashp->error = errno = EINVAL;
/* Writes are rejected on a table opened O_RDONLY. */
543 if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
544 hashp->error = errno = EPERM;
547 return (hash_access(hashp, flag == R_NOOVERWRITE ?
548 HASH_PUTNEW : HASH_PUT, (DBT *)key, (DBT *)data));
/*
 * hash_delete --
 *	DB del method: only R_CURSOR (or 0) is a legal flag; writes
 *	are refused on a read-only table.  Delegates to hash_access
 *	with HASH_DELETE and no value.
 */
552 hash_delete(const DB *dbp, const DBT *key,
553 u_int32_t flag) /* Ignored */
557 hashp = (HTAB *)dbp->internal;
558 if (flag && flag != R_CURSOR) {
559 hashp->error = errno = EINVAL;
562 if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
563 hashp->error = errno = EPERM;
566 return (hash_access(hashp, HASH_DELETE, (DBT *)key, NULL));
570 * Assume that hashp has been set in wrapper routine.
/*
 * hash_access --
 *	Core lookup engine shared by get/put/delete.  Hashes the key,
 *	walks the bucket's page chain (following OVFLPAGE links and
 *	big-key pairs), then dispatches on the action.  The first page
 *	of the chain is pinned (BUF_PIN) for the duration and unpinned
 *	on every exit path.
 *	NOTE(review): many interior lines (the action switch labels,
 *	some loop bodies) are missing from this extraction.
 */
573 hash_access(HTAB *hashp, ACTION action, DBT *key, DBT *val)
576 BUFHEAD *bufp, *save_bufp;
578 int n, ndx, off, size;
582 #ifdef HASH_STATISTICS
588 kp = (char *)key->data;
589 rbufp = __get_buf(hashp, __call_hash(hashp, kp, size), NULL, 0);
594 /* Pin the bucket chain */
595 rbufp->flags |= BUF_PIN;
/* Page layout: bp[0] is the entry count; entries are u_int16_t
 * offsets, key/data offsets alternating. */
596 for (bp = (u_int16_t *)rbufp->page, n = *bp++, ndx = 1; ndx < n;)
597 if (bp[1] >= REAL_KEY) {
598 /* Real key/data pair */
/* Match requires equal length and equal bytes. */
599 if (size == off - *bp &&
600 memcmp(kp, rbufp->page + *bp, size) == 0)
603 #ifdef HASH_STATISTICS
/* Overflow page: continue the chain on the linked page. */
608 } else if (bp[1] == OVFLPAGE) {
609 rbufp = __get_buf(hashp, *bp, rbufp, 0);
/* __get_buf failure: unpin and bail. */
611 save_bufp->flags &= ~BUF_PIN;
615 bp = (u_int16_t *)rbufp->page;
/* Entry below REAL_KEY: a "big" key/data pair spanning pages. */
619 } else if (bp[1] < REAL_KEY) {
621 __find_bigpair(hashp, rbufp, ndx, kp, size)) > 0)
626 __find_last_page(hashp, &bufp))) {
631 rbufp = __get_buf(hashp, pageno, bufp, 0);
633 save_bufp->flags &= ~BUF_PIN;
637 bp = (u_int16_t *)rbufp->page;
/* Not found: for PUT/PUTNEW add the element; GET/DELETE fail. */
642 save_bufp->flags &= ~BUF_PIN;
651 if (__addel(hashp, rbufp, key, val)) {
652 save_bufp->flags &= ~BUF_PIN;
655 save_bufp->flags &= ~BUF_PIN;
661 save_bufp->flags &= ~BUF_PIN;
/* Found: dispatch on the action (labels missing in extraction). */
668 save_bufp->flags &= ~BUF_PIN;
671 bp = (u_int16_t *)rbufp->page;
/* GET: big pair goes through __big_return; otherwise point val
 * straight into the pinned page. */
672 if (bp[ndx + 1] < REAL_KEY) {
673 if (__big_return(hashp, rbufp, ndx, val, 0))
676 val->data = (u_char *)rbufp->page + (int)bp[ndx + 1];
677 val->size = bp[ndx] - bp[ndx + 1];
/* PUT over an existing key: delete then re-add the pair. */
681 if ((__delpair(hashp, rbufp, ndx)) ||
682 (__addel(hashp, rbufp, key, val))) {
683 save_bufp->flags &= ~BUF_PIN;
688 if (__delpair(hashp, rbufp, ndx))
694 save_bufp->flags &= ~BUF_PIN;
/*
 * hash_seq --
 *	DB seq method: sequential iteration over every key/data pair.
 *	Cursor state lives in the HTAB (cbucket, cpage, cndx);
 *	R_FIRST resets it, R_NEXT/0 advances it.  Buckets are scanned
 *	in order, following OVFLPAGE chains within each bucket.
 *	NOTE(review): interior lines are missing from this extraction.
 */
699 hash_seq(const DB *dbp, DBT *key, DBT *data, u_int32_t flag)
706 hashp = (HTAB *)dbp->internal;
707 if (flag && flag != R_FIRST && flag != R_NEXT) {
708 hashp->error = errno = EINVAL;
711 #ifdef HASH_STATISTICS
/* R_FIRST (or an unstarted cursor) restarts from bucket 0. */
714 if ((hashp->cbucket < 0) || (flag == R_FIRST)) {
/* Skip empty pages: loop until a page with entries (bp[0] != 0). */
720 for (bp = NULL; !bp || !bp[0]; ) {
721 if (!(bufp = hashp->cpage)) {
/* No current page: scan forward for the next non-empty bucket. */
722 for (bucket = hashp->cbucket;
723 bucket <= hashp->MAX_BUCKET;
724 bucket++, hashp->cndx = 1) {
725 bufp = __get_buf(hashp, bucket, NULL, 0);
729 bp = (u_int16_t *)bufp->page;
733 hashp->cbucket = bucket;
/* Past the last bucket: iteration is complete. */
734 if ((u_int32_t)hashp->cbucket > hashp->MAX_BUCKET) {
739 bp = (u_int16_t *)hashp->cpage->page;
/* Advance the cursor index; wrap to the next bucket when the
 * current page is exhausted. */
740 if (flag == R_NEXT || flag == 0) {
742 if (hashp->cndx > bp[0]) {
/* Follow overflow-page links to the page holding the entry. */
755 while (bp[hashp->cndx + 1] == OVFLPAGE) {
756 bufp = hashp->cpage =
757 __get_buf(hashp, bp[hashp->cndx], bufp, 0);
760 bp = (u_int16_t *)(bufp->page);
/* Big pair: fetch through __big_keydata; otherwise point key/data
 * into the page using the offset entries. */
769 if (bp[ndx + 1] < REAL_KEY) {
770 if (__big_keydata(hashp, bufp, key, data, 1))
773 if (hashp->cpage == 0)
775 key->data = (u_char *)hashp->cpage->page + bp[ndx];
776 key->size = (ndx > 1 ? bp[ndx - 1] : hashp->BSIZE) - bp[ndx];
777 data->data = (u_char *)hashp->cpage->page + bp[ndx + 1];
778 data->size = bp[ndx] - bp[ndx + 1];
783 /********************************* UTILITIES ************************/
/*
 * __expand_table --
 *	Linear-hashing expansion: add one bucket (MAX_BUCKET+1),
 *	growing the segment directory if needed, bump the split point
 *	when MAX_BUCKET crosses a power of two, adjust the masks at a
 *	new doubling, and split the old bucket's records into the new
 *	one via __split_page.
 */
791 __expand_table(HTAB *hashp)
793 u_int32_t old_bucket, new_bucket;
794 int dirsize, new_segnum, spare_ndx;
796 #ifdef HASH_STATISTICS
/* The bucket being split is the new bucket's image under LOW_MASK. */
799 new_bucket = ++hashp->MAX_BUCKET;
800 old_bucket = (hashp->MAX_BUCKET & hashp->LOW_MASK);
802 new_segnum = new_bucket >> hashp->SSHIFT;
804 /* Check if we need a new segment */
805 if (new_segnum >= hashp->nsegs) {
806 /* Check if we need to expand directory */
807 if (new_segnum >= hashp->DSIZE) {
808 /* Reallocate directory */
/* Directory doubles in size each time it fills. */
809 dirsize = hashp->DSIZE * sizeof(SEGMENT *);
810 if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1))
812 hashp->DSIZE = dirsize << 1;
814 if ((hashp->dir[new_segnum] =
815 (SEGMENT)calloc(hashp->SGSIZE, sizeof(SEGMENT))) == NULL)
821 * If the split point is increasing (MAX_BUCKET's log base 2
822 * * increases), we need to copy the current contents of the spare
823 * split bucket to the next bucket.
825 spare_ndx = __log2(hashp->MAX_BUCKET + 1);
826 if (spare_ndx > hashp->OVFL_POINT) {
827 hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT];
828 hashp->OVFL_POINT = spare_ndx;
831 if (new_bucket > hashp->HIGH_MASK) {
832 /* Starting a new doubling */
833 hashp->LOW_MASK = hashp->HIGH_MASK;
834 hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK;
836 /* Relocate records to the new bucket */
837 return (__split_page(hashp, old_bucket, new_bucket));
841 * If realloc guarantees that the pointer is not destroyed if the realloc
842 * fails, then this routine can go away.
/*
 * hash_realloc --
 *	Grow *p_ptr from oldsize to newsize bytes: malloc a new block,
 *	copy the old contents, zero the tail.  Unlike realloc, the old
 *	pointer survives on failure (see comment above).
 *	NOTE(review): the free of the old block / pointer update are
 *	on lines missing from this extraction.
 */
845 hash_realloc(SEGMENT **p_ptr, int oldsize, int newsize)
849 if ( (p = malloc(newsize)) ) {
850 memmove(p, *p_ptr, oldsize);
851 memset((char *)p + oldsize, 0, newsize - oldsize);
/*
 * __call_hash --
 *	Map key bytes k[0..len) to a bucket number: hash, mask with
 *	HIGH_MASK, and fall back to LOW_MASK when the result is past
 *	MAX_BUCKET (standard linear-hashing address calculation).
 */
859 __call_hash(HTAB *hashp, char *k, int len)
861 unsigned int n, bucket;
863 n = hashp->hash(k, len);
864 bucket = n & hashp->HIGH_MASK;
/* Bucket not yet split off: use the previous doubling's mask. */
865 if (bucket > hashp->MAX_BUCKET)
866 bucket = bucket & hashp->LOW_MASK;
871 * Allocate segment table. On error, destroy the table and set errno.
873 * Returns 0 on success
/*
 * alloc_segs --
 *	Allocate the segment directory (DSIZE pointers) plus the
 *	initial nsegs segments as one contiguous calloc'd store, and
 *	point each directory slot into it.  On allocation failure the
 *	table is destroyed via hdestroy and errno set; returns 0 on
 *	success (per the comment above).
 */
876 alloc_segs(HTAB *hashp, int nsegs)
884 (SEGMENT *)calloc(hashp->DSIZE, sizeof(SEGMENT *))) == NULL) {
886 (void)hdestroy(hashp);
890 hashp->nsegs = nsegs;
893 /* Allocate segments */
/* One chunk for all initial segments -- hdestroy frees *dir once. */
894 if ((store = (SEGMENT)calloc(nsegs << hashp->SSHIFT,
895 sizeof(SEGMENT))) == NULL) {
897 (void)hdestroy(hashp);
901 for (i = 0; i < nsegs; i++)
902 hashp->dir[i] = &store[i << hashp->SSHIFT];
906 #if BYTE_ORDER == LITTLE_ENDIAN
908 * Hashp->hdr needs to be byteswapped.
/*
 * swap_header_copy --
 *	Copy *srcp into *destp with every 32-bit field byte-swapped
 *	(and the 16-bit bitmap offsets 16-bit-swapped), leaving the
 *	source untouched.  Used to write a little-endian host's header
 *	to disk without disturbing the in-core copy.
 */
911 swap_header_copy(HASHHDR *srcp, HASHHDR *destp)
915 P_32_COPY(srcp->magic, destp->magic);
916 P_32_COPY(srcp->version, destp->version);
917 P_32_COPY(srcp->lorder, destp->lorder);
918 P_32_COPY(srcp->bsize, destp->bsize);
919 P_32_COPY(srcp->bshift, destp->bshift);
920 P_32_COPY(srcp->dsize, destp->dsize);
921 P_32_COPY(srcp->ssize, destp->ssize);
922 P_32_COPY(srcp->sshift, destp->sshift);
923 P_32_COPY(srcp->ovfl_point, destp->ovfl_point);
924 P_32_COPY(srcp->last_freed, destp->last_freed);
925 P_32_COPY(srcp->max_bucket, destp->max_bucket);
926 P_32_COPY(srcp->high_mask, destp->high_mask);
927 P_32_COPY(srcp->low_mask, destp->low_mask);
928 P_32_COPY(srcp->ffactor, destp->ffactor);
929 P_32_COPY(srcp->nkeys, destp->nkeys);
930 P_32_COPY(srcp->hdrpages, destp->hdrpages);
931 P_32_COPY(srcp->h_charkey, destp->h_charkey);
/* The per-splitpoint spare counts and bitmap page offsets. */
932 for (i = 0; i < NCACHED; i++) {
933 P_32_COPY(srcp->spares[i], destp->spares[i]);
934 P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]);
/*
 * swap_header --
 *	Byte-swap hashp->hdr in place (M_32_SWAP / M_16_SWAP mutate
 *	their operand), converting a just-read on-disk header to host
 *	byte order on little-endian machines.
 */
939 swap_header(HTAB *hashp)
946 M_32_SWAP(hdrp->magic);
947 M_32_SWAP(hdrp->version);
948 M_32_SWAP(hdrp->lorder);
949 M_32_SWAP(hdrp->bsize);
950 M_32_SWAP(hdrp->bshift);
951 M_32_SWAP(hdrp->dsize);
952 M_32_SWAP(hdrp->ssize);
953 M_32_SWAP(hdrp->sshift);
954 M_32_SWAP(hdrp->ovfl_point);
955 M_32_SWAP(hdrp->last_freed);
956 M_32_SWAP(hdrp->max_bucket);
957 M_32_SWAP(hdrp->high_mask);
958 M_32_SWAP(hdrp->low_mask);
959 M_32_SWAP(hdrp->ffactor);
960 M_32_SWAP(hdrp->nkeys);
961 M_32_SWAP(hdrp->hdrpages);
962 M_32_SWAP(hdrp->h_charkey);
/* Same field set as swap_header_copy, swapped in place. */
963 for (i = 0; i < NCACHED; i++) {
964 M_32_SWAP(hdrp->spares[i]);
965 M_16_SWAP(hdrp->bitmaps[i]);