2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1990, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #if defined(LIBC_SCCS) && !defined(lint)
36 static char sccsid[] = "@(#)hash.c 8.9 (Berkeley) 6/16/94";
37 #endif /* LIBC_SCCS and not lint */
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
41 #include "namespace.h"
42 #include <sys/param.h>
54 #include "un-namespace.h"
/*
 * NOTE(review): this file was damaged by a text-extraction pass — each
 * surviving line carries its original line number fused onto the front and
 * many lines are missing.  Declarations below are kept byte-for-byte;
 * comments only are added.
 */
/* Forward declarations for the static access-method implementation. */
61 static int alloc_segs(HTAB *, int);
62 static int flush_meta(HTAB *);
63 static int hash_access(HTAB *, ACTION, DBT *, DBT *);
64 static int hash_close(DB *);
65 static int hash_delete(const DB *, const DBT *, u_int32_t);
66 static int hash_fd(const DB *);
67 static int hash_get(const DB *, const DBT *, DBT *, u_int32_t);
68 static int hash_put(const DB *, DBT *, const DBT *, u_int32_t);
69 static void *hash_realloc(SEGMENT **, int, int);
70 static int hash_seq(const DB *, DBT *, DBT *, u_int32_t);
71 static int hash_sync(const DB *, u_int32_t);
72 static int hdestroy(HTAB *);
73 static HTAB *init_hash(HTAB *, const char *, const HASHINFO *);
74 static int init_htab(HTAB *, int);
/* Byte-swapping helpers are only compiled on little-endian hosts. */
75 #if BYTE_ORDER == LITTLE_ENDIAN
76 static void swap_header(HTAB *);
77 static void swap_header_copy(HASHHDR *, HASHHDR *);
/* MOD(x, y) assumes y is a power of two — it is a bit mask, not a division. */
80 /* Fast arithmetic, relying on powers of 2, */
81 #define MOD(x, y) ((x) & ((y) - 1))
/* Stash ERR in the local save_errno and jump to the cleanup label LOC. */
83 #define RETURN_ERROR(ERR, LOC) { save_errno = ERR; goto LOC; }
/* Global counters, only maintained when compiled with HASH_STATISTICS. */
90 #ifdef HASH_STATISTICS
91 int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
94 /************************** INTERFACE ROUTINES ***************************/
/*
 * __hash_open --
 *	dbopen() back end for the hash access method: open (or create) the
 *	hash table named by `file` and return a DB handle wired to the
 *	hash_* entry points.  A NULL file means an in-memory table.
 *
 * NOTE(review): large parts of this function (locals, error labels,
 * the new-table branch, dbp method assignments) were lost in extraction;
 * only comments are added here — restore the body from upstream 8.9
 * before compiling.
 */
99 __hash_open(const char *file, int flags, int mode,
100     const HASHINFO *info, /* Special directives for create */
106 	int bpages, hdrsize, new_table, nsegs, save_errno;
/* Write-only makes no sense for a hashed file; reject it up front. */
108 	if ((flags & O_ACCMODE) == O_WRONLY) {
113 	if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB))))
118 	 * Even if user wants write only, we need to be able to read
119 	 * the actual file, so we need to open it read/write. But, the
120 	 * field in the hashp structure needs to be accurate so that
121 	 * we can check accesses.
123 	hashp->flags = flags;
/* O_CLOEXEC so the table fd does not leak across exec. */
126 	if ((hashp->fp = _open(file, flags | O_CLOEXEC, mode)) == -1)
127 		RETURN_ERROR(errno, error0);
/* A zero-length writable file means we are creating a fresh table. */
128 	new_table = _fstat(hashp->fp, &statbuf) == 0 &&
129 	    statbuf.st_size == 0 && (flags & O_ACCMODE) != O_RDONLY;
134 	if (!(hashp = init_hash(hashp, file, info)))
135 		RETURN_ERROR(errno, error1);
137 	/* Table already exists */
/* Caller-supplied hash overrides the default; must match the one used at create time. */
138 	if (info && info->hash)
139 		hashp->hash = info->hash;
141 		hashp->hash = __default_hash;
/* Read the on-disk metadata header. */
143 	hdrsize = _read(hashp->fp, &hashp->hdr, sizeof(HASHHDR));
144 #if BYTE_ORDER == LITTLE_ENDIAN
148 		RETURN_ERROR(errno, error1);
149 	if (hdrsize != sizeof(HASHHDR))
150 		RETURN_ERROR(EFTYPE, error1);
151 	/* Verify file type, versions and hash function */
152 	if (hashp->MAGIC != HASHMAGIC)
153 		RETURN_ERROR(EFTYPE, error1);
154 #define	OLDHASHVERSION	1
155 	if (hashp->VERSION != HASHVERSION &&
156 	    hashp->VERSION != OLDHASHVERSION)
157 		RETURN_ERROR(EFTYPE, error1);
/* Hashing a known key must reproduce the stored check value. */
158 	if ((int32_t)hashp->hash(CHARKEY, sizeof(CHARKEY)) != hashp->H_CHARKEY)
159 		RETURN_ERROR(EFTYPE, error1);
161 	 * Figure out how many segments we need. Max_Bucket is the
162 	 * maximum bucket number, so the number of buckets is
165 	nsegs = howmany(hashp->MAX_BUCKET + 1, hashp->SGSIZE);
166 	if (alloc_segs(hashp, nsegs))
168 	 * If alloc_segs fails, table will have been destroyed
169 	 * and errno will have been set.
172 	/* Read in bitmaps */
173 	bpages = (hashp->SPARES[hashp->OVFL_POINT] +
174 	    (hashp->BSIZE << BYTE_SHIFT) - 1) >>
175 	    (hashp->BSHIFT + BYTE_SHIFT);
177 	hashp->nmaps = bpages;
178 	(void)memset(&hashp->mapp[0], 0, bpages * sizeof(u_int32_t *));
181 	/* Initialize Buffer Manager */
182 	if (info && info->cachesize)
183 		__buf_init(hashp, info->cachesize);
185 		__buf_init(hashp, DEF_BUFSIZE);
187 	hashp->new_file = new_table;
/* Only flush metadata back if the table is file-backed and writable. */
188 	hashp->save_file = file && (hashp->flags & O_RDWR);
190 	if (!(dbp = (DB *)malloc(sizeof(DB)))) {
196 	dbp->internal = hashp;
197 	dbp->close = hash_close;
198 	dbp->del = hash_delete;
203 	dbp->sync = hash_sync;
/* DEBUG dump of the header fields follows. */
207 	(void)fprintf(stderr,
208 	    "%s\n%s%p\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
210 	    "TABLE POINTER   ", hashp,
211 	    "BUCKET SIZE     ", hashp->BSIZE,
212 	    "BUCKET SHIFT    ", hashp->BSHIFT,
213 	    "DIRECTORY SIZE  ", hashp->DSIZE,
214 	    "SEGMENT SIZE    ", hashp->SGSIZE,
215 	    "SEGMENT SHIFT   ", hashp->SSHIFT,
216 	    "FILL FACTOR     ", hashp->FFACTOR,
217 	    "MAX BUCKET      ", hashp->MAX_BUCKET,
218 	    "OVFL POINT      ", hashp->OVFL_POINT,
219 	    "LAST FREED      ", hashp->LAST_FREED,
220 	    "HIGH MASK       ", hashp->HIGH_MASK,
221 	    "LOW  MASK       ", hashp->LOW_MASK,
222 	    "NSEGS           ", hashp->nsegs,
223 	    "NKEYS           ", hashp->NKEYS);
225 #ifdef HASH_STATISTICS
226 	hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0;
/* error1 cleanup path: close the fd before destroying the table. */
232 	(void)_close(hashp->fp);
/*
 * NOTE(review): fragment of hash_close() — the signature, locals and
 * return were lost in extraction.  The surviving lines show the method
 * delegating teardown to hdestroy(); restore the rest from upstream.
 */
249 	hashp = (HTAB *)dbp->internal;
250 	retval = hdestroy(hashp);
256 hash_fd(const DB *dbp)
263 hashp = (HTAB *)dbp->internal;
264 if (hashp->fp == -1) {
271 /************************** LOCAL CREATION ROUTINES **********************/
/*
 * init_hash --
 *	Fill in default table parameters on a freshly calloc'd HTAB, then
 *	override them from the filesystem block size and the caller's
 *	HASHINFO.  Returns the HTAB on success; on failure the upstream code
 *	returns NULL (error paths were dropped by extraction).
 *
 * NOTE(review): locals, braces and several branches are missing here —
 * comments only are added; restore the body from upstream before compiling.
 */
273 init_hash(HTAB *hashp, const char *file, const HASHINFO *info)
/* Table-wide defaults; all overridable below. */
280 	hashp->LORDER = BYTE_ORDER;
281 	hashp->BSIZE = DEF_BUCKET_SIZE;
282 	hashp->BSHIFT = DEF_BUCKET_SHIFT;
283 	hashp->SGSIZE = DEF_SEGSIZE;
284 	hashp->SSHIFT = DEF_SEGSIZE_SHIFT;
285 	hashp->DSIZE = DEF_DIRSIZE;
286 	hashp->FFACTOR = DEF_FFACTOR;
287 	hashp->hash = __default_hash;
288 	memset(hashp->SPARES, 0, sizeof(hashp->SPARES));
289 	memset(hashp->BITMAPS, 0, sizeof (hashp->BITMAPS));
291 	/* Fix bucket size to be optimal for file system */
/* Prefer the filesystem's preferred I/O size, clamped to MAX_BSIZE. */
293 	if (stat(file, &statbuf))
295 	hashp->BSIZE = statbuf.st_blksize;
296 	if (hashp->BSIZE > MAX_BSIZE)
297 	hashp->BSIZE = MAX_BSIZE;
298 	hashp->BSHIFT = __log2(hashp->BSIZE);
303 	/* Round pagesize up to power of 2 */
304 	hashp->BSHIFT = __log2(info->bsize);
305 	hashp->BSIZE = 1 << hashp->BSHIFT;
306 	if (hashp->BSIZE > MAX_BSIZE) {
312 	hashp->FFACTOR = info->ffactor;
314 	hashp->hash = info->hash;
/* Only the two real byte orders are acceptable as lorder. */
318 	if (info->lorder != BIG_ENDIAN &&
319 	    info->lorder != LITTLE_ENDIAN) {
323 	hashp->LORDER = info->lorder;
326 	/* init_htab should destroy the table and set errno if it fails */
327 	if (init_htab(hashp, nelem))
333 * This calls alloc_segs which may run out of memory. Alloc_segs will destroy
334 * the table and set errno, so we just pass the error information along.
336 * Returns 0 on No Error
339 init_htab(HTAB *hashp, int nelem)
341 int nbuckets, nsegs, l2;
344 * Divide number of elements by the fill factor and determine a
345 * desired number of buckets. Allocate space for the next greater
346 * power of two number of buckets.
348 nelem = (nelem - 1) / hashp->FFACTOR + 1;
350 l2 = __log2(MAX(nelem, 2));
353 hashp->SPARES[l2] = l2 + 1;
354 hashp->SPARES[l2 + 1] = l2 + 1;
355 hashp->OVFL_POINT = l2;
356 hashp->LAST_FREED = 2;
358 /* First bitmap page is at: splitpoint l2 page offset 1 */
359 if (__ibitmap(hashp, OADDR_OF(l2, 1), l2 + 1, 0))
362 hashp->MAX_BUCKET = hashp->LOW_MASK = nbuckets - 1;
363 hashp->HIGH_MASK = (nbuckets << 1) - 1;
364 hashp->HDRPAGES = ((MAX(sizeof(HASHHDR), MINHDRSIZE) - 1) >>
367 nsegs = (nbuckets - 1) / hashp->SGSIZE + 1;
368 nsegs = 1 << __log2(nsegs);
370 if (nsegs > hashp->DSIZE)
371 hashp->DSIZE = nsegs;
372 return (alloc_segs(hashp, nsegs));
375 /********************** DESTROY/CLOSE ROUTINES ************************/
378 * Flushes any changes to the file if necessary and destroys the hashp
379 * structure, freeing all allocated space.
382 hdestroy(HTAB *hashp)
388 #ifdef HASH_STATISTICS
389 (void)fprintf(stderr, "hdestroy: accesses %ld collisions %ld\n",
390 hash_accesses, hash_collisions);
391 (void)fprintf(stderr, "hdestroy: expansions %ld\n",
393 (void)fprintf(stderr, "hdestroy: overflows %ld\n",
395 (void)fprintf(stderr, "keys %ld maxp %d segmentcount %d\n",
396 hashp->NKEYS, hashp->MAX_BUCKET, hashp->nsegs);
398 for (i = 0; i < NCACHED; i++)
399 (void)fprintf(stderr,
400 "spares[%d] = %d\n", i, hashp->SPARES[i]);
403 * Call on buffer manager to free buffers, and if required,
404 * write them to disk.
406 if (__buf_free(hashp, 1, hashp->save_file))
409 free(*hashp->dir); /* Free initial segments */
410 /* Free extra segments */
411 while (hashp->exsegs--)
412 free(hashp->dir[--hashp->nsegs]);
415 if (flush_meta(hashp) && !save_errno)
418 for (i = 0; i < hashp->nmaps; i++)
420 free(hashp->mapp[i]);
422 free(hashp->tmp_key);
424 free(hashp->tmp_buf);
426 if (hashp->fp != -1) {
427 if (hashp->save_file)
428 (void)_fsync(hashp->fp);
429 (void)_close(hashp->fp);
441 * Write modified pages to disk
448 hash_sync(const DB *dbp, u_int32_t flags)
460 hashp = (HTAB *)dbp->internal;
461 if (!hashp->save_file)
463 if (__buf_free(hashp, 0, 1) || flush_meta(hashp))
465 if (hashp->fp != -1 && _fsync(hashp->fp) != 0)
474 * -1 indicates that errno should be set
477 flush_meta(HTAB *hashp)
480 #if BYTE_ORDER == LITTLE_ENDIAN
485 if (!hashp->save_file)
487 hashp->MAGIC = HASHMAGIC;
488 hashp->VERSION = HASHVERSION;
489 hashp->H_CHARKEY = hashp->hash(CHARKEY, sizeof(CHARKEY));
493 #if BYTE_ORDER == LITTLE_ENDIAN
495 swap_header_copy(&hashp->hdr, whdrp);
497 if ((wsize = pwrite(fp, whdrp, sizeof(HASHHDR), (off_t)0)) == -1)
500 if (wsize != sizeof(HASHHDR)) {
502 hashp->error = errno;
505 for (i = 0; i < NCACHED; i++)
507 if (__put_page(hashp, (char *)hashp->mapp[i],
508 hashp->BITMAPS[i], 0, 1))
513 /*******************************SEARCH ROUTINES *****************************/
515 * All the access routines return
519 * 1 to indicate an external ERROR (i.e. key not found, etc)
520 * -1 to indicate an internal ERROR (i.e. out of memory, etc)
523 hash_get(const DB *dbp, const DBT *key, DBT *data, u_int32_t flag)
527 hashp = (HTAB *)dbp->internal;
529 hashp->error = errno = EINVAL;
532 return (hash_access(hashp, HASH_GET, (DBT *)key, data));
536 hash_put(const DB *dbp, DBT *key, const DBT *data, u_int32_t flag)
540 hashp = (HTAB *)dbp->internal;
541 if (flag && flag != R_NOOVERWRITE) {
542 hashp->error = errno = EINVAL;
545 if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
546 hashp->error = errno = EPERM;
549 return (hash_access(hashp, flag == R_NOOVERWRITE ?
550 HASH_PUTNEW : HASH_PUT, (DBT *)key, (DBT *)data));
554 hash_delete(const DB *dbp, const DBT *key,
555 u_int32_t flag) /* Ignored */
559 hashp = (HTAB *)dbp->internal;
560 if (flag && flag != R_CURSOR) {
561 hashp->error = errno = EINVAL;
564 if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
565 hashp->error = errno = EPERM;
568 return (hash_access(hashp, HASH_DELETE, (DBT *)key, NULL));
572 * Assume that hashp has been set in wrapper routine.
/*
 * hash_access --
 *	Core engine behind get/put/delete: hash the key to a bucket, walk
 *	the pinned bucket chain (overflow pages and big-key pages included)
 *	looking for the key, then perform `action` at the found slot or at
 *	the chain end.  All access routines return 0 on SUCCESS, 1 for an
 *	external error (key not found, etc.) and -1 for an internal error.
 *
 * NOTE(review): the switch arms for the individual ACTIONs, several
 * declarations and the found_key label were lost in extraction — comments
 * only are added; restore the body from upstream before compiling.
 */
575 hash_access(HTAB *hashp, ACTION action, DBT *key, DBT *val)
578 	BUFHEAD *bufp, *save_bufp;
580 	int n, ndx, off, size;
584 #ifdef HASH_STATISTICS
590 	kp = (char *)key->data;
/* Fetch the primary bucket page for this key's hash value. */
591 	rbufp = __get_buf(hashp, __call_hash(hashp, kp, size), NULL, 0);
596 	/* Pin the bucket chain */
597 	rbufp->flags |= BUF_PIN;
/* Page layout: bp[0] is the entry count; offsets follow in pairs. */
598 	for (bp = (u_int16_t *)rbufp->page, n = *bp++, ndx = 1; ndx < n;)
599 		if (bp[1] >= REAL_KEY) {
600 			/* Real key/data pair */
601 			if (size == off - *bp &&
602 			    memcmp(kp, rbufp->page + *bp, size) == 0)
605 #ifdef HASH_STATISTICS
/* OVFLPAGE marker: continue the search on the chained overflow page. */
610 		} else if (bp[1] == OVFLPAGE) {
611 			rbufp = __get_buf(hashp, *bp, rbufp, 0);
613 				save_bufp->flags &= ~BUF_PIN;
617 			bp = (u_int16_t *)rbufp->page;
/* Anything below REAL_KEY is a big key/data pair spanning pages. */
621 		} else if (bp[1] < REAL_KEY) {
623 			    __find_bigpair(hashp, rbufp, ndx, kp, size)) > 0)
628 			    __find_last_page(hashp, &bufp))) {
633 			rbufp = __get_buf(hashp, pageno, bufp, 0);
635 				save_bufp->flags &= ~BUF_PIN;
639 			bp = (u_int16_t *)rbufp->page;
/* Not found: unpin and act according to `action` (put appends here). */
644 	save_bufp->flags &= ~BUF_PIN;
653 		if (__addel(hashp, rbufp, key, val)) {
654 			save_bufp->flags &= ~BUF_PIN;
657 		save_bufp->flags &= ~BUF_PIN;
663 		save_bufp->flags &= ~BUF_PIN;
670 		save_bufp->flags &= ~BUF_PIN;
/* Key was found: ndx indexes its offset pair on rbufp->page. */
673 	bp = (u_int16_t *)rbufp->page;
674 	if (bp[ndx + 1] < REAL_KEY) {
675 		if (__big_return(hashp, rbufp, ndx, val, 0))
678 		val->data = (u_char *)rbufp->page + (int)bp[ndx + 1];
679 		val->size = bp[ndx] - bp[ndx + 1];
/* HASH_PUT over an existing key: delete the old pair, add the new. */
683 	if ((__delpair(hashp, rbufp, ndx)) ||
684 	    (__addel(hashp, rbufp, key, val))) {
685 		save_bufp->flags &= ~BUF_PIN;
690 	if (__delpair(hashp, rbufp, ndx))
696 	save_bufp->flags &= ~BUF_PIN;
/*
 * hash_seq --
 *	DB ->seq entry point: sequential traversal of the table.  Cursor
 *	state lives in hashp (cbucket = current bucket, cpage = current
 *	page, cndx = current entry index).  R_FIRST rewinds; R_NEXT (or 0)
 *	advances.  Returns 0 with key/data filled in, 1 at end of table,
 *	-1 on internal error.
 *
 * NOTE(review): cursor-reset code, several error branches and the closing
 * braces were lost in extraction — comments only are added; restore the
 * body from upstream before compiling.
 */
701 hash_seq(const DB *dbp, DBT *key, DBT *data, u_int32_t flag)
708 	hashp = (HTAB *)dbp->internal;
709 	if (flag && flag != R_FIRST && flag != R_NEXT) {
710 		hashp->error = errno = EINVAL;
713 #ifdef HASH_STATISTICS
/* Negative cbucket or R_FIRST means (re)start from bucket 0. */
716 	if ((hashp->cbucket < 0) || (flag == R_FIRST)) {
/* Skip forward until a page with at least one entry is found. */
722 	for (bp = NULL; !bp || !bp[0]; ) {
723 		if (!(bufp = hashp->cpage)) {
724 			for (bucket = hashp->cbucket;
725 			    bucket <= hashp->MAX_BUCKET;
726 			    bucket++, hashp->cndx = 1) {
727 				bufp = __get_buf(hashp, bucket, NULL, 0);
731 				bp = (u_int16_t *)bufp->page;
735 			hashp->cbucket = bucket;
/* Ran past the last bucket: end of table. */
736 			if ((u_int32_t)hashp->cbucket > hashp->MAX_BUCKET) {
741 			bp = (u_int16_t *)hashp->cpage->page;
742 		if (flag == R_NEXT || flag == 0) {
744 			if (hashp->cndx > bp[0]) {
/* Chase overflow-page markers to the page holding the next real pair. */
757 		while (bp[hashp->cndx + 1] == OVFLPAGE) {
758 			bufp = hashp->cpage =
759 			    __get_buf(hashp, bp[hashp->cndx], bufp, 0);
762 			bp = (u_int16_t *)(bufp->page);
/* Entry below REAL_KEY is a big pair: fetch via __big_keydata. */
771 	if (bp[ndx + 1] < REAL_KEY) {
772 		if (__big_keydata(hashp, bufp, key, data, 1))
775 	if (hashp->cpage == NULL)
/* Regular pair: key/data point straight into the pinned page. */
777 	key->data = (u_char *)hashp->cpage->page + bp[ndx];
778 	key->size = (ndx > 1 ? bp[ndx - 1] : hashp->BSIZE) - bp[ndx];
779 	data->data = (u_char *)hashp->cpage->page + bp[ndx + 1];
780 	data->size = bp[ndx] - bp[ndx + 1];
785 /********************************* UTILITIES ************************/
793 __expand_table(HTAB *hashp)
795 u_int32_t old_bucket, new_bucket;
796 int dirsize, new_segnum, spare_ndx;
798 #ifdef HASH_STATISTICS
801 new_bucket = ++hashp->MAX_BUCKET;
802 old_bucket = (hashp->MAX_BUCKET & hashp->LOW_MASK);
804 new_segnum = new_bucket >> hashp->SSHIFT;
806 /* Check if we need a new segment */
807 if (new_segnum >= hashp->nsegs) {
808 /* Check if we need to expand directory */
809 if (new_segnum >= hashp->DSIZE) {
810 /* Reallocate directory */
811 dirsize = hashp->DSIZE * sizeof(SEGMENT *);
812 if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1))
814 hashp->DSIZE = dirsize << 1;
816 if ((hashp->dir[new_segnum] =
817 calloc(hashp->SGSIZE, sizeof(SEGMENT))) == NULL)
823 * If the split point is increasing (MAX_BUCKET's log base 2
824 * * increases), we need to copy the current contents of the spare
825 * split bucket to the next bucket.
827 spare_ndx = __log2(hashp->MAX_BUCKET + 1);
828 if (spare_ndx > hashp->OVFL_POINT) {
829 hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT];
830 hashp->OVFL_POINT = spare_ndx;
833 if (new_bucket > hashp->HIGH_MASK) {
834 /* Starting a new doubling */
835 hashp->LOW_MASK = hashp->HIGH_MASK;
836 hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK;
838 /* Relocate records to the new bucket */
839 return (__split_page(hashp, old_bucket, new_bucket));
843 * If realloc guarantees that the pointer is not destroyed if the realloc
844 * fails, then this routine can go away.
847 hash_realloc(SEGMENT **p_ptr, int oldsize, int newsize)
851 if ( (p = malloc(newsize)) ) {
852 memmove(p, *p_ptr, oldsize);
853 memset((char *)p + oldsize, 0, newsize - oldsize);
861 __call_hash(HTAB *hashp, char *k, int len)
863 unsigned int n, bucket;
865 n = hashp->hash(k, len);
866 bucket = n & hashp->HIGH_MASK;
867 if (bucket > hashp->MAX_BUCKET)
868 bucket = bucket & hashp->LOW_MASK;
873 * Allocate segment table. On error, destroy the table and set errno.
875 * Returns 0 on success
878 alloc_segs(HTAB *hashp, int nsegs)
886 calloc(hashp->DSIZE, sizeof(SEGMENT *))) == NULL) {
888 (void)hdestroy(hashp);
892 hashp->nsegs = nsegs;
895 /* Allocate segments */
896 if ((store = calloc(nsegs << hashp->SSHIFT, sizeof(SEGMENT))) == NULL) {
898 (void)hdestroy(hashp);
902 for (i = 0; i < nsegs; i++)
903 hashp->dir[i] = &store[i << hashp->SSHIFT];
907 #if BYTE_ORDER == LITTLE_ENDIAN
909 * Hashp->hdr needs to be byteswapped.
912 swap_header_copy(HASHHDR *srcp, HASHHDR *destp)
916 P_32_COPY(srcp->magic, destp->magic);
917 P_32_COPY(srcp->version, destp->version);
918 P_32_COPY(srcp->lorder, destp->lorder);
919 P_32_COPY(srcp->bsize, destp->bsize);
920 P_32_COPY(srcp->bshift, destp->bshift);
921 P_32_COPY(srcp->dsize, destp->dsize);
922 P_32_COPY(srcp->ssize, destp->ssize);
923 P_32_COPY(srcp->sshift, destp->sshift);
924 P_32_COPY(srcp->ovfl_point, destp->ovfl_point);
925 P_32_COPY(srcp->last_freed, destp->last_freed);
926 P_32_COPY(srcp->max_bucket, destp->max_bucket);
927 P_32_COPY(srcp->high_mask, destp->high_mask);
928 P_32_COPY(srcp->low_mask, destp->low_mask);
929 P_32_COPY(srcp->ffactor, destp->ffactor);
930 P_32_COPY(srcp->nkeys, destp->nkeys);
931 P_32_COPY(srcp->hdrpages, destp->hdrpages);
932 P_32_COPY(srcp->h_charkey, destp->h_charkey);
933 for (i = 0; i < NCACHED; i++) {
934 P_32_COPY(srcp->spares[i], destp->spares[i]);
935 P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]);
/*
 * swap_header --
 *	Byte-swap hashp->hdr in place, field by field (M_32_SWAP/M_16_SWAP
 *	are in-place swaps).  Called after reading the header on
 *	little-endian hosts, since the on-disk form is big-endian.
 *
 * NOTE(review): the return type, the `hdrp = &hashp->hdr` prologue and
 * the closing braces of the final loop were lost in extraction (the
 * function runs to the end of this chunk) — restore from upstream before
 * compiling; comments only are added here.
 */
940 swap_header(HTAB *hashp)
947 	M_32_SWAP(hdrp->magic);
948 	M_32_SWAP(hdrp->version);
949 	M_32_SWAP(hdrp->lorder);
950 	M_32_SWAP(hdrp->bsize);
951 	M_32_SWAP(hdrp->bshift);
952 	M_32_SWAP(hdrp->dsize);
953 	M_32_SWAP(hdrp->ssize);
954 	M_32_SWAP(hdrp->sshift);
955 	M_32_SWAP(hdrp->ovfl_point);
956 	M_32_SWAP(hdrp->last_freed);
957 	M_32_SWAP(hdrp->max_bucket);
958 	M_32_SWAP(hdrp->high_mask);
959 	M_32_SWAP(hdrp->low_mask);
960 	M_32_SWAP(hdrp->ffactor);
961 	M_32_SWAP(hdrp->nkeys);
962 	M_32_SWAP(hdrp->hdrpages);
963 	M_32_SWAP(hdrp->h_charkey);
/* spares[] entries are 32-bit; bitmaps[] entries are 16-bit. */
964 	for (i = 0; i < NCACHED; i++) {
965 		M_32_SWAP(hdrp->spares[i]);
966 		M_16_SWAP(hdrp->bitmaps[i]);