2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #if defined(LIBC_SCCS) && !defined(lint)
36 static char sccsid[] = "@(#)merge.c 8.2 (Berkeley) 2/14/94";
37 #endif /* LIBC_SCCS and not lint */
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
42 * Hybrid exponential search/linear search merge sort with hybrid
43 * natural/pairwise first pass. Requires about .3% more comparisons
44 * for random data than LSMS with pairwise first pass alone.
45 * It works for objects as small as two bytes.
49 #define THRESHOLD 16 /* Best choice for natural merge cut-off. */
51 /* #define NATURAL to get hybrid natural merge.
52 * (The default is pairwise merging.)
55 #include <sys/types.h>
/*
 * NOTE(review): this chunk appears to be an elided extract -- the original
 * file's line numbers are fused into each line and many lines are missing
 * (e.g. the #else/#endif partners and macro bodies below).  Comments only;
 * no code bytes were changed.  Confirm everything against the full source.
 */
61 #ifdef I_AM_MERGESORT_B
62 #include "block_abi.h"
63 #define DECLARE_CMP DECLARE_BLOCK(int, cmp, const void *, const void *)
64 typedef DECLARE_BLOCK(int, cmp_t, const void *, const void *);
/* Blocks-ABI build: the comparator is a block, invoked via CALL_BLOCK. */
65 #define CMP(x, y) CALL_BLOCK(cmp, x, y)
/* Plain build (elided #else branch): qsort-style function-pointer comparator. */
67 typedef int (*cmp_t)(const void *, const void *);
68 #define CMP(x, y) cmp(x, y)
/* First-pass run construction and small-array fallback, defined below. */
71 static void setup(u_char *, u_char *, size_t, size_t, cmp_t);
72 static void insertionsort(u_char *, size_t, size_t, cmp_t);
/*
 * Element-copy helpers: ICOPY_* move data an int at a time (fast path used
 * when element size and base alignment permit); CCOPY_* presumably copy
 * byte by byte -- their bodies are not visible in this extract.  The macro
 * definitions below still end in continuation backslashes, so no comments
 * could be placed inside them.
 */
74 #define ISIZE sizeof(int)
75 #define PSIZE sizeof(u_char *)
76 #define ICOPY_LIST(src, dst, last) \
78 *(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE; \
80 #define ICOPY_ELT(src, dst, i) \
82 *(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE; \
85 #define CCOPY_LIST(src, dst, last) \
89 #define CCOPY_ELT(src, dst, i) \
95 * Find the next possible pointer head. (Trickery for forcing an array
96 * to do double duty as a linked list when objects do not align with word
99 /* Assumption: PSIZE is a power of 2. */
/* Round p up to the next PSIZE-aligned address and treat it as u_char **. */
100 #define EVAL(p) (u_char **) \
102 (((u_char *)p + PSIZE - 1 - (u_char *) 0) & ~(PSIZE - 1)))
111 * Arguments are as for qsort.
/*
 * mergesort() / mergesort_b() -- stable merge sort with the qsort(3)
 * calling convention: sorts the nmemb elements of size bytes at base
 * using comparator cmp.
 *
 * NOTE(review): many statements of this function are missing from this
 * extract (the fused original line numbers below are discontiguous), so
 * these comments describe only what is visible -- confirm against the
 * complete source before relying on them.
 */
114 #ifdef I_AM_MERGESORT_B
115 mergesort_b(void *base, size_t nmemb, size_t size, cmp_t cmp)
117 mergesort(void *base, size_t nmemb, size_t size, cmp_t cmp)
123 u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
124 u_char *list2, *list1, *p2, *p, *last, **p1;
/* Reject sizes too small to overlay the run-list pointers on the data. */
126 if (size < PSIZE / 2) { /* Pointers must fit into 2 * size. */
136 * Stupid subtraction for the Cray.
/* Enable the int-wise copy fast path only if size and base are int-aligned. */
139 if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
/* Scratch buffer: one full copy of the data plus pointer-alignment slack. */
142 if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
/* First pass: build initial sorted runs and thread the run list (see EVAL). */
146 setup(list1, list2, nmemb, size, cmp);
147 last = list2 + nmemb * size;
/* Merge passes continue until a single run spans the whole array. */
149 while (*EVAL(list2) != last) {
152 for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
155 f2 = l1 = list1 + (p2 - list2);
158 l2 = list1 + (p2 - list2);
/* Merge the two runs [f1,l1) and [f2,l2) into the output at tp2. */
159 while (f1 < l1 && f2 < l2) {
160 if (CMP(f1, f2) <= 0) {
/* Linear probe while the winning side keeps winning (small spans). */
169 if (!big) { /* here i = 0 */
170 while ((b += size) < t && CMP(q, b) >sense)
/* Exponential (galloping) search for the insertion frontier. */
176 EXPONENTIAL: for (i = size; ; i <<= 1)
177 if ((p = (b + i)) >= t) {
178 if ((p = t - size) > b &&
184 } else if (CMP(q, p) <= sense) {
/* Binary-search refinement of the frontier located above. */
192 i = (((t - b) / size) >> 1) * size;
193 if (CMP(q, p = b + i) <= sense)
199 FASTCASE: while (i > size)
201 p = b + (i >>= 1)) <= sense)
/* Bulk-copy the located span, int-wise or byte-wise per the alignment
 * test earlier.  (The selecting branch structure is elided in this extract.) */
210 ICOPY_LIST(f2, tp2, b);
211 ICOPY_ELT(f1, tp2, i);
213 CCOPY_LIST(f2, tp2, b);
214 CCOPY_ELT(f1, tp2, i);
218 ICOPY_LIST(f1, tp2, b);
219 ICOPY_ELT(f2, tp2, i);
221 CCOPY_LIST(f1, tp2, b);
222 CCOPY_ELT(f2, tp2, i);
/* One input run exhausted: copy the remainder of the other straight over. */
228 ICOPY_LIST(f2, tp2, l2);
230 CCOPY_LIST(f2, tp2, l2);
231 } else if (f1 < l1) {
233 ICOPY_LIST(f1, tp2, l1);
235 CCOPY_LIST(f1, tp2, l1);
/* Ping-pong buffers: this pass's output becomes the next pass's input. */
239 tp2 = list1; /* swap list1, list2 */
242 last = list2 + nmemb*size;
/* Presumably restores the sorted data to the caller's buffer when the
 * final pass left it in the scratch area -- TODO confirm; the buffer
 * role assignments are elided in this extract. */
245 memmove(list2, list1, nmemb*size);
/*
 * swap/reverse -- element-exchange helpers used by the first pass in
 * setup(): swap exchanges two adjacent size-byte elements; reverse flips
 * a run in place (used to turn descending runs ascending under NATURAL).
 * NOTE(review): the macro bodies below are truncated in this extract and
 * every visible line still carries a continuation backslash, so no
 * comments could be placed inside the definitions themselves.
 */
252 #define swap(a, b) { \
256 tmp = *a; *a++ = *s; *s++ = tmp; \
260 #define reverse(bot, top) { \
265 tmp = *bot; *bot++ = *s; *s++ = tmp; \
272 * Optional hybrid natural/pairwise first pass. Eats up list1 in runs of
273 * increasing order, list2 in a corresponding linked list. Checks for runs
274 * when THRESHOLD/2 pairs compare with same sense. (Only used when NATURAL
275 * is defined. Otherwise simple pairwise merging is used.)
/*
 * setup -- first merge pass: carve list1 into initial sorted runs and
 * thread the corresponding run list through list2 (via EVAL).  With
 * NATURAL defined it detects pre-existing monotone runs; otherwise it
 * does simple pairwise merging.  NOTE(review): lines are elided in this
 * extract; comments cover only what is visible.
 */
278 setup(u_char *list1, u_char *list2, size_t n, size_t size, cmp_t cmp)
280 int i, length, size2, tmp, sense;
281 u_char *f1, *f2, *s, *l2, *last, *p2;
/* Tiny arrays: insertion-sort directly and emit one run spanning all n. */
285 insertionsort(list1, n, size, cmp);
286 *EVAL(list2) = (u_char*) list2 + n*size;
290 * Avoid running pointers out of bounds; limit n to evens
/* Odd tail: insertion-sort the leftover i elements as their own run. */
294 insertionsort(list1 + (n - i) * size, i, size, cmp);
295 last = list1 + size * (n - i);
296 *EVAL(list2 + (last - list1)) = list2 + n * size;
/* sense records whether the current pair compares descending (out of order). */
301 sense = (CMP(f1, f1 + size) > 0);
302 for (; f1 < last; sense = !sense) {
304 /* Find pairs with same sense. */
305 for (f2 = f1 + size2; f2 < last; f2 += size2) {
306 if ((CMP(f2, f2+ size) > 0) != sense)
/* Few same-sense pairs: fix each pair in place and link 2-element runs. */
310 if (length < THRESHOLD) { /* Pairwise merge */
312 p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
314 swap (f1, f1 + size);
315 } while ((f1 += size2) < f2);
316 } else { /* Natural merge */
/* Long monotone stretch: extend the run as far as the sense holds... */
318 for (f2 = f1 + size2; f2 += size2) {
319 if ((CMP(f2-size, f2) > 0) != sense) {
320 p2 = *EVAL(p2) = f2 - list1 + list2;
/* ...and reverse descending runs in place so every run is ascending. */
322 reverse(f1, f2-size);
327 reverse (f1, f2-size);
329 if (f2 < last || CMP(f2 - size, f2) > 0)
330 p2 = *EVAL(p2) = f2 - list1 + list2;
332 p2 = *EVAL(p2) = list2 + n*size;
335 #else /* pairwise merge only. */
/* Default build: one 2-element run per pair, swapping out-of-order pairs. */
336 for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
337 p2 = *EVAL(p2) = p2 + size2;
338 if (CMP (f1, f1 + size) > 0)
345 * This is to avoid out-of-bounds addresses in sorting the
349 insertionsort(u_char *a, size_t n, size_t size, cmp_t cmp)
351 u_char *ai, *s, *t, *u, tmp;
354 for (ai = a+size; --n >= 1; ai += size)
355 for (t = ai; t > a; t -= size) {