2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2012 Oleg Moskalenko <mom040267@gmail.com>
5 * Copyright (C) 2012 Gabor Kovesdan <gabor@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
37 #if defined(SORT_THREADS)
39 #include <semaphore.h>
48 #include "radixsort.h"
/* Fallback comparison sort used when radix recursion stops; mergesort
 * is stable, which matters for sort(1)'s -s (stable) mode. */
50 #define DEFAULT_SORT_FUNC_RADIXSORT mergesort
/* Node-size thresholds: below these counts a plain comparison sort is
 * cheaper than further radix partitioning. */
52 #define TINY_NODE(sl) ((sl)->tosort_num < 65)
53 #define SMALL_NODE(sl) ((sl)->tosort_num < 5)
55 /* are we sorting in reverse order ? */
56 static bool reverse_sort;
58 /* sort sub-levels array size */
/* 256 slots: one sub-level pointer per possible byte value at a level. */
59 static const size_t slsz = 256 * sizeof(struct sort_level*);
61 /* one sort level structure */
/* NOTE(review): the "struct sort_level {" line and its count/size
 * fields (tosort_num, tosort_sz, leaves_num, leaves_sz, level, sln)
 * are elided from this chunk; the fields below are a partial view. */
64 struct sort_level **sublevels;
65 struct sort_list_item **leaves;
66 struct sort_list_item **sorted;
67 struct sort_list_item **tosort;
/* Index into the shared output array where this level's slice begins. */
72 size_t start_position;
78 /* stack of sort levels ready to be sorted */
80 struct level_stack *next;
81 struct sort_level *sl;
/* Global LIFO work queue of levels awaiting processing. */
84 static struct level_stack *g_ls;
86 #if defined(SORT_THREADS)
87 /* stack guarding mutex */
/* Condition variable paired with g_ls_mutex: workers wait here when
 * the level stack is empty but items may still be outstanding. */
88 static pthread_cond_t g_ls_cond;
89 static pthread_mutex_t g_ls_mutex;
91 /* counter: how many items are left */
/* NOTE(review): written under g_ls_mutex (see sort_left_dec); also
 * read unlocked by have_sort_left -- presumably tolerated as benign. */
92 static size_t sort_left;
95 /* semaphore to count threads */
99 * Decrement items counter
/* NOTE(review): the return type, opening brace, and the actual
 * decrement statement ("sort_left -= n;") are elided from this view. */
102 sort_left_dec(size_t n)
104 pthread_mutex_lock(&g_ls_mutex);
/* Last item placed: wake every waiting worker so they can exit. */
106 if (sort_left == 0 && nthreads > 1)
107 pthread_cond_broadcast(&g_ls_cond);
108 pthread_mutex_unlock(&g_ls_mutex);
112 * Do we have something to sort ?
114 * This routine does not need to be locked.
/* Unlocked read of sort_left; a stale value only costs callers an
 * extra loop iteration, per the comment above. */
121 ret = (sort_left > 0);
/* Single-threaded build: the counter bookkeeping compiles away. */
128 #define sort_left_dec(n)
130 #endif /* SORT_THREADS */
/* Link an already-allocated stack node onto g_ls. NOTE(review): body
 * elided; presumably lock-free, with locking done by callers such as
 * push_ls -- confirm against the full source. */
133 _push_ls(struct level_stack *ls)
141 * Push sort level to the stack
144 push_ls(struct sort_level *sl)
146 struct level_stack *new_ls;
/* sort_malloc() result used unchecked -- presumably aborts on OOM. */
148 new_ls = sort_malloc(sizeof(struct level_stack));
151 #if defined(SORT_THREADS)
/* Publish the new work item under the lock, then wake one worker. */
153 pthread_mutex_lock(&g_ls_mutex);
155 pthread_cond_signal(&g_ls_cond);
156 pthread_mutex_unlock(&g_ls_mutex);
163 * Pop sort level from the stack (single-threaded style)
165 static inline struct sort_level*
/* NOTE(review): the body that unlinks the top node, frees the stack
 * cell via saved_ls, and returns sl is elided from this chunk. */
168 struct sort_level *sl;
171 struct level_stack *saved_ls;
183 #if defined(SORT_THREADS)
186 * Pop sort level from the stack (multi-threaded style)
188 static inline struct sort_level*
191 struct level_stack *saved_ls;
192 struct sort_level *sl;
194 pthread_mutex_lock(&g_ls_mutex);
/* Stack empty: if every item has been placed we are done; otherwise
 * block until a producer signals new work on g_ls_cond. */
206 if (have_sort_left() == 0)
208 pthread_cond_wait(&g_ls_cond, &g_ls_mutex);
211 pthread_mutex_unlock(&g_ls_mutex);
/* Route ITEM into the sub-level bucket INDX of SL, creating the
 * bucket lazily and growing its tosort array as needed. */
221 add_to_sublevel(struct sort_level *sl, struct sort_list_item *item, size_t indx)
223 struct sort_level *ssl;
225 ssl = sl->sublevels[indx];
/* First item for this byte value: allocate and zero a new bucket
 * one radix position deeper than the parent. */
228 ssl = sort_malloc(sizeof(struct sort_level));
229 memset(ssl, 0, sizeof(struct sort_level));
231 ssl->level = sl->level + 1;
232 sl->sublevels[indx] = ssl;
/* Grow in 128-slot chunks to amortize realloc cost. */
237 if (++(ssl->tosort_num) > ssl->tosort_sz) {
238 ssl->tosort_sz = ssl->tosort_num + 128;
239 ssl->tosort = sort_realloc(ssl->tosort,
240 sizeof(struct sort_list_item*) * (ssl->tosort_sz));
243 ssl->tosort[ssl->tosort_num - 1] = item;
/* Append ITEM to SL's leaves array (items whose key bytes are
 * exhausted at this level); grows in 128-slot chunks like tosort. */
247 add_leaf(struct sort_level *sl, struct sort_list_item *item)
250 if (++(sl->leaves_num) > sl->leaves_sz) {
251 sl->leaves_sz = sl->leaves_num + 128;
252 sl->leaves = sort_realloc(sl->leaves,
253 (sizeof(struct sort_list_item*) * (sl->leaves_sz)));
255 sl->leaves[sl->leaves_num - 1] = item;
/* Return the radix byte of SLI's primary key at byte offset LEVEL.
 * NOTE(review): the key-exhausted return path is elided here. */
259 get_wc_index(struct sort_list_item *sli, size_t level)
/* wcfact: bytes per storage unit -- 1 in single-byte locales,
 * sizeof(wchar_t) in wide-character mode. */
261 const size_t wcfact = (MB_CUR_MAX == 1) ? 1 : sizeof(wchar_t);
262 const struct key_value *kv;
263 const struct bwstring *bws;
/* The radix pass uses key 0 only; later keys are handled by the
 * comparison-sort fallbacks. */
265 kv = get_key_from_keys_array(&sli->ka, 0);
/* Key still has bytes at this level (length is scaled to bytes). */
268 if ((BWSLEN(bws) * wcfact > level)) {
272 * Sort wchar strings a byte at a time, rather than a single
273 * byte from each wchar.
275 res = (wchar_t)BWS_GET(bws, level / wcfact);
276 /* Sort most-significant byte first. */
277 if (level % wcfact < wcfact - 1)
278 res = (res >> (8 * (wcfact - 1 - (level % wcfact))));
/* Classify tosort[ITEM]: exhausted keys become leaves (that branch is
 * elided here); otherwise the item goes into the sub-level bucket for
 * its next radix byte. */
287 place_item(struct sort_level *sl, size_t item)
289 struct sort_list_item *sli;
292 sli = sl->tosort[item];
293 c = get_wc_index(sli, sl->level);
298 add_to_sublevel(sl, sli, c);
/* Recursively release SL's arrays and surviving sub-levels.
 * NOTE(review): NULL guards, the sln initialization, and the freeing
 * of sl itself are elided from this chunk. */
302 free_sort_level(struct sort_level *sl)
307 sort_free(sl->leaves);
310 sort_free(sl->tosort);
313 struct sort_level *slc;
318 for (size_t i = 0; i < sln; ++i) {
319 slc = sl->sublevels[i];
321 free_sort_level(slc);
324 sort_free(sl->sublevels);
/*
 * Recursive workhorse: place one level's items into sl->sorted.
 * Fast paths handle 1 and 2 items and tiny/deep nodes via comparison
 * sort; otherwise items are partitioned into 256 byte-value buckets
 * and processed in forward or reverse order per reverse_sort.
 * NOTE(review): many structural lines (case labels, else branches,
 * closing braces, bucket-size checks) are elided from this chunk.
 */
332 run_sort_level_next(struct sort_level *sl)
334 const size_t wcfact = (MB_CUR_MAX == 1) ? 1 : sizeof(wchar_t);
335 struct sort_level *slc;
336 size_t i, sln, tosort_num;
/* Drop any stale bucket array before repartitioning. */
339 sort_free(sl->sublevels);
340 sl->sublevels = NULL;
343 switch (sl->tosort_num) {
/* Single item: already in order, emit in place. */
347 sl->sorted[sl->start_position] = sl->tosort[0];
352 * Radixsort only processes a single byte at a time. In wchar
353 * mode, this can be a subset of the length of a character.
354 * list_coll_offset() offset is in units of wchar, not bytes.
355 * So to calculate the offset, we must divide by
356 * sizeof(wchar_t) and round down to the index of the first
357 * character this level references.
/* Two items: one collation comparison decides the order. */
359 if (list_coll_offset(&(sl->tosort[0]), &(sl->tosort[1]),
360 sl->level / wcfact) > 0) {
361 sl->sorted[sl->start_position++] = sl->tosort[1];
362 sl->sorted[sl->start_position] = sl->tosort[0];
364 sl->sorted[sl->start_position++] = sl->tosort[0];
365 sl->sorted[sl->start_position] = sl->tosort[1];
/* Tiny node or deep recursion: comparison sort is cheaper/safer. */
371 if (TINY_NODE(sl) || (sl->level > 15)) {
375 * Collate comparison offset is in units of
376 * character-width, so we must divide the level (bytes)
377 * by operating character width (wchar_t or char). See
378 * longer comment above.
380 func = get_list_call_func(sl->level / wcfact);
/* Reuse tosort as the leaves array, shrunk to fit. */
382 sl->leaves = sl->tosort;
383 sl->leaves_num = sl->tosort_num;
384 sl->leaves_sz = sl->leaves_num;
385 sl->leaves = sort_realloc(sl->leaves,
386 (sizeof(struct sort_list_item *) *
/* -s (stable): mergesort is mandatory and its failure is fatal. */
393 if (sort_opts_vals.sflag) {
394 if (mergesort(sl->leaves, sl->leaves_num,
395 sizeof(struct sort_list_item *),
396 (int(*)(const void *, const void *)) func) == -1)
398 err(2, "Radix sort error 3");
400 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
401 sizeof(struct sort_list_item *),
402 (int(*)(const void *, const void *)) func);
404 memcpy(sl->sorted + sl->start_position,
405 sl->leaves, sl->leaves_num *
406 sizeof(struct sort_list_item*));
408 sort_left_dec(sl->leaves_num);
/* General case: shrink tosort to fit, then bucket by next byte. */
412 sl->tosort_sz = sl->tosort_num;
413 sl->tosort = sort_realloc(sl->tosort,
414 sizeof(struct sort_list_item*) * (sl->tosort_sz));
419 sl->sublevels = sort_malloc(slsz);
420 memset(sl->sublevels, 0, slsz);
424 tosort_num = sl->tosort_num;
425 for (i = 0; i < tosort_num; ++i)
428 sort_free(sl->tosort);
/* Items whose keys ended at this level: sort the leaves block. */
433 if (sl->leaves_num > 1) {
435 if (sort_opts_vals.sflag) {
436 mergesort(sl->leaves, sl->leaves_num,
437 sizeof(struct sort_list_item *),
438 (int(*)(const void *, const void *)) list_coll);
440 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
441 sizeof(struct sort_list_item *),
442 (int(*)(const void *, const void *)) list_coll);
444 } else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) {
445 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
446 sizeof(struct sort_list_item *),
447 (int(*)(const void *, const void *)) list_coll_by_str_only);
451 sl->leaves_sz = sl->leaves_num;
452 sl->leaves = sort_realloc(sl->leaves, (sizeof(struct sort_list_item *) *
/* Forward order: leaves (shorter keys) come before every bucket. */
456 memcpy(sl->sorted + sl->start_position, sl->leaves,
457 sl->leaves_num * sizeof(struct sort_list_item*));
458 sl->start_position += sl->leaves_num;
459 sort_left_dec(sl->leaves_num);
461 sort_free(sl->leaves);
/* Forward order: walk buckets low-to-high, assigning each its
 * output slice before recursing. */
468 for (i = 0; i < sln; ++i) {
469 slc = sl->sublevels[i];
472 slc->sorted = sl->sorted;
473 slc->start_position = sl->start_position;
474 sl->start_position += slc->tosort_num;
/* NOTE(review): the branch that instead pushes larger buckets
 * onto g_ls is elided here. */
476 run_sort_level_next(slc);
479 sl->sublevels[i] = NULL;
/* Reverse order: walk buckets high-to-low; n is the mirrored bucket
 * index (its computation is elided from this chunk). */
488 for (i = 0; i < sln; ++i) {
490 slc = sl->sublevels[n];
493 slc->sorted = sl->sorted;
494 slc->start_position = sl->start_position;
495 sl->start_position += slc->tosort_num;
497 run_sort_level_next(slc);
500 sl->sublevels[n] = NULL;
/* Reverse order: leaves are emitted after all buckets. */
504 memcpy(sl->sorted + sl->start_position, sl->leaves,
505 sl->leaves_num * sizeof(struct sort_list_item*));
506 sort_left_dec(sl->leaves_num);
514 * Single-threaded sort cycle
517 run_sort_cycle_st(void)
519 struct sort_level *slc;
/* Drain g_ls until empty (pop loop elided), recursing into each level. */
526 run_sort_level_next(slc);
530 #if defined(SORT_THREADS)
533 * Multi-threaded sort cycle
536 run_sort_cycle_mt(void)
538 struct sort_level *slc;
/* Pop levels (loop elided) until no work remains, recursing into each. */
544 run_sort_level_next(slc);
549 * Sort cycle thread (in multi-threaded mode)
/* Worker entry point; body elided -- presumably runs the MT cycle and
 * posts the completion semaphore before returning. */
552 sort_thread(void* arg)
560 #endif /* defined(SORT_THREADS) */
/*
 * Sort the top level: partition all input items by their first key
 * byte, emit leaves and buckets into sl->tosort (the result lands
 * back in the input array), then process buckets inline or hand them
 * to worker threads. NOTE(review): several branches, closing braces,
 * and the push-vs-recurse decisions are elided from this chunk.
 */
563 run_top_sort_level(struct sort_level *sl)
565 struct sort_level *slc;
/* Sort direction comes from the first key's -r modifier, or the
 * global default modifiers when no keys were given. */
567 reverse_sort = sort_opts_vals.kflag ? keys[0].sm.rflag :
568 default_sort_mods->rflag;
570 sl->start_position = 0;
572 sl->sublevels = sort_malloc(slsz);
573 memset(sl->sublevels, 0, slsz);
/* Bucket every input item by its first radix byte. */
575 for (size_t i = 0; i < sl->tosort_num; ++i)
578 if (sl->leaves_num > 1) {
580 if (sort_opts_vals.sflag) {
581 mergesort(sl->leaves, sl->leaves_num,
582 sizeof(struct sort_list_item *),
583 (int(*)(const void *, const void *)) list_coll);
585 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
586 sizeof(struct sort_list_item *),
587 (int(*)(const void *, const void *)) list_coll);
589 } else if (!sort_opts_vals.sflag && sort_opts_vals.complex_sort) {
590 DEFAULT_SORT_FUNC_RADIXSORT(sl->leaves, sl->leaves_num,
591 sizeof(struct sort_list_item *),
592 (int(*)(const void *, const void *)) list_coll_by_str_only);
/* Forward order: leaves first, then buckets low-to-high. */
597 memcpy(sl->tosort + sl->start_position, sl->leaves,
598 sl->leaves_num * sizeof(struct sort_list_item*));
599 sl->start_position += sl->leaves_num;
600 sort_left_dec(sl->leaves_num);
602 for (size_t i = 0; i < sl->sln; ++i) {
603 slc = sl->sublevels[i];
/* Assign each bucket its slice of the output, then queue it. */
606 slc->sorted = sl->tosort;
607 slc->start_position = sl->start_position;
608 sl->start_position += slc->tosort_num;
610 sl->sublevels[i] = NULL;
/* Reverse order: buckets high-to-low first (n is the mirrored
 * index, computation elided), leaves last. */
617 for (size_t i = 0; i < sl->sln; ++i) {
620 slc = sl->sublevels[n];
623 slc->sorted = sl->tosort;
624 slc->start_position = sl->start_position;
625 sl->start_position += slc->tosort_num;
627 sl->sublevels[n] = NULL;
631 memcpy(sl->tosort + sl->start_position, sl->leaves,
632 sl->leaves_num * sizeof(struct sort_list_item*));
634 sort_left_dec(sl->leaves_num);
637 #if defined(SORT_THREADS)
641 #if defined(SORT_THREADS)
/* Spawn nthreads detached workers; EAGAIN is retried (retry loop
 * elided from this chunk). */
645 for(i = 0; i < nthreads; ++i) {
649 pthread_attr_init(&attr);
650 pthread_attr_setdetachstate(&attr, PTHREAD_DETACHED);
653 int res = pthread_create(&pth, &attr,
657 if (errno == EAGAIN) {
664 pthread_attr_destroy(&attr);
/* Wait once per worker until all have posted completion. */
667 for (i = 0; i < nthreads; ++i)
670 #endif /* defined(SORT_THREADS) */
/*
 * Driver: build the root sort_level over BASE/NMEMB, run the radix
 * sort (threaded only when worthwhile), then tear down the
 * synchronization objects. NOTE(review): some lines (tosort
 * assignment, sort_left initialization, final cleanup) are elided.
 */
674 run_sort(struct sort_list_item **base, size_t nmemb)
676 struct sort_level *sl;
678 #if defined(SORT_THREADS)
679 size_t nthreads_save = nthreads;
/* Thread startup overhead is not worth it for small inputs. */
680 if (nmemb < MT_SORT_THRESHOLD)
684 pthread_mutexattr_t mattr;
/* PTHREAD_MUTEX_ADAPTIVE_NP: spin briefly before sleeping.
 * NOTE(review): non-portable (glibc/_NP) mutex type. */
686 pthread_mutexattr_init(&mattr);
687 pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_ADAPTIVE_NP);
689 pthread_mutex_init(&g_ls_mutex, &mattr);
690 pthread_cond_init(&g_ls_cond, NULL);
692 pthread_mutexattr_destroy(&mattr);
/* Completion semaphore starts at 0; each worker posts once. */
694 sem_init(&mtsem, 0, 0);
699 sl = sort_malloc(sizeof(struct sort_level));
700 memset(sl, 0, sizeof(struct sort_level));
703 sl->tosort_num = nmemb;
704 sl->tosort_sz = nmemb;
706 #if defined(SORT_THREADS)
710 run_top_sort_level(sl);
714 #if defined(SORT_THREADS)
717 pthread_mutex_destroy(&g_ls_mutex);
719 nthreads = nthreads_save;
/* Public entry point: radix-sort the NMEMB item pointers at BASE. */
724 rxsort(struct sort_list_item **base, size_t nmemb)
727 run_sort(base, nmemb);