/* Plug-compatible replacement for UNIX qsort.
   Copyright (C) 1989 Free Software Foundation, Inc.
   Written by Douglas C. Schmidt (schmidt@ics.uci.edu)

This file is part of GNU CC.

GNU QSORT is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU QSORT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU QSORT; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */

/* Synched up with: FSF 19.28. */

#ifdef sparc
#include <alloca.h>
#endif

/* Invoke the comparison function; the result is 0, < 0, or > 0. */
#define CMP(A,B) ((*cmp)((A),(B)))
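/* CMP assumes `cmp' follows the usual qsort comparator contract: negative,
   zero, or positive according to the order of its two arguments.  A caller
   might, for instance, supply a comparator for an array of ints along the
   lines of the following (purely illustrative, not part of this file):

     int
     int_cmp (a, b)
          char *a, *b;
     {
       int x = *(int *) a, y = *(int *) b;
       return (x > y) - (x < y);
     }
*/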

/* Byte-wise swap two items of size SIZE. */
#define SWAP(A,B,SIZE) do {int sz = (SIZE); char *a = (A); char *b = (B); \
    do { char _temp = *a;*a++ = *b;*b++ = _temp;} while (--sz);} while (0)

/* Copy SIZE bytes from item B to item A. */
#define COPY(A,B,SIZE) {int sz = (SIZE); do { *(A)++ = *(B)++; } while (--sz); }

/* This should be replaced by a standard ANSI macro. */
#define BYTES_PER_WORD 8

/* The next 4 #defines implement a very fast in-line stack abstraction. */
#define STACK_SIZE (BYTES_PER_WORD * sizeof (long))
#define PUSH(LOW,HIGH) do {top->lo = LOW;top++->hi = HIGH;} while (0)
#define POP(LOW,HIGH) do {LOW = (--top)->lo;HIGH = top->hi;} while (0)
#define STACK_NOT_EMPTY (stack < top)
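/* PUSH stores a new partition through TOP and then advances it, while POP
   retreats TOP and reads the node back, so TOP always points one entry past
   the most recently pushed node.  These macros assume variables named
   `stack' and `top' are in scope at the point of use. */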

/* Discontinue quicksort algorithm when partition gets below this size.
   This particular magic number was chosen to work best on a Sun 4/260. */
#define MAX_THRESH 4

/* Stack node declarations used to store unfulfilled partition obligations. */
typedef struct
{
  char *lo;
  char *hi;
} stack_node;

/* Order the array using quicksort. This implementation incorporates
   four optimizations discussed in Sedgewick:

   1. Non-recursive, using an explicit stack of pointers that store the
      next array partition to sort. To save time, the maximum amount
      of space required to store an array of MAX_INT is allocated on the
      stack. Assuming a 32-bit integer, this needs only 32 *
      sizeof (stack_node) == 256 bytes. Pretty cheap, actually.

   2. Choose the pivot element using a median-of-three decision tree.
      This reduces the probability of selecting a bad pivot value and
      eliminates certain extraneous comparisons.

   3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
      insertion sort to order the MAX_THRESH items within each partition.
      This is a big win, since insertion sort is faster for small, mostly
      sorted array segments.

   4. The larger of the two sub-partitions is always pushed onto the
      stack first, with the algorithm then concentrating on the
      smaller partition. This *guarantees* no more than log (n)
      stack size is needed (actually O(1) in this case)! */
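/* A minimal usage sketch (hypothetical caller, shown only for illustration),
   assuming a comparator such as the int_cmp sketched above:

     int values[] = { 3, 1, 2 };

     qsort ((char *) values, 3, sizeof (int), int_cmp);

   after which values holds 1, 2, 3. */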

int
qsort (base_ptr, total_elems, size, cmp)
     char *base_ptr;
     int total_elems;
     int size;
     int (*cmp)();
{
  /* Allocating SIZE bytes for a pivot buffer facilitates a better
     algorithm below since we can do comparisons directly on the pivot. */
  char *pivot_buffer = (char *) alloca (size);
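  /* Partition-size threshold expressed in bytes: partitions spanning no
     more than this many bytes are left for the insertion sort pass below. */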
  int max_thresh = MAX_THRESH * size;

  if (total_elems > MAX_THRESH)
    {
      char *lo = base_ptr;
      char *hi = lo + size * (total_elems - 1);
      stack_node stack[STACK_SIZE]; /* Largest size needed for 32-bit int!!! */
      stack_node *top = stack + 1;
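      /* TOP starts one entry past STACK so that STACK_NOT_EMPTY holds on
         entry; the final POP consumes that unused slot and ends the loop
         without its (uninitialized) contents ever being used. */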

      while (STACK_NOT_EMPTY)
        {
          char *left_ptr;
          char *right_ptr;
          {
            char *pivot = pivot_buffer;
            {
              /* Select median value from among LO, MID, and HI. Rearrange
                 LO and HI so the three values are sorted. This lowers the
                 probability of picking a pathological pivot value and
                 skips a comparison for both the LEFT_PTR and RIGHT_PTR. */

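              /* For example, if *LO = 9, *MID = 1 and *HI = 5, the first
                 swap leaves 1, 9, 5; the second leaves 1, 5, 9; the third
                 comparison changes nothing, and the median value 5 is
                 copied out as the pivot. */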
              char *mid = lo + size * ((hi - lo) / size >> 1);

              if (CMP (mid, lo) < 0)
                SWAP (mid, lo, size);
              if (CMP (hi, mid) < 0)
                SWAP (mid, hi, size);
              else
                goto jump_over;
              if (CMP (mid, lo) < 0)
                SWAP (mid, lo, size);
            jump_over:
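              /* COPY advances both of its pointer arguments as it copies, so
                 PIVOT is pointed back at the start of the buffer afterwards. */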
              COPY (pivot, mid, size);
              pivot = pivot_buffer;
            }
            left_ptr = lo + size;
            right_ptr = hi - size;

            /* Here's the famous ``collapse the walls'' section of quicksort.
               Gotta like those tight inner loops! They are the main reason
               that this algorithm runs much faster than others. */
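            /* Invariant: everything to the left of LEFT_PTR compares <= the
               pivot and everything to the right of RIGHT_PTR compares >= it;
               each scan stops at an element that may belong on the other
               side, and the two stopped elements are swapped. */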
            do
              {
                while (CMP (left_ptr, pivot) < 0)
                  left_ptr += size;

                while (CMP (pivot, right_ptr) < 0)
                  right_ptr -= size;

                if (left_ptr < right_ptr)
                  {
                    SWAP (left_ptr, right_ptr, size);
                    left_ptr += size;
                    right_ptr -= size;
                  }
                else if (left_ptr == right_ptr)
                  {
                    left_ptr += size;
                    right_ptr -= size;
                    break;
                  }
              }
            while (left_ptr <= right_ptr);

          }

          /* Set up pointers for next iteration. First determine whether
             left and right partitions are below the threshold size. If so,
             ignore one or both. Otherwise, push the larger partition's
             bounds on the stack and continue sorting the smaller one. */

          if ((right_ptr - lo) <= max_thresh)
            {
              if ((hi - left_ptr) <= max_thresh) /* Ignore both small partitions. */
                POP (lo, hi);
              else                      /* Ignore small left partition. */
                lo = left_ptr;
            }
          else if ((hi - left_ptr) <= max_thresh) /* Ignore small right partition. */
            hi = right_ptr;
          else if ((right_ptr - lo) > (hi - left_ptr)) /* Push larger left partition indices. */
            {
              PUSH (lo, right_ptr);
              lo = left_ptr;
            }
          else                          /* Push larger right partition indices. */
            {
              PUSH (left_ptr, hi);
              hi = right_ptr;
            }
        }
    }

  /* Once the BASE_PTR array is partially sorted by quicksort, the rest
     is completely sorted using insertion sort, since this is efficient
     for partitions below MAX_THRESH size. BASE_PTR points to the beginning
     of the array to sort, and END_PTR points at the very last element in
     the array (*not* one beyond it!). */
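  /* After the quicksort pass above, no element is farther than MAX_THRESH
     positions from its final location, which is exactly the situation in
     which insertion sort runs in roughly linear time. */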

#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))

  {
    char *end_ptr = base_ptr + size * (total_elems - 1);
    char *run_ptr;
    char *tmp_ptr = base_ptr;
    char *thresh = MIN (end_ptr, base_ptr + max_thresh);

    /* Find the smallest element in the first threshold and place it at the
       array's beginning. Since it is the smallest element in the whole
       array, it acts as a sentinel that speeds up insertion sort's inner
       loop below by removing the need for an explicit bounds check. */

    for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size)
      if (CMP (run_ptr, tmp_ptr) < 0)
        tmp_ptr = run_ptr;

    if (tmp_ptr != base_ptr)
      SWAP (tmp_ptr, base_ptr, size);

    /* Insertion sort, running from left-hand-side up to `right-hand-side.'
       Pretty much straight out of the original GNU qsort routine. */

    for (run_ptr = base_ptr + size; (tmp_ptr = run_ptr += size) <= end_ptr; )
      {

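        /* Scan backward for the insertion point: TMP_PTR ends up on the
           rightmost element that does not compare greater than the element
           at RUN_PTR, and is then bumped one slot forward to that point. */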
        while (CMP (run_ptr, tmp_ptr -= size) < 0)
          ;

        if ((tmp_ptr += size) != run_ptr)
          {
            char *trav;

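            /* Move the element at RUN_PTR down into the slot at TMP_PTR one
               byte lane at a time: each pass of the outer loop saves one byte
               of the moving element, shifts that byte of every element in
               between up by one slot, and drops the saved byte into place. */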
            for (trav = run_ptr + size; --trav >= run_ptr;)
              {
                char c = *trav;
                char *hi, *lo;

                for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo)
                  *hi = *lo;
                *hi = c;
              }
          }

      }
  }
  return 1;
}