/*-------------------------------------------------------------------------
 *
 * tuplesort.c
 *	  Generalized tuple sorting routines.
 *
 * This module handles sorting of heap tuples, index tuples, or single
 * Datums (and could easily support other kinds of sortable objects,
 * if necessary).  It works efficiently for both small and large amounts
 * of data.  Small amounts are sorted in-memory using qsort().  Large
 * amounts are sorted using temporary files and a standard external sort
 * algorithm.
 *
 * See Knuth, volume 3, for more than you want to know about the external
 * sorting algorithm.  We divide the input into sorted runs using replacement
 * selection, in the form of a priority tree implemented as a heap
 * (essentially his Algorithm 5.2.3H), then merge the runs using polyphase
 * merge, Knuth's Algorithm 5.4.2D.  The logical "tapes" used by Algorithm D
 * are implemented by logtape.c, which avoids space wastage by recycling
 * disk space as soon as each block is read from its "tape".
 *
 * We do not form the initial runs using Knuth's recommended replacement
 * selection data structure (Algorithm 5.4.1R), because it uses a fixed
 * number of records in memory at all times.  Since we are dealing with
 * tuples that may vary considerably in size, we want to be able to vary
 * the number of records kept in memory to ensure full utilization of the
 * allowed sort memory space.  So, we keep the tuples in a variable-size
 * heap, with the next record to go out at the top of the heap.  Like
 * Algorithm 5.4.1R, each record is stored with the run number that it
 * must go into, and we use (run number, key) as the ordering key for the
 * heap.  When the run number at the top of the heap changes, we know that
 * no more records of the prior run are left in the heap.
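 *
 * For illustration only (a sketch, not this file's actual comparator),
 * the (run number, key) heap ordering described above amounts to:
 *
 *		if (a->tupindex != b->tupindex)
 *			return (a->tupindex < b->tupindex) ? -1 : 1;
 *		return COMPARETUP(state, a, b);
 *
 * so every record tagged with run N drains from the heap before any
 * record tagged with run N+1.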
 *
 * The approximate amount of memory allowed for any one sort operation
 * is specified in kilobytes by the caller (most pass work_mem).  Initially,
 * we absorb tuples and simply store them in an unsorted array as long as
 * we haven't exceeded workMem.  If we reach the end of the input without
 * exceeding workMem, we sort the array using qsort() and subsequently return
 * tuples just by scanning the tuple array sequentially.  If we do exceed
 * workMem, we construct a heap using Algorithm H and begin to emit tuples
 * into sorted runs in temporary tapes, emitting just enough tuples at each
 * step to get back within the workMem limit.  Whenever the run number at
 * the top of the heap changes, we begin a new run with a new output tape
 * (selected per Algorithm D).  After the end of the input is reached,
 * we dump out remaining tuples in memory into a final run (or two),
 * then merge the runs using Algorithm D.
 *
 * When merging runs, we use a heap containing just the frontmost tuple from
 * each source run; we repeatedly output the smallest tuple and insert the
 * next tuple from its source tape (if any).  When the heap empties, the merge
 * is complete.  The basic merge algorithm thus needs very little memory ---
 * only M tuples for an M-way merge, and M is constrained to a small number.
 * However, we can still make good use of our full workMem allocation by
 * pre-reading additional tuples from each source tape.  Without prereading,
 * our access pattern to the temporary file would be very erratic; on average
 * we'd read one block from each of M source tapes during the same time that
 * we're writing M blocks to the output tape, so there is no sequentiality of
 * access at all, defeating the read-ahead methods used by most Unix kernels.
 * Worse, the output tape gets written into a very random sequence of blocks
 * of the temp file, ensuring that things will be even worse when it comes
 * time to read that tape.  A straightforward merge pass thus ends up doing a
 * lot of waiting for disk seeks.  We can improve matters by prereading from
 * each source tape sequentially, loading about workMem/M bytes from each tape
 * in turn.  Then we run the merge algorithm, writing but not reading until
 * one of the preloaded tuple series runs out.  Then we switch back to preread
 * mode, fill memory again, and repeat.  This approach helps to localize both
 * read and write accesses.
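 *
 * As a worked example: with workMem = 64MB and a 16-way merge, each preread
 * cycle loads roughly 64MB/16 = 4MB sequentially from a single tape before
 * moving on, rather than fetching one block at a time from all 16 tapes in
 * an interleaved fashion.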
 *
 * When the caller requests random access to the sort result, we form
 * the final sorted run on a logical tape which is then "frozen", so
 * that we can access it randomly.  When the caller does not need random
 * access, we return from tuplesort_performsort() as soon as we are down
 * to one run per logical tape.  The final merge is then performed
 * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
 * saves one cycle of writing all the data out to disk and reading it in.
 *
 * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
 * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
 * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
 * tape drives are expensive beasts, and in particular that there will always
 * be many more runs than tape drives.  In our implementation a "tape drive"
 * doesn't cost much more than a few Kb of memory buffers, so we can afford
 * to have lots of them.  In particular, if we can have as many tape drives
 * as sorted runs, we can eliminate any repeated I/O at all.  In the current
 * code we determine the number of tapes M on the basis of workMem: we want
 * workMem/M to be large enough that we read a fair amount of data each time
 * we preread from a tape, so as to maintain the locality of access described
 * above.  Nonetheless, with large workMem we can have many tapes.
 *
 *
 * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/sort/tuplesort.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <limits.h>

#include "access/nbtree.h"
#include "catalog/index.h"
#include "commands/tablespace.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "utils/datum.h"
#include "utils/logtape.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/rel.h"
#include "utils/sortsupport.h"
#include "utils/tuplesort.h"


/* sort-type codes for sort__start probes */
#define HEAP_SORT		0
#define INDEX_SORT		1
#define DATUM_SORT		2
#define CLUSTER_SORT	3

/* GUC variables */
#ifdef TRACE_SORT
bool		trace_sort = false;
#endif

#ifdef DEBUG_BOUNDED_SORT
bool		optimize_bounded_sort = true;
#endif

/*
 * The objects we actually sort are SortTuple structs.  These contain
 * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
 * which is a separate palloc chunk --- we assume it is just one chunk and
 * can be freed by a simple pfree().  SortTuples also contain the tuple's
 * first key column in Datum/nullflag format, and an index integer.
 *
 * Storing the first key column lets us save heap_getattr or index_getattr
 * calls during tuple comparisons.  We could extract and save all the key
 * columns not just the first, but this would increase code complexity and
 * overhead, and wouldn't actually save any comparison cycles in the common
 * case where the first key determines the comparison result.  Note that
 * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
 *
 * When sorting single Datums, the data value is represented directly by
 * datum1/isnull1.  If the datatype is pass-by-reference and isnull1 is false,
 * then datum1 points to a separately palloc'd data value that is also pointed
 * to by the "tuple" pointer; otherwise "tuple" is NULL.
 *
 * While building initial runs, tupindex holds the tuple's run number.  During
 * merge passes, we re-use it to hold the input tape number that each tuple in
 * the heap was read from, or to hold the index of the next tuple pre-read
 * from the same tape in the case of pre-read entries.  tupindex goes unused
 * if the sort occurs entirely in memory.
 */
typedef struct
{
	void	   *tuple;			/* the tuple proper */
	Datum		datum1;			/* value of first key column */
	bool		isnull1;		/* is first key column NULL? */
	int			tupindex;		/* see notes above */
} SortTuple;


/*
 * Possible states of a Tuplesort object.  These denote the states that
 * persist between calls of Tuplesort routines.
 */
typedef enum
{
	TSS_INITIAL,				/* Loading tuples; still within memory limit */
	TSS_BOUNDED,				/* Loading tuples into bounded-size heap */
	TSS_BUILDRUNS,				/* Loading tuples; writing to tape */
	TSS_SORTEDINMEM,			/* Sort completed entirely in memory */
	TSS_SORTEDONTAPE,			/* Sort completed, final run is on tape */
	TSS_FINALMERGE				/* Performing final merge on-the-fly */
} TupSortStatus;

/*
 * Parameters for calculation of number of tapes to use --- see inittapes()
 * and tuplesort_merge_order().
 *
 * In this calculation we assume that each tape will cost us about 3 blocks
 * worth of buffer space (which is an underestimate for very large data
 * volumes, but it's probably close enough --- see logtape.c).
 *
 * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
 * tape during a preread cycle (see discussion at top of file).
 */
#define MINORDER		6		/* minimum merge order */
#define TAPE_BUFFER_OVERHEAD		(BLCKSZ * 3)
#define MERGE_BUFFER_SIZE			(BLCKSZ * 32)
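
/*
 * Worked example (a sketch, assuming the default BLCKSZ of 8K): with
 * workMem = 1MB, allowedMem is 1048576 bytes, TAPE_BUFFER_OVERHEAD is
 * 24576, and MERGE_BUFFER_SIZE is 262144, so tuplesort_merge_order()
 * below computes (1048576 - 24576) / (262144 + 24576) = 3, which is then
 * raised to the MINORDER floor of 6.  workMem must reach roughly 2MB
 * before the memory-based formula, not MINORDER, determines the order.
 */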

typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
									Tuplesortstate *state);

/*
 * Private state of a Tuplesort operation.
 */
struct Tuplesortstate
{
	TupSortStatus status;		/* enumerated value as shown above */
	int			nKeys;			/* number of columns in sort key */
	bool		randomAccess;	/* did caller request random access? */
	bool		bounded;		/* did caller specify a maximum number of
								 * tuples to return? */
	bool		boundUsed;		/* true if we made use of a bounded heap */
	int			bound;			/* if bounded, the maximum number of tuples */
	long		availMem;		/* remaining memory available, in bytes */
	long		allowedMem;		/* total memory allowed, in bytes */
	int			maxTapes;		/* number of tapes (Knuth's T) */
	int			tapeRange;		/* maxTapes-1 (Knuth's P) */
	MemoryContext sortcontext;	/* memory context holding all sort data */
	LogicalTapeSet *tapeset;	/* logtape.c object for tapes in a temp file */

	/*
	 * These function pointers decouple the routines that must know what kind
	 * of tuple we are sorting from the routines that don't need to know it.
	 * They are set up by the tuplesort_begin_xxx routines.
	 *
	 * Function to compare two tuples; result is per qsort() convention, ie:
	 * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
	 * qsort_arg_comparator.
	 */
	SortTupleComparator comparetup;

	/*
	 * Function to copy a supplied input tuple into palloc'd space and set up
	 * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
	 * state->availMem must be decreased by the amount of space used for the
	 * tuple copy (note the SortTuple struct itself is not counted).
	 */
	void		(*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);

	/*
	 * Function to write a stored tuple onto tape.  The representation of the
	 * tuple on tape need not be the same as it is in memory; requirements on
	 * the tape representation are given below.  After writing the tuple,
	 * pfree() the out-of-line data (not the SortTuple struct!), and increase
	 * state->availMem by the amount of memory space thereby released.
	 */
	void		(*writetup) (Tuplesortstate *state, int tapenum,
							 SortTuple *stup);

	/*
	 * Function to read a stored tuple from tape back into memory.  'len' is
	 * the already-read length of the stored tuple.  Create a palloc'd copy,
	 * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
	 * decrease state->availMem by the amount of memory space consumed.
	 */
	void		(*readtup) (Tuplesortstate *state, SortTuple *stup,
							int tapenum, unsigned int len);

	/*
	 * Function to reverse the sort direction from its current state.  (We
	 * could dispense with this if we wanted to enforce that all variants
	 * represent the sort key information alike.)
	 */
	void		(*reversedirection) (Tuplesortstate *state);

	/*
	 * This array holds the tuples now in sort memory.  If we are in state
	 * INITIAL, the tuples are in no particular order; if we are in state
	 * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
	 * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
	 * H.  (Note that memtupcount only counts the tuples that are part of the
	 * heap --- during merge passes, memtuples[] entries beyond tapeRange are
	 * never in the heap and are used to hold pre-read tuples.)  In state
	 * SORTEDONTAPE, the array is not used.
	 */
	SortTuple  *memtuples;		/* array of SortTuple structs */
	int			memtupcount;	/* number of tuples currently present */
	int			memtupsize;		/* allocated length of memtuples array */

	/*
	 * While building initial runs, this is the current output run number
	 * (starting at 0).  Afterwards, it is the number of initial runs we made.
	 */
	int			currentRun;

	/*
	 * Unless otherwise noted, all pointer variables below are pointers to
	 * arrays of length maxTapes, holding per-tape data.
	 */

	/*
	 * These variables are only used during merge passes.  mergeactive[i] is
	 * true if we are reading an input run from (actual) tape number i and
	 * have not yet exhausted that run.  mergenext[i] is the memtuples index
	 * of the next pre-read tuple (next to be loaded into the heap) for tape
	 * i, or 0 if we are out of pre-read tuples.  mergelast[i] similarly
	 * points to the last pre-read tuple from each tape.  mergeavailslots[i]
	 * is the number of unused memtuples[] slots reserved for tape i, and
	 * mergeavailmem[i] is the amount of unused space allocated for tape i.
	 * mergefreelist and mergefirstfree keep track of unused locations in the
	 * memtuples[] array.  The memtuples[].tupindex fields link together
	 * pre-read tuples for each tape as well as recycled locations in
	 * mergefreelist.  It is OK to use 0 as a null link in these lists, because
	 * memtuples[0] is part of the merge heap and is never a pre-read tuple.
	 */
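
	/*
	 * Illustrative sketch (the real code is inlined in mergeonerun() and
	 * tuplesort_gettuple_common()): consuming one pre-read tuple from tape
	 * srcTape unlinks it from that tape's list and pushes its slot onto
	 * the freelist, roughly:
	 *
	 *		tupIndex = mergenext[srcTape];
	 *		newtup = &memtuples[tupIndex];
	 *		mergenext[srcTape] = newtup->tupindex;		<- unlink from list
	 *		newtup->tupindex = mergefreelist;			<- recycle the slot
	 *		mergefreelist = tupIndex;
	 */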

	bool	   *mergeactive;	/* active input run source? */
	int		   *mergenext;		/* first preread tuple for each source */
	int		   *mergelast;		/* last preread tuple for each source */
	int		   *mergeavailslots;	/* slots left for prereading each tape */
	long	   *mergeavailmem;	/* availMem for prereading each tape */
	int			mergefreelist;	/* head of freelist of recycled slots */
	int			mergefirstfree; /* first slot never used in this merge */

	/*
	 * Variables for Algorithm D.  Note that destTape is a "logical" tape
	 * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
	 * "logical" and "actual" tape numbers straight!
	 */
	int			Level;			/* Knuth's l */
	int			destTape;		/* current output tape (Knuth's j, less 1) */
	int		   *tp_fib;			/* Target Fibonacci run counts (A[]) */
	int		   *tp_runs;		/* # of real runs on each tape */
	int		   *tp_dummy;		/* # of dummy runs for each tape (D[]) */
	int		   *tp_tapenum;		/* Actual tape numbers (TAPE[]) */
	int			activeTapes;	/* # of active input tapes in merge pass */

	/*
	 * These variables are used after completion of sorting to keep track of
	 * the next tuple to return.  (In the tape case, the tape's current read
	 * position is also critical state.)
	 */
	int			result_tape;	/* actual tape number of finished output */
	int			current;		/* array index (only used if SORTEDINMEM) */
	bool		eof_reached;	/* reached EOF (needed for cursors) */

	/* markpos_xxx holds marked position for mark and restore */
	long		markpos_block;	/* tape block# (only used if SORTEDONTAPE) */
	int			markpos_offset; /* saved "current", or offset in tape block */
	bool		markpos_eof;	/* saved "eof_reached" */

	/*
	 * These variables are specific to the MinimalTuple case; they are set by
	 * tuplesort_begin_heap and used only by the MinimalTuple routines.
	 */
	TupleDesc	tupDesc;
	SortSupport sortKeys;		/* array of length nKeys */

	/*
	 * This variable is shared by the single-key MinimalTuple case and the
	 * Datum case.  Otherwise it's NULL.
	 */
	SortSupport onlyKey;

	/*
	 * These variables are specific to the CLUSTER case; they are set by
	 * tuplesort_begin_cluster.  Note CLUSTER also uses tupDesc and
	 * indexScanKey.
	 */
	IndexInfo  *indexInfo;		/* info about index being used for reference */
	EState	   *estate;			/* for evaluating index expressions */

	/*
	 * These variables are specific to the IndexTuple case; they are set by
	 * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
	 */
	Relation	indexRel;		/* index being built */

	/* These are specific to the index_btree subcase: */
	ScanKey		indexScanKey;
	bool		enforceUnique;	/* complain if we find duplicate tuples */

	/* These are specific to the index_hash subcase: */
	uint32		hash_mask;		/* mask for sortable part of hash code */

	/*
	 * These variables are specific to the Datum case; they are set by
	 * tuplesort_begin_datum and used only by the DatumTuple routines.
	 */
	Oid			datumType;
	/* we need typelen and byval in order to know how to copy the Datums. */
	int			datumTypeLen;
	bool		datumTypeByVal;

	/*
	 * Resource snapshot for time of sort start.
	 */
#ifdef TRACE_SORT
	PGRUsage	ru_start;
#endif
};

#define COMPARETUP(state,a,b)	((*(state)->comparetup) (a, b, state))
#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup)	((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
#define REVERSEDIRECTION(state) ((*(state)->reversedirection) (state))
#define LACKMEM(state)		((state)->availMem < 0)
#define USEMEM(state,amt)	((state)->availMem -= (amt))
#define FREEMEM(state,amt)	((state)->availMem += (amt))
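
/*
 * A minimal sketch of the accounting contract these macros imply (matching
 * the copytup_* and free_sort_tuple routines below): every palloc'd tuple
 * is charged at its actual chunk size, and credited back when released:
 *
 *		tup = palloc(...);		... fill in the tuple ...
 *		USEMEM(state, GetMemoryChunkSpace(tup));
 *		...
 *		FREEMEM(state, GetMemoryChunkSpace(tup));
 *		pfree(tup);
 */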

/*
 * NOTES about on-tape representation of tuples:
 *
 * We require the first "unsigned int" of a stored tuple to be the total size
 * on-tape of the tuple, including itself (so it is never zero; an all-zero
 * unsigned int is used to delimit runs).  The remainder of the stored tuple
 * may or may not match the in-memory representation of the tuple ---
 * any conversion needed is the job of the writetup and readtup routines.
 *
 * If state->randomAccess is true, then the stored representation of the
 * tuple must be followed by another "unsigned int" that is a copy of the
 * length --- so the total tape space used is actually sizeof(unsigned int)
 * more than the stored length value.  This allows read-backwards.  When
 * randomAccess is not true, the write/read routines may omit the extra
 * length word.
 *
 * writetup is expected to write both length words as well as the tuple
 * data.  When readtup is called, the tape is positioned just after the
 * front length word; readtup must read the tuple data and advance past
 * the back length word (if present).
 *
 * The write/read routines can make use of the tuple description data
 * stored in the Tuplesortstate record, if needed.  They are also expected
 * to adjust state->availMem by the amount of memory space (not tape space!)
 * released or consumed.  There is no error return from either writetup
 * or readtup; they should ereport() on failure.
 */
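
/*
 * Concretely (assuming a 4-byte unsigned int), a tuple stored with
 * randomAccess = true occupies the tape bytes:
 *
 *		+---------+--------------------+---------+
 *		| len (4) | tuple data (len-4) | len (4) |
 *		+---------+--------------------+---------+
 *
 * where "len" counts the front length word and the data, but not the
 * trailing copy used only for backward scans.
 */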

/*
 * NOTES about memory consumption calculations:
 *
 * We count space allocated for tuples against the workMem limit, plus
 * the space used by the variable-size memtuples array.  Fixed-size space
 * is not counted; it's small enough to not be interesting.
 *
 * Note that we count actual space used (as shown by GetMemoryChunkSpace)
 * rather than the originally-requested size.  This is important since
 * palloc can add substantial overhead.  It's not a complete answer since
 * we won't count any wasted space in palloc allocation blocks, but it's
 * a lot better than what we were doing before 7.3.
 */

/* When using this macro, beware of double evaluation of len */
#define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
	do { \
		if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
			elog(ERROR, "unexpected end of data"); \
	} while(0)
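
/*
 * For example (hypothetical misuse), this would read the length word twice,
 * because "len" is expanded twice by the macro body:
 *
 *		LogicalTapeReadExact(ts, tapenum, buf, getlen(state, tapenum, false));
 *
 * Callers below therefore pass only simple variables or constants for len.
 */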

static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
static void inittapes(Tuplesortstate *state);
static void selectnewtape(Tuplesortstate *state);
static void mergeruns(Tuplesortstate *state);
static void mergeonerun(Tuplesortstate *state);
static void beginmerge(Tuplesortstate *state);
static void mergepreread(Tuplesortstate *state);
static void mergeprereadone(Tuplesortstate *state, int srcTape);
static void dumptuples(Tuplesortstate *state, bool alltuples);
static void make_bounded_heap(Tuplesortstate *state);
static void sort_bounded_heap(Tuplesortstate *state);
static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
					  int tupleindex, bool checkIndex);
static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
static void markrunend(Tuplesortstate *state, int tapenum);
static int comparetup_heap(const SortTuple *a, const SortTuple *b,
				Tuplesortstate *state);
static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_heap(Tuplesortstate *state, int tapenum,
			  SortTuple *stup);
static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
			 int tapenum, unsigned int len);
static void reversedirection_heap(Tuplesortstate *state);
static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
				   Tuplesortstate *state);
static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_cluster(Tuplesortstate *state, int tapenum,
				 SortTuple *stup);
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
				int tapenum, unsigned int len);
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
					   Tuplesortstate *state);
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
					  Tuplesortstate *state);
static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_index(Tuplesortstate *state, int tapenum,
			   SortTuple *stup);
static void readtup_index(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len);
static void reversedirection_index_btree(Tuplesortstate *state);
static void reversedirection_index_hash(Tuplesortstate *state);
static int comparetup_datum(const SortTuple *a, const SortTuple *b,
				 Tuplesortstate *state);
static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_datum(Tuplesortstate *state, int tapenum,
			   SortTuple *stup);
static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len);
static void reversedirection_datum(Tuplesortstate *state);
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);

/*
 * Special versions of qsort just for SortTuple objects.  We have one for the
 * single-key case (qsort_ssup) and one for multi-key cases (qsort_tuple).
 */
#include "qsort_tuple.c"

/*
 *		tuplesort_begin_xxx
 *
 * Initialize for a tuple sort operation.
 *
 * After calling tuplesort_begin, the caller should call tuplesort_putXXX
 * zero or more times, then call tuplesort_performsort when all the tuples
 * have been supplied.  After performsort, retrieve the tuples in sorted
 * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
 * access was requested, rescan, markpos, and restorepos can also be called.)
 * Call tuplesort_end to terminate the operation and release memory/disk space.
 *
 * Each variant of tuplesort_begin has a workMem parameter specifying the
 * maximum number of kilobytes of RAM to use before spilling data to disk.
 * (The normal value of this parameter is work_mem, but some callers use
 * other values.)  Each variant also has a randomAccess parameter specifying
 * whether the caller needs non-sequential access to the sort result.
 */
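
/*
 * A minimal sketch of that calling sequence (hypothetical caller; tupDesc,
 * attNums, sortOps, collations, nullsFirst, and slot are assumed to be set
 * up elsewhere):
 *
 *		Tuplesortstate *ts = tuplesort_begin_heap(tupDesc, nkeys, attNums,
 *												  sortOps, collations,
 *												  nullsFirst,
 *												  work_mem, false);
 *
 *		while (... more input ...)
 *			tuplesort_puttupleslot(ts, slot);
 *		tuplesort_performsort(ts);
 *		while (tuplesort_gettupleslot(ts, true, slot))
 *			... process slot ...;
 *		tuplesort_end(ts);
 */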

static Tuplesortstate *
tuplesort_begin_common(int workMem, bool randomAccess)
{
	Tuplesortstate *state;
	MemoryContext sortcontext;
	MemoryContext oldcontext;

	/*
	 * Create a working memory context for this sort operation.  All data
	 * needed by the sort will live inside this context.
	 */
	sortcontext = AllocSetContextCreate(CurrentMemoryContext,
										"TupleSort",
										ALLOCSET_DEFAULT_MINSIZE,
										ALLOCSET_DEFAULT_INITSIZE,
										ALLOCSET_DEFAULT_MAXSIZE);

	/*
	 * Make the Tuplesortstate within the per-sort context.  This way, we
	 * don't need a separate pfree() operation for it at shutdown.
	 */
	oldcontext = MemoryContextSwitchTo(sortcontext);

	state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));

#ifdef TRACE_SORT
	if (trace_sort)
		pg_rusage_init(&state->ru_start);
#endif

	state->status = TSS_INITIAL;
	state->randomAccess = randomAccess;
	state->bounded = false;
	state->boundUsed = false;
	state->allowedMem = workMem * 1024L;
	state->availMem = state->allowedMem;
	state->sortcontext = sortcontext;
	state->tapeset = NULL;

	state->memtupcount = 0;
	state->memtupsize = 1024;	/* initial guess */
	state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));

	USEMEM(state, GetMemoryChunkSpace(state->memtuples));

	/* workMem must be large enough for the minimal memtuples array */
	if (LACKMEM(state))
		elog(ERROR, "insufficient memory allowed for sort");

	state->currentRun = 0;

	/*
	 * maxTapes, tapeRange, and Algorithm D variables will be initialized by
	 * inittapes(), if needed
	 */

	state->result_tape = -1;	/* flag that result tape has not been formed */

	MemoryContextSwitchTo(oldcontext);

	return state;
}

Tuplesortstate *
tuplesort_begin_heap(TupleDesc tupDesc,
					 int nkeys, AttrNumber *attNums,
					 Oid *sortOperators, Oid *sortCollations,
					 bool *nullsFirstFlags,
					 int workMem, bool randomAccess)
{
	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
	MemoryContext oldcontext;
	int			i;

	oldcontext = MemoryContextSwitchTo(state->sortcontext);

	AssertArg(nkeys > 0);

#ifdef TRACE_SORT
	if (trace_sort)
		elog(LOG,
			 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
			 nkeys, workMem, randomAccess ? 't' : 'f');
#endif

	state->nKeys = nkeys;

	TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
								false,	/* no unique check */
								nkeys,
								workMem,
								randomAccess);

	state->comparetup = comparetup_heap;
	state->copytup = copytup_heap;
	state->writetup = writetup_heap;
	state->readtup = readtup_heap;
	state->reversedirection = reversedirection_heap;

	state->tupDesc = tupDesc;	/* assume we need not copy tupDesc */

	/* Prepare SortSupport data for each column */
	state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));

	for (i = 0; i < nkeys; i++)
	{
		SortSupport sortKey = state->sortKeys + i;

		AssertArg(attNums[i] != 0);
		AssertArg(sortOperators[i] != 0);

		sortKey->ssup_cxt = CurrentMemoryContext;
		sortKey->ssup_collation = sortCollations[i];
		sortKey->ssup_nulls_first = nullsFirstFlags[i];
		sortKey->ssup_attno = attNums[i];

		PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
	}

	if (nkeys == 1)
		state->onlyKey = state->sortKeys;

	MemoryContextSwitchTo(oldcontext);

	return state;
}

Tuplesortstate *
tuplesort_begin_cluster(TupleDesc tupDesc,
						Relation indexRel,
						int workMem, bool randomAccess)
{
	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
	MemoryContext oldcontext;

	Assert(indexRel->rd_rel->relam == BTREE_AM_OID);

	oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
	if (trace_sort)
		elog(LOG,
			 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
			 RelationGetNumberOfAttributes(indexRel),
			 workMem, randomAccess ? 't' : 'f');
#endif

	state->nKeys = RelationGetNumberOfAttributes(indexRel);

	TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
								false,	/* no unique check */
								state->nKeys,
								workMem,
								randomAccess);

	state->comparetup = comparetup_cluster;
	state->copytup = copytup_cluster;
	state->writetup = writetup_cluster;
	state->readtup = readtup_cluster;
	state->reversedirection = reversedirection_index_btree;

	state->indexInfo = BuildIndexInfo(indexRel);
	state->indexScanKey = _bt_mkscankey_nodata(indexRel);

	state->tupDesc = tupDesc;	/* assume we need not copy tupDesc */

	if (state->indexInfo->ii_Expressions != NULL)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;

		/*
		 * We will need to use FormIndexDatum to evaluate the index
		 * expressions.  To do that, we need an EState, as well as a
		 * TupleTableSlot to put the table tuples into.  The econtext's
		 * scantuple has to point to that slot, too.
		 */
		state->estate = CreateExecutorState();
		slot = MakeSingleTupleTableSlot(tupDesc);
		econtext = GetPerTupleExprContext(state->estate);
		econtext->ecxt_scantuple = slot;
	}

	MemoryContextSwitchTo(oldcontext);

	return state;
}

Tuplesortstate *
tuplesort_begin_index_btree(Relation indexRel,
							bool enforceUnique,
							int workMem, bool randomAccess)
{
	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
	MemoryContext oldcontext;

	oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
	if (trace_sort)
		elog(LOG,
			 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
			 enforceUnique ? 't' : 'f',
			 workMem, randomAccess ? 't' : 'f');
#endif

	state->nKeys = RelationGetNumberOfAttributes(indexRel);

	TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
								enforceUnique,
								state->nKeys,
								workMem,
								randomAccess);

	state->comparetup = comparetup_index_btree;
	state->copytup = copytup_index;
	state->writetup = writetup_index;
	state->readtup = readtup_index;
	state->reversedirection = reversedirection_index_btree;

	state->indexRel = indexRel;
	state->indexScanKey = _bt_mkscankey_nodata(indexRel);
	state->enforceUnique = enforceUnique;

	MemoryContextSwitchTo(oldcontext);

	return state;
}

Tuplesortstate *
tuplesort_begin_index_hash(Relation indexRel,
						   uint32 hash_mask,
						   int workMem, bool randomAccess)
{
	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
	MemoryContext oldcontext;

	oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
	if (trace_sort)
		elog(LOG,
			 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
			 hash_mask,
			 workMem, randomAccess ? 't' : 'f');
#endif

	state->nKeys = 1;			/* Only one sort column, the hash code */

	state->comparetup = comparetup_index_hash;
	state->copytup = copytup_index;
	state->writetup = writetup_index;
	state->readtup = readtup_index;
	state->reversedirection = reversedirection_index_hash;

	state->indexRel = indexRel;

	state->hash_mask = hash_mask;

	MemoryContextSwitchTo(oldcontext);

	return state;
}

Tuplesortstate *
tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
					  bool nullsFirstFlag,
					  int workMem, bool randomAccess)
{
	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
	MemoryContext oldcontext;
	int16		typlen;
	bool		typbyval;

	oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
	if (trace_sort)
		elog(LOG,
			 "begin datum sort: workMem = %d, randomAccess = %c",
			 workMem, randomAccess ? 't' : 'f');
#endif

	state->nKeys = 1;			/* always a one-column sort */

	TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
								false,	/* no unique check */
								1,
								workMem,
								randomAccess);

	state->comparetup = comparetup_datum;
	state->copytup = copytup_datum;
	state->writetup = writetup_datum;
	state->readtup = readtup_datum;
	state->reversedirection = reversedirection_datum;

	state->datumType = datumType;

	/* Prepare SortSupport data */
	state->onlyKey = (SortSupport) palloc0(sizeof(SortSupportData));

	state->onlyKey->ssup_cxt = CurrentMemoryContext;
	state->onlyKey->ssup_collation = sortCollation;
	state->onlyKey->ssup_nulls_first = nullsFirstFlag;

	PrepareSortSupportFromOrderingOp(sortOperator, state->onlyKey);

	/* lookup necessary attributes of the datum type */
	get_typlenbyval(datumType, &typlen, &typbyval);
	state->datumTypeLen = typlen;
	state->datumTypeByVal = typbyval;

	MemoryContextSwitchTo(oldcontext);

	return state;
}

/*
 * tuplesort_set_bound
 *
 *	Advise tuplesort that at most the first N result tuples are required.
 *
 * Must be called before inserting any tuples.  (Actually, we could allow it
 * as long as the sort hasn't spilled to disk, but there seems no need for
 * delayed calls at the moment.)
 *
 * This is a hint only.  The tuplesort may still return more tuples than
 * requested.
 */
void
tuplesort_set_bound(Tuplesortstate *state, int64 bound)
{
	/* Assert we're called before loading any tuples */
	Assert(state->status == TSS_INITIAL);
	Assert(state->memtupcount == 0);
	Assert(!state->bounded);

#ifdef DEBUG_BOUNDED_SORT
	/* Honor GUC setting that disables the feature (for easy testing) */
	if (!optimize_bounded_sort)
		return;
#endif

	/* We want to be able to compute bound * 2, so limit the setting */
	if (bound > (int64) (INT_MAX / 2))
		return;

	state->bounded = true;
	state->bound = (int) bound;
}
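
/*
 * For example, a caller evaluating "ORDER BY x LIMIT 100" can call
 * tuplesort_set_bound(state, 100) right after tuplesort_begin_heap(),
 * letting the sort discard all but the 100 best tuples as input arrives.
 */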

/*
 * tuplesort_end
 *
 *	Release resources and clean up.
 *
 * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
 * pointing to garbage.  Be careful not to attempt to use or free such
 * pointers afterwards!
 */
void
tuplesort_end(Tuplesortstate *state)
{
	/* context swap probably not needed, but let's be safe */
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
	long		spaceUsed;

	if (state->tapeset)
		spaceUsed = LogicalTapeSetBlocks(state->tapeset);
	else
		spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
#endif

	/*
	 * Delete temporary "tape" files, if any.
	 *
	 * Note: want to include this in reported total cost of sort, hence need
	 * for two #ifdef TRACE_SORT sections.
	 */
	if (state->tapeset)
		LogicalTapeSetClose(state->tapeset);

#ifdef TRACE_SORT
	if (trace_sort)
	{
		if (state->tapeset)
			elog(LOG, "external sort ended, %ld disk blocks used: %s",
				 spaceUsed, pg_rusage_show(&state->ru_start));
		else
			elog(LOG, "internal sort ended, %ld KB used: %s",
				 spaceUsed, pg_rusage_show(&state->ru_start));
	}

	TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
#else

	/*
	 * If you disabled TRACE_SORT, you can still probe sort__done, but you
	 * ain't getting space-used stats.
	 */
	TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
#endif

	/* Free any execution state created for CLUSTER case */
	if (state->estate != NULL)
	{
		ExprContext *econtext = GetPerTupleExprContext(state->estate);

		ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
		FreeExecutorState(state->estate);
	}

	MemoryContextSwitchTo(oldcontext);

	/*
	 * Free the per-sort memory context, thereby releasing all working memory,
	 * including the Tuplesortstate struct itself.
	 */
	MemoryContextDelete(state->sortcontext);
}

/*
 * Grow the memtuples[] array, if possible within our memory constraint.
 * Return TRUE if able to enlarge the array, FALSE if not.
 *
 * At each increment we double the size of the array.  When we are short
 * on memory we could consider smaller increases, but because availMem
 * moves around with tuple addition/removal, this might result in thrashing.
 * Small increases in the array size are likely to be pretty inefficient.
 */
static bool
grow_memtuples(Tuplesortstate *state)
{
	/*
	 * We need to be sure that we do not cause LACKMEM to become true, else
	 * the space management algorithm will go nuts.  We assume here that the
	 * memory chunk overhead associated with the memtuples array is constant
	 * and so there will be no unexpected addition to what we ask for.  (The
	 * minimum array size established in tuplesort_begin_common is large
	 * enough to force palloc to treat it as a separate chunk, so this
	 * assumption should be good.  But let's check it.)
	 */
	if (state->availMem <= (long) (state->memtupsize * sizeof(SortTuple)))
		return false;

	/*
	 * On a 64-bit machine, allowedMem could be high enough to get us into
	 * trouble with MaxAllocSize, too.
	 */
	if ((Size) (state->memtupsize * 2) >= MaxAllocSize / sizeof(SortTuple))
		return false;

	FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
	state->memtupsize *= 2;
	state->memtuples = (SortTuple *)
		repalloc(state->memtuples,
				 state->memtupsize * sizeof(SortTuple));
	USEMEM(state, GetMemoryChunkSpace(state->memtuples));
	if (LACKMEM(state))
		elog(ERROR, "unexpected out-of-memory situation during sort");
	return true;
}

/*
 * Accept one tuple while collecting input data for sort.
 *
 * Note that the input data is always copied; the caller need not save it.
 */
void
tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	/*
	 * Copy the given tuple into memory we control, and decrease availMem.
	 * Then call the common code.
	 */
	COPYTUP(state, &stup, (void *) slot);

	puttuple_common(state, &stup);

	MemoryContextSwitchTo(oldcontext);
}

/*
 * Accept one tuple while collecting input data for sort.
 *
 * Note that the input data is always copied; the caller need not save it.
 */
void
tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	/*
	 * Copy the given tuple into memory we control, and decrease availMem.
	 * Then call the common code.
	 */
	COPYTUP(state, &stup, (void *) tup);

	puttuple_common(state, &stup);

	MemoryContextSwitchTo(oldcontext);
}

/*
 * Accept one index tuple while collecting input data for sort.
 *
 * Note that the input tuple is always copied; the caller need not save it.
 */
void
tuplesort_putindextuple(Tuplesortstate *state, IndexTuple tuple)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	/*
	 * Copy the given tuple into memory we control, and decrease availMem.
	 * Then call the common code.
	 */
	COPYTUP(state, &stup, (void *) tuple);

	puttuple_common(state, &stup);

	MemoryContextSwitchTo(oldcontext);
}

/*
 * Accept one Datum while collecting input data for sort.
 *
 * If the Datum is pass-by-ref type, the value will be copied.
 */
void
tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	/*
	 * If it's a pass-by-reference value, copy it into memory we control, and
	 * decrease availMem.  Then call the common code.
	 */
	if (isNull || state->datumTypeByVal)
	{
		stup.datum1 = val;
		stup.isnull1 = isNull;
		stup.tuple = NULL;		/* no separate storage */
	}
	else
	{
		stup.datum1 = datumCopy(val, false, state->datumTypeLen);
		stup.isnull1 = false;
		stup.tuple = DatumGetPointer(stup.datum1);
		USEMEM(state, GetMemoryChunkSpace(stup.tuple));
	}

	puttuple_common(state, &stup);

	MemoryContextSwitchTo(oldcontext);
}

/*
 * Shared code for tuple and datum cases.
 */
static void
puttuple_common(Tuplesortstate *state, SortTuple *tuple)
{
	switch (state->status)
	{
		case TSS_INITIAL:

			/*
			 * Save the tuple into the unsorted array.  First, grow the array
			 * as needed.  Note that we try to grow the array when there is
			 * still one free slot remaining --- if we fail, there'll still be
			 * room to store the incoming tuple, and then we'll switch to
			 * tape-based operation.
			 */
			if (state->memtupcount >= state->memtupsize - 1)
			{
				(void) grow_memtuples(state);
				Assert(state->memtupcount < state->memtupsize);
			}
			state->memtuples[state->memtupcount++] = *tuple;

			/*
			 * Check if it's time to switch over to a bounded heapsort.  We do
			 * so if the input tuple count exceeds twice the desired tuple
			 * count (this is a heuristic for where heapsort becomes cheaper
			 * than a quicksort), or if we've just filled workMem and have
			 * enough tuples to meet the bound.
			 *
			 * Note that once we enter TSS_BOUNDED state we will always try to
			 * complete the sort that way.  In the worst case, if later input
			 * tuples are larger than earlier ones, this might cause us to
			 * exceed workMem significantly.
			 */
			if (state->bounded &&
				(state->memtupcount > state->bound * 2 ||
				 (state->memtupcount > state->bound && LACKMEM(state))))
			{
#ifdef TRACE_SORT
				if (trace_sort)
					elog(LOG, "switching to bounded heapsort at %d tuples: %s",
						 state->memtupcount,
						 pg_rusage_show(&state->ru_start));
#endif
				make_bounded_heap(state);
				return;
			}

			/*
			 * Done if we still fit in available memory and have array slots.
			 */
			if (state->memtupcount < state->memtupsize && !LACKMEM(state))
				return;

			/*
			 * Nope; time to switch to tape-based operation.
			 */
			inittapes(state);

			/*
			 * Dump tuples until we are back under the limit.
			 */
			dumptuples(state, false);
			break;

		case TSS_BOUNDED:

			/*
			 * We don't want to grow the array here, so check whether the new
			 * tuple can be discarded before putting it in.  This should be a
			 * good speed optimization, too, since when there are many more
			 * input tuples than the bound, most input tuples can be discarded
			 * with just this one comparison.  Note that because we currently
			 * have the sort direction reversed, we must check for <= not >=.
			 */
			if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
			{
				/* new tuple <= top of the heap, so we can discard it */
				free_sort_tuple(state, tuple);
				CHECK_FOR_INTERRUPTS();
			}
			else
			{
				/* discard top of heap, sift up, insert new tuple */
				free_sort_tuple(state, &state->memtuples[0]);
				tuplesort_heap_siftup(state, false);
				tuplesort_heap_insert(state, tuple, 0, false);
			}
			break;

		case TSS_BUILDRUNS:

			/*
			 * Insert the tuple into the heap, with run number currentRun if
			 * it can go into the current run, else run number currentRun+1.
			 * The tuple can go into the current run if it is >= the first
			 * not-yet-output tuple.  (Actually, it could go into the current
			 * run if it is >= the most recently output tuple ... but that
			 * would require keeping around the tuple we last output, and it's
			 * simplest to let writetup free each tuple as soon as it's
			 * written.)
			 *
			 * Note there will always be at least one tuple in the heap at
			 * this point; see dumptuples.
			 */
			Assert(state->memtupcount > 0);
			if (COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
				tuplesort_heap_insert(state, tuple, state->currentRun, true);
			else
				tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);

			/*
			 * If we are over the memory limit, dump tuples till we're under.
			 */
			dumptuples(state, false);
			break;

		default:
			elog(ERROR, "invalid tuplesort state");
			break;
	}
}

/*
 * All tuples have been provided; finish the sort.
 */
void
tuplesort_performsort(Tuplesortstate *state)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
	if (trace_sort)
		elog(LOG, "performsort starting: %s",
			 pg_rusage_show(&state->ru_start));
#endif

	switch (state->status)
	{
		case TSS_INITIAL:

			/*
			 * We were able to accumulate all the tuples within the allowed
			 * amount of memory.  Just qsort 'em and we're done.
			 */
			if (state->memtupcount > 1)
			{
				/* Can we use the single-key sort function? */
				if (state->onlyKey != NULL)
					qsort_ssup(state->memtuples, state->memtupcount,
							   state->onlyKey);
				else
					qsort_tuple(state->memtuples,
								state->memtupcount,
								state->comparetup,
								state);
			}
			state->current = 0;
			state->eof_reached = false;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			state->status = TSS_SORTEDINMEM;
			break;

		case TSS_BOUNDED:

			/*
			 * We were able to accumulate all the tuples required for output
			 * in memory, using a heap to eliminate excess tuples.  Now we
			 * have to transform the heap to a properly-sorted array.
			 */
			sort_bounded_heap(state);
			state->current = 0;
			state->eof_reached = false;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			state->status = TSS_SORTEDINMEM;
			break;

		case TSS_BUILDRUNS:

			/*
			 * Finish tape-based sort.  First, flush all tuples remaining in
			 * memory out to tape; then merge until we have a single remaining
			 * run (or, if !randomAccess, one run per tape).  Note that
			 * mergeruns sets the correct state->status.
			 */
			dumptuples(state, true);
			mergeruns(state);
			state->eof_reached = false;
			state->markpos_block = 0L;
			state->markpos_offset = 0;
			state->markpos_eof = false;
			break;

		default:
			elog(ERROR, "invalid tuplesort state");
			break;
	}

#ifdef TRACE_SORT
	if (trace_sort)
	{
		if (state->status == TSS_FINALMERGE)
			elog(LOG, "performsort done (except %d-way final merge): %s",
				 state->activeTapes,
				 pg_rusage_show(&state->ru_start));
		else
			elog(LOG, "performsort done: %s",
				 pg_rusage_show(&state->ru_start));
	}
#endif

	MemoryContextSwitchTo(oldcontext);
}

/*
 * Internal routine to fetch the next tuple in either forward or back
 * direction into *stup.  Returns FALSE if no more tuples.
 * If *should_free is set, the caller must pfree stup.tuple when done with it.
 */
static bool
tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
						  SortTuple *stup, bool *should_free)
{
	unsigned int tuplen;

	switch (state->status)
	{
		case TSS_SORTEDINMEM:
			Assert(forward || state->randomAccess);
			*should_free = false;
			if (forward)
			{
				if (state->current < state->memtupcount)
				{
					*stup = state->memtuples[state->current++];
					return true;
				}
				state->eof_reached = true;

				/*
				 * Complain if caller tries to retrieve more tuples than
				 * originally asked for in a bounded sort.  This is because
				 * returning EOF here might be the wrong thing.
				 */
				if (state->bounded && state->current >= state->bound)
					elog(ERROR, "retrieved too many tuples in a bounded sort");

				return false;
			}
			else
			{
				if (state->current <= 0)
					return false;

				/*
				 * if all tuples are fetched already then we return last
				 * tuple, else - tuple before last returned.
				 */
				if (state->eof_reached)
					state->eof_reached = false;
				else
				{
					state->current--;	/* last returned tuple */
					if (state->current <= 0)
						return false;
				}
				*stup = state->memtuples[state->current - 1];
				return true;
			}
			break;

		case TSS_SORTEDONTAPE:
			Assert(forward || state->randomAccess);
			*should_free = true;
			if (forward)
			{
				if (state->eof_reached)
					return false;
				if ((tuplen = getlen(state, state->result_tape, true)) != 0)
				{
					READTUP(state, stup, state->result_tape, tuplen);
					return true;
				}
				else
				{
					state->eof_reached = true;
					return false;
				}
			}

			/*
			 * Backward.
			 *
			 * if all tuples are fetched already then we return last tuple,
			 * else - tuple before last returned.
			 */
			if (state->eof_reached)
			{
				/*
				 * Seek position is pointing just past the zero tuplen at the
				 * end of file; back up to fetch last tuple's ending length
				 * word.  If seek fails we must have a completely empty file.
				 */
				if (!LogicalTapeBackspace(state->tapeset,
										  state->result_tape,
										  2 * sizeof(unsigned int)))
					return false;
				state->eof_reached = false;
			}
			else
			{
				/*
				 * Back up and fetch previously-returned tuple's ending length
				 * word.  If seek fails, assume we are at start of file.
				 */
				if (!LogicalTapeBackspace(state->tapeset,
										  state->result_tape,
										  sizeof(unsigned int)))
					return false;
				tuplen = getlen(state, state->result_tape, false);

				/*
				 * Back up to get ending length word of tuple before it.
				 */
				if (!LogicalTapeBackspace(state->tapeset,
										  state->result_tape,
										  tuplen + 2 * sizeof(unsigned int)))
				{
					/*
					 * If that fails, presumably the prev tuple is the first
					 * in the file.  Back up so that it becomes next to read
					 * in forward direction (not obviously right, but that is
					 * what in-memory case does).
					 */
					if (!LogicalTapeBackspace(state->tapeset,
											  state->result_tape,
											  tuplen + sizeof(unsigned int)))
						elog(ERROR, "bogus tuple length in backward scan");
					return false;
				}
			}

			tuplen = getlen(state, state->result_tape, false);

			/*
			 * Now we have the length of the prior tuple, back up and read it.
			 * Note: READTUP expects we are positioned after the initial
			 * length word of the tuple, so back up to that point.
			 */
			if (!LogicalTapeBackspace(state->tapeset,
									  state->result_tape,
									  tuplen))
				elog(ERROR, "bogus tuple length in backward scan");
			READTUP(state, stup, state->result_tape, tuplen);
			return true;

		case TSS_FINALMERGE:
			Assert(forward);
			*should_free = true;

			/*
			 * This code should match the inner loop of mergeonerun().
			 */
			if (state->memtupcount > 0)
			{
				int			srcTape = state->memtuples[0].tupindex;
				Size		tuplen;
				int			tupIndex;
				SortTuple  *newtup;

				*stup = state->memtuples[0];
				/* returned tuple is no longer counted in our memory space */
				if (stup->tuple)
				{
					tuplen = GetMemoryChunkSpace(stup->tuple);
					state->availMem += tuplen;
					state->mergeavailmem[srcTape] += tuplen;
				}
				tuplesort_heap_siftup(state, false);
				if ((tupIndex = state->mergenext[srcTape]) == 0)
				{
					/*
					 * out of preloaded data on this tape, try to read more
					 *
					 * Unlike mergeonerun(), we only preload from the single
					 * tape that's run dry.  See mergepreread() comments.
					 */
					mergeprereadone(state, srcTape);

					/*
					 * if still no data, we've reached end of run on this tape
					 */
					if ((tupIndex = state->mergenext[srcTape]) == 0)
						return true;
				}
				/* pull next preread tuple from list, insert in heap */
				newtup = &state->memtuples[tupIndex];
				state->mergenext[srcTape] = newtup->tupindex;
				if (state->mergenext[srcTape] == 0)
					state->mergelast[srcTape] = 0;
				tuplesort_heap_insert(state, newtup, srcTape, false);
				/* put the now-unused memtuples entry on the freelist */
				newtup->tupindex = state->mergefreelist;
				state->mergefreelist = tupIndex;
				state->mergeavailslots[srcTape]++;
				return true;
			}
			return false;

		default:
			elog(ERROR, "invalid tuplesort state");
			return false;		/* keep compiler quiet */
	}
}

/*
 * Fetch the next tuple in either forward or back direction.
 * If successful, put tuple in slot and return TRUE; else, clear the slot
 * and return FALSE.
 */
bool
tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
					   TupleTableSlot *slot)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;
	bool		should_free;

	if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
		stup.tuple = NULL;

	MemoryContextSwitchTo(oldcontext);

	if (stup.tuple)
	{
		ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
		return true;
	}
	else
	{
		ExecClearTuple(slot);
		return false;
	}
}

/*
 * Fetch the next tuple in either forward or back direction.
 * Returns NULL if no more tuples.  If *should_free is set, the
 * caller must pfree the returned tuple when done with it.
 */
HeapTuple
tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
		stup.tuple = NULL;

	MemoryContextSwitchTo(oldcontext);

	return stup.tuple;
}

/*
 * Fetch the next index tuple in either forward or back direction.
 * Returns NULL if no more tuples.  If *should_free is set, the
 * caller must pfree the returned tuple when done with it.
 */
IndexTuple
tuplesort_getindextuple(Tuplesortstate *state, bool forward,
						bool *should_free)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
		stup.tuple = NULL;

	MemoryContextSwitchTo(oldcontext);

	return (IndexTuple) stup.tuple;
}

/*
 * Fetch the next Datum in either forward or back direction.
 * Returns FALSE if no more datums.
 *
 * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
 * and is now owned by the caller.
 */
bool
tuplesort_getdatum(Tuplesortstate *state, bool forward,
				   Datum *val, bool *isNull)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;
	bool		should_free;

	if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
	{
		MemoryContextSwitchTo(oldcontext);
		return false;
	}

	if (stup.isnull1 || state->datumTypeByVal)
	{
		*val = stup.datum1;
		*isNull = stup.isnull1;
	}
	else
	{
		if (should_free)
			*val = stup.datum1;
		else
			*val = datumCopy(stup.datum1, false, state->datumTypeLen);
		*isNull = false;
	}

	MemoryContextSwitchTo(oldcontext);

	return true;
}

/*
 * tuplesort_merge_order - report merge order we'll use for given memory
 * (note: "merge order" just means the number of input tapes in the merge).
 *
 * This is exported for use by the planner.  allowedMem is in bytes.
 */
int
tuplesort_merge_order(long allowedMem)
{
	int			mOrder;

	/*
	 * We need one tape for each merge input, plus another one for the output,
	 * and each of these tapes needs buffer space.  In addition we want
	 * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
	 * need so much).
	 *
	 * Note: you might be thinking we need to account for the memtuples[]
	 * array in this calculation, but we effectively treat that as part of the
	 * MERGE_BUFFER_SIZE workspace.
	 */
	mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
		(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);

	/* Even in minimum memory, use at least a MINORDER merge */
	mOrder = Max(mOrder, MINORDER);

	return mOrder;
}

/*
 * inittapes - initialize for tape sorting.
 *
 * This is called only if we have found we don't have room to sort in memory.
 */
static void
inittapes(Tuplesortstate *state)
{
	int			maxTapes,
				ntuples,
				j;
	long		tapeSpace;

	/* Compute number of tapes to use: merge order plus 1 */
	maxTapes = tuplesort_merge_order(state->allowedMem) + 1;

	/*
	 * We must have at least 2*maxTapes slots in the memtuples[] array, else
	 * we'd not have room for merge heap plus preread.  It seems unlikely that
	 * this case would ever occur, but be safe.
	 */
	maxTapes = Min(maxTapes, state->memtupsize / 2);

	state->maxTapes = maxTapes;
	state->tapeRange = maxTapes - 1;

#ifdef TRACE_SORT
	if (trace_sort)
		elog(LOG, "switching to external sort with %d tapes: %s",
			 maxTapes, pg_rusage_show(&state->ru_start));
#endif

	/*
	 * Decrease availMem to reflect the space needed for tape buffers; but
	 * don't decrease it to the point that we have no room for tuples.  (That
	 * case is only likely to occur if sorting pass-by-value Datums; in all
	 * other scenarios the memtuples[] array is unlikely to occupy more than
	 * half of allowedMem.  In the pass-by-value case it's not important to
	 * account for tuple space, so we don't care if LACKMEM becomes
	 * inaccurate.)
	 */
	tapeSpace = maxTapes * TAPE_BUFFER_OVERHEAD;
	if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
		USEMEM(state, tapeSpace);

	/*
	 * Make sure that the temp file(s) underlying the tape set are created in
	 * suitable temp tablespaces.
	 */
	PrepareTempTablespaces();

	/*
	 * Create the tape set and allocate the per-tape data arrays.
	 */
	state->tapeset = LogicalTapeSetCreate(maxTapes);

	state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
	state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
	state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
	state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
	state->mergeavailmem = (long *) palloc0(maxTapes * sizeof(long));
	state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
	state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
	state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
	state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));

	/*
	 * Convert the unsorted contents of memtuples[] into a heap.  Each tuple is
	 * marked as belonging to run number zero.
	 *
	 * NOTE: we pass false for checkIndex since there's no point in comparing
	 * indexes in this step, even though we do intend the indexes to be part
	 * of the sort key...
	 */
	ntuples = state->memtupcount;
	state->memtupcount = 0;		/* make the heap empty */
	for (j = 0; j < ntuples; j++)
	{
		/* Must copy source tuple to avoid possible overwrite */
		SortTuple	stup = state->memtuples[j];

		tuplesort_heap_insert(state, &stup, 0, false);
	}
	Assert(state->memtupcount == ntuples);

	state->currentRun = 0;

	/*
	 * Initialize variables of Algorithm D (step D1).
	 */
	for (j = 0; j < maxTapes; j++)
	{
		state->tp_fib[j] = 1;
		state->tp_runs[j] = 0;
		state->tp_dummy[j] = 1;
		state->tp_tapenum[j] = j;
	}
	state->tp_fib[state->tapeRange] = 0;
	state->tp_dummy[state->tapeRange] = 0;

	state->Level = 1;
	state->destTape = 0;

	state->status = TSS_BUILDRUNS;
}

/*
 * selectnewtape -- select new tape for new initial run.
 *
 * This is called after finishing a run when we know another run
 * must be started.  This implements steps D3, D4 of Algorithm D.
 */
static void
selectnewtape(Tuplesortstate *state)
{
	int			j;
	int			a;

	/* Step D3: advance j (destTape) */
	if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
	{
		state->destTape++;
		return;
	}
	if (state->tp_dummy[state->destTape] != 0)
	{
		state->destTape = 0;
		return;
	}

	/* Step D4: increase level */
	state->Level++;
	a = state->tp_fib[0];
	for (j = 0; j < state->tapeRange; j++)
	{
		state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
		state->tp_fib[j] = a + state->tp_fib[j + 1];
	}
	state->destTape = 0;
}
1794 * mergeruns -- merge all the completed initial runs.
1796 * This implements steps D5, D6 of Algorithm D. All input data has
1797 * already been written to initial runs on tape (see dumptuples).
1800 mergeruns(Tuplesortstate *state)
1807 Assert(state->status == TSS_BUILDRUNS);
1808 Assert(state->memtupcount == 0);
1811 * If we produced only one initial run (quite likely if the total data
1812 * volume is between 1X and 2X workMem), we can just use that tape as the
1813 * finished output, rather than doing a useless merge. (This obvious
1814 * optimization is not in Knuth's algorithm.)
1816 if (state->currentRun == 1)
1818 state->result_tape = state->tp_tapenum[state->destTape];
1819 /* must freeze and rewind the finished output tape */
1820 LogicalTapeFreeze(state->tapeset, state->result_tape);
1821 state->status = TSS_SORTEDONTAPE;
1825 /* End of step D2: rewind all output tapes to prepare for merging */
1826 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
1827 LogicalTapeRewind(state->tapeset, tapenum, false);
1832 * At this point we know that tape[T] is empty. If there's just one
1833 * (real or dummy) run left on each input tape, then only one merge
1834 * pass remains. If we don't have to produce a materialized sorted
1835 * tape, we can stop at this point and do the final merge on-the-fly.
1837 if (!state->randomAccess)
1839 bool allOneRun = true;
1841 Assert(state->tp_runs[state->tapeRange] == 0);
1842 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
1844 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
1852 /* Tell logtape.c we won't be writing anymore */
1853 LogicalTapeSetForgetFreeSpace(state->tapeset);
1854 /* Initialize for the final merge pass */
1856 state->status = TSS_FINALMERGE;
1861 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
1862 while (state->tp_runs[state->tapeRange - 1] ||
1863 state->tp_dummy[state->tapeRange - 1])
1864 {
1865 bool allDummy = true;
1867 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
1868 {
1869 if (state->tp_dummy[tapenum] == 0)
1870 {
1871 allDummy = false;
1872 break;
1873 }
1874 }
1876 if (allDummy)
1877 {
1878 state->tp_dummy[state->tapeRange]++;
1879 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
1880 state->tp_dummy[tapenum]--;
1881 }
1882 else
1883 mergeonerun(state);
1884 }
1886 /* Step D6: decrease level */
1887 if (--state->Level == 0)
1888 break;
1889 /* rewind output tape T to use as new input */
1890 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange],
1891 false);
1892 /* rewind used-up input tape P, and prepare it for write pass */
1893 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1],
1894 true);
1895 state->tp_runs[state->tapeRange - 1] = 0;
1898 * reassign tape units per step D6; note we no longer care about A[]
1900 svTape = state->tp_tapenum[state->tapeRange];
1901 svDummy = state->tp_dummy[state->tapeRange];
1902 svRuns = state->tp_runs[state->tapeRange];
1903 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
1904 {
1905 state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
1906 state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
1907 state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
1908 }
1909 state->tp_tapenum[0] = svTape;
1910 state->tp_dummy[0] = svDummy;
1911 state->tp_runs[0] = svRuns;
1915 * Done. Knuth says that the result is on TAPE[1], but since we exited
1916 * the loop without performing the last iteration of step D6, we have not
1917 * rearranged the tape unit assignment, and therefore the result is on
1918 * TAPE[T]. We need to do it this way so that we can freeze the final
1919 * output tape while rewinding it. The last iteration of step D6 would be
1920 * a waste of cycles anyway...
1922 state->result_tape = state->tp_tapenum[state->tapeRange];
1923 LogicalTapeFreeze(state->tapeset, state->result_tape);
1924 state->status = TSS_SORTEDONTAPE;
1928 * Merge one run from each input tape, except ones with dummy runs.
1930 * This is the inner loop of Algorithm D step D5. We know that the
1931 * output tape is TAPE[T].
1934 mergeonerun(Tuplesortstate *state)
1936 int destTape = state->tp_tapenum[state->tapeRange];
1944 * Start the merge by loading one tuple from each active source tape into
1945 * the heap. We can also decrease the input run/dummy run counts.
1946 */
1947 beginmerge(state);
1950 * Execute merge by repeatedly extracting lowest tuple in heap, writing it
1951 * out, and replacing it with next tuple from same tape (if there is
1952 * another one).
1954 while (state->memtupcount > 0)
1956 /* write the tuple to destTape */
1957 priorAvail = state->availMem;
1958 srcTape = state->memtuples[0].tupindex;
1959 WRITETUP(state, destTape, &state->memtuples[0]);
1960 /* writetup adjusted total free space, now fix per-tape space */
1961 spaceFreed = state->availMem - priorAvail;
1962 state->mergeavailmem[srcTape] += spaceFreed;
1963 /* compact the heap */
1964 tuplesort_heap_siftup(state, false);
1965 if ((tupIndex = state->mergenext[srcTape]) == 0)
1967 /* out of preloaded data on this tape, try to read more */
1968 mergepreread(state);
1969 /* if still no data, we've reached end of run on this tape */
1970 if ((tupIndex = state->mergenext[srcTape]) == 0)
1971 continue;
1973 /* pull next preread tuple from list, insert in heap */
1974 tup = &state->memtuples[tupIndex];
1975 state->mergenext[srcTape] = tup->tupindex;
1976 if (state->mergenext[srcTape] == 0)
1977 state->mergelast[srcTape] = 0;
1978 tuplesort_heap_insert(state, tup, srcTape, false);
1979 /* put the now-unused memtuples entry on the freelist */
1980 tup->tupindex = state->mergefreelist;
1981 state->mergefreelist = tupIndex;
1982 state->mergeavailslots[srcTape]++;
1986 * When the heap empties, we're done. Write an end-of-run marker on the
1987 * output tape, and increment its count of real runs.
1989 markrunend(state, destTape);
1990 state->tp_runs[state->tapeRange]++;
1994 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
1995 pg_rusage_show(&state->ru_start));
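/*
 * Illustrative sketch, not part of tuplesort.c: the shape of the merge loop
 * above, reduced to k pre-sorted int arrays merged into out[].  A linear
 * scan stands in for the heap's extract-min plus reinsert; all names are
 * hypothetical.  Returns the number of values written.
 */
static int
example_merge_k_runs(int **src, const int *srclen, int *pos, int k, int *out)
{
    int     nout = 0;

    for (;;)
    {
        int     best = -1;
        int     i;

        /* find the source whose frontmost value is smallest */
        for (i = 0; i < k; i++)
        {
            if (pos[i] < srclen[i] &&
                (best < 0 || src[i][pos[i]] < src[best][pos[best]]))
                best = i;
        }
        if (best < 0)
            break;              /* every run exhausted: merge complete */
        /* emit the winner and advance its source, as WRITETUP + reinsert */
        out[nout++] = src[best][pos[best]++];
    }
    return nout;
}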
2000 * beginmerge - initialize for a merge pass
2002 * We decrease the counts of real and dummy runs for each tape, and mark
2003 * which tapes contain active input runs in mergeactive[]. Then, load
2004 * as many tuples as we can from each active input tape, and finally
2005 * fill the merge heap with the first tuple from each active tape.
2008 beginmerge(Tuplesortstate *state)
2016 /* Heap should be empty here */
2017 Assert(state->memtupcount == 0);
2019 /* Adjust run counts and mark the active tapes */
2020 memset(state->mergeactive, 0,
2021 state->maxTapes * sizeof(*state->mergeactive));
2023 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2025 if (state->tp_dummy[tapenum] > 0)
2026 state->tp_dummy[tapenum]--;
2027 else
2028 {
2029 Assert(state->tp_runs[tapenum] > 0);
2030 state->tp_runs[tapenum]--;
2031 srcTape = state->tp_tapenum[tapenum];
2032 state->mergeactive[srcTape] = true;
2033 activeTapes++;
2034 }
2036 state->activeTapes = activeTapes;
2038 /* Clear merge-pass state variables */
2039 memset(state->mergenext, 0,
2040 state->maxTapes * sizeof(*state->mergenext));
2041 memset(state->mergelast, 0,
2042 state->maxTapes * sizeof(*state->mergelast));
2043 state->mergefreelist = 0; /* nothing in the freelist */
2044 state->mergefirstfree = activeTapes; /* 1st slot avail for preread */
2047 * Initialize space allocation to let each active input tape have an equal
2048 * share of preread space.
2050 Assert(activeTapes > 0);
2051 slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
2052 Assert(slotsPerTape > 0);
2053 spacePerTape = state->availMem / activeTapes;
2054 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2056 if (state->mergeactive[srcTape])
2057 {
2058 state->mergeavailslots[srcTape] = slotsPerTape;
2059 state->mergeavailmem[srcTape] = spacePerTape;
2060 }
2064 * Preread as many tuples as possible (and at least one) from each active
2065 * input tape.
2067 mergepreread(state);
2069 /* Load the merge heap with the first tuple from each input tape */
2070 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2072 int tupIndex = state->mergenext[srcTape];
2073 SortTuple *tup;
2075 if (tupIndex)
2076 {
2077 tup = &state->memtuples[tupIndex];
2078 state->mergenext[srcTape] = tup->tupindex;
2079 if (state->mergenext[srcTape] == 0)
2080 state->mergelast[srcTape] = 0;
2081 tuplesort_heap_insert(state, tup, srcTape, false);
2082 /* put the now-unused memtuples entry on the freelist */
2083 tup->tupindex = state->mergefreelist;
2084 state->mergefreelist = tupIndex;
2085 state->mergeavailslots[srcTape]++;
2086 }
2091 * mergepreread - load tuples from merge input tapes
2093 * This routine exists to improve sequentiality of reads during a merge pass,
2094 * as explained in the header comments of this file. Load tuples from each
2095 * active source tape until the tape's run is exhausted or it has used up
2096 * its fair share of available memory. In any case, we guarantee that there
2097 * is at least one preread tuple available from each unexhausted input tape.
2099 * We invoke this routine at the start of a merge pass for initial load,
2100 * and then whenever any tape's preread data runs out. Note that we load
2101 * as much data as possible from all tapes, not just the one that ran out.
2102 * This is because logtape.c works best with a usage pattern that alternates
2103 * between reading a lot of data and writing a lot of data, so whenever we
2104 * are forced to read, we should fill working memory completely.
2106 * In FINALMERGE state, we *don't* use this routine, but instead just preread
2107 * from the single tape that ran dry. There's no read/write alternation in
2108 * that state and so no point in scanning through all the tapes to fix one.
2109 * (Moreover, there may be quite a lot of inactive tapes in that state, since
2110 * we might have had many fewer runs than tapes. In a regular tape-to-tape
2111 * merge we can expect most of the tapes to be active.)
2114 mergepreread(Tuplesortstate *state)
2118 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2119 mergeprereadone(state, srcTape);
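/*
 * Illustrative sketch, not part of tuplesort.c: the freelist idiom used by
 * the preread code above, where unused memtuples[] slots are chained through
 * their tupindex fields.  Index 0 doubles as the list terminator, which is
 * safe because slot 0 belongs to the merge heap and never enters the
 * freelist.  ExampleSlot and the helper names are hypothetical.
 */
typedef struct ExampleSlot
{
    int     next;               /* plays the role of SortTuple.tupindex */
} ExampleSlot;

static int
example_slot_alloc(ExampleSlot *slots, int *freelist, int *firstfree)
{
    int     slot = *freelist;

    if (slot != 0)
        *freelist = slots[slot].next;   /* recycle a released slot */
    else
        slot = (*firstfree)++;  /* otherwise take a never-used slot */
    return slot;
}

static void
example_slot_release(ExampleSlot *slots, int *freelist, int slot)
{
    slots[slot].next = *freelist;       /* push onto the freelist chain */
    *freelist = slot;
}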
2123 * mergeprereadone - load tuples from one merge input tape
2125 * Read tuples from the specified tape until it has used up its free memory
2126 * or array slots; but ensure that we have at least one tuple, if any are
2127 * to be had.
2130 mergeprereadone(Tuplesortstate *state, int srcTape)
2132 unsigned int tuplen;
2138 if (!state->mergeactive[srcTape])
2139 return; /* tape's run is already exhausted */
2140 priorAvail = state->availMem;
2141 state->availMem = state->mergeavailmem[srcTape];
2142 while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) ||
2143 state->mergenext[srcTape] == 0)
2145 /* read next tuple, if any */
2146 if ((tuplen = getlen(state, srcTape, true)) == 0)
2147 {
2148 state->mergeactive[srcTape] = false;
2149 break;
2150 }
2151 READTUP(state, &stup, srcTape, tuplen);
2152 /* find a free slot in memtuples[] for it */
2153 tupIndex = state->mergefreelist;
2154 if (tupIndex)
2155 state->mergefreelist = state->memtuples[tupIndex].tupindex;
2156 else
2157 {
2158 tupIndex = state->mergefirstfree++;
2159 Assert(tupIndex < state->memtupsize);
2160 }
2161 state->mergeavailslots[srcTape]--;
2162 /* store tuple, append to list for its tape */
2163 stup.tupindex = 0;
2164 state->memtuples[tupIndex] = stup;
2165 if (state->mergelast[srcTape])
2166 state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
2167 else
2168 state->mergenext[srcTape] = tupIndex;
2169 state->mergelast[srcTape] = tupIndex;
2171 /* update per-tape and global availmem counts */
2172 spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
2173 state->mergeavailmem[srcTape] = state->availMem;
2174 state->availMem = priorAvail - spaceUsed;
2178 * dumptuples - remove tuples from heap and write to tape
2180 * This is used during initial-run building, but not during merging.
2182 * When alltuples = false, dump only enough tuples to get under the
2183 * availMem limit (and leave at least one tuple in the heap in any case,
2184 * since puttuple assumes it always has a tuple to compare to). We also
2185 * insist there be at least one free slot in the memtuples[] array.
2187 * When alltuples = true, dump everything currently in memory.
2188 * (This case is only used at end of input data.)
2190 * If we empty the heap, close out the current run and return (this should
2191 * only happen at end of input data). If we see that the tuple run number
2192 * at the top of the heap has changed, start a new run.
2195 dumptuples(Tuplesortstate *state, bool alltuples)
2197 while (alltuples ||
2198 (LACKMEM(state) && state->memtupcount > 1) ||
2199 state->memtupcount >= state->memtupsize)
2202 * Dump the heap's frontmost entry, and sift up to remove it from the
2203 * heap.
2205 Assert(state->memtupcount > 0);
2206 WRITETUP(state, state->tp_tapenum[state->destTape],
2207 &state->memtuples[0]);
2208 tuplesort_heap_siftup(state, true);
2211 * If the heap is empty *or* top run number has changed, we've
2212 * finished the current run.
2214 if (state->memtupcount == 0 ||
2215 state->currentRun != state->memtuples[0].tupindex)
2217 markrunend(state, state->tp_tapenum[state->destTape]);
2218 state->currentRun++;
2219 state->tp_runs[state->destTape]++;
2220 state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
2224 elog(LOG, "finished writing%s run %d to tape %d: %s",
2225 (state->memtupcount == 0) ? " final" : "",
2226 state->currentRun, state->destTape,
2227 pg_rusage_show(&state->ru_start));
2231 * Done if heap is empty, else prepare for new run.
2233 if (state->memtupcount == 0)
2234 break;
2235 Assert(state->currentRun == state->memtuples[0].tupindex);
2236 selectnewtape(state);
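/*
 * Illustrative sketch, not part of tuplesort.c: emitting run boundaries from
 * a stream already ordered on (run number, key), which is exactly the order
 * the run-building heap above hands tuples to dumptuples.  Because the run
 * number leads the ordering, a change of run number signals that no tuples
 * of the prior run remain.  Hypothetical names; -1 stands in for the
 * end-of-run marker and out[] must have room for n + runs + 1 entries.
 */
static int
example_emit_runs(const int *runnos, const int *keys, int n, int *out)
{
    int     current_run = 0;
    int     nruns = 1;
    int     i,
            j = 0;

    for (i = 0; i < n; i++)
    {
        if (runnos[i] != current_run)
        {
            out[j++] = -1;      /* close the current run, per markrunend() */
            current_run = runnos[i];
            nruns++;
        }
        out[j++] = keys[i];
    }
    out[j] = -1;                /* close out the final run */
    return nruns;
}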
2242 * tuplesort_rescan - rewind and replay the scan
2245 tuplesort_rescan(Tuplesortstate *state)
2247 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2249 Assert(state->randomAccess);
2251 switch (state->status)
2253 case TSS_SORTEDINMEM:
2254 state->current = 0;
2255 state->eof_reached = false;
2256 state->markpos_offset = 0;
2257 state->markpos_eof = false;
2258 break;
2259 case TSS_SORTEDONTAPE:
2260 LogicalTapeRewind(state->tapeset,
2261 state->result_tape,
2262 false);
2263 state->eof_reached = false;
2264 state->markpos_block = 0L;
2265 state->markpos_offset = 0;
2266 state->markpos_eof = false;
2267 break;
2268 default:
2269 elog(ERROR, "invalid tuplesort state");
2273 MemoryContextSwitchTo(oldcontext);
2277 * tuplesort_markpos - saves current position in the merged sort file
2280 tuplesort_markpos(Tuplesortstate *state)
2282 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2284 Assert(state->randomAccess);
2286 switch (state->status)
2288 case TSS_SORTEDINMEM:
2289 state->markpos_offset = state->current;
2290 state->markpos_eof = state->eof_reached;
2291 break;
2292 case TSS_SORTEDONTAPE:
2293 LogicalTapeTell(state->tapeset,
2294 state->result_tape,
2295 &state->markpos_block,
2296 &state->markpos_offset);
2297 state->markpos_eof = state->eof_reached;
2298 break;
2299 default:
2300 elog(ERROR, "invalid tuplesort state");
2304 MemoryContextSwitchTo(oldcontext);
2308 * tuplesort_restorepos - restores current position in merged sort file to
2309 * last saved position
2312 tuplesort_restorepos(Tuplesortstate *state)
2314 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2316 Assert(state->randomAccess);
2318 switch (state->status)
2320 case TSS_SORTEDINMEM:
2321 state->current = state->markpos_offset;
2322 state->eof_reached = state->markpos_eof;
2323 break;
2324 case TSS_SORTEDONTAPE:
2325 if (!LogicalTapeSeek(state->tapeset,
2326 state->result_tape,
2327 state->markpos_block,
2328 state->markpos_offset))
2329 elog(ERROR, "tuplesort_restorepos failed");
2330 state->eof_reached = state->markpos_eof;
2331 break;
2332 default:
2333 elog(ERROR, "invalid tuplesort state");
2337 MemoryContextSwitchTo(oldcontext);
2341 * tuplesort_get_stats - extract summary statistics
2343 * This can be called after tuplesort_performsort() finishes to obtain
2344 * printable summary information about how the sort was performed.
2345 * spaceUsed is measured in kilobytes.
2348 tuplesort_get_stats(Tuplesortstate *state,
2349 const char **sortMethod,
2350 const char **spaceType,
2351 long *spaceUsed)
2354 * Note: it might seem we should provide both memory and disk usage for a
2355 * disk-based sort. However, the current code doesn't track memory space
2356 * accurately once we have begun to return tuples to the caller (since we
2357 * don't account for pfree's the caller is expected to do), so we cannot
2358 * rely on availMem in a disk sort. This does not seem worth the overhead
2359 * to fix. Is it worth creating an API for the memory context code to
2360 * tell us how much is actually used in sortcontext?
2362 if (state->tapeset)
2363 {
2364 *spaceType = "Disk";
2365 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
2366 }
2367 else
2368 {
2369 *spaceType = "Memory";
2370 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
2371 }
2373 switch (state->status)
2375 case TSS_SORTEDINMEM:
2376 if (state->boundUsed)
2377 *sortMethod = "top-N heapsort";
2378 else
2379 *sortMethod = "quicksort";
2380 break;
2381 case TSS_SORTEDONTAPE:
2382 *sortMethod = "external sort";
2383 break;
2384 case TSS_FINALMERGE:
2385 *sortMethod = "external merge";
2386 break;
2387 default:
2388 *sortMethod = "still in progress";
2395 * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
2397 * Compare two SortTuples. If checkIndex is true, use the tuple index
2398 * as the front of the sort key; otherwise, no.
2401 #define HEAPCOMPARE(tup1,tup2) \
2402 (checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \
2403 ((tup1)->tupindex) - ((tup2)->tupindex) : \
2404 COMPARETUP(state, tup1, tup2))
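/*
 * Illustrative sketch, not part of tuplesort.c: HEAPCOMPARE spelled out as a
 * plain function over hypothetical (run, key) pairs.  During run building
 * checkIndex is true and the run number dominates the key; during merging
 * tupindex holds a source tape number instead, so checkIndex is passed false
 * and only the key matters.
 */
static int
example_heapcompare(int run1, int key1, int run2, int key2, int checkIndex)
{
    if (checkIndex && run1 != run2)
        return run1 - run2;     /* earlier run wins regardless of key */
    return (key1 > key2) - (key1 < key2);   /* ordinary 3-way key compare */
}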
2407 * Convert the existing unordered array of SortTuples to a bounded heap,
2408 * discarding all but the smallest "state->bound" tuples.
2410 * When working with a bounded heap, we want to keep the largest entry
2411 * at the root (array entry zero), instead of the smallest as in the normal
2412 * sort case. This allows us to discard the largest entry cheaply.
2413 * Therefore, we temporarily reverse the sort direction.
2415 * We assume that all entries in a bounded heap will always have tupindex
2416 * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
2417 * the direction of comparison for tupindexes.
2420 make_bounded_heap(Tuplesortstate *state)
2422 int tupcount = state->memtupcount;
2425 Assert(state->status == TSS_INITIAL);
2426 Assert(state->bounded);
2427 Assert(tupcount >= state->bound);
2429 /* Reverse sort direction so largest entry will be at root */
2430 REVERSEDIRECTION(state);
2432 state->memtupcount = 0; /* make the heap empty */
2433 for (i = 0; i < tupcount; i++)
2435 if (state->memtupcount >= state->bound &&
2436 COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
2438 /* New tuple would just get thrown out, so skip it */
2439 free_sort_tuple(state, &state->memtuples[i]);
2440 CHECK_FOR_INTERRUPTS();
2441 }
2442 else
2443 {
2444 /* Insert next tuple into heap */
2445 /* Must copy source tuple to avoid possible overwrite */
2446 SortTuple stup = state->memtuples[i];
2448 tuplesort_heap_insert(state, &stup, 0, false);
2450 /* If heap too full, discard largest entry */
2451 if (state->memtupcount > state->bound)
2453 free_sort_tuple(state, &state->memtuples[0]);
2454 tuplesort_heap_siftup(state, false);
2459 Assert(state->memtupcount == state->bound);
2460 state->status = TSS_BOUNDED;
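/*
 * Illustrative sketch, not part of tuplesort.c: why the bounded heap keeps
 * its *largest* entry at the root.  With a max-heap holding the N smallest
 * values seen so far, one comparison against the root decides whether a new
 * value displaces a survivor, and the displacement is a single sift-down.
 * Plain int max-heap; all names are hypothetical.
 */
static void
example_sift_down_max(int *h, int n, int i)
{
    for (;;)
    {
        int     j = 2 * i + 1;

        if (j >= n)
            break;
        if (j + 1 < n && h[j + 1] > h[j])
            j++;                /* descend toward the larger child */
        if (h[i] >= h[j])
            break;
        {
            int     tmp = h[i];

            h[i] = h[j];
            h[j] = tmp;
        }
        i = j;
    }
}

static void
example_bounded_offer(int *heap, int n, int v)
{
    if (v < heap[0])            /* beats the largest current survivor? */
    {
        heap[0] = v;            /* overwrite the root ... */
        example_sift_down_max(heap, n, 0);  /* ... and re-heapify */
    }
    /* else discard v: at least n smaller values have already been kept */
}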
2464 * Convert the bounded heap to a properly-sorted array
2467 sort_bounded_heap(Tuplesortstate *state)
2469 int tupcount = state->memtupcount;
2471 Assert(state->status == TSS_BOUNDED);
2472 Assert(state->bounded);
2473 Assert(tupcount == state->bound);
2476 * We can unheapify in place because each sift-up will remove the largest
2477 * entry, which we can promptly store in the newly freed slot at the end.
2478 * Once we're down to a single-entry heap, we're done.
2480 while (state->memtupcount > 1)
2481 {
2482 SortTuple stup = state->memtuples[0];
2484 /* this sifts-up the next-largest entry and decreases memtupcount */
2485 tuplesort_heap_siftup(state, false);
2486 state->memtuples[state->memtupcount] = stup;
2487 }
2488 state->memtupcount = tupcount;
2491 * Reverse sort direction back to the original state. This is not
2492 * actually necessary but seems like a good idea for tidiness.
2494 REVERSEDIRECTION(state);
2496 state->status = TSS_SORTEDINMEM;
2497 state->boundUsed = true;
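/*
 * Illustrative sketch, not part of tuplesort.c: unheapifying in place, as
 * sort_bounded_heap() does above.  Deleting the max-heap root frees exactly
 * the array slot where that largest-remaining entry belongs, so the array
 * ends up sorted ascending with no extra space.  Reuses the hypothetical
 * example_sift_down_max() from the sketch above.
 */
static void
example_heapsort_in_place(int *h, int n)
{
    int     count = n;

    while (count > 1)
    {
        int     top = h[0];     /* largest remaining entry */

        h[0] = h[--count];      /* shrink the heap by one ... */
        example_sift_down_max(h, count, 0); /* ... and re-heapify */
        h[count] = top;         /* the freed slot is its final position */
    }
}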
2501 * Insert a new tuple into an empty or existing heap, maintaining the
2502 * heap invariant. Caller is responsible for ensuring there's room.
2504 * Note: we assume *tuple is a temporary variable that can be scribbled on.
2505 * For some callers, tuple actually points to a memtuples[] entry above the
2506 * end of the heap. This is safe as long as it's not immediately adjacent
2507 * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
2508 * is, it might get overwritten before being moved into the heap!
2511 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
2512 int tupleindex, bool checkIndex)
2514 SortTuple *memtuples;
2518 * Save the tupleindex --- see notes above about writing on *tuple. It's a
2519 * historical artifact that tupleindex is passed as a separate argument
2520 * and not in *tuple, but it's notationally convenient so let's leave it
2521 * that way.
2523 tuple->tupindex = tupleindex;
2525 memtuples = state->memtuples;
2526 Assert(state->memtupcount < state->memtupsize);
2528 CHECK_FOR_INTERRUPTS();
2531 * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
2532 * using 1-based array indexes, not 0-based.
2534 j = state->memtupcount++;
2535 while (j > 0)
2536 {
2537 int i = (j - 1) >> 1;
2539 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
2540 break;
2541 memtuples[j] = memtuples[i];
2542 j = i;
2543 }
2544 memtuples[j] = *tuple;
2548 * The tuple at state->memtuples[0] has been removed from the heap.
2549 * Decrement memtupcount, and sift up to maintain the heap invariant.
2552 tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
2554 SortTuple *memtuples = state->memtuples;
2559 if (--state->memtupcount <= 0)
2562 CHECK_FOR_INTERRUPTS();
2564 n = state->memtupcount;
2565 tuple = &memtuples[n]; /* tuple that must be reinserted */
2566 i = 0; /* i is where the "hole" is */
2567 for (;;)
2568 {
2569 int j = 2 * i + 1;
2571 if (j >= n)
2572 break;
2573 if (j + 1 < n &&
2574 HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
2575 j++;
2576 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
2577 break;
2578 memtuples[i] = memtuples[j];
2579 i = j;
2580 }
2581 memtuples[i] = *tuple;
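/*
 * Illustrative sketch, not part of tuplesort.c: the hole-moving technique
 * both routines above use.  Instead of repeated swaps, entries are pulled
 * into the "hole" and the displaced value is stored once at the end, roughly
 * halving the stores.  Plain int min-heap; names are hypothetical, and the
 * caller must ensure capacity on insert and non-emptiness on delete.
 */
static void
example_heap_insert(int *h, int *count, int v)
{
    int     j = (*count)++;     /* hole starts at the end of the array */

    while (j > 0)
    {
        int     i = (j - 1) >> 1;   /* 0-based parent, as above */

        if (v >= h[i])
            break;
        h[j] = h[i];            /* pull the parent down into the hole */
        j = i;
    }
    h[j] = v;                   /* one store drops v into place */
}

static int
example_heap_delete_min(int *h, int *count)
{
    int     min = h[0];
    int     n = --(*count);
    int     v = h[n];           /* displaced last entry to reinsert */
    int     i = 0;              /* hole starts at the root */

    for (;;)
    {
        int     j = 2 * i + 1;

        if (j >= n)
            break;
        if (j + 1 < n && h[j + 1] < h[j])
            j++;                /* descend toward the smaller child */
        if (v <= h[j])
            break;
        h[i] = h[j];            /* pull the child up into the hole */
        i = j;
    }
    h[i] = v;
    return min;
}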
2586 * Tape interface routines
2590 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
2594 if (LogicalTapeRead(state->tapeset, tapenum,
2595 &len, sizeof(len)) != sizeof(len))
2596 elog(ERROR, "unexpected end of tape");
2597 if (len == 0 && !eofOK)
2598 elog(ERROR, "unexpected end of data");
2599 return len;
2603 markrunend(Tuplesortstate *state, int tapenum)
2605 unsigned int len = 0;
2607 LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
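/*
 * Illustrative sketch, not part of tuplesort.c: the record framing that
 * getlen() and markrunend() above rely on.  Every tuple goes out as a length
 * word followed by its body, and a zero length word terminates a run, which
 * is why getlen() may treat len == 0 as end-of-run.  This sketch frames
 * records into a plain memory buffer; names are hypothetical.
 */
#include <string.h>

static size_t
example_put_record(char *buf, const void *body, unsigned int len)
{
    memcpy(buf, &len, sizeof(len));         /* leading length word */
    memcpy(buf + sizeof(len), body, len);   /* then the body */
    return sizeof(len) + len;
}

static size_t
example_put_runend(char *buf)
{
    unsigned int len = 0;       /* zero length == end-of-run marker */

    memcpy(buf, &len, sizeof(len));
    return sizeof(len);
}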
2612 * Inline-able copy of FunctionCall2Coll() to save some cycles in sorting.
2615 myFunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
2617 FunctionCallInfoData fcinfo;
2618 Datum result;
2620 InitFunctionCallInfoData(fcinfo, flinfo, 2, collation, NULL, NULL);
2622 fcinfo.arg[0] = arg1;
2623 fcinfo.arg[1] = arg2;
2624 fcinfo.argnull[0] = false;
2625 fcinfo.argnull[1] = false;
2627 result = FunctionCallInvoke(&fcinfo);
2629 /* Check for null result, since caller is clearly not expecting one */
2630 if (fcinfo.isnull)
2631 elog(ERROR, "function %u returned NULL", fcinfo.flinfo->fn_oid);
2633 return result;
2637 * Apply a sort function (by now converted to fmgr lookup form)
2638 * and return a 3-way comparison result. This takes care of handling
2639 * reverse-sort and NULLs-ordering properly. We assume that DESC and
2640 * NULLS_FIRST options are encoded in sk_flags the same way btree does it.
2643 inlineApplySortFunction(FmgrInfo *sortFunction, int sk_flags, Oid collation,
2644 Datum datum1, bool isNull1,
2645 Datum datum2, bool isNull2)
2647 int compare;
2649 if (isNull1)
2650 {
2651 if (isNull2)
2652 compare = 0; /* NULL "=" NULL */
2653 else if (sk_flags & SK_BT_NULLS_FIRST)
2654 compare = -1; /* NULL "<" NOT_NULL */
2655 else
2656 compare = 1; /* NULL ">" NOT_NULL */
2657 }
2658 else if (isNull2)
2659 {
2660 if (sk_flags & SK_BT_NULLS_FIRST)
2661 compare = 1; /* NOT_NULL ">" NULL */
2662 else
2663 compare = -1; /* NOT_NULL "<" NULL */
2664 }
2665 else
2666 {
2667 compare = DatumGetInt32(myFunctionCall2Coll(sortFunction, collation,
2668 datum1, datum2));
2670 if (sk_flags & SK_BT_DESC)
2671 compare = -compare;
2672 }
2674 return compare;
2679 * Routines specialized for HeapTuple (actually MinimalTuple) case
2683 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
2685 SortSupport sortKey = state->sortKeys;
2692 /* Compare the leading sort key */
2693 compare = ApplySortComparator(a->datum1, a->isnull1,
2694 b->datum1, b->isnull1,
2695 sortKey);
2696 if (compare != 0)
2697 return compare;
2699 /* Compare additional sort keys */
2700 ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
2701 ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
2702 rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
2703 rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
2704 tupDesc = state->tupDesc;
2706 for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
2708 AttrNumber attno = sortKey->ssup_attno;
2714 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
2715 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
2717 compare = ApplySortComparator(datum1, isnull1,
2718 datum2, isnull2,
2719 sortKey);
2720 if (compare != 0)
2721 return compare;
2722 }
2724 return 0;
2728 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
2731 * We expect the passed "tup" to be a TupleTableSlot, and form a
2732 * MinimalTuple using the exported interface for that.
2734 TupleTableSlot *slot = (TupleTableSlot *) tup;
2738 /* copy the tuple into sort storage */
2739 tuple = ExecCopySlotMinimalTuple(slot);
2740 stup->tuple = (void *) tuple;
2741 USEMEM(state, GetMemoryChunkSpace(tuple));
2742 /* set up first-column key value */
2743 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
2744 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
2745 stup->datum1 = heap_getattr(&htup,
2746 state->sortKeys[0].ssup_attno,
2747 state->tupDesc,
2748 &stup->isnull1);
2752 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
2754 MinimalTuple tuple = (MinimalTuple) stup->tuple;
2756 /* the part of the MinimalTuple we'll write: */
2757 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
2758 unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
2760 /* total on-disk footprint: */
2761 unsigned int tuplen = tupbodylen + sizeof(int);
2763 LogicalTapeWrite(state->tapeset, tapenum,
2764 (void *) &tuplen, sizeof(tuplen));
2765 LogicalTapeWrite(state->tapeset, tapenum,
2766 (void *) tupbody, tupbodylen);
2767 if (state->randomAccess) /* need trailing length word? */
2768 LogicalTapeWrite(state->tapeset, tapenum,
2769 (void *) &tuplen, sizeof(tuplen));
2771 FREEMEM(state, GetMemoryChunkSpace(tuple));
2772 heap_free_minimal_tuple(tuple);
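/*
 * Illustrative sketch, not part of tuplesort.c: what the trailing length
 * word written above (under randomAccess) buys us.  When scanning backwards,
 * the copy of the length word sitting just behind the current position says
 * how far back the record starts, with no forward scan needed.  Here "pos"
 * is assumed to index just past a complete record, including its trailing
 * word; names are hypothetical.
 */
#include <string.h>

static size_t
example_prev_record_start(const char *buf, size_t pos)
{
    unsigned int len;           /* leading word + body, as written above */

    memcpy(&len, buf + pos - sizeof(len), sizeof(len));
    /* step back over the trailing word plus the (word + body) it counts */
    return pos - sizeof(len) - len;
}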
2776 readtup_heap(Tuplesortstate *state, SortTuple *stup,
2777 int tapenum, unsigned int len)
2779 unsigned int tupbodylen = len - sizeof(int);
2780 unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
2781 MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
2782 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
2785 USEMEM(state, GetMemoryChunkSpace(tuple));
2786 /* read in the tuple proper */
2787 tuple->t_len = tuplen;
2788 LogicalTapeReadExact(state->tapeset, tapenum,
2789 tupbody, tupbodylen);
2790 if (state->randomAccess) /* need trailing length word? */
2791 LogicalTapeReadExact(state->tapeset, tapenum,
2792 &tuplen, sizeof(tuplen));
2793 stup->tuple = (void *) tuple;
2794 /* set up first-column key value */
2795 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
2796 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
2797 stup->datum1 = heap_getattr(&htup,
2798 state->sortKeys[0].ssup_attno,
2799 state->tupDesc,
2800 &stup->isnull1);
2804 reversedirection_heap(Tuplesortstate *state)
2806 SortSupport sortKey = state->sortKeys;
2809 for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
2810 {
2811 sortKey->ssup_reverse = !sortKey->ssup_reverse;
2812 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
2813 }
2818 * Routines specialized for the CLUSTER case (HeapTuple data, with
2819 * comparisons per a btree index definition)
2823 comparetup_cluster(const SortTuple *a, const SortTuple *b,
2824 Tuplesortstate *state)
2826 ScanKey scanKey = state->indexScanKey;
2833 /* Compare the leading sort key, if it's simple */
2834 if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
2836 compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
2837 scanKey->sk_collation,
2838 a->datum1, a->isnull1,
2839 b->datum1, b->isnull1);
2840 if (compare != 0 || state->nKeys == 1)
2841 return compare;
2842 /* Compare additional columns the hard way */
2843 nkey = 1;
2844 }
2845 else
2846 {
2848 /* Must compare all keys the hard way */
2849 nkey = 0;
2850 }
2852 /* Compare additional sort keys */
2853 ltup = (HeapTuple) a->tuple;
2854 rtup = (HeapTuple) b->tuple;
2856 if (state->indexInfo->ii_Expressions == NULL)
2858 /* If not expression index, just compare the proper heap attrs */
2859 tupDesc = state->tupDesc;
2861 for (; nkey < state->nKeys; nkey++, scanKey++)
2863 AttrNumber attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
2869 datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
2870 datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
2872 compare = inlineApplySortFunction(&scanKey->sk_func,
2873 scanKey->sk_flags,
2874 scanKey->sk_collation,
2875 datum1, isnull1,
2876 datum2, isnull2);
2877 if (compare != 0)
2878 return compare;
2879 }
2880 }
2881 else
2882 {
2884 * In the expression index case, compute the whole index tuple and
2885 * then compare values. It would perhaps be faster to compute only as
2886 * many columns as we need to compare, but that would require
2887 * duplicating all the logic in FormIndexDatum.
2889 Datum l_index_values[INDEX_MAX_KEYS];
2890 bool l_index_isnull[INDEX_MAX_KEYS];
2891 Datum r_index_values[INDEX_MAX_KEYS];
2892 bool r_index_isnull[INDEX_MAX_KEYS];
2893 TupleTableSlot *ecxt_scantuple;
2895 /* Reset context each time to prevent memory leakage */
2896 ResetPerTupleExprContext(state->estate);
2898 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
2900 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
2901 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
2902 l_index_values, l_index_isnull);
2904 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
2905 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
2906 r_index_values, r_index_isnull);
2908 for (; nkey < state->nKeys; nkey++, scanKey++)
2910 compare = inlineApplySortFunction(&scanKey->sk_func,
2911 scanKey->sk_flags,
2912 scanKey->sk_collation,
2913 l_index_values[nkey],
2914 l_index_isnull[nkey],
2915 r_index_values[nkey],
2916 r_index_isnull[nkey]);
2917 if (compare != 0)
2918 return compare;
2919 }
2920 }
2922 return 0;
2926 copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
2928 HeapTuple tuple = (HeapTuple) tup;
2930 /* copy the tuple into sort storage */
2931 tuple = heap_copytuple(tuple);
2932 stup->tuple = (void *) tuple;
2933 USEMEM(state, GetMemoryChunkSpace(tuple));
2934 /* set up first-column key value, if it's a simple column */
2935 if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
2936 stup->datum1 = heap_getattr(tuple,
2937 state->indexInfo->ii_KeyAttrNumbers[0],
2938 state->tupDesc,
2939 &stup->isnull1);
2943 writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
2945 HeapTuple tuple = (HeapTuple) stup->tuple;
2946 unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
2948 /* We need to store t_self, but not other fields of HeapTupleData */
2949 LogicalTapeWrite(state->tapeset, tapenum,
2950 &tuplen, sizeof(tuplen));
2951 LogicalTapeWrite(state->tapeset, tapenum,
2952 &tuple->t_self, sizeof(ItemPointerData));
2953 LogicalTapeWrite(state->tapeset, tapenum,
2954 tuple->t_data, tuple->t_len);
2955 if (state->randomAccess) /* need trailing length word? */
2956 LogicalTapeWrite(state->tapeset, tapenum,
2957 &tuplen, sizeof(tuplen));
2959 FREEMEM(state, GetMemoryChunkSpace(tuple));
2960 heap_freetuple(tuple);
2964 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
2965 int tapenum, unsigned int tuplen)
2967 unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
2968 HeapTuple tuple = (HeapTuple) palloc(t_len + HEAPTUPLESIZE);
2970 USEMEM(state, GetMemoryChunkSpace(tuple));
2971 /* Reconstruct the HeapTupleData header */
2972 tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
2973 tuple->t_len = t_len;
2974 LogicalTapeReadExact(state->tapeset, tapenum,
2975 &tuple->t_self, sizeof(ItemPointerData));
2976 /* We don't currently bother to reconstruct t_tableOid */
2977 tuple->t_tableOid = InvalidOid;
2978 /* Read in the tuple body */
2979 LogicalTapeReadExact(state->tapeset, tapenum,
2980 tuple->t_data, tuple->t_len);
2981 if (state->randomAccess) /* need trailing length word? */
2982 LogicalTapeReadExact(state->tapeset, tapenum,
2983 &tuplen, sizeof(tuplen));
2984 stup->tuple = (void *) tuple;
2985 /* set up first-column key value, if it's a simple column */
2986 if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
2987 stup->datum1 = heap_getattr(tuple,
2988 state->indexInfo->ii_KeyAttrNumbers[0],
2989 state->tupDesc,
2990 &stup->isnull1);
2995 * Routines specialized for IndexTuple case
2997 * The btree and hash cases require separate comparison functions, but the
2998 * IndexTuple representation is the same so the copy/write/read support
2999 * functions can be shared.
3003 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
3004 Tuplesortstate *state)
3007 * This is similar to _bt_tuplecompare(), but we have already done the
3008 * index_getattr calls for the first column, and we need to keep track of
3009 * whether any null fields are present. Also see the special treatment
3010 * for equal keys at the end.
3012 ScanKey scanKey = state->indexScanKey;
3017 bool equal_hasnull = false;
3021 /* Compare the leading sort key */
3022 compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
3023 scanKey->sk_collation,
3024 a->datum1, a->isnull1,
3025 b->datum1, b->isnull1);
3026 if (compare != 0)
3027 return compare;
3029 /* they are equal, so we only need to examine one null flag */
3030 if (a->isnull1)
3031 equal_hasnull = true;
3033 /* Compare additional sort keys */
3034 tuple1 = (IndexTuple) a->tuple;
3035 tuple2 = (IndexTuple) b->tuple;
3036 keysz = state->nKeys;
3037 tupDes = RelationGetDescr(state->indexRel);
3039 for (nkey = 2; nkey <= keysz; nkey++, scanKey++)
3046 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
3047 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
3049 compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
3050 scanKey->sk_collation,
3051 datum1, isnull1,
3052 datum2, isnull2);
3053 if (compare != 0)
3054 return compare; /* done when we find unequal attributes */
3056 /* they are equal, so we only need to examine one null flag */
3057 if (isnull1)
3058 equal_hasnull = true;
3062 * If btree has asked us to enforce uniqueness, complain if two equal
3063 * tuples are detected (unless there was at least one NULL field).
3065 * It is sufficient to make the test here, because if two tuples are equal
3066 * they *must* get compared at some stage of the sort --- otherwise the
3067 * sort algorithm wouldn't have checked whether one must appear before the
3070 if (state->enforceUnique && !equal_hasnull)
3072 Datum values[INDEX_MAX_KEYS];
3073 bool isnull[INDEX_MAX_KEYS];
3076 * Some rather brain-dead implementations of qsort (such as the one in QNX 4)
3077 * will sometimes call the comparison routine to compare a value to itself,
3078 * but we always use our own implementation, which does not.
3080 Assert(tuple1 != tuple2);
3082 index_deform_tuple(tuple1, tupDes, values, isnull);
3083 ereport(ERROR,
3084 (errcode(ERRCODE_UNIQUE_VIOLATION),
3085 errmsg("could not create unique index \"%s\"",
3086 RelationGetRelationName(state->indexRel)),
3087 errdetail("Key %s is duplicated.",
3088 BuildIndexValueDescription(state->indexRel,
3089 values, isnull))));
3093 * If key values are equal, we sort on ItemPointer. This does not affect
3094 * validity of the finished index, but it may be useful to have index scans
3095 * in physical order.
3098 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3099 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3101 if (blk1 != blk2)
3102 return (blk1 < blk2) ? -1 : 1;
3105 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3106 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3108 if (pos1 != pos2)
3109 return (pos1 < pos2) ? -1 : 1;
3112 return 0;
3116 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
3117 Tuplesortstate *state)
3125 * Fetch hash keys and mask off bits we don't want to sort by. We know
3126 * that the first column of the index tuple is the hash key.
3128 Assert(!a->isnull1);
3129 hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
3130 Assert(!b->isnull1);
3131 hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
3133 if (hash1 > hash2)
3134 return 1;
3135 else if (hash1 < hash2)
3136 return -1;
3139 * If hash values are equal, we sort on ItemPointer. This does not affect
3140 * validity of the finished index, but it may be useful to have index scans
3141 * in physical order.
3143 tuple1 = (IndexTuple) a->tuple;
3144 tuple2 = (IndexTuple) b->tuple;
3147 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3148 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3150 if (blk1 != blk2)
3151 return (blk1 < blk2) ? -1 : 1;
3154 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3155 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3157 if (pos1 != pos2)
3158 return (pos1 < pos2) ? -1 : 1;
3161 return 0;
3165 copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
3167 IndexTuple tuple = (IndexTuple) tup;
3168 unsigned int tuplen = IndexTupleSize(tuple);
3169 IndexTuple newtuple;
3171 /* copy the tuple into sort storage */
3172 newtuple = (IndexTuple) palloc(tuplen);
3173 memcpy(newtuple, tuple, tuplen);
3174 USEMEM(state, GetMemoryChunkSpace(newtuple));
3175 stup->tuple = (void *) newtuple;
3176 /* set up first-column key value */
3177 stup->datum1 = index_getattr(newtuple,
3178 1,
3179 RelationGetDescr(state->indexRel),
3180 &stup->isnull1);
3184 writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
3186 IndexTuple tuple = (IndexTuple) stup->tuple;
3187 unsigned int tuplen;
3189 tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
3190 LogicalTapeWrite(state->tapeset, tapenum,
3191 (void *) &tuplen, sizeof(tuplen));
3192 LogicalTapeWrite(state->tapeset, tapenum,
3193 (void *) tuple, IndexTupleSize(tuple));
3194 if (state->randomAccess) /* need trailing length word? */
3195 LogicalTapeWrite(state->tapeset, tapenum,
3196 (void *) &tuplen, sizeof(tuplen));
3198 FREEMEM(state, GetMemoryChunkSpace(tuple));
3203 readtup_index(Tuplesortstate *state, SortTuple *stup,
3204 int tapenum, unsigned int len)
3206 unsigned int tuplen = len - sizeof(unsigned int);
3207 IndexTuple tuple = (IndexTuple) palloc(tuplen);
3209 USEMEM(state, GetMemoryChunkSpace(tuple));
3210 LogicalTapeReadExact(state->tapeset, tapenum,
3211 tuple, tuplen);
3212 if (state->randomAccess) /* need trailing length word? */
3213 LogicalTapeReadExact(state->tapeset, tapenum,
3214 &tuplen, sizeof(tuplen));
3215 stup->tuple = (void *) tuple;
3216 /* set up first-column key value */
3217 stup->datum1 = index_getattr(tuple,
3218 1,
3219 RelationGetDescr(state->indexRel),
3220 &stup->isnull1);
3224 reversedirection_index_btree(Tuplesortstate *state)
3226 ScanKey scanKey = state->indexScanKey;
3229 for (nkey = 0; nkey < state->nKeys; nkey++, scanKey++)
3231 scanKey->sk_flags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
3236 reversedirection_index_hash(Tuplesortstate *state)
3238 /* We don't support reversing direction in a hash index sort */
3239 elog(ERROR, "reversedirection_index_hash is not implemented");
3244 * Routines specialized for DatumTuple case
3248 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3250 return ApplySortComparator(a->datum1, a->isnull1,
3251 b->datum1, b->isnull1,
3252 state->onlyKey);
3256 copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
3258 /* Not currently needed */
3259 elog(ERROR, "copytup_datum() should not be called");
3263 writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
3266 unsigned int tuplen;
3267 unsigned int writtenlen;
3269 if (stup->isnull1)
3270 {
3271 waddr = NULL;
3272 tuplen = 0;
3273 }
3274 else if (state->datumTypeByVal)
3276 waddr = &stup->datum1;
3277 tuplen = sizeof(Datum);
3281 waddr = DatumGetPointer(stup->datum1);
3282 tuplen = datumGetSize(stup->datum1, false, state->datumTypeLen);
3283 Assert(tuplen != 0);
3286 writtenlen = tuplen + sizeof(unsigned int);
3288 LogicalTapeWrite(state->tapeset, tapenum,
3289 (void *) &writtenlen, sizeof(writtenlen));
3290 LogicalTapeWrite(state->tapeset, tapenum,
3291 waddr, tuplen);
3292 if (state->randomAccess) /* need trailing length word? */
3293 LogicalTapeWrite(state->tapeset, tapenum,
3294 (void *) &writtenlen, sizeof(writtenlen));
3296 if (stup->tuple)
3297 {
3298 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3299 pfree(stup->tuple);
3300 stup->tuple = NULL;
3301 }
3304 readtup_datum(Tuplesortstate *state, SortTuple *stup,
3305 int tapenum, unsigned int len)
3307 unsigned int tuplen = len - sizeof(unsigned int);
3310 if (tuplen == 0)
3311 {
3312 stup->datum1 = (Datum) 0;
3313 stup->isnull1 = true;
3314 stup->tuple = NULL;
3315 }
3316 else if (state->datumTypeByVal)
3318 Assert(tuplen == sizeof(Datum));
3319 LogicalTapeReadExact(state->tapeset, tapenum,
3320 &stup->datum1, tuplen);
3321 stup->isnull1 = false;
3326 void *raddr = palloc(tuplen);
3328 LogicalTapeReadExact(state->tapeset, tapenum,
3329 raddr, tuplen);
3330 stup->datum1 = PointerGetDatum(raddr);
3331 stup->isnull1 = false;
3332 stup->tuple = raddr;
3333 USEMEM(state, GetMemoryChunkSpace(raddr));
3336 if (state->randomAccess) /* need trailing length word? */
3337 LogicalTapeReadExact(state->tapeset, tapenum,
3338 &tuplen, sizeof(tuplen));
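/*
 * Illustrative sketch, not part of tuplesort.c: the envelope the datum
 * routines above agree on.  The length word counts itself plus the body, so
 * a stored length equal to sizeof(unsigned int) means a zero-byte body,
 * which encodes NULL; by-value datums ship their raw Datum bytes and
 * by-reference datums their pointed-to contents.  Hypothetical names,
 * writing into a plain memory buffer.
 */
#include <string.h>

static size_t
example_put_datum_body(char *buf, const void *body, unsigned int bodylen)
{
    unsigned int writtenlen = bodylen + sizeof(unsigned int);

    memcpy(buf, &writtenlen, sizeof(writtenlen));   /* self-counting word */
    if (bodylen > 0)            /* bodylen == 0 encodes a NULL datum */
        memcpy(buf + sizeof(writtenlen), body, bodylen);
    return sizeof(writtenlen) + bodylen;
}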
3342 reversedirection_datum(Tuplesortstate *state)
3344 state->onlyKey->ssup_reverse = !state->onlyKey->ssup_reverse;
3345 state->onlyKey->ssup_nulls_first = !state->onlyKey->ssup_nulls_first;
3349 * Convenience routine to free a tuple previously loaded into sort memory
3352 free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
3354 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3355 pfree(stup->tuple);