/*-------------------------------------------------------------------------
 *
 * tuplesort.c
 *        Generalized tuple sorting routines.
 *
 * This module handles sorting of heap tuples, index tuples, or single
 * Datums (and could easily support other kinds of sortable objects,
 * if necessary).  It works efficiently for both small and large amounts
 * of data.  Small amounts are sorted in-memory using qsort().  Large
 * amounts are sorted using temporary files and a standard external sort
 * algorithm.
 *
 * See Knuth, volume 3, for more than you want to know about the external
 * sorting algorithm.  We divide the input into sorted runs using replacement
 * selection, in the form of a priority tree implemented as a heap
 * (essentially his Algorithm 5.2.3H), then merge the runs using polyphase
 * merge, Knuth's Algorithm 5.4.2D.  The logical "tapes" used by Algorithm D
 * are implemented by logtape.c, which avoids space wastage by recycling
 * disk space as soon as each block is read from its "tape".
 *
 * We do not form the initial runs using Knuth's recommended replacement
 * selection data structure (Algorithm 5.4.1R), because it uses a fixed
 * number of records in memory at all times.  Since we are dealing with
 * tuples that may vary considerably in size, we want to be able to vary
 * the number of records kept in memory to ensure full utilization of the
 * allowed sort memory space.  So, we keep the tuples in a variable-size
 * heap, with the next record to go out at the top of the heap.  Like
 * Algorithm 5.4.1R, each record is stored with the run number that it
 * must go into, and we use (run number, key) as the ordering key for the
 * heap.  When the run number at the top of the heap changes, we know that
 * no more records of the prior run are left in the heap.
 *
 * The approximate amount of memory allowed for any one sort operation
 * is specified in kilobytes by the caller (most pass work_mem).  Initially,
 * we absorb tuples and simply store them in an unsorted array as long as
 * we haven't exceeded workMem.  If we reach the end of the input without
 * exceeding workMem, we sort the array using qsort() and subsequently return
 * tuples just by scanning the tuple array sequentially.  If we do exceed
 * workMem, we construct a heap using Algorithm H and begin to emit tuples
 * into sorted runs in temporary tapes, emitting just enough tuples at each
 * step to get back within the workMem limit.  Whenever the run number at
 * the top of the heap changes, we begin a new run with a new output tape
 * (selected per Algorithm D).  After the end of the input is reached,
 * we dump out remaining tuples in memory into a final run (or two),
 * then merge the runs using Algorithm D.
 *
 * When merging runs, we use a heap containing just the frontmost tuple from
 * each source run; we repeatedly output the smallest tuple and insert the
 * next tuple from its source tape (if any).  When the heap empties, the merge
 * is complete.  The basic merge algorithm thus needs very little memory ---
 * only M tuples for an M-way merge, and M is constrained to a small number.
 * However, we can still make good use of our full workMem allocation by
 * pre-reading additional tuples from each source tape.  Without prereading,
 * our access pattern to the temporary file would be very erratic; on average
 * we'd read one block from each of M source tapes during the same time that
 * we're writing M blocks to the output tape, so there is no sequentiality of
 * access at all, defeating the read-ahead methods used by most Unix kernels.
 * Worse, the output tape gets written into a very random sequence of blocks
 * of the temp file, ensuring that things will be even worse when it comes
 * time to read that tape.  A straightforward merge pass thus ends up doing a
 * lot of waiting for disk seeks.  We can improve matters by prereading from
 * each source tape sequentially, loading about workMem/M bytes from each tape
 * in turn.  Then we run the merge algorithm, writing but not reading until
 * one of the preloaded tuple series runs out.  Then we switch back to preread
 * mode, fill memory again, and repeat.  This approach helps to localize both
 * read and write accesses.
 *
 * When the caller requests random access to the sort result, we form
 * the final sorted run on a logical tape which is then "frozen", so
 * that we can access it randomly.  When the caller does not need random
 * access, we return from tuplesort_performsort() as soon as we are down
 * to one run per logical tape.  The final merge is then performed
 * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
 * saves one cycle of writing all the data out to disk and reading it in.
 *
 * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
 * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
 * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
 * tape drives are expensive beasts, and in particular that there will always
 * be many more runs than tape drives.  In our implementation a "tape drive"
 * doesn't cost much more than a few Kb of memory buffers, so we can afford
 * to have lots of them.  In particular, if we can have as many tape drives
 * as sorted runs, we can eliminate repeated I/O entirely.  In the current
 * code we determine the number of tapes M on the basis of workMem: we want
 * workMem/M to be large enough that we read a fair amount of data each time
 * we preread from a tape, so as to maintain the locality of access described
 * above.  Nonetheless, with large workMem we can have many tapes.
 *
 *
 * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *        src/backend/utils/sort/tuplesort.c
 *
 *-------------------------------------------------------------------------
 */
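
/*
 * Editorial illustration (not part of the original file): the (run number,
 * key) heap ordering described above, written out as a plain comparator.
 * The struct and function names are hypothetical, and TUPLESORT_DOC_EXAMPLES
 * is never defined, so this is documentation only.
 */
#ifdef TUPLESORT_DOC_EXAMPLES
typedef struct
{
        int                     runno;                  /* run this record is destined for */
        int                     key;                    /* stand-in for the real sort key */
} RSRecord;

static int
rs_compare(const RSRecord *a, const RSRecord *b)
{
        /* records destined for an earlier run always sort first... */
        if (a->runno != b->runno)
                return (a->runno < b->runno) ? -1 : 1;
        /* ...and within a run, the ordinary key ordering applies */
        if (a->key != b->key)
                return (a->key < b->key) ? -1 : 1;
        return 0;
}
#endif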

#include "postgres.h"

#include <limits.h>

#include "access/nbtree.h"
#include "catalog/index.h"
#include "commands/tablespace.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "utils/datum.h"
#include "utils/logtape.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/rel.h"
#include "utils/sortsupport.h"
#include "utils/tuplesort.h"


/* sort-type codes for sort__start probes */
#define HEAP_SORT               0
#define INDEX_SORT              1
#define DATUM_SORT              2
#define CLUSTER_SORT    3

/* GUC variables */
#ifdef TRACE_SORT
bool            trace_sort = false;
#endif

#ifdef DEBUG_BOUNDED_SORT
bool            optimize_bounded_sort = true;
#endif


/*
 * The objects we actually sort are SortTuple structs.  These contain
 * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
 * which is a separate palloc chunk --- we assume it is just one chunk and
 * can be freed by a simple pfree().  SortTuples also contain the tuple's
 * first key column in Datum/nullflag format, and an index integer.
 *
 * Storing the first key column lets us save heap_getattr or index_getattr
 * calls during tuple comparisons.  We could extract and save all the key
 * columns, not just the first, but this would increase code complexity and
 * overhead, and wouldn't actually save any comparison cycles in the common
 * case where the first key determines the comparison result.  Note that
 * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
 *
 * When sorting single Datums, the data value is represented directly by
 * datum1/isnull1.  If the datatype is pass-by-reference and isnull1 is false,
 * then datum1 points to a separately palloc'd data value that is also pointed
 * to by the "tuple" pointer; otherwise "tuple" is NULL.
 *
 * While building initial runs, tupindex holds the tuple's run number.  During
 * merge passes, we re-use it to hold the input tape number that each tuple in
 * the heap was read from, or to hold the index of the next tuple pre-read
 * from the same tape in the case of pre-read entries.  tupindex goes unused
 * if the sort occurs entirely in memory.
 */
typedef struct
{
        void       *tuple;                      /* the tuple proper */
        Datum           datum1;                 /* value of first key column */
        bool            isnull1;                /* is first key column NULL? */
        int                     tupindex;               /* see notes above */
} SortTuple;
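
/*
 * Editorial illustration: how the cached datum1/isnull1 fields pay off.  A
 * comparator can resolve most comparisons from the pre-extracted first key,
 * without deforming either tuple; only ties need heap_getattr or
 * index_getattr on the remaining columns (see comparetup_heap and friends,
 * declared later in this file).  This single-key sketch uses
 * ApplySortComparator from utils/sortsupport.h; the function name is
 * hypothetical.
 */
#ifdef TUPLESORT_DOC_EXAMPLES
static int
compare_first_key_only(const SortTuple *a, const SortTuple *b,
                                           SortSupport ssup)
{
        /* no tuple deforming: datum1/isnull1 were saved at copytup time */
        return ApplySortComparator(a->datum1, a->isnull1,
                                                           b->datum1, b->isnull1,
                                                           ssup);
}
#endif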


/*
 * Possible states of a Tuplesort object.  These denote the states that
 * persist between calls of Tuplesort routines.
 */
typedef enum
{
        TSS_INITIAL,                            /* Loading tuples; still within memory limit */
        TSS_BOUNDED,                            /* Loading tuples into bounded-size heap */
        TSS_BUILDRUNS,                          /* Loading tuples; writing to tape */
        TSS_SORTEDINMEM,                        /* Sort completed entirely in memory */
        TSS_SORTEDONTAPE,                       /* Sort completed, final run is on tape */
        TSS_FINALMERGE                          /* Performing final merge on-the-fly */
} TupSortStatus;
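
/*
 * Editorial note, summarizing the life cycle implied by the comments in this
 * file: a sort starts in TSS_INITIAL and finishes in exactly one of the
 * three "done" states:
 *
 *              TSS_INITIAL ------------------------------> TSS_SORTEDINMEM
 *              TSS_INITIAL --> TSS_BOUNDED ---------------> TSS_SORTEDINMEM
 *              TSS_INITIAL --> TSS_BUILDRUNS --> TSS_SORTEDONTAPE
 *                                                                                         or TSS_FINALMERGE
 *
 * TSS_SORTEDONTAPE is used when the caller requested random access (the
 * final run must be "frozen" on tape), or when the input happens to collapse
 * to a single run; otherwise the last merge is done on-the-fly in
 * TSS_FINALMERGE while the caller fetches tuples.
 */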

/*
 * Parameters for calculation of number of tapes to use --- see inittapes()
 * and tuplesort_merge_order().
 *
 * In this calculation we assume that each tape will cost us about 3 blocks
 * worth of buffer space (which is an underestimate for very large data
 * volumes, but it's probably close enough --- see logtape.c).
 *
 * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
 * tape during a preread cycle (see discussion at top of file).
 */
#define MINORDER                6               /* minimum merge order */
#define TAPE_BUFFER_OVERHEAD            (BLCKSZ * 3)
#define MERGE_BUFFER_SIZE                       (BLCKSZ * 32)
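
/*
 * Editorial illustration: how a merge order can be derived from the
 * constants above, in the spirit of tuplesort_merge_order() --- each tape
 * costs TAPE_BUFFER_OVERHEAD bytes of buffers plus about MERGE_BUFFER_SIZE
 * bytes of preread data, and we never merge fewer than MINORDER ways.  A
 * sketch of the idea, not a verbatim copy of the real function.
 */
#ifdef TUPLESORT_DOC_EXAMPLES
static int
sketch_merge_order(long allowedMem)
{
        int                     mOrder;

        mOrder = (int) ((allowedMem - TAPE_BUFFER_OVERHEAD) /
                                        (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD));

        /* even in minimum memory, use at least a MINORDER-way merge */
        return (mOrder < MINORDER) ? MINORDER : mOrder;
}
#endif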

typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
                                                                        Tuplesortstate *state);

/*
 * Private state of a Tuplesort operation.
 */
struct Tuplesortstate
{
        TupSortStatus status;           /* enumerated value as shown above */
        int                     nKeys;                  /* number of columns in sort key */
        bool            randomAccess;   /* did caller request random access? */
        bool            bounded;                /* did caller specify a maximum number of
                                                                 * tuples to return? */
        bool            boundUsed;              /* true if we made use of a bounded heap */
        int                     bound;                  /* if bounded, the maximum number of tuples */
        long            availMem;               /* remaining memory available, in bytes */
        long            allowedMem;             /* total memory allowed, in bytes */
        int                     maxTapes;               /* number of tapes (Knuth's T) */
        int                     tapeRange;              /* maxTapes-1 (Knuth's P) */
        MemoryContext sortcontext;      /* memory context holding all sort data */
        LogicalTapeSet *tapeset;        /* logtape.c object for tapes in a temp file */

        /*
         * These function pointers decouple the routines that must know what kind
         * of tuple we are sorting from the routines that don't need to know it.
         * They are set up by the tuplesort_begin_xxx routines.
         *
         * Function to compare two tuples; result is per qsort() convention, ie:
         * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
         * qsort_arg_comparator.
         */
        SortTupleComparator     comparetup;

        /*
         * Function to copy a supplied input tuple into palloc'd space and set up
         * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
         * state->availMem must be decreased by the amount of space used for the
         * tuple copy (note the SortTuple struct itself is not counted).
         */
        void            (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);

        /*
         * Function to write a stored tuple onto tape.  The representation of the
         * tuple on tape need not be the same as it is in memory; requirements on
         * the tape representation are given below.  After writing the tuple,
         * pfree() the out-of-line data (not the SortTuple struct!), and increase
         * state->availMem by the amount of memory space thereby released.
         */
        void            (*writetup) (Tuplesortstate *state, int tapenum,
                                                                                 SortTuple *stup);

        /*
         * Function to read a stored tuple from tape back into memory.  'len' is
         * the already-read length of the stored tuple.  Create a palloc'd copy,
         * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
         * decrease state->availMem by the amount of memory space consumed.
         */
        void            (*readtup) (Tuplesortstate *state, SortTuple *stup,
                                                                                int tapenum, unsigned int len);

        /*
         * Function to reverse the sort direction from its current state.  (We
         * could dispense with this if we wanted to enforce that all variants
         * represent the sort key information alike.)
         */
        void            (*reversedirection) (Tuplesortstate *state);

        /*
         * This array holds the tuples now in sort memory.  If we are in state
         * INITIAL, the tuples are in no particular order; if we are in state
         * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
         * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
         * H.  (Note that memtupcount only counts the tuples that are part of the
         * heap --- during merge passes, memtuples[] entries beyond tapeRange are
         * never in the heap and are used to hold pre-read tuples.)  In state
         * SORTEDONTAPE, the array is not used.
         */
        SortTuple  *memtuples;          /* array of SortTuple structs */
        int                     memtupcount;    /* number of tuples currently present */
        int                     memtupsize;             /* allocated length of memtuples array */

        /*
         * While building initial runs, this is the current output run number
         * (starting at 0).  Afterwards, it is the number of initial runs we made.
         */
        int                     currentRun;

        /*
         * Unless otherwise noted, all pointer variables below are pointers to
         * arrays of length maxTapes, holding per-tape data.
         */

        /*
         * These variables are only used during merge passes.  mergeactive[i] is
         * true if we are reading an input run from (actual) tape number i and
         * have not yet exhausted that run.  mergenext[i] is the memtuples index
         * of the next pre-read tuple (next to be loaded into the heap) for tape
         * i, or 0 if we are out of pre-read tuples.  mergelast[i] similarly
         * points to the last pre-read tuple from each tape.  mergeavailslots[i]
         * is the number of unused memtuples[] slots reserved for tape i, and
         * mergeavailmem[i] is the amount of unused space allocated for tape i.
         * mergefreelist and mergefirstfree keep track of unused locations in the
         * memtuples[] array.  The memtuples[].tupindex fields link together
         * pre-read tuples for each tape as well as recycled locations in
         * mergefreelist.  It is OK to use 0 as a null link in these lists, because
         * memtuples[0] is part of the merge heap and is never a pre-read tuple.
         */
        bool       *mergeactive;        /* active input run source? */
        int                *mergenext;          /* first preread tuple for each source */
        int                *mergelast;          /* last preread tuple for each source */
        int                *mergeavailslots;    /* slots left for prereading each tape */
        long       *mergeavailmem;      /* availMem for prereading each tape */
        int                     mergefreelist;  /* head of freelist of recycled slots */
        int                     mergefirstfree; /* first slot never used in this merge */

        /*
         * Variables for Algorithm D.  Note that destTape is a "logical" tape
         * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
         * "logical" and "actual" tape numbers straight!
         */
        int                     Level;                  /* Knuth's l */
        int                     destTape;               /* current output tape (Knuth's j, less 1) */
        int                *tp_fib;                     /* Target Fibonacci run counts (A[]) */
        int                *tp_runs;            /* # of real runs on each tape */
        int                *tp_dummy;           /* # of dummy runs for each tape (D[]) */
        int                *tp_tapenum;         /* Actual tape numbers (TAPE[]) */
        int                     activeTapes;    /* # of active input tapes in merge pass */

        /*
         * These variables are used after completion of sorting to keep track of
         * the next tuple to return.  (In the tape case, the tape's current read
         * position is also critical state.)
         */
        int                     result_tape;    /* actual tape number of finished output */
        int                     current;                /* array index (only used if SORTEDINMEM) */
        bool            eof_reached;    /* reached EOF (needed for cursors) */

        /* markpos_xxx holds marked position for mark and restore */
        long            markpos_block;  /* tape block# (only used if SORTEDONTAPE) */
        int                     markpos_offset; /* saved "current", or offset in tape block */
        bool            markpos_eof;    /* saved "eof_reached" */

        /*
         * These variables are specific to the MinimalTuple case; they are set by
         * tuplesort_begin_heap and used only by the MinimalTuple routines.
         */
        TupleDesc       tupDesc;
        SortSupport     sortKeys;               /* array of length nKeys */

        /*
         * This variable is shared by the single-key MinimalTuple case and the
         * Datum case.  Otherwise it's NULL.
         */
        SortSupport     onlyKey;

        /*
         * These variables are specific to the CLUSTER case; they are set by
         * tuplesort_begin_cluster.  Note CLUSTER also uses tupDesc and
         * indexScanKey.
         */
        IndexInfo  *indexInfo;          /* info about index being used for reference */
        EState     *estate;                     /* for evaluating index expressions */

        /*
         * These variables are specific to the IndexTuple case; they are set by
         * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
         */
        Relation        indexRel;               /* index being built */

        /* These are specific to the index_btree subcase: */
        ScanKey         indexScanKey;
        bool            enforceUnique;  /* complain if we find duplicate tuples */

        /* These are specific to the index_hash subcase: */
        uint32          hash_mask;              /* mask for sortable part of hash code */

        /*
         * These variables are specific to the Datum case; they are set by
         * tuplesort_begin_datum and used only by the DatumTuple routines.
         */
        Oid                     datumType;
        /* we need typelen and byval in order to know how to copy the Datums. */
        int                     datumTypeLen;
        bool            datumTypeByVal;

        /*
         * Resource snapshot for time of sort start.
         */
#ifdef TRACE_SORT
        PGRUsage        ru_start;
#endif
};

#define COMPARETUP(state,a,b)   ((*(state)->comparetup) (a, b, state))
#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup)       ((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
#define REVERSEDIRECTION(state) ((*(state)->reversedirection) (state))
#define LACKMEM(state)          ((state)->availMem < 0)
#define USEMEM(state,amt)       ((state)->availMem -= (amt))
#define FREEMEM(state,amt)      ((state)->availMem += (amt))

/*
 * NOTES about on-tape representation of tuples:
 *
 * We require the first "unsigned int" of a stored tuple to be the total size
 * on-tape of the tuple, including itself (so it is never zero; an all-zero
 * unsigned int is used to delimit runs).  The remainder of the stored tuple
 * may or may not match the in-memory representation of the tuple ---
 * any conversion needed is the job of the writetup and readtup routines.
 *
 * If state->randomAccess is true, then the stored representation of the
 * tuple must be followed by another "unsigned int" that is a copy of the
 * length --- so the total tape space used is actually sizeof(unsigned int)
 * more than the stored length value.  This allows read-backwards.  When
 * randomAccess is not true, the write/read routines may omit the extra
 * length word.
 *
 * writetup is expected to write both length words as well as the tuple
 * data.  When readtup is called, the tape is positioned just after the
 * front length word; readtup must read the tuple data and advance past
 * the back length word (if present).
 *
 * The write/read routines can make use of the tuple description data
 * stored in the Tuplesortstate record, if needed.  They are also expected
 * to adjust state->availMem by the amount of memory space (not tape space!)
 * released or consumed.  There is no error return from either writetup
 * or readtup; they should ereport() on failure.
 *
 *
 * NOTES about memory consumption calculations:
 *
 * We count space allocated for tuples against the workMem limit, plus
 * the space used by the variable-size memtuples array.  Fixed-size space
 * is not counted; it's small enough to not be interesting.
 *
 * Note that we count actual space used (as shown by GetMemoryChunkSpace)
 * rather than the originally-requested size.  This is important since
 * palloc can add substantial overhead.  It's not a complete answer since
 * we won't count any wasted space in palloc allocation blocks, but it's
 * a lot better than what we were doing before 7.3.
 */
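
/*
 * Editorial illustration of the length-word protocol above, for a
 * hypothetical raw-bytes payload.  Real writetup routines write a tuple
 * body here instead, and also pfree() the in-memory copy and FREEMEM its
 * space; the function name is hypothetical.
 */
#ifdef TUPLESORT_DOC_EXAMPLES
static void
sketch_writetup(Tuplesortstate *state, int tapenum,
                                void *data, unsigned int datalen)
{
        /* total on-tape size includes the leading length word itself */
        unsigned int tuplen = datalen + sizeof(unsigned int);

        LogicalTapeWrite(state->tapeset, tapenum,
                                         (void *) &tuplen, sizeof(tuplen));
        LogicalTapeWrite(state->tapeset, tapenum, data, datalen);
        if (state->randomAccess)        /* trailing length word for read-backwards */
                LogicalTapeWrite(state->tapeset, tapenum,
                                                 (void *) &tuplen, sizeof(tuplen));
}
#endif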

/* When using this macro, beware of double evaluation of len */
#define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
        do { \
                if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
                        elog(ERROR, "unexpected end of data"); \
        } while(0)

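/*
 * Editorial example of the double-evaluation hazard noted above: a call like
 * LogicalTapeReadExact(tapeset, tapenum, ptr, len++) would bump "len" twice,
 * once in the LogicalTapeRead call and once in the comparison.  Pass only
 * side-effect-free expressions for len.
 */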

static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
static void inittapes(Tuplesortstate *state);
static void selectnewtape(Tuplesortstate *state);
static void mergeruns(Tuplesortstate *state);
static void mergeonerun(Tuplesortstate *state);
static void beginmerge(Tuplesortstate *state);
static void mergepreread(Tuplesortstate *state);
static void mergeprereadone(Tuplesortstate *state, int srcTape);
static void dumptuples(Tuplesortstate *state, bool alltuples);
static void make_bounded_heap(Tuplesortstate *state);
static void sort_bounded_heap(Tuplesortstate *state);
static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
                                          int tupleindex, bool checkIndex);
static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
static void markrunend(Tuplesortstate *state, int tapenum);
static int comparetup_heap(const SortTuple *a, const SortTuple *b,
                                Tuplesortstate *state);
static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_heap(Tuplesortstate *state, int tapenum,
                          SortTuple *stup);
static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
                         int tapenum, unsigned int len);
static void reversedirection_heap(Tuplesortstate *state);
static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
                                   Tuplesortstate *state);
static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_cluster(Tuplesortstate *state, int tapenum,
                                 SortTuple *stup);
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
                                int tapenum, unsigned int len);
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
                                           Tuplesortstate *state);
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
                                          Tuplesortstate *state);
static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_index(Tuplesortstate *state, int tapenum,
                           SortTuple *stup);
static void readtup_index(Tuplesortstate *state, SortTuple *stup,
                          int tapenum, unsigned int len);
static void reversedirection_index_btree(Tuplesortstate *state);
static void reversedirection_index_hash(Tuplesortstate *state);
static int comparetup_datum(const SortTuple *a, const SortTuple *b,
                                 Tuplesortstate *state);
static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_datum(Tuplesortstate *state, int tapenum,
                           SortTuple *stup);
static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
                          int tapenum, unsigned int len);
static void reversedirection_datum(Tuplesortstate *state);
static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);

/*
 * Special versions of qsort just for SortTuple objects.  We have one for the
 * single-key case (qsort_ssup) and one for multi-key cases (qsort_tuple).
 */
#include "qsort_tuple.c"


/*
 *              tuplesort_begin_xxx
 *
 * Initialize for a tuple sort operation.
 *
 * After calling tuplesort_begin, the caller should call tuplesort_putXXX
 * zero or more times, then call tuplesort_performsort when all the tuples
 * have been supplied.  After performsort, retrieve the tuples in sorted
 * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
 * access was requested, rescan, markpos, and restorepos can also be called.)
 * Call tuplesort_end to terminate the operation and release memory/disk space.
 *
 * Each variant of tuplesort_begin has a workMem parameter specifying the
 * maximum number of kilobytes of RAM to use before spilling data to disk.
 * (The normal value of this parameter is work_mem, but some callers use
 * other values.)  Each variant also has a randomAccess parameter specifying
 * whether the caller needs non-sequential access to the sort result.
 */

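/*
 * Editorial illustration of the life cycle just described, using the Datum
 * case with int4 keys.  This is a sketch under assumptions: Int4LessOperator
 * (the int4 "<" operator OID from pg_operator.h) drives the sort, the
 * default collation is irrelevant for integers, and error handling is
 * omitted.  TUPLESORT_DOC_EXAMPLES is never defined.
 */
#ifdef TUPLESORT_DOC_EXAMPLES
#include "catalog/pg_operator.h"        /* Int4LessOperator */
#include "catalog/pg_type.h"            /* INT4OID */

static void
sketch_sort_some_ints(void)
{
        Tuplesortstate *sortstate;
        Datum           val;
        bool            isnull;
        int                     i;

        sortstate = tuplesort_begin_datum(INT4OID, Int4LessOperator,
                                                                          InvalidOid,   /* default collation */
                                                                          false,                /* nulls last */
                                                                          work_mem, false);

        for (i = 1000; i > 0; i--)
                tuplesort_putdatum(sortstate, Int32GetDatum(i), false);

        tuplesort_performsort(sortstate);

        while (tuplesort_getdatum(sortstate, true, &val, &isnull))
        {
                /* consume DatumGetInt32(val) here, in ascending order */
        }

        tuplesort_end(sortstate);
}
#endif
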
static Tuplesortstate *
tuplesort_begin_common(int workMem, bool randomAccess)
{
        Tuplesortstate *state;
        MemoryContext sortcontext;
        MemoryContext oldcontext;

        /*
         * Create a working memory context for this sort operation.  All data
         * needed by the sort will live inside this context.
         */
        sortcontext = AllocSetContextCreate(CurrentMemoryContext,
                                                                                "TupleSort",
                                                                                ALLOCSET_DEFAULT_MINSIZE,
                                                                                ALLOCSET_DEFAULT_INITSIZE,
                                                                                ALLOCSET_DEFAULT_MAXSIZE);

        /*
         * Make the Tuplesortstate within the per-sort context.  This way, we
         * don't need a separate pfree() operation for it at shutdown.
         */
        oldcontext = MemoryContextSwitchTo(sortcontext);

        state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));

#ifdef TRACE_SORT
        if (trace_sort)
                pg_rusage_init(&state->ru_start);
#endif

        state->status = TSS_INITIAL;
        state->randomAccess = randomAccess;
        state->bounded = false;
        state->boundUsed = false;
        state->allowedMem = workMem * 1024L;
        state->availMem = state->allowedMem;
        state->sortcontext = sortcontext;
        state->tapeset = NULL;

        state->memtupcount = 0;
        state->memtupsize = 1024;       /* initial guess */
        state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));

        USEMEM(state, GetMemoryChunkSpace(state->memtuples));

        /* workMem must be large enough for the minimal memtuples array */
        if (LACKMEM(state))
                elog(ERROR, "insufficient memory allowed for sort");

        state->currentRun = 0;

        /*
         * maxTapes, tapeRange, and Algorithm D variables will be initialized by
         * inittapes(), if needed
         */

        state->result_tape = -1;        /* flag that result tape has not been formed */

        MemoryContextSwitchTo(oldcontext);

        return state;
}

Tuplesortstate *
tuplesort_begin_heap(TupleDesc tupDesc,
                                         int nkeys, AttrNumber *attNums,
                                         Oid *sortOperators, Oid *sortCollations,
                                         bool *nullsFirstFlags,
                                         int workMem, bool randomAccess)
{
        Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
        MemoryContext oldcontext;
        int                     i;

        oldcontext = MemoryContextSwitchTo(state->sortcontext);

        AssertArg(nkeys > 0);

#ifdef TRACE_SORT
        if (trace_sort)
                elog(LOG,
                         "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
                         nkeys, workMem, randomAccess ? 't' : 'f');
#endif

        state->nKeys = nkeys;

        TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
                                                                false,  /* no unique check */
                                                                nkeys,
                                                                workMem,
                                                                randomAccess);

        state->comparetup = comparetup_heap;
        state->copytup = copytup_heap;
        state->writetup = writetup_heap;
        state->readtup = readtup_heap;
        state->reversedirection = reversedirection_heap;

        state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */

        /* Prepare SortSupport data for each column */
        state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));

        for (i = 0; i < nkeys; i++)
        {
                SortSupport     sortKey = state->sortKeys + i;

                AssertArg(attNums[i] != 0);
                AssertArg(sortOperators[i] != 0);

                sortKey->ssup_cxt = CurrentMemoryContext;
                sortKey->ssup_collation = sortCollations[i];
                sortKey->ssup_nulls_first = nullsFirstFlags[i];
                sortKey->ssup_attno = attNums[i];

                PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
        }

        if (nkeys == 1)
                state->onlyKey = state->sortKeys;

        MemoryContextSwitchTo(oldcontext);

        return state;
}

Tuplesortstate *
tuplesort_begin_cluster(TupleDesc tupDesc,
                                                Relation indexRel,
                                                int workMem, bool randomAccess)
{
        Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
        MemoryContext oldcontext;

        Assert(indexRel->rd_rel->relam == BTREE_AM_OID);

        oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
        if (trace_sort)
                elog(LOG,
                         "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
                         RelationGetNumberOfAttributes(indexRel),
                         workMem, randomAccess ? 't' : 'f');
#endif

        state->nKeys = RelationGetNumberOfAttributes(indexRel);

        TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
                                                                false,  /* no unique check */
                                                                state->nKeys,
                                                                workMem,
                                                                randomAccess);

        state->comparetup = comparetup_cluster;
        state->copytup = copytup_cluster;
        state->writetup = writetup_cluster;
        state->readtup = readtup_cluster;
        state->reversedirection = reversedirection_index_btree;

        state->indexInfo = BuildIndexInfo(indexRel);
        state->indexScanKey = _bt_mkscankey_nodata(indexRel);

        state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */

        if (state->indexInfo->ii_Expressions != NULL)
        {
                TupleTableSlot *slot;
                ExprContext *econtext;

                /*
                 * We will need to use FormIndexDatum to evaluate the index
                 * expressions.  To do that, we need an EState, as well as a
                 * TupleTableSlot to put the table tuples into.  The econtext's
                 * scantuple has to point to that slot, too.
                 */
                state->estate = CreateExecutorState();
                slot = MakeSingleTupleTableSlot(tupDesc);
                econtext = GetPerTupleExprContext(state->estate);
                econtext->ecxt_scantuple = slot;
        }

        MemoryContextSwitchTo(oldcontext);

        return state;
}

Tuplesortstate *
tuplesort_begin_index_btree(Relation indexRel,
                                                        bool enforceUnique,
                                                        int workMem, bool randomAccess)
{
        Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
        MemoryContext oldcontext;

        oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
        if (trace_sort)
                elog(LOG,
                         "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
                         enforceUnique ? 't' : 'f',
                         workMem, randomAccess ? 't' : 'f');
#endif

        state->nKeys = RelationGetNumberOfAttributes(indexRel);

        TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
                                                                enforceUnique,
                                                                state->nKeys,
                                                                workMem,
                                                                randomAccess);

        state->comparetup = comparetup_index_btree;
        state->copytup = copytup_index;
        state->writetup = writetup_index;
        state->readtup = readtup_index;
        state->reversedirection = reversedirection_index_btree;

        state->indexRel = indexRel;
        state->indexScanKey = _bt_mkscankey_nodata(indexRel);
        state->enforceUnique = enforceUnique;

        MemoryContextSwitchTo(oldcontext);

        return state;
}

Tuplesortstate *
tuplesort_begin_index_hash(Relation indexRel,
                                                   uint32 hash_mask,
                                                   int workMem, bool randomAccess)
{
        Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
        MemoryContext oldcontext;

        oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
        if (trace_sort)
                elog(LOG,
                "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
                         hash_mask,
                         workMem, randomAccess ? 't' : 'f');
#endif

        state->nKeys = 1;                       /* Only one sort column, the hash code */

        state->comparetup = comparetup_index_hash;
        state->copytup = copytup_index;
        state->writetup = writetup_index;
        state->readtup = readtup_index;
        state->reversedirection = reversedirection_index_hash;

        state->indexRel = indexRel;

        state->hash_mask = hash_mask;

        MemoryContextSwitchTo(oldcontext);

        return state;
}

Tuplesortstate *
tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
                                          bool nullsFirstFlag,
                                          int workMem, bool randomAccess)
{
        Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
        MemoryContext oldcontext;
        int16           typlen;
        bool            typbyval;

        oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
        if (trace_sort)
                elog(LOG,
                         "begin datum sort: workMem = %d, randomAccess = %c",
                         workMem, randomAccess ? 't' : 'f');
#endif

        state->nKeys = 1;                       /* always a one-column sort */

        TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
                                                                false,  /* no unique check */
                                                                1,
                                                                workMem,
                                                                randomAccess);

        state->comparetup = comparetup_datum;
        state->copytup = copytup_datum;
        state->writetup = writetup_datum;
        state->readtup = readtup_datum;
        state->reversedirection = reversedirection_datum;

        state->datumType = datumType;

        /* Prepare SortSupport data */
        state->onlyKey = (SortSupport) palloc0(sizeof(SortSupportData));

        state->onlyKey->ssup_cxt = CurrentMemoryContext;
        state->onlyKey->ssup_collation = sortCollation;
        state->onlyKey->ssup_nulls_first = nullsFirstFlag;

        PrepareSortSupportFromOrderingOp(sortOperator, state->onlyKey);

        /* lookup necessary attributes of the datum type */
        get_typlenbyval(datumType, &typlen, &typbyval);
        state->datumTypeLen = typlen;
        state->datumTypeByVal = typbyval;

        MemoryContextSwitchTo(oldcontext);

        return state;
}

/*
 * tuplesort_set_bound
 *
 *      Advise tuplesort that at most the first N result tuples are required.
 *
 * Must be called before inserting any tuples.  (Actually, we could allow it
 * as long as the sort hasn't spilled to disk, but there seems no need for
 * delayed calls at the moment.)
 *
 * This is a hint only.  The tuplesort may still return more tuples than
 * requested.
 */
void
tuplesort_set_bound(Tuplesortstate *state, int64 bound)
{
        /* Assert we're called before loading any tuples */
        Assert(state->status == TSS_INITIAL);
        Assert(state->memtupcount == 0);
        Assert(!state->bounded);

#ifdef DEBUG_BOUNDED_SORT
        /* Honor GUC setting that disables the feature (for easy testing) */
        if (!optimize_bounded_sort)
                return;
#endif

        /* We want to be able to compute bound * 2, so limit the setting */
        if (bound > (int64) (INT_MAX / 2))
                return;

        state->bounded = true;
        state->bound = (int) bound;
}
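
/*
 * Editorial usage note: bounded sorts come from LIMIT processing.  For
 * "ORDER BY ... LIMIT n OFFSET m" the executor can announce n + m as the
 * bound right after creating the sort, conceptually:
 *
 *              sortstate = tuplesort_begin_heap(...);
 *              if (bounded)
 *                      tuplesort_set_bound(sortstate, limit_count + limit_offset);
 *
 * Since the bound is only a hint, the caller must still stop fetching on
 * its own once it has the rows it needs.
 */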

/*
 * tuplesort_end
 *
 *      Release resources and clean up.
 *
 * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
 * pointing to garbage.  Be careful not to attempt to use or free such
 * pointers afterwards!
 */
void
tuplesort_end(Tuplesortstate *state)
{
        /* context swap probably not needed, but let's be safe */
        MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
        long            spaceUsed;

        if (state->tapeset)
                spaceUsed = LogicalTapeSetBlocks(state->tapeset);
        else
                spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
#endif

        /*
         * Delete temporary "tape" files, if any.
         *
         * Note: want to include this in reported total cost of sort, hence need
         * for two #ifdef TRACE_SORT sections.
         */
        if (state->tapeset)
                LogicalTapeSetClose(state->tapeset);

#ifdef TRACE_SORT
        if (trace_sort)
        {
                if (state->tapeset)
                        elog(LOG, "external sort ended, %ld disk blocks used: %s",
                                 spaceUsed, pg_rusage_show(&state->ru_start));
                else
                        elog(LOG, "internal sort ended, %ld KB used: %s",
                                 spaceUsed, pg_rusage_show(&state->ru_start));
        }

        TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
#else

        /*
         * If you disabled TRACE_SORT, you can still probe sort__done, but you
         * ain't getting space-used stats.
         */
        TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
#endif

        /* Free any execution state created for CLUSTER case */
        if (state->estate != NULL)
        {
                ExprContext *econtext = GetPerTupleExprContext(state->estate);

                ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
                FreeExecutorState(state->estate);
        }

        MemoryContextSwitchTo(oldcontext);

        /*
         * Free the per-sort memory context, thereby releasing all working memory,
         * including the Tuplesortstate struct itself.
         */
        MemoryContextDelete(state->sortcontext);
}

/*
 * Grow the memtuples[] array, if possible within our memory constraint.
 * Return TRUE if able to enlarge the array, FALSE if not.
 *
 * At each increment we double the size of the array.  When we are short
 * on memory we could consider smaller increases, but because availMem
 * moves around with tuple addition/removal, this might result in thrashing.
 * Small increases in the array size are likely to be pretty inefficient.
 */
static bool
grow_memtuples(Tuplesortstate *state)
{
        /*
         * We need to be sure that we do not cause LACKMEM to become true, else
         * the space management algorithm will go nuts.  We assume here that the
         * memory chunk overhead associated with the memtuples array is constant
         * and so there will be no unexpected addition to what we ask for.  (The
         * minimum array size established in tuplesort_begin_common is large
         * enough to force palloc to treat it as a separate chunk, so this
         * assumption should be good.  But let's check it.)
         */
        if (state->availMem <= (long) (state->memtupsize * sizeof(SortTuple)))
                return false;

        /*
         * On a 64-bit machine, allowedMem could be high enough to get us into
         * trouble with MaxAllocSize, too.
         */
        if ((Size) (state->memtupsize * 2) >= MaxAllocSize / sizeof(SortTuple))
                return false;

        FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
        state->memtupsize *= 2;
        state->memtuples = (SortTuple *)
                repalloc(state->memtuples,
                                 state->memtupsize * sizeof(SortTuple));
        USEMEM(state, GetMemoryChunkSpace(state->memtuples));
        if (LACKMEM(state))
                elog(ERROR, "unexpected out-of-memory situation during sort");
        return true;
}
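
/*
 * Editorial note on the guard at the top of grow_memtuples: doubling
 * memtupsize costs about memtupsize * sizeof(SortTuple) additional bytes
 * (the newly added half of the array), so requiring availMem to exceed that
 * amount guarantees the repalloc cannot drive us into LACKMEM, modulo the
 * constant chunk overhead discussed above.
 */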

/*
 * Accept one tuple while collecting input data for sort.
 *
 * Note that the input data is always copied; the caller need not save it.
 */
void
tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
{
        MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
        SortTuple       stup;

        /*
         * Copy the given tuple into memory we control, and decrease availMem.
         * Then call the common code.
         */
        COPYTUP(state, &stup, (void *) slot);

        puttuple_common(state, &stup);

        MemoryContextSwitchTo(oldcontext);
}

/*
 * Accept one tuple while collecting input data for sort.
 *
 * Note that the input data is always copied; the caller need not save it.
 */
void
tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
{
        MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
        SortTuple       stup;

        /*
         * Copy the given tuple into memory we control, and decrease availMem.
         * Then call the common code.
         */
        COPYTUP(state, &stup, (void *) tup);

        puttuple_common(state, &stup);

        MemoryContextSwitchTo(oldcontext);
}

/*
 * Accept one index tuple while collecting input data for sort.
 *
 * Note that the input tuple is always copied; the caller need not save it.
 */
void
tuplesort_putindextuple(Tuplesortstate *state, IndexTuple tuple)
{
        MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
        SortTuple       stup;

        /*
         * Copy the given tuple into memory we control, and decrease availMem.
         * Then call the common code.
         */
        COPYTUP(state, &stup, (void *) tuple);

        puttuple_common(state, &stup);

        MemoryContextSwitchTo(oldcontext);
}

/*
 * Accept one Datum while collecting input data for sort.
 *
 * If the Datum is of a pass-by-reference type, the value will be copied.
 */
void
tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
{
        MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
        SortTuple       stup;

        /*
         * If it's a pass-by-reference value, copy it into memory we control, and
         * decrease availMem.  Then call the common code.
         */
        if (isNull || state->datumTypeByVal)
        {
                stup.datum1 = val;
                stup.isnull1 = isNull;
                stup.tuple = NULL;              /* no separate storage */
        }
        else
        {
                stup.datum1 = datumCopy(val, false, state->datumTypeLen);
                stup.isnull1 = false;
                stup.tuple = DatumGetPointer(stup.datum1);
                USEMEM(state, GetMemoryChunkSpace(stup.tuple));
        }

        puttuple_common(state, &stup);

        MemoryContextSwitchTo(oldcontext);
}
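
/*
 * Editorial usage note for tuplesort_putdatum: a pass-by-value key (e.g.
 * int4) is stored directly in datum1 with tuple == NULL, while a
 * pass-by-reference key (e.g. text) is datumCopy'd, so the caller may free
 * its own copy immediately afterward.  Conceptually (hypothetical names):
 *
 *              tuplesort_putdatum(state, Int32GetDatum(42), false);
 *              tuplesort_putdatum(state, PointerGetDatum(my_text), false);
 *              pfree(my_text);         (safe: tuplesort kept its own copy)
 */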
1093
1094 /*
1095  * Shared code for tuple and datum cases.
1096  */
1097 static void
1098 puttuple_common(Tuplesortstate *state, SortTuple *tuple)
1099 {
1100         switch (state->status)
1101         {
1102                 case TSS_INITIAL:
1103
1104                         /*
1105                          * Save the tuple into the unsorted array.      First, grow the array
1106                          * as needed.  Note that we try to grow the array when there is
1107                          * still one free slot remaining --- if we fail, there'll still be
1108                          * room to store the incoming tuple, and then we'll switch to
1109                          * tape-based operation.
1110                          */
1111                         if (state->memtupcount >= state->memtupsize - 1)
1112                         {
1113                                 (void) grow_memtuples(state);
1114                                 Assert(state->memtupcount < state->memtupsize);
1115                         }
1116                         state->memtuples[state->memtupcount++] = *tuple;
1117
1118                         /*
1119                          * Check if it's time to switch over to a bounded heapsort. We do
1120                          * so if the input tuple count exceeds twice the desired tuple
1121                          * count (this is a heuristic for where heapsort becomes cheaper
1122                          * than a quicksort), or if we've just filled workMem and have
1123                          * enough tuples to meet the bound.
1124                          *
1125                          * Note that once we enter TSS_BOUNDED state we will always try to
1126                          * complete the sort that way.  In the worst case, if later input
1127                          * tuples are larger than earlier ones, this might cause us to
1128                          * exceed workMem significantly.
1129                          */
1130                         if (state->bounded &&
1131                                 (state->memtupcount > state->bound * 2 ||
1132                                  (state->memtupcount > state->bound && LACKMEM(state))))
1133                         {
1134 #ifdef TRACE_SORT
1135                                 if (trace_sort)
1136                                         elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1137                                                  state->memtupcount,
1138                                                  pg_rusage_show(&state->ru_start));
1139 #endif
1140                                 make_bounded_heap(state);
1141                                 return;
1142                         }
1143
1144                         /*
1145                          * Done if we still fit in available memory and have array slots.
1146                          */
1147                         if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1148                                 return;
1149
1150                         /*
1151                          * Nope; time to switch to tape-based operation.
1152                          */
1153                         inittapes(state);
1154
1155                         /*
1156                          * Dump tuples until we are back under the limit.
1157                          */
1158                         dumptuples(state, false);
1159                         break;
1160
1161                 case TSS_BOUNDED:
1162
1163                         /*
1164                          * We don't want to grow the array here, so check whether the new
1165                          * tuple can be discarded before putting it in.  This should be a
1166                          * good speed optimization, too, since when there are many more
1167                          * input tuples than the bound, most input tuples can be discarded
1168                          * with just this one comparison.  Note that because we currently
1169                          * have the sort direction reversed, we must check for <= not >=.
1170                          */
1171                         if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1172                         {
1173                                 /* new tuple <= top of the heap, so we can discard it */
1174                                 free_sort_tuple(state, tuple);
1175                                 CHECK_FOR_INTERRUPTS();
1176                         }
1177                         else
1178                         {
1179                                 /* discard top of heap, sift up, insert new tuple */
1180                                 free_sort_tuple(state, &state->memtuples[0]);
1181                                 tuplesort_heap_siftup(state, false);
1182                                 tuplesort_heap_insert(state, tuple, 0, false);
1183                         }
1184                         break;
1185
1186                 case TSS_BUILDRUNS:
1187
1188                         /*
1189                          * Insert the tuple into the heap, with run number currentRun if
1190                          * it can go into the current run, else run number currentRun+1.
1191                          * The tuple can go into the current run if it is >= the first
1192                          * not-yet-output tuple.  (Actually, it could go into the current
1193                          * run if it is >= the most recently output tuple ... but that
1194                          * would require keeping around the tuple we last output, and it's
1195                          * simplest to let writetup free each tuple as soon as it's
1196                          * written.)
1197                          *
1198                          * Note there will always be at least one tuple in the heap at
1199                          * this point; see dumptuples.
1200                          */
1201                         Assert(state->memtupcount > 0);
1202                         if (COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1203                                 tuplesort_heap_insert(state, tuple, state->currentRun, true);
1204                         else
1205                                 tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);
1206
1207                         /*
1208                          * If we are over the memory limit, dump tuples till we're under.
1209                          */
1210                         dumptuples(state, false);
1211                         break;
1212
1213                 default:
1214                         elog(ERROR, "invalid tuplesort state");
1215                         break;
1216         }
1217 }
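
/*
 * In short, the routine above drives the early states of the sort:
 * TSS_INITIAL accumulates tuples in memory, handing off to the bounded
 * heap (TSS_BOUNDED) once a bounded sort has collected enough input, or
 * to run building (TSS_BUILDRUNS, via inittapes) once memory or array
 * slots run out.
 */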
1218
1219 /*
1220  * All tuples have been provided; finish the sort.
1221  */
1222 void
1223 tuplesort_performsort(Tuplesortstate *state)
1224 {
1225         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1226
1227 #ifdef TRACE_SORT
1228         if (trace_sort)
1229                 elog(LOG, "performsort starting: %s",
1230                          pg_rusage_show(&state->ru_start));
1231 #endif
1232
1233         switch (state->status)
1234         {
1235                 case TSS_INITIAL:
1236
1237                         /*
1238                          * We were able to accumulate all the tuples within the allowed
1239                          * amount of memory.  Just qsort 'em and we're done.
1240                          */
1241                         if (state->memtupcount > 1)
1242                         {
1243                                 /* Can we use the single-key sort function? */
1244                                 if (state->onlyKey != NULL)
1245                                         qsort_ssup(state->memtuples, state->memtupcount,
1246                                                            state->onlyKey);
1247                                 else
1248                                         qsort_tuple(state->memtuples,
1249                                                                 state->memtupcount,
1250                                                                 state->comparetup,
1251                                                                 state);
1252                         }
1253                         state->current = 0;
1254                         state->eof_reached = false;
1255                         state->markpos_offset = 0;
1256                         state->markpos_eof = false;
1257                         state->status = TSS_SORTEDINMEM;
1258                         break;
1259
1260                 case TSS_BOUNDED:
1261
1262                         /*
1263                          * We were able to accumulate all the tuples required for output
1264                          * in memory, using a heap to eliminate excess tuples.  Now we
1265                          * have to transform the heap to a properly-sorted array.
1266                          */
1267                         sort_bounded_heap(state);
1268                         state->current = 0;
1269                         state->eof_reached = false;
1270                         state->markpos_offset = 0;
1271                         state->markpos_eof = false;
1272                         state->status = TSS_SORTEDINMEM;
1273                         break;
1274
1275                 case TSS_BUILDRUNS:
1276
1277                         /*
1278                          * Finish tape-based sort.  First, flush all tuples remaining in
1279                          * memory out to tape; then merge until we have a single remaining
1280                          * run (or, if !randomAccess, one run per tape). Note that
1281                          * mergeruns sets the correct state->status.
1282                          */
1283                         dumptuples(state, true);
1284                         mergeruns(state);
1285                         state->eof_reached = false;
1286                         state->markpos_block = 0L;
1287                         state->markpos_offset = 0;
1288                         state->markpos_eof = false;
1289                         break;
1290
1291                 default:
1292                         elog(ERROR, "invalid tuplesort state");
1293                         break;
1294         }
1295
1296 #ifdef TRACE_SORT
1297         if (trace_sort)
1298         {
1299                 if (state->status == TSS_FINALMERGE)
1300                         elog(LOG, "performsort done (except %d-way final merge): %s",
1301                                  state->activeTapes,
1302                                  pg_rusage_show(&state->ru_start));
1303                 else
1304                         elog(LOG, "performsort done: %s",
1305                                  pg_rusage_show(&state->ru_start));
1306         }
1307 #endif
1308
1309         MemoryContextSwitchTo(oldcontext);
1310 }
1311
1312 /*
1313  * Internal routine to fetch the next tuple in either forward or back
1314  * direction into *stup.  Returns FALSE if no more tuples.
1315  * If *should_free is set, the caller must pfree stup.tuple when done with it.
1316  */
1317 static bool
1318 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
1319                                                   SortTuple *stup, bool *should_free)
1320 {
1321         unsigned int tuplen;
1322
1323         switch (state->status)
1324         {
1325                 case TSS_SORTEDINMEM:
1326                         Assert(forward || state->randomAccess);
1327                         *should_free = false;
1328                         if (forward)
1329                         {
1330                                 if (state->current < state->memtupcount)
1331                                 {
1332                                         *stup = state->memtuples[state->current++];
1333                                         return true;
1334                                 }
1335                                 state->eof_reached = true;
1336
1337                                 /*
1338                                  * Complain if caller tries to retrieve more tuples than
1339                                  * originally asked for in a bounded sort.  Returning EOF here
1340                                  * would be wrong, since tuples beyond the bound were discarded.
1341                                  */
1342                                 if (state->bounded && state->current >= state->bound)
1343                                         elog(ERROR, "retrieved too many tuples in a bounded sort");
1344
1345                                 return false;
1346                         }
1347                         else
1348                         {
1349                                 if (state->current <= 0)
1350                                         return false;
1351
1352                                 /*
1353                                  * If all tuples were already fetched, return the last tuple;
1354                                  * otherwise, return the tuple before the last one returned.
1355                                  */
1356                                 if (state->eof_reached)
1357                                         state->eof_reached = false;
1358                                 else
1359                                 {
1360                                         state->current--;       /* last returned tuple */
1361                                         if (state->current <= 0)
1362                                                 return false;
1363                                 }
1364                                 *stup = state->memtuples[state->current - 1];
1365                                 return true;
1366                         }
1367                         break;
1368
1369                 case TSS_SORTEDONTAPE:
1370                         Assert(forward || state->randomAccess);
1371                         *should_free = true;
1372                         if (forward)
1373                         {
1374                                 if (state->eof_reached)
1375                                         return false;
1376                                 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1377                                 {
1378                                         READTUP(state, stup, state->result_tape, tuplen);
1379                                         return true;
1380                                 }
1381                                 else
1382                                 {
1383                                         state->eof_reached = true;
1384                                         return false;
1385                                 }
1386                         }
1387
1388                         /*
1389                          * Backward.
1390                          *
1391                          * If all tuples were already fetched, return the last tuple;
1392                          * otherwise, return the tuple before the last one returned.
1393                          */
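                        /*
                         * (Per writetup, each tuple is stored on tape as a leading
                         * length word, the tuple body, and, when randomAccess is
                         * true, a trailing copy of the length word; the backspacing
                         * below relies on those trailing copies to locate tuple
                         * boundaries.)
                         */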
1394                         if (state->eof_reached)
1395                         {
1396                                 /*
1397                                  * Seek position is pointing just past the zero tuplen at the
1398                                  * end of file; back up to fetch last tuple's ending length
1399                                  * word.  If seek fails we must have a completely empty file.
1400                                  */
1401                                 if (!LogicalTapeBackspace(state->tapeset,
1402                                                                                   state->result_tape,
1403                                                                                   2 * sizeof(unsigned int)))
1404                                         return false;
1405                                 state->eof_reached = false;
1406                         }
1407                         else
1408                         {
1409                                 /*
1410                                  * Back up and fetch previously-returned tuple's ending length
1411                                  * word.  If seek fails, assume we are at start of file.
1412                                  */
1413                                 if (!LogicalTapeBackspace(state->tapeset,
1414                                                                                   state->result_tape,
1415                                                                                   sizeof(unsigned int)))
1416                                         return false;
1417                                 tuplen = getlen(state, state->result_tape, false);
1418
1419                                 /*
1420                                  * Back up to get ending length word of tuple before it.
1421                                  */
1422                                 if (!LogicalTapeBackspace(state->tapeset,
1423                                                                                   state->result_tape,
1424                                                                                   tuplen + 2 * sizeof(unsigned int)))
1425                                 {
1426                                         /*
1427                                          * If that fails, presumably the prev tuple is the first
1428                                          * in the file.  Back up so that it becomes next to read
1429                                          * in forward direction (not obviously right, but that is
1430                                          * what the in-memory case does).
1431                                          */
1432                                         if (!LogicalTapeBackspace(state->tapeset,
1433                                                                                           state->result_tape,
1434                                                                                           tuplen + sizeof(unsigned int)))
1435                                                 elog(ERROR, "bogus tuple length in backward scan");
1436                                         return false;
1437                                 }
1438                         }
1439
1440                         tuplen = getlen(state, state->result_tape, false);
1441
1442                         /*
1443                          * Now we have the length of the prior tuple, back up and read it.
1444                          * Note: READTUP expects we are positioned after the initial
1445                          * length word of the tuple, so back up to that point.
1446                          */
1447                         if (!LogicalTapeBackspace(state->tapeset,
1448                                                                           state->result_tape,
1449                                                                           tuplen))
1450                                 elog(ERROR, "bogus tuple length in backward scan");
1451                         READTUP(state, stup, state->result_tape, tuplen);
1452                         return true;
1453
1454                 case TSS_FINALMERGE:
1455                         Assert(forward);
1456                         *should_free = true;
1457
1458                         /*
1459                          * This code should match the inner loop of mergeonerun().
1460                          */
1461                         if (state->memtupcount > 0)
1462                         {
1463                                 int                     srcTape = state->memtuples[0].tupindex;
1464                                 Size            tuplen;
1465                                 int                     tupIndex;
1466                                 SortTuple  *newtup;
1467
1468                                 *stup = state->memtuples[0];
1469                                 /* returned tuple is no longer counted in our memory space */
1470                                 if (stup->tuple)
1471                                 {
1472                                         tuplen = GetMemoryChunkSpace(stup->tuple);
1473                                         state->availMem += tuplen;
1474                                         state->mergeavailmem[srcTape] += tuplen;
1475                                 }
1476                                 tuplesort_heap_siftup(state, false);
1477                                 if ((tupIndex = state->mergenext[srcTape]) == 0)
1478                                 {
1479                                         /*
1480                                          * out of preloaded data on this tape, try to read more
1481                                          *
1482                                          * Unlike mergeonerun(), we only preload from the single
1483                                          * tape that's run dry.  See mergepreread() comments.
1484                                          */
1485                                         mergeprereadone(state, srcTape);
1486
1487                                         /*
1488                                          * if still no data, we've reached end of run on this tape
1489                                          */
1490                                         if ((tupIndex = state->mergenext[srcTape]) == 0)
1491                                                 return true;
1492                                 }
1493                                 /* pull next preread tuple from list, insert in heap */
1494                                 newtup = &state->memtuples[tupIndex];
1495                                 state->mergenext[srcTape] = newtup->tupindex;
1496                                 if (state->mergenext[srcTape] == 0)
1497                                         state->mergelast[srcTape] = 0;
1498                                 tuplesort_heap_insert(state, newtup, srcTape, false);
1499                                 /* put the now-unused memtuples entry on the freelist */
1500                                 newtup->tupindex = state->mergefreelist;
1501                                 state->mergefreelist = tupIndex;
1502                                 state->mergeavailslots[srcTape]++;
1503                                 return true;
1504                         }
1505                         return false;
1506
1507                 default:
1508                         elog(ERROR, "invalid tuplesort state");
1509                         return false;           /* keep compiler quiet */
1510         }
1511 }
1512
1513 /*
1514  * Fetch the next tuple in either forward or back direction.
1515  * If successful, put tuple in slot and return TRUE; else, clear the slot
1516  * and return FALSE.
1517  */
1518 bool
1519 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
1520                                            TupleTableSlot *slot)
1521 {
1522         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1523         SortTuple       stup;
1524         bool            should_free;
1525
1526         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
1527                 stup.tuple = NULL;
1528
1529         MemoryContextSwitchTo(oldcontext);
1530
1531         if (stup.tuple)
1532         {
1533                 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
1534                 return true;
1535         }
1536         else
1537         {
1538                 ExecClearTuple(slot);
1539                 return false;
1540         }
1541 }
1542
1543 /*
1544  * Fetch the next tuple in either forward or back direction.
1545  * Returns NULL if no more tuples.  If *should_free is set, the
1546  * caller must pfree the returned tuple when done with it.
1547  */
1548 HeapTuple
1549 tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
1550 {
1551         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1552         SortTuple       stup;
1553
1554         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
1555                 stup.tuple = NULL;
1556
1557         MemoryContextSwitchTo(oldcontext);
1558
1559         return stup.tuple;
1560 }
1561
1562 /*
1563  * Fetch the next index tuple in either forward or back direction.
1564  * Returns NULL if no more tuples.  If *should_free is set, the
1565  * caller must pfree the returned tuple when done with it.
1566  */
1567 IndexTuple
1568 tuplesort_getindextuple(Tuplesortstate *state, bool forward,
1569                                                 bool *should_free)
1570 {
1571         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1572         SortTuple       stup;
1573
1574         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
1575                 stup.tuple = NULL;
1576
1577         MemoryContextSwitchTo(oldcontext);
1578
1579         return (IndexTuple) stup.tuple;
1580 }
1581
1582 /*
1583  * Fetch the next Datum in either forward or back direction.
1584  * Returns FALSE if no more datums.
1585  *
1586  * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
1587  * and is now owned by the caller.
1588  */
1589 bool
1590 tuplesort_getdatum(Tuplesortstate *state, bool forward,
1591                                    Datum *val, bool *isNull)
1592 {
1593         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1594         SortTuple       stup;
1595         bool            should_free;
1596
1597         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
1598         {
1599                 MemoryContextSwitchTo(oldcontext);
1600                 return false;
1601         }
1602
1603         if (stup.isnull1 || state->datumTypeByVal)
1604         {
1605                 *val = stup.datum1;
1606                 *isNull = stup.isnull1;
1607         }
1608         else
1609         {
1610                 if (should_free)
1611                         *val = stup.datum1;
1612                 else
1613                         *val = datumCopy(stup.datum1, false, state->datumTypeLen);
1614                 *isNull = false;
1615         }
1616
1617         MemoryContextSwitchTo(oldcontext);
1618
1619         return true;
1620 }
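
/*
 * A minimal caller sketch (hypothetical, not code from this file), where
 * process_value stands for whatever the caller does with each value:
 *
 *              Datum   val;
 *              bool    isnull;
 *
 *              tuplesort_performsort(state);
 *              while (tuplesort_getdatum(state, true, &val, &isnull))
 *                      process_value(val, isnull);
 *
 * For pass-by-reference types, each returned value is owned by the
 * caller, per the header comment above.
 */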
1621
1622 /*
1623  * tuplesort_merge_order - report merge order we'll use for given memory
1624  * (note: "merge order" just means the number of input tapes in the merge).
1625  *
1626  * This is exported for use by the planner.  allowedMem is in bytes.
1627  */
1628 int
1629 tuplesort_merge_order(long allowedMem)
1630 {
1631         int                     mOrder;
1632
1633         /*
1634          * We need one tape for each merge input, plus another one for the output,
1635          * and each of these tapes needs buffer space.  In addition we want
1636          * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
1637          * count).
1638          *
1639          * Note: you might be thinking we need to account for the memtuples[]
1640          * array in this calculation, but we effectively treat that as part of the
1641          * MERGE_BUFFER_SIZE workspace.
1642          */
1643         mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
1644                 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
1645
1646         /* Even in minimum memory, use at least a MINORDER merge */
1647         mOrder = Max(mOrder, MINORDER);
1648
1649         return mOrder;
1650 }
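
/*
 * Worked example, assuming the constants defined earlier in this file
 * (TAPE_BUFFER_OVERHEAD = 3 * BLCKSZ and MERGE_BUFFER_SIZE = 32 * BLCKSZ,
 * with the default BLCKSZ of 8192): for allowedMem of 16 MB,
 *
 *              (16777216 - 24576) / (262144 + 24576) = 58
 *
 * so the planner can count on roughly a 58-way merge.
 */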
1651
1652 /*
1653  * inittapes - initialize for tape sorting.
1654  *
1655  * This is called only if we have found we don't have room to sort in memory.
1656  */
1657 static void
1658 inittapes(Tuplesortstate *state)
1659 {
1660         int                     maxTapes,
1661                                 ntuples,
1662                                 j;
1663         long            tapeSpace;
1664
1665         /* Compute number of tapes to use: merge order plus 1 */
1666         maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
1667
1668         /*
1669          * We must have at least 2*maxTapes slots in the memtuples[] array, else
1670          * we'd not have room for merge heap plus preread.  It seems unlikely that
1671          * this case would ever occur, but be safe.
1672          */
1673         maxTapes = Min(maxTapes, state->memtupsize / 2);
1674
1675         state->maxTapes = maxTapes;
1676         state->tapeRange = maxTapes - 1;
1677
1678 #ifdef TRACE_SORT
1679         if (trace_sort)
1680                 elog(LOG, "switching to external sort with %d tapes: %s",
1681                          maxTapes, pg_rusage_show(&state->ru_start));
1682 #endif
1683
1684         /*
1685          * Decrease availMem to reflect the space needed for tape buffers; but
1686          * don't decrease it to the point that we have no room for tuples. (That
1687          * case is only likely to occur if sorting pass-by-value Datums; in all
1688          * other scenarios the memtuples[] array is unlikely to occupy more than
1689          * half of allowedMem.  In the pass-by-value case it's not important to
1690          * account for tuple space, so we don't care if LACKMEM becomes
1691          * inaccurate.)
1692          */
1693         tapeSpace = maxTapes * TAPE_BUFFER_OVERHEAD;
1694         if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
1695                 USEMEM(state, tapeSpace);
1696
1697         /*
1698          * Make sure that the temp file(s) underlying the tape set are created in
1699          * suitable temp tablespaces.
1700          */
1701         PrepareTempTablespaces();
1702
1703         /*
1704          * Create the tape set and allocate the per-tape data arrays.
1705          */
1706         state->tapeset = LogicalTapeSetCreate(maxTapes);
1707
1708         state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
1709         state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
1710         state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
1711         state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
1712         state->mergeavailmem = (long *) palloc0(maxTapes * sizeof(long));
1713         state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
1714         state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
1715         state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
1716         state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
1717
1718         /*
1719          * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
1720          * marked as belonging to run number zero.
1721          *
1722          * NOTE: we pass false for checkIndex since there's no point in comparing
1723          * indexes in this step, even though we do intend the indexes to be part
1724          * of the sort key...
1725          */
1726         ntuples = state->memtupcount;
1727         state->memtupcount = 0;         /* make the heap empty */
1728         for (j = 0; j < ntuples; j++)
1729         {
1730                 /* Must copy source tuple to avoid possible overwrite */
1731                 SortTuple       stup = state->memtuples[j];
1732
1733                 tuplesort_heap_insert(state, &stup, 0, false);
1734         }
1735         Assert(state->memtupcount == ntuples);
1736
1737         state->currentRun = 0;
1738
1739         /*
1740          * Initialize variables of Algorithm D (step D1).
1741          */
1742         for (j = 0; j < maxTapes; j++)
1743         {
1744                 state->tp_fib[j] = 1;
1745                 state->tp_runs[j] = 0;
1746                 state->tp_dummy[j] = 1;
1747                 state->tp_tapenum[j] = j;
1748         }
1749         state->tp_fib[state->tapeRange] = 0;
1750         state->tp_dummy[state->tapeRange] = 0;
1751
1752         state->Level = 1;
1753         state->destTape = 0;
1754
1755         state->status = TSS_BUILDRUNS;
1756 }
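
/*
 * Continuing the example under tuplesort_merge_order: 16 MB of allowedMem
 * suggests 58 + 1 = 59 tapes, which requires memtupsize >= 118; with a
 * smaller array, the Min() clamp above trades tapes for heap and preread
 * slots instead.
 */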
1757
1758 /*
1759  * selectnewtape -- select new tape for new initial run.
1760  *
1761  * This is called after finishing a run when we know another run
1762  * must be started.  This implements steps D3, D4 of Algorithm D.
1763  */
1764 static void
1765 selectnewtape(Tuplesortstate *state)
1766 {
1767         int                     j;
1768         int                     a;
1769
1770         /* Step D3: advance j (destTape) */
1771         if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
1772         {
1773                 state->destTape++;
1774                 return;
1775         }
1776         if (state->tp_dummy[state->destTape] != 0)
1777         {
1778                 state->destTape = 0;
1779                 return;
1780         }
1781
1782         /* Step D4: increase level */
1783         state->Level++;
1784         a = state->tp_fib[0];
1785         for (j = 0; j < state->tapeRange; j++)
1786         {
1787                 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
1788                 state->tp_fib[j] = a + state->tp_fib[j + 1];
1789         }
1790         state->destTape = 0;
1791 }
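
/*
 * To illustrate steps D3/D4: with maxTapes = 4 (three input tapes plus
 * one output), the successive "perfect" run distributions are
 *
 *              Level 1:  1  1  1               (total  3)
 *              Level 2:  2  2  1               (total  5)
 *              Level 3:  4  3  2               (total  9)
 *              Level 4:  7  6  4               (total 17)
 *
 * the generalized Fibonacci numbers of Knuth's Algorithm D.  When the
 * actual run count falls short of a perfect level, the shortfall is
 * carried as dummy runs in tp_dummy[].
 */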
1792
1793 /*
1794  * mergeruns -- merge all the completed initial runs.
1795  *
1796  * This implements steps D5, D6 of Algorithm D.  All input data has
1797  * already been written to initial runs on tape (see dumptuples).
1798  */
1799 static void
1800 mergeruns(Tuplesortstate *state)
1801 {
1802         int                     tapenum,
1803                                 svTape,
1804                                 svRuns,
1805                                 svDummy;
1806
1807         Assert(state->status == TSS_BUILDRUNS);
1808         Assert(state->memtupcount == 0);
1809
1810         /*
1811          * If we produced only one initial run (quite likely if the total data
1812          * volume is between 1X and 2X workMem), we can just use that tape as the
1813          * finished output, rather than doing a useless merge.  (This obvious
1814          * optimization is not in Knuth's algorithm.)
1815          */
1816         if (state->currentRun == 1)
1817         {
1818                 state->result_tape = state->tp_tapenum[state->destTape];
1819                 /* must freeze and rewind the finished output tape */
1820                 LogicalTapeFreeze(state->tapeset, state->result_tape);
1821                 state->status = TSS_SORTEDONTAPE;
1822                 return;
1823         }
1824
1825         /* End of step D2: rewind all output tapes to prepare for merging */
1826         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
1827                 LogicalTapeRewind(state->tapeset, tapenum, false);
1828
1829         for (;;)
1830         {
1831                 /*
1832                  * At this point we know that tape[T] is empty.  If there's just one
1833                  * (real or dummy) run left on each input tape, then only one merge
1834                  * pass remains.  If we don't have to produce a materialized sorted
1835                  * tape, we can stop at this point and do the final merge on-the-fly.
1836                  */
1837                 if (!state->randomAccess)
1838                 {
1839                         bool            allOneRun = true;
1840
1841                         Assert(state->tp_runs[state->tapeRange] == 0);
1842                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
1843                         {
1844                                 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
1845                                 {
1846                                         allOneRun = false;
1847                                         break;
1848                                 }
1849                         }
1850                         if (allOneRun)
1851                         {
1852                                 /* Tell logtape.c we won't be writing anymore */
1853                                 LogicalTapeSetForgetFreeSpace(state->tapeset);
1854                                 /* Initialize for the final merge pass */
1855                                 beginmerge(state);
1856                                 state->status = TSS_FINALMERGE;
1857                                 return;
1858                         }
1859                 }
1860
1861                 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
1862                 while (state->tp_runs[state->tapeRange - 1] ||
1863                            state->tp_dummy[state->tapeRange - 1])
1864                 {
1865                         bool            allDummy = true;
1866
1867                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
1868                         {
1869                                 if (state->tp_dummy[tapenum] == 0)
1870                                 {
1871                                         allDummy = false;
1872                                         break;
1873                                 }
1874                         }
1875
1876                         if (allDummy)
1877                         {
1878                                 state->tp_dummy[state->tapeRange]++;
1879                                 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
1880                                         state->tp_dummy[tapenum]--;
1881                         }
1882                         else
1883                                 mergeonerun(state);
1884                 }
1885
1886                 /* Step D6: decrease level */
1887                 if (--state->Level == 0)
1888                         break;
1889                 /* rewind output tape T to use as new input */
1890                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange],
1891                                                   false);
1892                 /* rewind used-up input tape P, and prepare it for write pass */
1893                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1],
1894                                                   true);
1895                 state->tp_runs[state->tapeRange - 1] = 0;
1896
1897                 /*
1898                  * reassign tape units per step D6; note we no longer care about A[]
1899                  */
1900                 svTape = state->tp_tapenum[state->tapeRange];
1901                 svDummy = state->tp_dummy[state->tapeRange];
1902                 svRuns = state->tp_runs[state->tapeRange];
1903                 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
1904                 {
1905                         state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
1906                         state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
1907                         state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
1908                 }
1909                 state->tp_tapenum[0] = svTape;
1910                 state->tp_dummy[0] = svDummy;
1911                 state->tp_runs[0] = svRuns;
1912         }
1913
1914         /*
1915          * Done.  Knuth says that the result is on TAPE[1], but since we exited
1916          * the loop without performing the last iteration of step D6, we have not
1917          * rearranged the tape unit assignment, and therefore the result is on
1918          * TAPE[T].  We need to do it this way so that we can freeze the final
1919  * output tape while rewinding it.  The last iteration of step D6 would be
1920          * a waste of cycles anyway...
1921          */
1922         state->result_tape = state->tp_tapenum[state->tapeRange];
1923         LogicalTapeFreeze(state->tapeset, state->result_tape);
1924         state->status = TSS_SORTEDONTAPE;
1925 }
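
/*
 * A small trace of steps D5/D6, assuming the level-2 distribution (2,2,1)
 * shown under selectnewtape: the first D5 pass merges one run from each
 * input tape onto tape T, leaving runs (1,1,0) and emptying tape P; D6
 * then rotates the tape assignment so the old output tape becomes an
 * input, and the next pass merges the remaining three runs into the
 * single final run.
 */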
1926
1927 /*
1928  * Merge one run from each input tape, except ones with dummy runs.
1929  *
1930  * This is the inner loop of Algorithm D step D5.  We know that the
1931  * output tape is TAPE[T].
1932  */
1933 static void
1934 mergeonerun(Tuplesortstate *state)
1935 {
1936         int                     destTape = state->tp_tapenum[state->tapeRange];
1937         int                     srcTape;
1938         int                     tupIndex;
1939         SortTuple  *tup;
1940         long            priorAvail,
1941                                 spaceFreed;
1942
1943         /*
1944          * Start the merge by loading one tuple from each active source tape into
1945          * the heap.  We can also decrease the input run/dummy run counts.
1946          */
1947         beginmerge(state);
1948
1949         /*
1950          * Execute merge by repeatedly extracting lowest tuple in heap, writing it
1951          * out, and replacing it with next tuple from same tape (if there is
1952          * another one).
1953          */
1954         while (state->memtupcount > 0)
1955         {
1956                 /* write the tuple to destTape */
1957                 priorAvail = state->availMem;
1958                 srcTape = state->memtuples[0].tupindex;
1959                 WRITETUP(state, destTape, &state->memtuples[0]);
1960                 /* writetup adjusted total free space, now fix per-tape space */
1961                 spaceFreed = state->availMem - priorAvail;
1962                 state->mergeavailmem[srcTape] += spaceFreed;
1963                 /* compact the heap */
1964                 tuplesort_heap_siftup(state, false);
1965                 if ((tupIndex = state->mergenext[srcTape]) == 0)
1966                 {
1967                         /* out of preloaded data on this tape, try to read more */
1968                         mergepreread(state);
1969                         /* if still no data, we've reached end of run on this tape */
1970                         if ((tupIndex = state->mergenext[srcTape]) == 0)
1971                                 continue;
1972                 }
1973                 /* pull next preread tuple from list, insert in heap */
1974                 tup = &state->memtuples[tupIndex];
1975                 state->mergenext[srcTape] = tup->tupindex;
1976                 if (state->mergenext[srcTape] == 0)
1977                         state->mergelast[srcTape] = 0;
1978                 tuplesort_heap_insert(state, tup, srcTape, false);
1979                 /* put the now-unused memtuples entry on the freelist */
1980                 tup->tupindex = state->mergefreelist;
1981                 state->mergefreelist = tupIndex;
1982                 state->mergeavailslots[srcTape]++;
1983         }
1984
1985         /*
1986          * When the heap empties, we're done.  Write an end-of-run marker on the
1987          * output tape, and increment its count of real runs.
1988          */
1989         markrunend(state, destTape);
1990         state->tp_runs[state->tapeRange]++;
1991
1992 #ifdef TRACE_SORT
1993         if (trace_sort)
1994                 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
1995                          pg_rusage_show(&state->ru_start));
1996 #endif
1997 }
1998
1999 /*
2000  * beginmerge - initialize for a merge pass
2001  *
2002  * We decrease the counts of real and dummy runs for each tape, and mark
2003  * which tapes contain active input runs in mergeactive[].  Then, load
2004  * as many tuples as we can from each active input tape, and finally
2005  * fill the merge heap with the first tuple from each active tape.
2006  */
2007 static void
2008 beginmerge(Tuplesortstate *state)
2009 {
2010         int                     activeTapes;
2011         int                     tapenum;
2012         int                     srcTape;
2013         int                     slotsPerTape;
2014         long            spacePerTape;
2015
2016         /* Heap should be empty here */
2017         Assert(state->memtupcount == 0);
2018
2019         /* Adjust run counts and mark the active tapes */
2020         memset(state->mergeactive, 0,
2021                    state->maxTapes * sizeof(*state->mergeactive));
2022         activeTapes = 0;
2023         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2024         {
2025                 if (state->tp_dummy[tapenum] > 0)
2026                         state->tp_dummy[tapenum]--;
2027                 else
2028                 {
2029                         Assert(state->tp_runs[tapenum] > 0);
2030                         state->tp_runs[tapenum]--;
2031                         srcTape = state->tp_tapenum[tapenum];
2032                         state->mergeactive[srcTape] = true;
2033                         activeTapes++;
2034                 }
2035         }
2036         state->activeTapes = activeTapes;
2037
2038         /* Clear merge-pass state variables */
2039         memset(state->mergenext, 0,
2040                    state->maxTapes * sizeof(*state->mergenext));
2041         memset(state->mergelast, 0,
2042                    state->maxTapes * sizeof(*state->mergelast));
2043         state->mergefreelist = 0;       /* nothing in the freelist */
2044         state->mergefirstfree = activeTapes;            /* 1st slot avail for preread */
2045
2046         /*
2047          * Initialize space allocation to let each active input tape have an equal
2048          * share of preread space.
2049          */
2050         Assert(activeTapes > 0);
2051         slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
2052         Assert(slotsPerTape > 0);
2053         spacePerTape = state->availMem / activeTapes;
2054         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2055         {
2056                 if (state->mergeactive[srcTape])
2057                 {
2058                         state->mergeavailslots[srcTape] = slotsPerTape;
2059                         state->mergeavailmem[srcTape] = spacePerTape;
2060                 }
2061         }
2062
2063         /*
2064          * Preread as many tuples as possible (and at least one) from each active
2065          * tape
2066          */
2067         mergepreread(state);
2068
2069         /* Load the merge heap with the first tuple from each input tape */
2070         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2071         {
2072                 int                     tupIndex = state->mergenext[srcTape];
2073                 SortTuple  *tup;
2074
2075                 if (tupIndex)
2076                 {
2077                         tup = &state->memtuples[tupIndex];
2078                         state->mergenext[srcTape] = tup->tupindex;
2079                         if (state->mergenext[srcTape] == 0)
2080                                 state->mergelast[srcTape] = 0;
2081                         tuplesort_heap_insert(state, tup, srcTape, false);
2082                         /* put the now-unused memtuples entry on the freelist */
2083                         tup->tupindex = state->mergefreelist;
2084                         state->mergefreelist = tupIndex;
2085                         state->mergeavailslots[srcTape]++;
2086                 }
2087         }
2088 }
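
/*
 * Illustrative numbers: with memtupsize = 1024 and four active tapes,
 * mergefirstfree starts at 4 and each tape receives slotsPerTape =
 * (1024 - 4) / 4 = 255 preread slots, plus a quarter of availMem.
 */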
2089
2090 /*
2091  * mergepreread - load tuples from merge input tapes
2092  *
2093  * This routine exists to improve sequentiality of reads during a merge pass,
2094  * as explained in the header comments of this file.  Load tuples from each
2095  * active source tape until the tape's run is exhausted or it has used up
2096  * its fair share of available memory.  In any case, we guarantee that there
2097  * is at least one preread tuple available from each unexhausted input tape.
2098  *
2099  * We invoke this routine at the start of a merge pass for initial load,
2100  * and then whenever any tape's preread data runs out.  Note that we load
2101  * as much data as possible from all tapes, not just the one that ran out.
2102  * This is because logtape.c works best with a usage pattern that alternates
2103  * between reading a lot of data and writing a lot of data, so whenever we
2104  * are forced to read, we should fill working memory completely.
2105  *
2106  * In FINALMERGE state, we *don't* use this routine, but instead just preread
2107  * from the single tape that ran dry.  There's no read/write alternation in
2108  * that state and so no point in scanning through all the tapes to fix one.
2109  * (Moreover, there may be quite a lot of inactive tapes in that state, since
2110  * we might have had many fewer runs than tapes.  In a regular tape-to-tape
2111  * merge we can expect most of the tapes to be active.)
2112  */
2113 static void
2114 mergepreread(Tuplesortstate *state)
2115 {
2116         int                     srcTape;
2117
2118         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2119                 mergeprereadone(state, srcTape);
2120 }
2121
2122 /*
2123  * mergeprereadone - load tuples from one merge input tape
2124  *
2125  * Read tuples from the specified tape until it has used up its free memory
2126  * or array slots; but ensure that we have at least one tuple, if any are
2127  * to be had.
2128  */
2129 static void
2130 mergeprereadone(Tuplesortstate *state, int srcTape)
2131 {
2132         unsigned int tuplen;
2133         SortTuple       stup;
2134         int                     tupIndex;
2135         long            priorAvail,
2136                                 spaceUsed;
2137
2138         if (!state->mergeactive[srcTape])
2139                 return;                                 /* tape's run is already exhausted */
2140         priorAvail = state->availMem;
2141         state->availMem = state->mergeavailmem[srcTape];
2142         while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) ||
2143                    state->mergenext[srcTape] == 0)
2144         {
2145                 /* read next tuple, if any */
2146                 if ((tuplen = getlen(state, srcTape, true)) == 0)
2147                 {
2148                         state->mergeactive[srcTape] = false;
2149                         break;
2150                 }
2151                 READTUP(state, &stup, srcTape, tuplen);
2152                 /* find a free slot in memtuples[] for it */
2153                 tupIndex = state->mergefreelist;
2154                 if (tupIndex)
2155                         state->mergefreelist = state->memtuples[tupIndex].tupindex;
2156                 else
2157                 {
2158                         tupIndex = state->mergefirstfree++;
2159                         Assert(tupIndex < state->memtupsize);
2160                 }
2161                 state->mergeavailslots[srcTape]--;
2162                 /* store tuple, append to list for its tape */
2163                 stup.tupindex = 0;
2164                 state->memtuples[tupIndex] = stup;
2165                 if (state->mergelast[srcTape])
2166                         state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
2167                 else
2168                         state->mergenext[srcTape] = tupIndex;
2169                 state->mergelast[srcTape] = tupIndex;
2170         }
2171         /* update per-tape and global availmem counts */
2172         spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
2173         state->mergeavailmem[srcTape] = state->availMem;
2174         state->availMem = priorAvail - spaceUsed;
2175 }
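
/*
 * The bookkeeping above threads, for each source tape, a singly-linked
 * list through memtuples[] via the tupindex fields: mergenext[srcTape]
 * points at the head (the next tuple to enter the merge heap),
 * mergelast[srcTape] at the tail, and a zero tupindex terminates the
 * list.  Slots vacated by the heap are chained onto mergefreelist for
 * reuse, so memtuples[] entries circulate rather than leak during a
 * merge.
 */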
2176
2177 /*
2178  * dumptuples - remove tuples from heap and write to tape
2179  *
2180  * This is used during initial-run building, but not during merging.
2181  *
2182  * When alltuples = false, dump only enough tuples to get under the
2183  * availMem limit (and leave at least one tuple in the heap in any case,
2184  * since puttuple assumes it always has a tuple to compare to).  We also
2185  * insist there be at least one free slot in the memtuples[] array.
2186  *
2187  * When alltuples = true, dump everything currently in memory.
2188  * (This case is only used at end of input data.)
2189  *
2190  * If we empty the heap, close out the current run and return (this should
2191  * only happen at end of input data).  If we see that the tuple run number
2192  * at the top of the heap has changed, start a new run.
2193  */
2194 static void
2195 dumptuples(Tuplesortstate *state, bool alltuples)
2196 {
2197         while (alltuples ||
2198                    (LACKMEM(state) && state->memtupcount > 1) ||
2199                    state->memtupcount >= state->memtupsize)
2200         {
2201                 /*
2202                  * Dump the heap's frontmost entry, and sift up to remove it from the
2203                  * heap.
2204                  */
2205                 Assert(state->memtupcount > 0);
2206                 WRITETUP(state, state->tp_tapenum[state->destTape],
2207                                  &state->memtuples[0]);
2208                 tuplesort_heap_siftup(state, true);
2209
2210                 /*
2211                  * If the heap is empty *or* top run number has changed, we've
2212                  * finished the current run.
2213                  */
2214                 if (state->memtupcount == 0 ||
2215                         state->currentRun != state->memtuples[0].tupindex)
2216                 {
2217                         markrunend(state, state->tp_tapenum[state->destTape]);
2218                         state->currentRun++;
2219                         state->tp_runs[state->destTape]++;
2220                         state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
2221
2222 #ifdef TRACE_SORT
2223                         if (trace_sort)
2224                                 elog(LOG, "finished writing%s run %d to tape %d: %s",
2225                                          (state->memtupcount == 0) ? " final" : "",
2226                                          state->currentRun, state->destTape,
2227                                          pg_rusage_show(&state->ru_start));
2228 #endif
2229
2230                         /*
2231                          * Done if heap is empty, else prepare for new run.
2232                          */
2233                         if (state->memtupcount == 0)
2234                                 break;
2235                         Assert(state->currentRun == state->memtuples[0].tupindex);
2236                         selectnewtape(state);
2237                 }
2238         }
2239 }
2240
2241 /*
2242  * tuplesort_rescan             - rewind and replay the scan
2243  */
2244 void
2245 tuplesort_rescan(Tuplesortstate *state)
2246 {
2247         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2248
2249         Assert(state->randomAccess);
2250
2251         switch (state->status)
2252         {
2253                 case TSS_SORTEDINMEM:
2254                         state->current = 0;
2255                         state->eof_reached = false;
2256                         state->markpos_offset = 0;
2257                         state->markpos_eof = false;
2258                         break;
2259                 case TSS_SORTEDONTAPE:
2260                         LogicalTapeRewind(state->tapeset,
2261                                                           state->result_tape,
2262                                                           false);
2263                         state->eof_reached = false;
2264                         state->markpos_block = 0L;
2265                         state->markpos_offset = 0;
2266                         state->markpos_eof = false;
2267                         break;
2268                 default:
2269                         elog(ERROR, "invalid tuplesort state");
2270                         break;
2271         }
2272
2273         MemoryContextSwitchTo(oldcontext);
2274 }
2275
2276 /*
2277  * tuplesort_markpos    - saves current position in the merged sort file
2278  */
2279 void
2280 tuplesort_markpos(Tuplesortstate *state)
2281 {
2282         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2283
2284         Assert(state->randomAccess);
2285
2286         switch (state->status)
2287         {
2288                 case TSS_SORTEDINMEM:
2289                         state->markpos_offset = state->current;
2290                         state->markpos_eof = state->eof_reached;
2291                         break;
2292                 case TSS_SORTEDONTAPE:
2293                         LogicalTapeTell(state->tapeset,
2294                                                         state->result_tape,
2295                                                         &state->markpos_block,
2296                                                         &state->markpos_offset);
2297                         state->markpos_eof = state->eof_reached;
2298                         break;
2299                 default:
2300                         elog(ERROR, "invalid tuplesort state");
2301                         break;
2302         }
2303
2304         MemoryContextSwitchTo(oldcontext);
2305 }
2306
2307 /*
2308  * tuplesort_restorepos - restores current position in merged sort file to
2309  *                                                last saved position
2310  */
2311 void
2312 tuplesort_restorepos(Tuplesortstate *state)
2313 {
2314         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2315
2316         Assert(state->randomAccess);
2317
2318         switch (state->status)
2319         {
2320                 case TSS_SORTEDINMEM:
2321                         state->current = state->markpos_offset;
2322                         state->eof_reached = state->markpos_eof;
2323                         break;
2324                 case TSS_SORTEDONTAPE:
2325                         if (!LogicalTapeSeek(state->tapeset,
2326                                                                  state->result_tape,
2327                                                                  state->markpos_block,
2328                                                                  state->markpos_offset))
2329                                 elog(ERROR, "tuplesort_restorepos failed");
2330                         state->eof_reached = state->markpos_eof;
2331                         break;
2332                 default:
2333                         elog(ERROR, "invalid tuplesort state");
2334                         break;
2335         }
2336
2337         MemoryContextSwitchTo(oldcontext);
2338 }
2339
2340 /*
2341  * tuplesort_get_stats - extract summary statistics
2342  *
2343  * This can be called after tuplesort_performsort() finishes to obtain
2344  * printable summary information about how the sort was performed.
2345  * spaceUsed is measured in kilobytes.
2346  */
2347 void
2348 tuplesort_get_stats(Tuplesortstate *state,
2349                                         const char **sortMethod,
2350                                         const char **spaceType,
2351                                         long *spaceUsed)
2352 {
2353         /*
2354          * Note: it might seem we should provide both memory and disk usage for a
2355          * disk-based sort.  However, the current code doesn't track memory space
2356          * accurately once we have begun to return tuples to the caller (since we
2357          * don't account for pfree's the caller is expected to do), so we cannot
2358          * rely on availMem in a disk sort.  This does not seem worth the overhead
2359          * to fix.      Is it worth creating an API for the memory context code to
2360          * to fix.  Is it worth creating an API for the memory context code to
2361          */
2362         if (state->tapeset)
2363         {
2364                 *spaceType = "Disk";
2365                 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
2366         }
2367         else
2368         {
2369                 *spaceType = "Memory";
2370                 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
2371         }
2372
2373         switch (state->status)
2374         {
2375                 case TSS_SORTEDINMEM:
2376                         if (state->boundUsed)
2377                                 *sortMethod = "top-N heapsort";
2378                         else
2379                                 *sortMethod = "quicksort";
2380                         break;
2381                 case TSS_SORTEDONTAPE:
2382                         *sortMethod = "external sort";
2383                         break;
2384                 case TSS_FINALMERGE:
2385                         *sortMethod = "external merge";
2386                         break;
2387                 default:
2388                         *sortMethod = "still in progress";
2389                         break;
2390         }
2391 }
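
/*
 * These strings surface in EXPLAIN ANALYZE output, e.g. (illustrative):
 *
 *              Sort Method: quicksort  Memory: 25kB
 *              Sort Method: external merge  Disk: 2072kB
 */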
2392
2393
2394 /*
2395  * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
2396  *
2397  * Compare two SortTuples.  If checkIndex is true, use the tuple index
2398  * as the front of the sort key; otherwise, compare on the sort key alone.
2399  */
2400
2401 #define HEAPCOMPARE(tup1,tup2) \
2402         (checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \
2403          ((tup1)->tupindex) - ((tup2)->tupindex) : \
2404          COMPARETUP(state, tup1, tup2))
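
/*
 * With checkIndex true, the run number dominates: while building runs, a
 * tuple tagged with run number N+1 sorts after every run-N tuple no
 * matter what its key is, which is how the heap keeps tuples destined
 * for the next run from leaking into the current one.
 */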
2405
2406 /*
2407  * Convert the existing unordered array of SortTuples to a bounded heap,
2408  * discarding all but the smallest "state->bound" tuples.
2409  *
2410  * When working with a bounded heap, we want to keep the largest entry
2411  * at the root (array entry zero), instead of the smallest as in the normal
2412  * sort case.  This allows us to discard the largest entry cheaply.
2413  * Therefore, we temporarily reverse the sort direction.
2414  *
2415  * We assume that all entries in a bounded heap will always have tupindex
2416  * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
2417  * the direction of comparison for tupindexes.
2418  */
2419 static void
2420 make_bounded_heap(Tuplesortstate *state)
2421 {
2422         int                     tupcount = state->memtupcount;
2423         int                     i;
2424
2425         Assert(state->status == TSS_INITIAL);
2426         Assert(state->bounded);
2427         Assert(tupcount >= state->bound);
2428
2429         /* Reverse sort direction so largest entry will be at root */
2430         REVERSEDIRECTION(state);
2431
2432         state->memtupcount = 0;         /* make the heap empty */
2433         for (i = 0; i < tupcount; i++)
2434         {
2435                 if (state->memtupcount >= state->bound &&
2436                   COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
2437                 {
2438                         /* New tuple would just get thrown out, so skip it */
2439                         free_sort_tuple(state, &state->memtuples[i]);
2440                         CHECK_FOR_INTERRUPTS();
2441                 }
2442                 else
2443                 {
2444                         /* Insert next tuple into heap */
2445                         /* Must copy source tuple to avoid possible overwrite */
2446                         SortTuple       stup = state->memtuples[i];
2447
2448                         tuplesort_heap_insert(state, &stup, 0, false);
2449
2450                         /* If heap too full, discard largest entry */
2451                         if (state->memtupcount > state->bound)
2452                         {
2453                                 free_sort_tuple(state, &state->memtuples[0]);
2454                                 tuplesort_heap_siftup(state, false);
2455                         }
2456                 }
2457         }
2458
2459         Assert(state->memtupcount == state->bound);
2460         state->status = TSS_BOUNDED;
2461 }
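/*
 * The loop above, reduced to plain ints, is the standard "keep the N
 * smallest values" pattern over a max-heap (a sketch with hypothetical
 * helpers max_heap_insert() and max_heap_sift_down(), not code used by
 * this file):
 *
 *              if (heapcount < N)
 *                      max_heap_insert(heap, &heapcount, v);
 *              else if (v < heap[0])
 *              {
 *                      heap[0] = v;
 *                      max_heap_sift_down(heap, heapcount, 0);
 *              }
 *
 * tuplesort gets the max-heap behavior by reversing its comparators
 * (REVERSEDIRECTION) so the ordinary min-heap routines can be reused, and
 * it must also pfree each displaced tuple explicitly.
 */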
2462
2463 /*
2464  * Convert the bounded heap to a properly-sorted array
2465  */
2466 static void
2467 sort_bounded_heap(Tuplesortstate *state)
2468 {
2469         int                     tupcount = state->memtupcount;
2470
2471         Assert(state->status == TSS_BOUNDED);
2472         Assert(state->bounded);
2473         Assert(tupcount == state->bound);
2474
2475         /*
2476          * We can unheapify in place because each sift-up will remove the largest
2477          * entry, which we can promptly store in the newly freed slot at the end.
2478          * Once we're down to a single-entry heap, we're done.
2479          */
2480         while (state->memtupcount > 1)
2481         {
2482                 SortTuple       stup = state->memtuples[0];
2483
2484                 /* this sifts-up the next-largest entry and decreases memtupcount */
2485                 tuplesort_heap_siftup(state, false);
2486                 state->memtuples[state->memtupcount] = stup;
2487         }
2488         state->memtupcount = tupcount;
2489
2490         /*
2491          * Reverse sort direction back to the original state.  This is not
2492          * actually necessary but seems like a good idea for tidiness.
2493          */
2494         REVERSEDIRECTION(state);
2495
2496         state->status = TSS_SORTEDINMEM;
2497         state->boundUsed = true;
2498 }
2499
2500 /*
2501  * Insert a new tuple into an empty or existing heap, maintaining the
2502  * heap invariant.  Caller is responsible for ensuring there's room.
2503  *
2504  * Note: we assume *tuple is a temporary variable that can be scribbled on.
2505  * For some callers, tuple actually points to a memtuples[] entry above the
2506  * end of the heap.  This is safe as long as it's not immediately adjacent
2507  * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
2508  * is, it might get overwritten before being moved into the heap!
2509  */
2510 static void
2511 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
2512                                           int tupleindex, bool checkIndex)
2513 {
2514         SortTuple  *memtuples;
2515         int                     j;
2516
2517         /*
2518          * Save the tupleindex --- see notes above about writing on *tuple. It's a
2519          * historical artifact that tupleindex is passed as a separate argument
2520          * and not in *tuple, but it's notationally convenient so let's leave it
2521          * that way.
2522          */
2523         tuple->tupindex = tupleindex;
2524
2525         memtuples = state->memtuples;
2526         Assert(state->memtupcount < state->memtupsize);
2527
2528         CHECK_FOR_INTERRUPTS();
2529
2530         /*
2531          * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
2532          * using 1-based array indexes, not 0-based.
2533          */
2534         j = state->memtupcount++;
2535         while (j > 0)
2536         {
2537                 int                     i = (j - 1) >> 1;
2538
2539                 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
2540                         break;
2541                 memtuples[j] = memtuples[i];
2542                 j = i;
2543         }
2544         memtuples[j] = *tuple;
2545 }
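/*
 * The index arithmetic above, for reference: with 0-based arrays the
 * parent of slot j is (j - 1) >> 1, and the children of slot i are
 * 2*i + 1 and 2*i + 2; Knuth's 1-based formulation has parent j/2 and
 * children 2*i, 2*i + 1.  For example, slot 4's parent is (4 - 1) >> 1 = 1.
 */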
2546
2547 /*
2548  * The tuple at state->memtuples[0] has been removed from the heap.
2549  * Decrement memtupcount, and sift up to maintain the heap invariant.
2550  */
2551 static void
2552 tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
2553 {
2554         SortTuple  *memtuples = state->memtuples;
2555         SortTuple  *tuple;
2556         int                     i,
2557                                 n;
2558
2559         if (--state->memtupcount <= 0)
2560                 return;
2561
2562         CHECK_FOR_INTERRUPTS();
2563
2564         n = state->memtupcount;
2565         tuple = &memtuples[n];          /* tuple that must be reinserted */
2566         i = 0;                                          /* i is where the "hole" is */
2567         for (;;)
2568         {
2569                 int                     j = 2 * i + 1;
2570
2571                 if (j >= n)
2572                         break;
2573                 if (j + 1 < n &&
2574                         HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
2575                         j++;
2576                 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
2577                         break;
2578                 memtuples[i] = memtuples[j];
2579                 i = j;
2580         }
2581         memtuples[i] = *tuple;
2582 }
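/*
 * The same sift-down loop on a plain int min-heap, for reference (a
 * self-contained sketch, not part of this file); v is the value being
 * reinserted at the root hole:
 *
 *              static void
 *              int_heap_sift_down(int *a, int n, int v)
 *              {
 *                      int                     i = 0;
 *                      int                     j;
 *
 *                      while ((j = 2 * i + 1) < n)
 *                      {
 *                              if (j + 1 < n && a[j + 1] < a[j])
 *                                      j++;
 *                              if (v <= a[j])
 *                                      break;
 *                              a[i] = a[j];
 *                              i = j;
 *                      }
 *                      a[i] = v;
 *              }
 */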
2583
2584
2585 /*
2586  * Tape interface routines
2587  */
2588
2589 static unsigned int
2590 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
2591 {
2592         unsigned int len;
2593
2594         if (LogicalTapeRead(state->tapeset, tapenum,
2595                                                 &len, sizeof(len)) != sizeof(len))
2596                 elog(ERROR, "unexpected end of tape");
2597         if (len == 0 && !eofOK)
2598                 elog(ERROR, "unexpected end of data");
2599         return len;
2600 }
2601
2602 static void
2603 markrunend(Tuplesortstate *state, int tapenum)
2604 {
2605         unsigned int len = 0;
2606
2607         LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
2608 }
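/*
 * Layout of one run on tape, as implied by getlen() and markrunend()
 * (each "len" word counts itself plus the tuple body; the parenthesized
 * trailing copy exists only under randomAccess):
 *
 *              [len][tuple body]([len]) [len][tuple body]([len]) ... [0]
 *
 * The zero length word written by markrunend() terminates the run, which
 * is why getlen() rejects len == 0 unless the caller passed eofOK.
 */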
2609
2610
2611 /*
2612  * Inline-able copy of FunctionCall2Coll() to save some cycles in sorting.
2613  */
2614 static inline Datum
2615 myFunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
2616 {
2617         FunctionCallInfoData fcinfo;
2618         Datum           result;
2619
2620         InitFunctionCallInfoData(fcinfo, flinfo, 2, collation, NULL, NULL);
2621
2622         fcinfo.arg[0] = arg1;
2623         fcinfo.arg[1] = arg2;
2624         fcinfo.argnull[0] = false;
2625         fcinfo.argnull[1] = false;
2626
2627         result = FunctionCallInvoke(&fcinfo);
2628
2629         /* Check for null result, since caller is clearly not expecting one */
2630         if (fcinfo.isnull)
2631                 elog(ERROR, "function %u returned NULL", fcinfo.flinfo->fn_oid);
2632
2633         return result;
2634 }
2635
2636 /*
2637  * Apply a sort function (by now converted to fmgr lookup form)
2638  * and return a 3-way comparison result.  This takes care of handling
2639  * reverse-sort and NULLs-ordering properly.  We assume that DESC and
2640  * NULLS_FIRST options are encoded in sk_flags the same way btree does it.
2641  */
2642 static inline int32
2643 inlineApplySortFunction(FmgrInfo *sortFunction, int sk_flags, Oid collation,
2644                                                 Datum datum1, bool isNull1,
2645                                                 Datum datum2, bool isNull2)
2646 {
2647         int32           compare;
2648
2649         if (isNull1)
2650         {
2651                 if (isNull2)
2652                         compare = 0;            /* NULL "=" NULL */
2653                 else if (sk_flags & SK_BT_NULLS_FIRST)
2654                         compare = -1;           /* NULL "<" NOT_NULL */
2655                 else
2656                         compare = 1;            /* NULL ">" NOT_NULL */
2657         }
2658         else if (isNull2)
2659         {
2660                 if (sk_flags & SK_BT_NULLS_FIRST)
2661                         compare = 1;            /* NOT_NULL ">" NULL */
2662                 else
2663                         compare = -1;           /* NOT_NULL "<" NULL */
2664         }
2665         else
2666         {
2667                 compare = DatumGetInt32(myFunctionCall2Coll(sortFunction, collation,
2668                                                                                                         datum1, datum2));
2669
2670                 if (sk_flags & SK_BT_DESC)
2671                         compare = -compare;
2672         }
2673
2674         return compare;
2675 }
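/*
 * Net effect of the flag handling above, tabulated (NULLS FIRST case
 * shown; without SK_BT_NULLS_FIRST the two NULL-vs-non-NULL rows invert):
 *
 *              datum1          datum2          result
 *              NULL            NULL            0
 *              NULL            not null        -1
 *              not null        NULL            +1
 *              not null        not null        cmp(datum1, datum2), negated under SK_BT_DESC
 */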
2676
2677
2678 /*
2679  * Routines specialized for HeapTuple (actually MinimalTuple) case
2680  */
2681
2682 static int
2683 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
2684 {
2685         SortSupport     sortKey = state->sortKeys;
2686         HeapTupleData ltup;
2687         HeapTupleData rtup;
2688         TupleDesc       tupDesc;
2689         int                     nkey;
2690         int32           compare;
2691
2692         /* Compare the leading sort key */
2693         compare = ApplySortComparator(a->datum1, a->isnull1,
2694                                                                   b->datum1, b->isnull1,
2695                                                                   sortKey);
2696         if (compare != 0)
2697                 return compare;
2698
2699         /* Compare additional sort keys */
2700         ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
2701         ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
2702         rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
2703         rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
2704         tupDesc = state->tupDesc;
2705         sortKey++;
2706         for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
2707         {
2708                 AttrNumber      attno = sortKey->ssup_attno;
2709                 Datum           datum1,
2710                                         datum2;
2711                 bool            isnull1,
2712                                         isnull2;
2713
2714                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
2715                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
2716
2717                 compare = ApplySortComparator(datum1, isnull1,
2718                                                                           datum2, isnull2,
2719                                                                           sortKey);
2720                 if (compare != 0)
2721                         return compare;
2722         }
2723
2724         return 0;
2725 }
2726
2727 static void
2728 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
2729 {
2730         /*
2731          * We expect the passed "tup" to be a TupleTableSlot, and form a
2732          * MinimalTuple using the exported interface for that.
2733          */
2734         TupleTableSlot *slot = (TupleTableSlot *) tup;
2735         MinimalTuple tuple;
2736         HeapTupleData htup;
2737
2738         /* copy the tuple into sort storage */
2739         tuple = ExecCopySlotMinimalTuple(slot);
2740         stup->tuple = (void *) tuple;
2741         USEMEM(state, GetMemoryChunkSpace(tuple));
2742         /* set up first-column key value */
2743         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
2744         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
2745         stup->datum1 = heap_getattr(&htup,
2746                                                                 state->sortKeys[0].ssup_attno,
2747                                                                 state->tupDesc,
2748                                                                 &stup->isnull1);
2749 }
2750
2751 static void
2752 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
2753 {
2754         MinimalTuple tuple = (MinimalTuple) stup->tuple;
2755
2756         /* the part of the MinimalTuple we'll write: */
2757         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
2758         unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
2759
2760         /* total on-disk footprint: */
2761         unsigned int tuplen = tupbodylen + sizeof(int);
2762
2763         LogicalTapeWrite(state->tapeset, tapenum,
2764                                          (void *) &tuplen, sizeof(tuplen));
2765         LogicalTapeWrite(state->tapeset, tapenum,
2766                                          (void *) tupbody, tupbodylen);
2767         if (state->randomAccess)        /* need trailing length word? */
2768                 LogicalTapeWrite(state->tapeset, tapenum,
2769                                                  (void *) &tuplen, sizeof(tuplen));
2770
2771         FREEMEM(state, GetMemoryChunkSpace(tuple));
2772         heap_free_minimal_tuple(tuple);
2773 }
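/*
 * Resulting on-tape record for one MinimalTuple (the trailing copy of
 * tuplen exists only under randomAccess, which lets the tape be read
 * backwards):
 *
 *              [tuplen][tuple body from MINIMAL_TUPLE_DATA_OFFSET onward]([tuplen])
 *
 * tuplen counts the body plus the leading length word itself, which is
 * what readtup_heap() subtracts back out.
 */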
2774
2775 static void
2776 readtup_heap(Tuplesortstate *state, SortTuple *stup,
2777                          int tapenum, unsigned int len)
2778 {
2779         unsigned int tupbodylen = len - sizeof(int);
2780         unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
2781         MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
2782         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
2783         HeapTupleData htup;
2784
2785         USEMEM(state, GetMemoryChunkSpace(tuple));
2786         /* read in the tuple proper */
2787         tuple->t_len = tuplen;
2788         LogicalTapeReadExact(state->tapeset, tapenum,
2789                                                  tupbody, tupbodylen);
2790         if (state->randomAccess)        /* need trailing length word? */
2791                 LogicalTapeReadExact(state->tapeset, tapenum,
2792                                                          &tuplen, sizeof(tuplen));
2793         stup->tuple = (void *) tuple;
2794         /* set up first-column key value */
2795         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
2796         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
2797         stup->datum1 = heap_getattr(&htup,
2798                                                                 state->sortKeys[0].ssup_attno,
2799                                                                 state->tupDesc,
2800                                                                 &stup->isnull1);
2801 }
2802
2803 static void
2804 reversedirection_heap(Tuplesortstate *state)
2805 {
2806         SortSupport     sortKey = state->sortKeys;
2807         int                     nkey;
2808
2809         for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
2810         {
2811                 sortKey->ssup_reverse = !sortKey->ssup_reverse;
2812                 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
2813         }
2814 }
2815
2816
2817 /*
2818  * Routines specialized for the CLUSTER case (HeapTuple data, with
2819  * comparisons per a btree index definition)
2820  */
2821
2822 static int
2823 comparetup_cluster(const SortTuple *a, const SortTuple *b,
2824                                    Tuplesortstate *state)
2825 {
2826         ScanKey         scanKey = state->indexScanKey;
2827         HeapTuple       ltup;
2828         HeapTuple       rtup;
2829         TupleDesc       tupDesc;
2830         int                     nkey;
2831         int32           compare;
2832
2833         /* Compare the leading sort key, if it's simple */
2834         if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
2835         {
2836                 compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
2837                                                                                   scanKey->sk_collation,
2838                                                                                   a->datum1, a->isnull1,
2839                                                                                   b->datum1, b->isnull1);
2840                 if (compare != 0 || state->nKeys == 1)
2841                         return compare;
2842                 /* Compare additional columns the hard way */
2843                 scanKey++;
2844                 nkey = 1;
2845         }
2846         else
2847         {
2848                 /* Must compare all keys the hard way */
2849                 nkey = 0;
2850         }
2851
2852         /* Compare additional sort keys */
2853         ltup = (HeapTuple) a->tuple;
2854         rtup = (HeapTuple) b->tuple;
2855
2856         if (state->indexInfo->ii_Expressions == NULL)
2857         {
2858                 /* If not expression index, just compare the proper heap attrs */
2859                 tupDesc = state->tupDesc;
2860
2861                 for (; nkey < state->nKeys; nkey++, scanKey++)
2862                 {
2863                         AttrNumber      attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
2864                         Datum           datum1,
2865                                                 datum2;
2866                         bool            isnull1,
2867                                                 isnull2;
2868
2869                         datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
2870                         datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
2871
2872                         compare = inlineApplySortFunction(&scanKey->sk_func,
2873                                                                                           scanKey->sk_flags,
2874                                                                                           scanKey->sk_collation,
2875                                                                                           datum1, isnull1,
2876                                                                                           datum2, isnull2);
2877                         if (compare != 0)
2878                                 return compare;
2879                 }
2880         }
2881         else
2882         {
2883                 /*
2884                  * In the expression index case, compute the whole index tuple and
2885                  * then compare values.  It would perhaps be faster to compute only as
2886                  * many columns as we need to compare, but that would require
2887                  * duplicating all the logic in FormIndexDatum.
2888                  */
2889                 Datum           l_index_values[INDEX_MAX_KEYS];
2890                 bool            l_index_isnull[INDEX_MAX_KEYS];
2891                 Datum           r_index_values[INDEX_MAX_KEYS];
2892                 bool            r_index_isnull[INDEX_MAX_KEYS];
2893                 TupleTableSlot *ecxt_scantuple;
2894
2895                 /* Reset context each time to prevent memory leakage */
2896                 ResetPerTupleExprContext(state->estate);
2897
2898                 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
2899
2900                 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
2901                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
2902                                            l_index_values, l_index_isnull);
2903
2904                 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
2905                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
2906                                            r_index_values, r_index_isnull);
2907
2908                 for (; nkey < state->nKeys; nkey++, scanKey++)
2909                 {
2910                         compare = inlineApplySortFunction(&scanKey->sk_func,
2911                                                                                           scanKey->sk_flags,
2912                                                                                           scanKey->sk_collation,
2913                                                                                           l_index_values[nkey],
2914                                                                                           l_index_isnull[nkey],
2915                                                                                           r_index_values[nkey],
2916                                                                                           r_index_isnull[nkey]);
2917                         if (compare != 0)
2918                                 return compare;
2919                 }
2920         }
2921
2922         return 0;
2923 }
2924
2925 static void
2926 copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
2927 {
2928         HeapTuple       tuple = (HeapTuple) tup;
2929
2930         /* copy the tuple into sort storage */
2931         tuple = heap_copytuple(tuple);
2932         stup->tuple = (void *) tuple;
2933         USEMEM(state, GetMemoryChunkSpace(tuple));
2934         /* set up first-column key value, if it's a simple column */
2935         if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
2936                 stup->datum1 = heap_getattr(tuple,
2937                                                                         state->indexInfo->ii_KeyAttrNumbers[0],
2938                                                                         state->tupDesc,
2939                                                                         &stup->isnull1);
2940 }
2941
2942 static void
2943 writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
2944 {
2945         HeapTuple       tuple = (HeapTuple) stup->tuple;
2946         unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
2947
2948         /* We need to store t_self, but not other fields of HeapTupleData */
2949         LogicalTapeWrite(state->tapeset, tapenum,
2950                                          &tuplen, sizeof(tuplen));
2951         LogicalTapeWrite(state->tapeset, tapenum,
2952                                          &tuple->t_self, sizeof(ItemPointerData));
2953         LogicalTapeWrite(state->tapeset, tapenum,
2954                                          tuple->t_data, tuple->t_len);
2955         if (state->randomAccess)        /* need trailing length word? */
2956                 LogicalTapeWrite(state->tapeset, tapenum,
2957                                                  &tuplen, sizeof(tuplen));
2958
2959         FREEMEM(state, GetMemoryChunkSpace(tuple));
2960         heap_freetuple(tuple);
2961 }
2962
2963 static void
2964 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
2965                                 int tapenum, unsigned int tuplen)
2966 {
2967         unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
2968         HeapTuple       tuple = (HeapTuple) palloc(t_len + HEAPTUPLESIZE);
2969
2970         USEMEM(state, GetMemoryChunkSpace(tuple));
2971         /* Reconstruct the HeapTupleData header */
2972         tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
2973         tuple->t_len = t_len;
2974         LogicalTapeReadExact(state->tapeset, tapenum,
2975                                                  &tuple->t_self, sizeof(ItemPointerData));
2976         /* We don't currently bother to reconstruct t_tableOid */
2977         tuple->t_tableOid = InvalidOid;
2978         /* Read in the tuple body */
2979         LogicalTapeReadExact(state->tapeset, tapenum,
2980                                                  tuple->t_data, tuple->t_len);
2981         if (state->randomAccess)        /* need trailing length word? */
2982                 LogicalTapeReadExact(state->tapeset, tapenum,
2983                                                          &tuplen, sizeof(tuplen));
2984         stup->tuple = (void *) tuple;
2985         /* set up first-column key value, if it's a simple column */
2986         if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
2987                 stup->datum1 = heap_getattr(tuple,
2988                                                                         state->indexInfo->ii_KeyAttrNumbers[0],
2989                                                                         state->tupDesc,
2990                                                                         &stup->isnull1);
2991 }
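/*
 * On-tape layout for the CLUSTER case, for reference (trailing length
 * word only under randomAccess):
 *
 *              [tuplen][t_self][tuple body (t_len bytes)]([tuplen])
 *
 * where tuplen = t_len + sizeof(ItemPointerData) + sizeof(int); only
 * t_self among the HeapTupleData header fields survives the round trip
 * (t_tableOid is reset to InvalidOid on read).
 */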
2992
2993
2994 /*
2995  * Routines specialized for IndexTuple case
2996  *
2997  * The btree and hash cases require separate comparison functions, but the
2998  * IndexTuple representation is the same so the copy/write/read support
2999  * functions can be shared.
3000  */
3001
3002 static int
3003 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
3004                                            Tuplesortstate *state)
3005 {
3006         /*
3007          * This is similar to _bt_tuplecompare(), but we have already done the
3008          * index_getattr calls for the first column, and we need to keep track of
3009          * whether any null fields are present.  Also see the special treatment
3010          * for equal keys at the end.
3011          */
3012         ScanKey         scanKey = state->indexScanKey;
3013         IndexTuple      tuple1;
3014         IndexTuple      tuple2;
3015         int                     keysz;
3016         TupleDesc       tupDes;
3017         bool            equal_hasnull = false;
3018         int                     nkey;
3019         int32           compare;
3020
3021         /* Compare the leading sort key */
3022         compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
3023                                                                           scanKey->sk_collation,
3024                                                                           a->datum1, a->isnull1,
3025                                                                           b->datum1, b->isnull1);
3026         if (compare != 0)
3027                 return compare;
3028
3029         /* they are equal, so we only need to examine one null flag */
3030         if (a->isnull1)
3031                 equal_hasnull = true;
3032
3033         /* Compare additional sort keys */
3034         tuple1 = (IndexTuple) a->tuple;
3035         tuple2 = (IndexTuple) b->tuple;
3036         keysz = state->nKeys;
3037         tupDes = RelationGetDescr(state->indexRel);
3038         scanKey++;
3039         for (nkey = 2; nkey <= keysz; nkey++, scanKey++)
3040         {
3041                 Datum           datum1,
3042                                         datum2;
3043                 bool            isnull1,
3044                                         isnull2;
3045
3046                 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
3047                 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
3048
3049                 compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags,
3050                                                                                   scanKey->sk_collation,
3051                                                                                   datum1, isnull1,
3052                                                                                   datum2, isnull2);
3053                 if (compare != 0)
3054                         return compare;         /* done when we find unequal attributes */
3055
3056                 /* they are equal, so we only need to examine one null flag */
3057                 if (isnull1)
3058                         equal_hasnull = true;
3059         }
3060
3061         /*
3062          * If btree has asked us to enforce uniqueness, complain if two equal
3063          * tuples are detected (unless there was at least one NULL field).
3064          *
3065          * It is sufficient to make the test here, because if two tuples are equal
3066          * they *must* get compared at some stage of the sort --- otherwise the
3067          * sort algorithm wouldn't have checked whether one must appear before the
3068          * other.
3069          */
3070         if (state->enforceUnique && !equal_hasnull)
3071         {
3072                 Datum           values[INDEX_MAX_KEYS];
3073                 bool            isnull[INDEX_MAX_KEYS];
3074
3075                 /*
3076                  * Some rather brain-dead implementations of qsort (such as the one in
3077                  * QNX 4) will sometimes call the comparison routine to compare a value
3078                  * to itself, but we always use our own implementation, which does not.
3079                  */
3080                 Assert(tuple1 != tuple2);
3081
3082                 index_deform_tuple(tuple1, tupDes, values, isnull);
3083                 ereport(ERROR,
3084                                 (errcode(ERRCODE_UNIQUE_VIOLATION),
3085                                  errmsg("could not create unique index \"%s\"",
3086                                                 RelationGetRelationName(state->indexRel)),
3087                                  errdetail("Key %s is duplicated.",
3088                                                    BuildIndexValueDescription(state->indexRel,
3089                                                                                                           values, isnull))));
3090         }
3091
3092         /*
3093          * If key values are equal, we sort on ItemPointer.  This does not affect
3094          * validity of the finished index, but it may be useful to have index scans
3095          * in physical order.
3096          */
3097         {
3098                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3099                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3100
3101                 if (blk1 != blk2)
3102                         return (blk1 < blk2) ? -1 : 1;
3103         }
3104         {
3105                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3106                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3107
3108                 if (pos1 != pos2)
3109                         return (pos1 < pos2) ? -1 : 1;
3110         }
3111
3112         return 0;
3113 }
3114
3115 static int
3116 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
3117                                           Tuplesortstate *state)
3118 {
3119         uint32          hash1;
3120         uint32          hash2;
3121         IndexTuple      tuple1;
3122         IndexTuple      tuple2;
3123
3124         /*
3125          * Fetch hash keys and mask off bits we don't want to sort by. We know
3126          * that the first column of the index tuple is the hash key.
3127          */
3128         Assert(!a->isnull1);
3129         hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
3130         Assert(!b->isnull1);
3131         hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
3132
3133         if (hash1 > hash2)
3134                 return 1;
3135         else if (hash1 < hash2)
3136                 return -1;
3137
3138         /*
3139          * If hash values are equal, we sort on ItemPointer.  This does not affect
3140          * validity of the finished index, but it may be useful to have index scans
3141          * in physical order.
3142          */
3143         tuple1 = (IndexTuple) a->tuple;
3144         tuple2 = (IndexTuple) b->tuple;
3145
3146         {
3147                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3148                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3149
3150                 if (blk1 != blk2)
3151                         return (blk1 < blk2) ? -1 : 1;
3152         }
3153         {
3154                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3155                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3156
3157                 if (pos1 != pos2)
3158                         return (pos1 < pos2) ? -1 : 1;
3159         }
3160
3161         return 0;
3162 }
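/*
 * Both index comparators above fall back to the same ItemPointer
 * tiebreak.  Written as a standalone helper it might look like this (a
 * sketch only --- the file keeps the logic inline):
 *
 *              static int
 *              compare_item_pointers(ItemPointer tid1, ItemPointer tid2)
 *              {
 *                      BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
 *                      BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
 *                      OffsetNumber pos1;
 *                      OffsetNumber pos2;
 *
 *                      if (blk1 != blk2)
 *                              return (blk1 < blk2) ? -1 : 1;
 *                      pos1 = ItemPointerGetOffsetNumber(tid1);
 *                      pos2 = ItemPointerGetOffsetNumber(tid2);
 *                      if (pos1 != pos2)
 *                              return (pos1 < pos2) ? -1 : 1;
 *                      return 0;
 *              }
 */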
3163
3164 static void
3165 copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
3166 {
3167         IndexTuple      tuple = (IndexTuple) tup;
3168         unsigned int tuplen = IndexTupleSize(tuple);
3169         IndexTuple      newtuple;
3170
3171         /* copy the tuple into sort storage */
3172         newtuple = (IndexTuple) palloc(tuplen);
3173         memcpy(newtuple, tuple, tuplen);
3174         USEMEM(state, GetMemoryChunkSpace(newtuple));
3175         stup->tuple = (void *) newtuple;
3176         /* set up first-column key value */
3177         stup->datum1 = index_getattr(newtuple,
3178                                                                  1,
3179                                                                  RelationGetDescr(state->indexRel),
3180                                                                  &stup->isnull1);
3181 }
3182
3183 static void
3184 writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
3185 {
3186         IndexTuple      tuple = (IndexTuple) stup->tuple;
3187         unsigned int tuplen;
3188
3189         tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
3190         LogicalTapeWrite(state->tapeset, tapenum,
3191                                          (void *) &tuplen, sizeof(tuplen));
3192         LogicalTapeWrite(state->tapeset, tapenum,
3193                                          (void *) tuple, IndexTupleSize(tuple));
3194         if (state->randomAccess)        /* need trailing length word? */
3195                 LogicalTapeWrite(state->tapeset, tapenum,
3196                                                  (void *) &tuplen, sizeof(tuplen));
3197
3198         FREEMEM(state, GetMemoryChunkSpace(tuple));
3199         pfree(tuple);
3200 }
3201
3202 static void
3203 readtup_index(Tuplesortstate *state, SortTuple *stup,
3204                           int tapenum, unsigned int len)
3205 {
3206         unsigned int tuplen = len - sizeof(unsigned int);
3207         IndexTuple      tuple = (IndexTuple) palloc(tuplen);
3208
3209         USEMEM(state, GetMemoryChunkSpace(tuple));
3210         LogicalTapeReadExact(state->tapeset, tapenum,
3211                                                  tuple, tuplen);
3212         if (state->randomAccess)        /* need trailing length word? */
3213                 LogicalTapeReadExact(state->tapeset, tapenum,
3214                                                          &tuplen, sizeof(tuplen));
3215         stup->tuple = (void *) tuple;
3216         /* set up first-column key value */
3217         stup->datum1 = index_getattr(tuple,
3218                                                                  1,
3219                                                                  RelationGetDescr(state->indexRel),
3220                                                                  &stup->isnull1);
3221 }
3222
3223 static void
3224 reversedirection_index_btree(Tuplesortstate *state)
3225 {
3226         ScanKey         scanKey = state->indexScanKey;
3227         int                     nkey;
3228
3229         for (nkey = 0; nkey < state->nKeys; nkey++, scanKey++)
3230         {
3231                 scanKey->sk_flags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
3232         }
3233 }
3234
3235 static void
3236 reversedirection_index_hash(Tuplesortstate *state)
3237 {
3238         /* We don't support reversing direction in a hash index sort */
3239         elog(ERROR, "reversedirection_index_hash is not implemented");
3240 }
3241
3242
3243 /*
3244  * Routines specialized for DatumTuple case
3245  */
3246
3247 static int
3248 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3249 {
3250         return ApplySortComparator(a->datum1, a->isnull1,
3251                                                            b->datum1, b->isnull1,
3252                                                            state->onlyKey);
3253 }
3254
3255 static void
3256 copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
3257 {
3258         /* Not currently needed */
3259         elog(ERROR, "copytup_datum() should not be called");
3260 }
3261
3262 static void
3263 writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
3264 {
3265         void       *waddr;
3266         unsigned int tuplen;
3267         unsigned int writtenlen;
3268
3269         if (stup->isnull1)
3270         {
3271                 waddr = NULL;
3272                 tuplen = 0;
3273         }
3274         else if (state->datumTypeByVal)
3275         {
3276                 waddr = &stup->datum1;
3277                 tuplen = sizeof(Datum);
3278         }
3279         else
3280         {
3281                 waddr = DatumGetPointer(stup->datum1);
3282                 tuplen = datumGetSize(stup->datum1, false, state->datumTypeLen);
3283                 Assert(tuplen != 0);
3284         }
3285
3286         writtenlen = tuplen + sizeof(unsigned int);
3287
3288         LogicalTapeWrite(state->tapeset, tapenum,
3289                                          (void *) &writtenlen, sizeof(writtenlen));
3290         LogicalTapeWrite(state->tapeset, tapenum,
3291                                          waddr, tuplen);
3292         if (state->randomAccess)        /* need trailing length word? */
3293                 LogicalTapeWrite(state->tapeset, tapenum,
3294                                                  (void *) &writtenlen, sizeof(writtenlen));
3295
3296         if (stup->tuple)
3297         {
3298                 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3299                 pfree(stup->tuple);
3300         }
3301 }
3302
3303 static void
3304 readtup_datum(Tuplesortstate *state, SortTuple *stup,
3305                           int tapenum, unsigned int len)
3306 {
3307         unsigned int tuplen = len - sizeof(unsigned int);
3308
3309         if (tuplen == 0)
3310         {
3311                 /* it's NULL */
3312                 stup->datum1 = (Datum) 0;
3313                 stup->isnull1 = true;
3314                 stup->tuple = NULL;
3315         }
3316         else if (state->datumTypeByVal)
3317         {
3318                 Assert(tuplen == sizeof(Datum));
3319                 LogicalTapeReadExact(state->tapeset, tapenum,
3320                                                          &stup->datum1, tuplen);
3321                 stup->isnull1 = false;
3322                 stup->tuple = NULL;
3323         }
3324         else
3325         {
3326                 void       *raddr = palloc(tuplen);
3327
3328                 LogicalTapeReadExact(state->tapeset, tapenum,
3329                                                          raddr, tuplen);
3330                 stup->datum1 = PointerGetDatum(raddr);
3331                 stup->isnull1 = false;
3332                 stup->tuple = raddr;
3333                 USEMEM(state, GetMemoryChunkSpace(raddr));
3334         }
3335
3336         if (state->randomAccess)        /* need trailing length word? */
3337                 LogicalTapeReadExact(state->tapeset, tapenum,
3338                                                          &tuplen, sizeof(tuplen));
3339 }
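/*
 * The three encodings readtup_datum() must handle, as written by
 * writetup_datum() (trailing length word only under randomAccess):
 *
 *              NULL:                   [sizeof(unsigned int)] and no body
 *              by-value:               [len][the raw Datum, sizeof(Datum) bytes]
 *              by-reference:   [len][datumGetSize() bytes of the value]
 *
 * An empty body is unambiguous as the NULL marker because writetup_datum()
 * asserts that a pass-by-reference value always has nonzero size.
 */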
3340
3341 static void
3342 reversedirection_datum(Tuplesortstate *state)
3343 {
3344         state->onlyKey->ssup_reverse = !state->onlyKey->ssup_reverse;
3345         state->onlyKey->ssup_nulls_first = !state->onlyKey->ssup_nulls_first;
3346 }
3347
3348 /*
3349  * Convenience routine to free a tuple previously loaded into sort memory
3350  */
3351 static void
3352 free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
3353 {
3354         FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3355         pfree(stup->tuple);
3356 }