1 /*-------------------------------------------------------------------------
2  *
3  * tuplesort.c
4  *        Generalized tuple sorting routines.
5  *
6  * This module handles sorting of heap tuples, index tuples, or single
7  * Datums (and could easily support other kinds of sortable objects,
8  * if necessary).  It works efficiently for both small and large amounts
9  * of data.  Small amounts are sorted in-memory using qsort().  Large
10  * amounts are sorted using temporary files and a standard external sort
11  * algorithm.
12  *
13  * See Knuth, volume 3, for more than you want to know about the external
14  * sorting algorithm.  We divide the input into sorted runs using replacement
15  * selection, in the form of a priority tree implemented as a heap
16  * (essentially his Algorithm 5.2.3H), then merge the runs using polyphase
17  * merge, Knuth's Algorithm 5.4.2D.  The logical "tapes" used by Algorithm D
18  * are implemented by logtape.c, which avoids space wastage by recycling
19  * disk space as soon as each block is read from its "tape".
20  *
21  * We do not form the initial runs using Knuth's recommended replacement
22  * selection data structure (Algorithm 5.4.1R), because it uses a fixed
23  * number of records in memory at all times.  Since we are dealing with
24  * tuples that may vary considerably in size, we want to be able to vary
25  * the number of records kept in memory to ensure full utilization of the
26  * allowed sort memory space.  So, we keep the tuples in a variable-size
27  * heap, with the next record to go out at the top of the heap.  Like
28  * Algorithm 5.4.1R, each record is stored with the run number that it
29  * must go into, and we use (run number, key) as the ordering key for the
30  * heap.  When the run number at the top of the heap changes, we know that
31  * no more records of the prior run are left in the heap.
32  *
33  * The approximate amount of memory allowed for any one sort operation
34  * is specified in kilobytes by the caller (most pass work_mem).  Initially,
35  * we absorb tuples and simply store them in an unsorted array as long as
36  * we haven't exceeded workMem.  If we reach the end of the input without
37  * exceeding workMem, we sort the array using qsort() and subsequently return
38  * tuples just by scanning the tuple array sequentially.  If we do exceed
39  * workMem, we construct a heap using Algorithm H and begin to emit tuples
40  * into sorted runs in temporary tapes, emitting just enough tuples at each
41  * step to get back within the workMem limit.  Whenever the run number at
42  * the top of the heap changes, we begin a new run with a new output tape
43  * (selected per Algorithm D).  After the end of the input is reached,
44  * we dump out remaining tuples in memory into a final run (or two),
45  * then merge the runs using Algorithm D.
46  *
47  * When merging runs, we use a heap containing just the frontmost tuple from
48  * each source run; we repeatedly output the smallest tuple and insert the
49  * next tuple from its source tape (if any).  When the heap empties, the merge
50  * is complete.  The basic merge algorithm thus needs very little memory ---
51  * only M tuples for an M-way merge, and M is constrained to a small number.
52  * However, we can still make good use of our full workMem allocation by
53  * pre-reading additional tuples from each source tape.  Without prereading,
54  * our access pattern to the temporary file would be very erratic; on average
55  * we'd read one block from each of M source tapes during the same time that
56  * we're writing M blocks to the output tape, so there is no sequentiality of
57  * access at all, defeating the read-ahead methods used by most Unix kernels.
58  * Worse, the output tape gets written into a very random sequence of blocks
59  * of the temp file, ensuring that things will be even worse when it comes
60  * time to read that tape.  A straightforward merge pass thus ends up doing a
61  * lot of waiting for disk seeks.  We can improve matters by prereading from
62  * each source tape sequentially, loading about workMem/M bytes from each tape
63  * in turn.  Then we run the merge algorithm, writing but not reading until
64  * one of the preloaded tuple series runs out.  Then we switch back to preread
65  * mode, fill memory again, and repeat.  This approach helps to localize both
66  * read and write accesses.
67  *
68  * When the caller requests random access to the sort result, we form
69  * the final sorted run on a logical tape which is then "frozen", so
70  * that we can access it randomly.  When the caller does not need random
71  * access, we return from tuplesort_performsort() as soon as we are down
72  * to one run per logical tape.  The final merge is then performed
73  * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
74  * saves one cycle of writing all the data out to disk and reading it in.
75  *
76  * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
77  * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
78  * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
79  * tape drives are expensive beasts, and in particular that there will always
80  * be many more runs than tape drives.  In our implementation a "tape drive"
81  * doesn't cost much more than a few Kb of memory buffers, so we can afford
82  * to have lots of them.  In particular, if we can have as many tape drives
83  * as sorted runs, we can eliminate any repeated I/O at all.  In the current
84  * code we determine the number of tapes M on the basis of workMem: we want
85  * workMem/M to be large enough that we read a fair amount of data each time
86  * we preread from a tape, so as to maintain the locality of access described
87  * above.  Nonetheless, with large workMem we can have many tapes.
88  *
89  *
90  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
91  * Portions Copyright (c) 1994, Regents of the University of California
92  *
93  * IDENTIFICATION
94  *        src/backend/utils/sort/tuplesort.c
95  *
96  *-------------------------------------------------------------------------
97  */
98
99 #include "postgres.h"
100
101 #include <limits.h>
102
103 #include "access/htup_details.h"
104 #include "access/nbtree.h"
105 #include "catalog/index.h"
106 #include "commands/tablespace.h"
107 #include "executor/executor.h"
108 #include "miscadmin.h"
109 #include "pg_trace.h"
110 #include "utils/datum.h"
111 #include "utils/logtape.h"
112 #include "utils/lsyscache.h"
113 #include "utils/memutils.h"
114 #include "utils/pg_rusage.h"
115 #include "utils/rel.h"
116 #include "utils/sortsupport.h"
117 #include "utils/tuplesort.h"
118
119
120 /* sort-type codes for sort__start probes */
121 #define HEAP_SORT               0
122 #define INDEX_SORT              1
123 #define DATUM_SORT              2
124 #define CLUSTER_SORT    3
125
126 /* GUC variables */
127 #ifdef TRACE_SORT
128 bool            trace_sort = false;
129 #endif
130
131 #ifdef DEBUG_BOUNDED_SORT
132 bool            optimize_bounded_sort = true;
133 #endif
134
135
136 /*
137  * The objects we actually sort are SortTuple structs.  These contain
138  * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
139  * which is a separate palloc chunk --- we assume it is just one chunk and
140  * can be freed by a simple pfree().  SortTuples also contain the tuple's
141  * first key column in Datum/nullflag format, and an index integer.
142  *
143  * Storing the first key column lets us save heap_getattr or index_getattr
144  * calls during tuple comparisons.  We could extract and save all the key
145  * columns not just the first, but this would increase code complexity and
146  * overhead, and wouldn't actually save any comparison cycles in the common
147  * case where the first key determines the comparison result.  Note that
148  * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
149  *
150  * There is one special case: when the sort support infrastructure provides an
151  * "abbreviated key" representation, where the key is (typically) a pass by
152  * value proxy for a pass by reference type.  In this case, the abbreviated key
153  * is stored in datum1 in place of the actual first key column.
154  *
155  * When sorting single Datums, the data value is represented directly by
156  * datum1/isnull1 for pass by value types (or null values).  If the datatype is
157  * pass-by-reference and isnull1 is false, then "tuple" points to a separately
158  * palloc'd data value, otherwise "tuple" is NULL.  The value of datum1 is then
159  * either the same pointer as "tuple", or is an abbreviated key value as
160  * described above.  Accordingly, "tuple" is always used in preference to
161  * datum1 as the authoritative value for pass-by-reference cases.
162  *
163  * While building initial runs, tupindex holds the tuple's run number.  During
164  * merge passes, we re-use it to hold the input tape number that each tuple in
165  * the heap was read from, or to hold the index of the next tuple pre-read
166  * from the same tape in the case of pre-read entries.  tupindex goes unused
167  * if the sort occurs entirely in memory.
168  */
169 typedef struct
170 {
171         void       *tuple;                      /* the tuple proper */
172         Datum           datum1;                 /* value of first key column */
173         bool            isnull1;                /* is first key column NULL? */
174         int                     tupindex;               /* see notes above */
175 } SortTuple;
176
177
178 /*
179  * Possible states of a Tuplesort object.  These denote the states that
180  * persist between calls of Tuplesort routines.
181  */
182 typedef enum
183 {
184         TSS_INITIAL,                            /* Loading tuples; still within memory limit */
185         TSS_BOUNDED,                            /* Loading tuples into bounded-size heap */
186         TSS_BUILDRUNS,                          /* Loading tuples; writing to tape */
187         TSS_SORTEDINMEM,                        /* Sort completed entirely in memory */
188         TSS_SORTEDONTAPE,                       /* Sort completed, final run is on tape */
189         TSS_FINALMERGE                          /* Performing final merge on-the-fly */
190 } TupSortStatus;
191
192 /*
193  * Parameters for calculation of number of tapes to use --- see inittapes()
194  * and tuplesort_merge_order().
195  *
196  * In this calculation we assume that each tape will cost us about 3 blocks
197  * worth of buffer space (which is an underestimate for very large data
198  * volumes, but it's probably close enough --- see logtape.c).
199  *
200  * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
201  * tape during a preread cycle (see discussion at top of file).
202  */
203 #define MINORDER                6               /* minimum merge order */
204 #define TAPE_BUFFER_OVERHEAD            (BLCKSZ * 3)
205 #define MERGE_BUFFER_SIZE                       (BLCKSZ * 32)
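
/*
 * Illustrative arithmetic (an editorial addition, not part of the original
 * file), assuming the calculation sketched in tuplesort_merge_order(): with
 * the default BLCKSZ of 8192, TAPE_BUFFER_OVERHEAD is 24 kB and
 * MERGE_BUFFER_SIZE is 256 kB.  A 4 MB workMem then allows a merge order of
 * roughly (4096 kB - 24 kB) / (256 kB + 24 kB) = 14 input tapes, while any
 * allocation small enough to push that quotient below MINORDER is clamped
 * to a 6-way merge.
 */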
206
207 typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
208                                                                                                 Tuplesortstate *state);
209
210 /*
211  * Private state of a Tuplesort operation.
212  */
213 struct Tuplesortstate
214 {
215         TupSortStatus status;           /* enumerated value as shown above */
216         int                     nKeys;                  /* number of columns in sort key */
217         bool            randomAccess;   /* did caller request random access? */
218         bool            bounded;                /* did caller specify a maximum number of
219                                                                  * tuples to return? */
220         bool            boundUsed;              /* true if we made use of a bounded heap */
221         int                     bound;                  /* if bounded, the maximum number of tuples */
222         int64           availMem;               /* remaining memory available, in bytes */
223         int64           allowedMem;             /* total memory allowed, in bytes */
224         int                     maxTapes;               /* number of tapes (Knuth's T) */
225         int                     tapeRange;              /* maxTapes-1 (Knuth's P) */
226         MemoryContext sortcontext;      /* memory context holding all sort data */
227         LogicalTapeSet *tapeset;        /* logtape.c object for tapes in a temp file */
228
229         /*
230          * These function pointers decouple the routines that must know what kind
231          * of tuple we are sorting from the routines that don't need to know it.
232          * They are set up by the tuplesort_begin_xxx routines.
233          *
234          * Function to compare two tuples; result is per qsort() convention, ie:
235          * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
236          * qsort_arg_comparator.
237          */
238         SortTupleComparator comparetup;
239
240         /*
241          * Function to copy a supplied input tuple into palloc'd space and set up
242          * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
243          * state->availMem must be decreased by the amount of space used for the
244          * tuple copy (note the SortTuple struct itself is not counted).
245          */
246         void            (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
247
248         /*
249          * Function to write a stored tuple onto tape.  The representation of the
250          * tuple on tape need not be the same as it is in memory; requirements on
251          * the tape representation are given below.  After writing the tuple,
252          * pfree() the out-of-line data (not the SortTuple struct!), and increase
253          * state->availMem by the amount of memory space thereby released.
254          */
255         void            (*writetup) (Tuplesortstate *state, int tapenum,
256                                                                                  SortTuple *stup);
257
258         /*
259          * Function to read a stored tuple from tape back into memory. 'len' is
260          * the already-read length of the stored tuple.  Create a palloc'd copy,
261          * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
262          * decrease state->availMem by the amount of memory space consumed.
263          */
264         void            (*readtup) (Tuplesortstate *state, SortTuple *stup,
265                                                                                 int tapenum, unsigned int len);
266
267         /*
268          * This array holds the tuples now in sort memory.  If we are in state
269          * INITIAL, the tuples are in no particular order; if we are in state
270          * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
271          * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
272          * H.  (Note that memtupcount only counts the tuples that are part of the
273          * heap --- during merge passes, memtuples[] entries beyond tapeRange are
274          * never in the heap and are used to hold pre-read tuples.)  In state
275          * SORTEDONTAPE, the array is not used.
276          */
277         SortTuple  *memtuples;          /* array of SortTuple structs */
278         int                     memtupcount;    /* number of tuples currently present */
279         int                     memtupsize;             /* allocated length of memtuples array */
280         bool            growmemtuples;  /* memtuples' growth still underway? */
281
282         /*
283          * While building initial runs, this is the current output run number
284          * (starting at 0).  Afterwards, it is the number of initial runs we made.
285          */
286         int                     currentRun;
287
288         /*
289          * Unless otherwise noted, all pointer variables below are pointers to
290          * arrays of length maxTapes, holding per-tape data.
291          */
292
293         /*
294          * These variables are only used during merge passes.  mergeactive[i] is
295          * true if we are reading an input run from (actual) tape number i and
296          * have not yet exhausted that run.  mergenext[i] is the memtuples index
297          * of the next pre-read tuple (next to be loaded into the heap) for tape
298          * i, or 0 if we are out of pre-read tuples.  mergelast[i] similarly
299          * points to the last pre-read tuple from each tape.  mergeavailslots[i]
300          * is the number of unused memtuples[] slots reserved for tape i, and
301          * mergeavailmem[i] is the amount of unused space allocated for tape i.
302          * mergefreelist and mergefirstfree keep track of unused locations in the
303          * memtuples[] array.  The memtuples[].tupindex fields link together
304          * pre-read tuples for each tape as well as recycled locations in
305          * mergefreelist. It is OK to use 0 as a null link in these lists, because
306          * memtuples[0] is part of the merge heap and is never a pre-read tuple.
307          */
308         bool       *mergeactive;        /* active input run source? */
309         int                *mergenext;          /* first preread tuple for each source */
310         int                *mergelast;          /* last preread tuple for each source */
311         int                *mergeavailslots;    /* slots left for prereading each tape */
312         int64      *mergeavailmem;      /* availMem for prereading each tape */
313         int                     mergefreelist;  /* head of freelist of recycled slots */
314         int                     mergefirstfree; /* first slot never used in this merge */
315
316         /*
317          * Variables for Algorithm D.  Note that destTape is a "logical" tape
318          * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
319          * "logical" and "actual" tape numbers straight!
320          */
321         int                     Level;                  /* Knuth's l */
322         int                     destTape;               /* current output tape (Knuth's j, less 1) */
323         int                *tp_fib;                     /* Target Fibonacci run counts (A[]) */
324         int                *tp_runs;            /* # of real runs on each tape */
325         int                *tp_dummy;           /* # of dummy runs for each tape (D[]) */
326         int                *tp_tapenum;         /* Actual tape numbers (TAPE[]) */
327         int                     activeTapes;    /* # of active input tapes in merge pass */
328
329         /*
330          * These variables are used after completion of sorting to keep track of
331          * the next tuple to return.  (In the tape case, the tape's current read
332          * position is also critical state.)
333          */
334         int                     result_tape;    /* actual tape number of finished output */
335         int                     current;                /* array index (only used if SORTEDINMEM) */
336         bool            eof_reached;    /* reached EOF (needed for cursors) */
337
338         /* markpos_xxx holds marked position for mark and restore */
339         long            markpos_block;  /* tape block# (only used if SORTEDONTAPE) */
340         int                     markpos_offset; /* saved "current", or offset in tape block */
341         bool            markpos_eof;    /* saved "eof_reached" */
342
343         /*
344          * The sortKeys variable is used by every case other than the hash
345          * index case; it is set by tuplesort_begin_xxx.  tupDesc is only
346          * used by the MinimalTuple and CLUSTER routines, though.
347          */
348         TupleDesc       tupDesc;
349         SortSupport sortKeys;           /* array of length nKeys */
350
351         /*
352          * This variable is shared by the single-key MinimalTuple case and the
353          * Datum case (which both use qsort_ssup()).  Otherwise it's NULL.
354          */
355         SortSupport onlyKey;
356
357         /*
358          * Additional state for managing "abbreviated key" sortsupport routines
359          * (which currently may be used by all cases except the hash index
360          * case).  Tracks the intervals at which the optimization's
361          * effectiveness is tested.
362          */
363         int64           abbrevNext;             /* Tuple # at which to next check applicability */
364
365         /*
366          * These variables are specific to the CLUSTER case; they are set by
367          * tuplesort_begin_cluster.
368          */
369         IndexInfo  *indexInfo;          /* info about index being used for reference */
370         EState     *estate;                     /* for evaluating index expressions */
371
372         /*
373          * These variables are specific to the IndexTuple case; they are set by
374          * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
375          */
376         Relation        heapRel;                /* table the index is being built on */
377         Relation        indexRel;               /* index being built */
378
379         /* These are specific to the index_btree subcase: */
380         bool            enforceUnique;  /* complain if we find duplicate tuples */
381
382         /* These are specific to the index_hash subcase: */
383         uint32          hash_mask;              /* mask for sortable part of hash code */
384
385         /*
386          * These variables are specific to the Datum case; they are set by
387          * tuplesort_begin_datum and used only by the DatumTuple routines.
388          */
389         Oid                     datumType;
390         /* we need typelen and byval in order to know how to copy the Datums. */
391         int                     datumTypeLen;
392         bool            datumTypeByVal;
393
394         /*
395          * Resource snapshot for time of sort start.
396          */
397 #ifdef TRACE_SORT
398         PGRUsage        ru_start;
399 #endif
400 };
401
402 #define COMPARETUP(state,a,b)   ((*(state)->comparetup) (a, b, state))
403 #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
404 #define WRITETUP(state,tape,stup)       ((*(state)->writetup) (state, tape, stup))
405 #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
406 #define LACKMEM(state)          ((state)->availMem < 0)
407 #define USEMEM(state,amt)       ((state)->availMem -= (amt))
408 #define FREEMEM(state,amt)      ((state)->availMem += (amt))
409
410 /*
411  * NOTES about on-tape representation of tuples:
412  *
413  * We require the first "unsigned int" of a stored tuple to be the total size
414  * on-tape of the tuple, including itself (so it is never zero; an all-zero
415  * unsigned int is used to delimit runs).  The remainder of the stored tuple
416  * may or may not match the in-memory representation of the tuple ---
417  * any conversion needed is the job of the writetup and readtup routines.
418  *
419  * If state->randomAccess is true, then the stored representation of the
420  * tuple must be followed by another "unsigned int" that is a copy of the
421  * length --- so the total tape space used is actually sizeof(unsigned int)
422  * more than the stored length value.  This allows read-backwards.  When
423  * randomAccess is not true, the write/read routines may omit the extra
424  * length word.
425  *
426  * writetup is expected to write both length words as well as the tuple
427  * data.  When readtup is called, the tape is positioned just after the
428  * front length word; readtup must read the tuple data and advance past
429  * the back length word (if present).
430  *
431  * The write/read routines can make use of the tuple description data
432  * stored in the Tuplesortstate record, if needed.  They are also expected
433  * to adjust state->availMem by the amount of memory space (not tape space!)
434  * released or consumed.  There is no error return from either writetup
435  * or readtup; they should ereport() on failure.
436  *
437  *
438  * NOTES about memory consumption calculations:
439  *
440  * We count space allocated for tuples against the workMem limit, plus
441  * the space used by the variable-size memtuples array.  Fixed-size space
442  * is not counted; it's small enough to not be interesting.
443  *
444  * Note that we count actual space used (as shown by GetMemoryChunkSpace)
445  * rather than the originally-requested size.  This is important since
446  * palloc can add substantial overhead.  It's not a complete answer since
447  * we won't count any wasted space in palloc allocation blocks, but it's
448  * a lot better than what we were doing before 7.3.
449  */
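
/*
 * Illustrative sketch (an editorial addition, not part of the original file)
 * of the on-tape layout described above, as a writetup routine might emit
 * it.  "tupbody" and "tupbodylen" stand for the already-serialized tuple
 * body and its size, supplied by the specific writetup implementation:
 *
 *		unsigned int tuplen = tupbodylen + sizeof(tuplen);
 *
 *		LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
 *		LogicalTapeWrite(state->tapeset, tapenum, tupbody, tupbodylen);
 *		if (state->randomAccess)	/* need trailing length word? */
 *			LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
 */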
450
451 /* When using this macro, beware of double evaluation of len */
452 #define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
453         do { \
454                 if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
455                         elog(ERROR, "unexpected end of data"); \
456         } while(0)
457
458
459 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
460 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
461 static bool consider_abort_common(Tuplesortstate *state);
462 static void inittapes(Tuplesortstate *state);
463 static void selectnewtape(Tuplesortstate *state);
464 static void mergeruns(Tuplesortstate *state);
465 static void mergeonerun(Tuplesortstate *state);
466 static void beginmerge(Tuplesortstate *state);
467 static void mergepreread(Tuplesortstate *state);
468 static void mergeprereadone(Tuplesortstate *state, int srcTape);
469 static void dumptuples(Tuplesortstate *state, bool alltuples);
470 static void make_bounded_heap(Tuplesortstate *state);
471 static void sort_bounded_heap(Tuplesortstate *state);
472 static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
473                                           int tupleindex, bool checkIndex);
474 static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
475 static void reversedirection(Tuplesortstate *state);
476 static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
477 static void markrunend(Tuplesortstate *state, int tapenum);
478 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
479                                 Tuplesortstate *state);
480 static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
481 static void writetup_heap(Tuplesortstate *state, int tapenum,
482                           SortTuple *stup);
483 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
484                          int tapenum, unsigned int len);
485 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
486                                    Tuplesortstate *state);
487 static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
488 static void writetup_cluster(Tuplesortstate *state, int tapenum,
489                                  SortTuple *stup);
490 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
491                                 int tapenum, unsigned int len);
492 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
493                                            Tuplesortstate *state);
494 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
495                                           Tuplesortstate *state);
496 static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
497 static void writetup_index(Tuplesortstate *state, int tapenum,
498                            SortTuple *stup);
499 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
500                           int tapenum, unsigned int len);
501 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
502                                  Tuplesortstate *state);
503 static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
504 static void writetup_datum(Tuplesortstate *state, int tapenum,
505                            SortTuple *stup);
506 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
507                           int tapenum, unsigned int len);
508 static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
509
510 /*
511  * Special versions of qsort just for SortTuple objects.  qsort_tuple() sorts
512  * any variant of SortTuples, using the appropriate comparetup function.
513  * qsort_ssup() is specialized for the case where the comparetup function
514  * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts
515  * and Datum sorts.
516  */
517 #include "qsort_tuple.c"
518
519
520 /*
521  *              tuplesort_begin_xxx
522  *
523  * Initialize for a tuple sort operation.
524  *
525  * After calling tuplesort_begin, the caller should call tuplesort_putXXX
526  * zero or more times, then call tuplesort_performsort when all the tuples
527  * have been supplied.  After performsort, retrieve the tuples in sorted
528  * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
529  * access was requested, rescan, markpos, and restorepos can also be called.)
530  * Call tuplesort_end to terminate the operation and release memory/disk space.
531  *
532  * Each variant of tuplesort_begin has a workMem parameter specifying the
533  * maximum number of kilobytes of RAM to use before spilling data to disk.
534  * (The normal value of this parameter is work_mem, but some callers use
535  * other values.)  Each variant also has a randomAccess parameter specifying
536  * whether the caller needs non-sequential access to the sort result.
537  */
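
/*
 * Illustrative sketch (an editorial addition, not part of the original file)
 * of the calling sequence described above, for the heap-tuple case.  The
 * tuple descriptor, key arrays, slot, and fetch/consume steps are assumed to
 * be supplied by the caller:
 *
 *		Tuplesortstate *sortstate;
 *
 *		sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums,
 *										 sortOperators, sortCollations,
 *										 nullsFirstFlags,
 *										 work_mem, false);
 *		while (... fetch the next input tuple into "slot" ...)
 *			tuplesort_puttupleslot(sortstate, slot);
 *		tuplesort_performsort(sortstate);
 *		while (tuplesort_gettupleslot(sortstate, true, slot))
 *			... consume the sorted tuple now in "slot" ...
 *		tuplesort_end(sortstate);
 */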
538
539 static Tuplesortstate *
540 tuplesort_begin_common(int workMem, bool randomAccess)
541 {
542         Tuplesortstate *state;
543         MemoryContext sortcontext;
544         MemoryContext oldcontext;
545
546         /*
547          * Create a working memory context for this sort operation. All data
548          * needed by the sort will live inside this context.
549          */
550         sortcontext = AllocSetContextCreate(CurrentMemoryContext,
551                                                                                 "TupleSort",
552                                                                                 ALLOCSET_DEFAULT_MINSIZE,
553                                                                                 ALLOCSET_DEFAULT_INITSIZE,
554                                                                                 ALLOCSET_DEFAULT_MAXSIZE);
555
556         /*
557          * Make the Tuplesortstate within the per-sort context.  This way, we
558          * don't need a separate pfree() operation for it at shutdown.
559          */
560         oldcontext = MemoryContextSwitchTo(sortcontext);
561
562         state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
563
564 #ifdef TRACE_SORT
565         if (trace_sort)
566                 pg_rusage_init(&state->ru_start);
567 #endif
568
569         state->status = TSS_INITIAL;
570         state->randomAccess = randomAccess;
571         state->bounded = false;
572         state->boundUsed = false;
573         state->allowedMem = workMem * (int64) 1024;
574         state->availMem = state->allowedMem;
575         state->sortcontext = sortcontext;
576         state->tapeset = NULL;
577
578         state->memtupcount = 0;
579         state->memtupsize = 1024;       /* initial guess */
580         state->growmemtuples = true;
581         state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
582
583         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
584
585         /* workMem must be large enough for the minimal memtuples array */
586         if (LACKMEM(state))
587                 elog(ERROR, "insufficient memory allowed for sort");
588
589         state->currentRun = 0;
590
591         /*
592          * maxTapes, tapeRange, and Algorithm D variables will be initialized by
593          * inittapes(), if needed
594          */
595
596         state->result_tape = -1;        /* flag that result tape has not been formed */
597
598         MemoryContextSwitchTo(oldcontext);
599
600         return state;
601 }
602
603 Tuplesortstate *
604 tuplesort_begin_heap(TupleDesc tupDesc,
605                                          int nkeys, AttrNumber *attNums,
606                                          Oid *sortOperators, Oid *sortCollations,
607                                          bool *nullsFirstFlags,
608                                          int workMem, bool randomAccess)
609 {
610         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
611         MemoryContext oldcontext;
612         int                     i;
613
614         oldcontext = MemoryContextSwitchTo(state->sortcontext);
615
616         AssertArg(nkeys > 0);
617
618 #ifdef TRACE_SORT
619         if (trace_sort)
620                 elog(LOG,
621                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
622                          nkeys, workMem, randomAccess ? 't' : 'f');
623 #endif
624
625         state->nKeys = nkeys;
626
627         TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
628                                                                 false,  /* no unique check */
629                                                                 nkeys,
630                                                                 workMem,
631                                                                 randomAccess);
632
633         state->comparetup = comparetup_heap;
634         state->copytup = copytup_heap;
635         state->writetup = writetup_heap;
636         state->readtup = readtup_heap;
637
638         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
639         state->abbrevNext = 10;
640
641         /* Prepare SortSupport data for each column */
642         state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
643
644         for (i = 0; i < nkeys; i++)
645         {
646                 SortSupport sortKey = state->sortKeys + i;
647
648                 AssertArg(attNums[i] != 0);
649                 AssertArg(sortOperators[i] != 0);
650
651                 sortKey->ssup_cxt = CurrentMemoryContext;
652                 sortKey->ssup_collation = sortCollations[i];
653                 sortKey->ssup_nulls_first = nullsFirstFlags[i];
654                 sortKey->ssup_attno = attNums[i];
655                 /* Convey if abbreviation optimization is applicable in principle */
656                 sortKey->abbreviate = (i == 0);
657
658                 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
659         }
660
661         /*
662          * The "onlyKey" optimization cannot be used with abbreviated keys, since
663          * tie-breaker comparisons may be required.  Typically, the optimization is
664          * only of value to pass-by-value types anyway, whereas abbreviated keys
665          * are typically only of value to pass-by-reference types.
666          */
667         if (nkeys == 1 && !state->sortKeys->abbrev_converter)
668                 state->onlyKey = state->sortKeys;
669
670         MemoryContextSwitchTo(oldcontext);
671
672         return state;
673 }
674
675 Tuplesortstate *
676 tuplesort_begin_cluster(TupleDesc tupDesc,
677                                                 Relation indexRel,
678                                                 int workMem, bool randomAccess)
679 {
680         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
681         ScanKey                 indexScanKey;
682         MemoryContext oldcontext;
683         int                             i;
684
685         Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
686
687         oldcontext = MemoryContextSwitchTo(state->sortcontext);
688
689 #ifdef TRACE_SORT
690         if (trace_sort)
691                 elog(LOG,
692                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
693                          RelationGetNumberOfAttributes(indexRel),
694                          workMem, randomAccess ? 't' : 'f');
695 #endif
696
697         state->nKeys = RelationGetNumberOfAttributes(indexRel);
698
699         TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
700                                                                 false,  /* no unique check */
701                                                                 state->nKeys,
702                                                                 workMem,
703                                                                 randomAccess);
704
705         state->comparetup = comparetup_cluster;
706         state->copytup = copytup_cluster;
707         state->writetup = writetup_cluster;
708         state->readtup = readtup_cluster;
709         state->abbrevNext = 10;
710
711         state->indexInfo = BuildIndexInfo(indexRel);
712
713         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
714
715         indexScanKey = _bt_mkscankey_nodata(indexRel);
716
717         if (state->indexInfo->ii_Expressions != NULL)
718         {
719                 TupleTableSlot *slot;
720                 ExprContext *econtext;
721
722                 /*
723                  * We will need to use FormIndexDatum to evaluate the index
724                  * expressions.  To do that, we need an EState, as well as a
725                  * TupleTableSlot to put the table tuples into.  The econtext's
726                  * scantuple has to point to that slot, too.
727                  */
728                 state->estate = CreateExecutorState();
729                 slot = MakeSingleTupleTableSlot(tupDesc);
730                 econtext = GetPerTupleExprContext(state->estate);
731                 econtext->ecxt_scantuple = slot;
732         }
733
734         /* Prepare SortSupport data for each column */
735         state->sortKeys = (SortSupport) palloc0(state->nKeys *
736                                                                                         sizeof(SortSupportData));
737
738         for (i = 0; i < state->nKeys; i++)
739         {
740                 SortSupport sortKey = state->sortKeys + i;
741                 ScanKey         scanKey = indexScanKey + i;
742                 int16           strategy;
743
744                 sortKey->ssup_cxt = CurrentMemoryContext;
745                 sortKey->ssup_collation = scanKey->sk_collation;
746                 sortKey->ssup_nulls_first =
747                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
748                 sortKey->ssup_attno = scanKey->sk_attno;
749                 /* Convey if abbreviation optimization is applicable in principle */
750                 sortKey->abbreviate = (i == 0);
751
752                 AssertState(sortKey->ssup_attno != 0);
753
754                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
755                         BTGreaterStrategyNumber : BTLessStrategyNumber;
756
757                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
758         }
759
760         _bt_freeskey(indexScanKey);
761
762         MemoryContextSwitchTo(oldcontext);
763
764         return state;
765 }
766
767 Tuplesortstate *
768 tuplesort_begin_index_btree(Relation heapRel,
769                                                         Relation indexRel,
770                                                         bool enforceUnique,
771                                                         int workMem, bool randomAccess)
772 {
773         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
774         ScanKey                 indexScanKey;
775         MemoryContext oldcontext;
776         int                             i;
777
778         oldcontext = MemoryContextSwitchTo(state->sortcontext);
779
780 #ifdef TRACE_SORT
781         if (trace_sort)
782                 elog(LOG,
783                          "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
784                          enforceUnique ? 't' : 'f',
785                          workMem, randomAccess ? 't' : 'f');
786 #endif
787
788         state->nKeys = RelationGetNumberOfAttributes(indexRel);
789
790         TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
791                                                                 enforceUnique,
792                                                                 state->nKeys,
793                                                                 workMem,
794                                                                 randomAccess);
795
796         state->comparetup = comparetup_index_btree;
797         state->copytup = copytup_index;
798         state->writetup = writetup_index;
799         state->readtup = readtup_index;
800         state->abbrevNext = 10;
801
802         state->heapRel = heapRel;
803         state->indexRel = indexRel;
804         state->enforceUnique = enforceUnique;
805
806         indexScanKey = _bt_mkscankey_nodata(indexRel);
807         state->nKeys = RelationGetNumberOfAttributes(indexRel);
808
809         /* Prepare SortSupport data for each column */
810         state->sortKeys = (SortSupport) palloc0(state->nKeys *
811                                                                                         sizeof(SortSupportData));
812
813         for (i = 0; i < state->nKeys; i++)
814         {
815                 SortSupport sortKey = state->sortKeys + i;
816                 ScanKey         scanKey = indexScanKey + i;
817                 int16           strategy;
818
819                 sortKey->ssup_cxt = CurrentMemoryContext;
820                 sortKey->ssup_collation = scanKey->sk_collation;
821                 sortKey->ssup_nulls_first =
822                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
823                 sortKey->ssup_attno = scanKey->sk_attno;
824                 /* Convey if abbreviation optimization is applicable in principle */
825                 sortKey->abbreviate = (i == 0);
826
827                 AssertState(sortKey->ssup_attno != 0);
828
829                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
830                         BTGreaterStrategyNumber : BTLessStrategyNumber;
831
832                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
833         }
834
835         _bt_freeskey(indexScanKey);
836
837         MemoryContextSwitchTo(oldcontext);
838
839         return state;
840 }
841
842 Tuplesortstate *
843 tuplesort_begin_index_hash(Relation heapRel,
844                                                    Relation indexRel,
845                                                    uint32 hash_mask,
846                                                    int workMem, bool randomAccess)
847 {
848         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
849         MemoryContext oldcontext;
850
851         oldcontext = MemoryContextSwitchTo(state->sortcontext);
852
853 #ifdef TRACE_SORT
854         if (trace_sort)
855                 elog(LOG,
856                 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
857                          hash_mask,
858                          workMem, randomAccess ? 't' : 'f');
859 #endif
860
861         state->nKeys = 1;                       /* Only one sort column, the hash code */
862
863         state->comparetup = comparetup_index_hash;
864         state->copytup = copytup_index;
865         state->writetup = writetup_index;
866         state->readtup = readtup_index;
867
868         state->heapRel = heapRel;
869         state->indexRel = indexRel;
870
871         state->hash_mask = hash_mask;
872
873         MemoryContextSwitchTo(oldcontext);
874
875         return state;
876 }
877
878 Tuplesortstate *
879 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
880                                           bool nullsFirstFlag,
881                                           int workMem, bool randomAccess)
882 {
883         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
884         MemoryContext oldcontext;
885         int16           typlen;
886         bool            typbyval;
887
888         oldcontext = MemoryContextSwitchTo(state->sortcontext);
889
890 #ifdef TRACE_SORT
891         if (trace_sort)
892                 elog(LOG,
893                          "begin datum sort: workMem = %d, randomAccess = %c",
894                          workMem, randomAccess ? 't' : 'f');
895 #endif
896
897         state->nKeys = 1;                       /* always a one-column sort */
898
899         TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
900                                                                 false,  /* no unique check */
901                                                                 1,
902                                                                 workMem,
903                                                                 randomAccess);
904
905         state->comparetup = comparetup_datum;
906         state->copytup = copytup_datum;
907         state->writetup = writetup_datum;
908         state->readtup = readtup_datum;
909         state->abbrevNext = 10;
910
911         state->datumType = datumType;
912
913         /* lookup necessary attributes of the datum type */
914         get_typlenbyval(datumType, &typlen, &typbyval);
915         state->datumTypeLen = typlen;
916         state->datumTypeByVal = typbyval;
917
918         /* Prepare SortSupport data */
919         state->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
920
921         state->sortKeys->ssup_cxt = CurrentMemoryContext;
922         state->sortKeys->ssup_collation = sortCollation;
923         state->sortKeys->ssup_nulls_first = nullsFirstFlag;
924
925         /* abbreviation is possible here only for by-reference types */
926         state->sortKeys->abbreviate = !typbyval;
927
928         PrepareSortSupportFromOrderingOp(sortOperator, state->sortKeys);
929
930         /*
931          * The "onlyKey" optimization cannot be used with abbreviated keys, since
932          * tie-breaker comparisons may be required.  Typically, the optimization is
933          * only of value to pass-by-value types anyway, whereas abbreviated keys
934          * are typically only of value to pass-by-reference types.
935          */
936         if (!state->sortKeys->abbrev_converter)
937                 state->onlyKey = state->sortKeys;
938
939         MemoryContextSwitchTo(oldcontext);
940
941         return state;
942 }
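
/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * a single-Datum sort of int4 values might look like the following, where
 * "ltOpr" is assumed to be the OID of the type's "<" operator, looked up by
 * the caller:
 *
 *		Tuplesortstate *sortstate;
 *		Datum		val;
 *		bool		isnull;
 *
 *		sortstate = tuplesort_begin_datum(INT4OID, ltOpr, InvalidOid, false,
 *										  work_mem, false);
 *		while (... fetch the next value into val/isnull ...)
 *			tuplesort_putdatum(sortstate, val, isnull);
 *		tuplesort_performsort(sortstate);
 *		while (tuplesort_getdatum(sortstate, true, &val, &isnull))
 *			... consume val ...
 *		tuplesort_end(sortstate);
 */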
943
944 /*
945  * tuplesort_set_bound
946  *
947  *      Advise tuplesort that at most the first N result tuples are required.
948  *
949  * Must be called before inserting any tuples.  (Actually, we could allow it
950  * as long as the sort hasn't spilled to disk, but there seems no need for
951  * delayed calls at the moment.)
952  *
953  * This is a hint only. The tuplesort may still return more tuples than
954  * requested.
955  */
956 void
957 tuplesort_set_bound(Tuplesortstate *state, int64 bound)
958 {
959         /* Assert we're called before loading any tuples */
960         Assert(state->status == TSS_INITIAL);
961         Assert(state->memtupcount == 0);
962         Assert(!state->bounded);
963
964 #ifdef DEBUG_BOUNDED_SORT
965         /* Honor GUC setting that disables the feature (for easy testing) */
966         if (!optimize_bounded_sort)
967                 return;
968 #endif
969
970         /* We want to be able to compute bound * 2, so limit the setting */
971         if (bound > (int64) (INT_MAX / 2))
972                 return;
973
974         state->bounded = true;
975         state->bound = (int) bound;
976
977         /*
978          * Bounded sorts are not an effective target for abbreviated key
979          * optimization.  Disable by setting state to be consistent with no
980          * abbreviation support.
981          */
982         state->sortKeys->abbrev_converter = NULL;
983         if (state->sortKeys->abbrev_full_comparator)
984                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
985
986         /* Not strictly necessary, but be tidy */
987         state->sortKeys->abbrev_abort = NULL;
988         state->sortKeys->abbrev_full_comparator = NULL;
989 }
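
/*
 * Illustrative sketch (an editorial addition, not part of the original file):
 * a LIMIT-aware caller, such as the executor's Sort node, typically advises
 * the bound immediately after creating the sort and before feeding any
 * tuples; "bound" here is the caller's computed tuple limit:
 *
 *		sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums,
 *										 sortOperators, sortCollations,
 *										 nullsFirstFlags,
 *										 work_mem, false);
 *		tuplesort_set_bound(sortstate, bound);
 */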
990
991 /*
992  * tuplesort_end
993  *
994  *      Release resources and clean up.
995  *
996  * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
997  * pointing to garbage.  Be careful not to attempt to use or free such
998  * pointers afterwards!
999  */
1000 void
1001 tuplesort_end(Tuplesortstate *state)
1002 {
1003         /* context swap probably not needed, but let's be safe */
1004         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1005
1006 #ifdef TRACE_SORT
1007         long            spaceUsed;
1008
1009         if (state->tapeset)
1010                 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
1011         else
1012                 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
1013 #endif
1014
1015         /*
1016          * Delete temporary "tape" files, if any.
1017          *
1018          * Note: want to include this in reported total cost of sort, hence need
1019          * for two #ifdef TRACE_SORT sections.
1020          */
1021         if (state->tapeset)
1022                 LogicalTapeSetClose(state->tapeset);
1023
1024 #ifdef TRACE_SORT
1025         if (trace_sort)
1026         {
1027                 if (state->tapeset)
1028                         elog(LOG, "external sort ended, %ld disk blocks used: %s",
1029                                  spaceUsed, pg_rusage_show(&state->ru_start));
1030                 else
1031                         elog(LOG, "internal sort ended, %ld KB used: %s",
1032                                  spaceUsed, pg_rusage_show(&state->ru_start));
1033         }
1034
1035         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
1036 #else
1037
1038         /*
1039          * If you disabled TRACE_SORT, you can still probe sort__done, but you
1040          * ain't getting space-used stats.
1041          */
1042         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
1043 #endif
1044
1045         /* Free any execution state created for CLUSTER case */
1046         if (state->estate != NULL)
1047         {
1048                 ExprContext *econtext = GetPerTupleExprContext(state->estate);
1049
1050                 ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
1051                 FreeExecutorState(state->estate);
1052         }
1053
1054         MemoryContextSwitchTo(oldcontext);
1055
1056         /*
1057          * Free the per-sort memory context, thereby releasing all working memory,
1058          * including the Tuplesortstate struct itself.
1059          */
1060         MemoryContextDelete(state->sortcontext);
1061 }
1062
1063 /*
1064  * Grow the memtuples[] array, if possible within our memory constraint.  We
1065  * must not exceed INT_MAX tuples in memory or the caller-provided memory
1066  * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
1067  *
1068  * Normally, at each increment we double the size of the array.  When doing
1069  * that would exceed a limit, we attempt one last, smaller increase (and then
1070  * clear the growmemtuples flag so we don't try any more).  That allows us to
1071  * use memory as fully as permitted; sticking to the pure doubling rule could
1072  * result in almost half going unused.  Because availMem moves around with
1073  * tuple addition/removal, we need some rule to prevent making repeated small
1074  * increases in memtupsize, which would just be useless thrashing.  The
1075  * growmemtuples flag accomplishes that and also prevents useless
1076  * recalculations in this function.
1077  */
1078 static bool
1079 grow_memtuples(Tuplesortstate *state)
1080 {
1081         int                     newmemtupsize;
1082         int                     memtupsize = state->memtupsize;
1083         int64           memNowUsed = state->allowedMem - state->availMem;
1084
1085         /* Forget it if we've already maxed out memtuples, per comment above */
1086         if (!state->growmemtuples)
1087                 return false;
1088
1089         /* Select new value of memtupsize */
1090         if (memNowUsed <= state->availMem)
1091         {
1092                 /*
1093                  * We've used no more than half of allowedMem; double our usage,
1094                  * clamping at INT_MAX tuples.
1095                  */
1096                 if (memtupsize < INT_MAX / 2)
1097                         newmemtupsize = memtupsize * 2;
1098                 else
1099                 {
1100                         newmemtupsize = INT_MAX;
1101                         state->growmemtuples = false;
1102                 }
1103         }
1104         else
1105         {
1106                 /*
1107                  * This will be the last increment of memtupsize.  Abandon doubling
1108                  * strategy and instead increase as much as we safely can.
1109                  *
1110                  * To stay within allowedMem, we can't increase memtupsize by more
1111                  * than availMem / sizeof(SortTuple) elements.  In practice, we want
1112                  * to increase it by considerably less, because we need to leave some
1113                  * space for the tuples to which the new array slots will refer.  We
1114                  * assume the new tuples will be about the same size as the tuples
1115                  * we've already seen, and thus we can extrapolate from the space
1116                  * consumption so far to estimate an appropriate new size for the
1117                  * memtuples array.  The optimal value might be higher or lower than
1118                  * this estimate, but it's hard to know that in advance.  We again
1119                  * clamp at INT_MAX tuples.
1120                  *
1121                  * This calculation is safe against enlarging the array so much that
1122                  * LACKMEM becomes true, because the memory currently used includes
1123                  * the present array; thus, there would be enough allowedMem for the
1124                  * new array elements even if no other memory were currently used.
1125                  *
1126                  * We do the arithmetic in float8, because otherwise the product of
1127                  * memtupsize and allowedMem could overflow.  Any inaccuracy in the
1128                  * result should be insignificant; but even if we computed a
1129                  * completely insane result, the checks below will prevent anything
1130                  * really bad from happening.
1131                  */
1132                 double          grow_ratio;
1133
1134                 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1135                 if (memtupsize * grow_ratio < INT_MAX)
1136                         newmemtupsize = (int) (memtupsize * grow_ratio);
1137                 else
1138                         newmemtupsize = INT_MAX;
1139
1140                 /* We won't make any further enlargement attempts */
1141                 state->growmemtuples = false;
1142         }
1143
1144         /* Must enlarge array by at least one element, else report failure */
1145         if (newmemtupsize <= memtupsize)
1146                 goto noalloc;
1147
1148         /*
1149          * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
1150          * to ensure our request won't be rejected.  Note that we can easily
1151          * exhaust address space before facing this outcome.  (This is presently
1152          * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1153          * don't rely on that at this distance.)
1154          */
1155         if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1156         {
1157                 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1158                 state->growmemtuples = false;   /* can't grow any more */
1159         }
1160
1161         /*
1162          * We need to be sure that we do not cause LACKMEM to become true, else
1163          * the space management algorithm will go nuts.  The code above should
1164          * never generate a dangerous request, but to be safe, check explicitly
1165          * that the array growth fits within availMem.  (We could still cause
1166          * LACKMEM if the memory chunk overhead associated with the memtuples
1167          * array were to increase.  That shouldn't happen with any sane value of
1168          * allowedMem, because at any array size large enough to risk LACKMEM,
1169          * palloc would be treating both old and new arrays as separate chunks.
1170          * But we'll check LACKMEM explicitly below just in case.)
1171          */
1172         if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1173                 goto noalloc;
1174
1175         /* OK, do it */
1176         FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1177         state->memtupsize = newmemtupsize;
1178         state->memtuples = (SortTuple *)
1179                 repalloc_huge(state->memtuples,
1180                                           state->memtupsize * sizeof(SortTuple));
1181         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1182         if (LACKMEM(state))
1183                 elog(ERROR, "unexpected out-of-memory situation during sort");
1184         return true;
1185
1186 noalloc:
1187         /* If for any reason we didn't realloc, shut off future attempts */
1188         state->growmemtuples = false;
1189         return false;
1190 }
1191
1192 /*
1193  * Accept one tuple while collecting input data for sort.
1194  *
1195  * Note that the input data is always copied; the caller need not save it.
1196  */
1197 void
1198 tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
1199 {
1200         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1201         SortTuple       stup;
1202
1203         /*
1204          * Copy the given tuple into memory we control, and decrease availMem.
1205          * Then call the common code.
1206          */
1207         COPYTUP(state, &stup, (void *) slot);
1208
1209         puttuple_common(state, &stup);
1210
1211         MemoryContextSwitchTo(oldcontext);
1212 }
1213
1214 /*
1215  * Accept one tuple while collecting input data for sort.
1216  *
1217  * Note that the input data is always copied; the caller need not save it.
1218  */
1219 void
1220 tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
1221 {
1222         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1223         SortTuple       stup;
1224
1225         /*
1226          * Copy the given tuple into memory we control, and decrease availMem.
1227          * Then call the common code.
1228          */
1229         COPYTUP(state, &stup, (void *) tup);
1230
1231         puttuple_common(state, &stup);
1232
1233         MemoryContextSwitchTo(oldcontext);
1234 }
1235
1236 /*
1237  * Accept one index tuple while collecting input data for sort, building
1238  * it from caller-supplied values.
1239  */
1240 void
1241 tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
1242                                                           ItemPointer self, Datum *values,
1243                                                           bool *isnull)
1244 {
1245         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1246         SortTuple       stup;
1247         Datum           original;
1248         IndexTuple      tuple;
1249
1250         stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull);
1251         tuple = ((IndexTuple) stup.tuple);
1252         tuple->t_tid = *self;
1253         USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1254         /* set up first-column key value */
1255         original = index_getattr(tuple,
1256                                                          1,
1257                                                          RelationGetDescr(state->indexRel),
1258                                                          &stup.isnull1);
1259
1260         if (!state->sortKeys || !state->sortKeys->abbrev_converter || stup.isnull1)
1261         {
1262                 /*
1263                  * Store ordinary Datum representation, or NULL value.  If there is
1264                  * a converter, it won't expect NULL values, and the cost model is
1265                  * not required to account for NULLs, so in that case we avoid
1266                  * calling the converter and just set datum1 to the "void"
1267                  * representation (to be consistent).
1268                  */
1269                 stup.datum1 = original;
1270         }
1271         else if (!consider_abort_common(state))
1272         {
1273                 /* Store abbreviated key representation */
1274                 stup.datum1 = state->sortKeys->abbrev_converter(original,
1275                                                                                                                 state->sortKeys);
1276         }
1277         else
1278         {
1279                 /* Abort abbreviation */
1280                 int             i;
1281
1282                 stup.datum1 = original;
1283
1284                 /*
1285                  * Set state to be consistent with never trying abbreviation.
1286                  *
1287                  * Alter the datum1 representation in already-copied tuples, so as
1288                  * to ensure a consistent representation (the current tuple was just
1289                  * handled above).  Note that we rely on all tuples copied so far
1290                  * actually being contained within the memtuples array.
1291                  */
1292                 for (i = 0; i < state->memtupcount; i++)
1293                 {
1294                         SortTuple *mtup = &state->memtuples[i];
1295
1296                         tuple = mtup->tuple;
1297                         mtup->datum1 = index_getattr(tuple,
1298                                                                                  1,
1299                                                                                  RelationGetDescr(state->indexRel),
1300                                                                                  &stup.isnull1);
1301                 }
1302         }
1303
1304         puttuple_common(state, &stup);
1305
1306         MemoryContextSwitchTo(oldcontext);
1307 }
1308
1309 /*
1310  * Accept one Datum while collecting input data for sort.
1311  *
1312  * If the Datum is of a pass-by-reference type, the value will be copied.
1313  */
1314 void
1315 tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
1316 {
1317         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1318         SortTuple       stup;
1319
1320         /*
1321          * Pass-by-value types or null values are just stored directly in
1322          * stup.datum1 (and stup.tuple is not used and set to NULL).
1323          *
1324          * Non-null pass-by-reference values need to be copied into memory we
1325          * control, and possibly abbreviated. The copied value is pointed to by
1326          * stup.tuple and is treated as the canonical copy (e.g. to return via
1327          * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
1328          * abbreviated value if abbreviation is happening, otherwise it's identical
1329          * to stup.tuple.
1330          */
1331
1332         if (isNull || state->datumTypeByVal)
1333         {
1334                 stup.datum1 = val;
1335                 stup.isnull1 = isNull;
1336                 stup.tuple = NULL;              /* no separate storage */
1337         }
1338         else
1339         {
1340                 Datum   original = datumCopy(val, false, state->datumTypeLen);
1341
1342                 stup.isnull1 = false;
1343                 stup.tuple = DatumGetPointer(original);
1344                 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1345
1346                 if (!state->sortKeys->abbrev_converter)
1347                 {
1348                         stup.datum1 = original;
1349                 }
1350                 else if (!consider_abort_common(state))
1351                 {
1352                         /* Store abbreviated key representation */
1353                         stup.datum1 = state->sortKeys->abbrev_converter(original,
1354                                                                                                                         state->sortKeys);
1355                 }
1356                 else
1357                 {
1358                         /* Abort abbreviation */
1359                         int             i;
1360
1361                         stup.datum1 = original;
1362
1363                         /*
1364                          * Set state to be consistent with never trying abbreviation.
1365                          *
1366                          * Alter the datum1 representation in already-copied tuples, so as
1367                          * to ensure a consistent representation (the current tuple was
1368                          * just handled above).  Note that we rely on all tuples copied so
1369                          * far actually being contained within the memtuples array.
1370                          */
1371                         for (i = 0; i < state->memtupcount; i++)
1372                         {
1373                                 SortTuple *mtup = &state->memtuples[i];
1374
1375                                 mtup->datum1 = PointerGetDatum(mtup->tuple);
1376                         }
1377                 }
1378         }
1379
1380         puttuple_common(state, &stup);
1381
1382         MemoryContextSwitchTo(oldcontext);
1383 }
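/*
 * Hedged usage sketch (not part of this file; exact parameter lists vary
 * across versions, so treat the call signatures as illustrative): a caller
 * sorting bare Datums typically does something like
 *
 *              state = tuplesort_begin_datum(typeOid, sortOperator, collation,
 *                                                                        nullsFirstFlag, work_mem, false);
 *              for each input value:
 *                      tuplesort_putdatum(state, value, isnull);
 *              tuplesort_performsort(state);
 *              while (tuplesort_getdatum(state, true, &value, &isnull))
 *                      ... consume value ...
 *              tuplesort_end(state);
 */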
1384
1385 /*
1386  * Shared code for tuple and datum cases.
1387  */
1388 static void
1389 puttuple_common(Tuplesortstate *state, SortTuple *tuple)
1390 {
1391         switch (state->status)
1392         {
1393                 case TSS_INITIAL:
1394
1395                         /*
1396                          * Save the tuple into the unsorted array.  First, grow the array
1397                          * as needed.  Note that we try to grow the array when there is
1398                          * still one free slot remaining --- if we fail, there'll still be
1399                          * room to store the incoming tuple, and then we'll switch to
1400                          * tape-based operation.
1401                          */
1402                         if (state->memtupcount >= state->memtupsize - 1)
1403                         {
1404                                 (void) grow_memtuples(state);
1405                                 Assert(state->memtupcount < state->memtupsize);
1406                         }
1407                         state->memtuples[state->memtupcount++] = *tuple;
1408
1409                         /*
1410                          * Check if it's time to switch over to a bounded heapsort. We do
1411                          * so if the input tuple count exceeds twice the desired tuple
1412                          * count (this is a heuristic for where heapsort becomes cheaper
1413                          * than a quicksort), or if we've just filled workMem and have
1414                          * enough tuples to meet the bound.
1415                          *
1416                          * Note that once we enter TSS_BOUNDED state we will always try to
1417                          * complete the sort that way.  In the worst case, if later input
1418                          * tuples are larger than earlier ones, this might cause us to
1419                          * exceed workMem significantly.
1420                          */
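                        /*
                         * For example (illustrative only): a bounded sort with bound = 100
                         * (say, from LIMIT 100) switches to the bounded heap once more
                         * than 200 tuples have been absorbed, or as soon as more than 100
                         * tuples are in memory and the workMem budget has been exceeded.
                         */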
1421                         if (state->bounded &&
1422                                 (state->memtupcount > state->bound * 2 ||
1423                                  (state->memtupcount > state->bound && LACKMEM(state))))
1424                         {
1425 #ifdef TRACE_SORT
1426                                 if (trace_sort)
1427                                         elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1428                                                  state->memtupcount,
1429                                                  pg_rusage_show(&state->ru_start));
1430 #endif
1431                                 make_bounded_heap(state);
1432                                 return;
1433                         }
1434
1435                         /*
1436                          * Done if we still fit in available memory and have array slots.
1437                          */
1438                         if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1439                                 return;
1440
1441                         /*
1442                          * Nope; time to switch to tape-based operation.
1443                          */
1444                         inittapes(state);
1445
1446                         /*
1447                          * Dump tuples until we are back under the limit.
1448                          */
1449                         dumptuples(state, false);
1450                         break;
1451
1452                 case TSS_BOUNDED:
1453
1454                         /*
1455                          * We don't want to grow the array here, so check whether the new
1456                          * tuple can be discarded before putting it in.  This should be a
1457                          * good speed optimization, too, since when there are many more
1458                          * input tuples than the bound, most input tuples can be discarded
1459                          * with just this one comparison.  Note that because we currently
1460                          * have the sort direction reversed, we must check for <= not >=.
1461                          */
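                        /*
                         * Concretely (a worked illustration of the reversed direction):
                         * the bounded heap is kept with the sort keys reversed, so
                         * memtuples[0] is the tuple that would sort *last* among those
                         * retained.  A comparison result <= 0 therefore means the new
                         * tuple sorts at or after memtuples[0] in the original order, so
                         * it can never be among the final bound tuples and is freed.
                         */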
1462                         if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1463                         {
1464                                 /* new tuple <= top of the heap, so we can discard it */
1465                                 free_sort_tuple(state, tuple);
1466                                 CHECK_FOR_INTERRUPTS();
1467                         }
1468                         else
1469                         {
1470                                 /* discard top of heap, sift up, insert new tuple */
1471                                 free_sort_tuple(state, &state->memtuples[0]);
1472                                 tuplesort_heap_siftup(state, false);
1473                                 tuplesort_heap_insert(state, tuple, 0, false);
1474                         }
1475                         break;
1476
1477                 case TSS_BUILDRUNS:
1478
1479                         /*
1480                          * Insert the tuple into the heap, with run number currentRun if
1481                          * it can go into the current run, else run number currentRun+1.
1482                          * The tuple can go into the current run if it is >= the first
1483                          * not-yet-output tuple.  (Actually, it could go into the current
1484                          * run if it is >= the most recently output tuple ... but that
1485                          * would require keeping around the tuple we last output, and it's
1486                          * simplest to let writetup free each tuple as soon as it's
1487                          * written.)
1488                          *
1489                          * Note there will always be at least one tuple in the heap at
1490                          * this point; see dumptuples.
1491                          */
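                        /*
                         * Worked example (illustrative): if the current heap top sorts as
                         * 42 and the incoming tuple sorts as 37, the incoming tuple is
                         * tagged with run currentRun + 1, because emitting it into the
                         * current run after 42 has been (or is about to be) written would
                         * break that run's sorted order.
                         */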
1492                         Assert(state->memtupcount > 0);
1493                         if (COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1494                                 tuplesort_heap_insert(state, tuple, state->currentRun, true);
1495                         else
1496                                 tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);
1497
1498                         /*
1499                          * If we are over the memory limit, dump tuples till we're under.
1500                          */
1501                         dumptuples(state, false);
1502                         break;
1503
1504                 default:
1505                         elog(ERROR, "invalid tuplesort state");
1506                         break;
1507         }
1508 }
1509
1510 static bool
1511 consider_abort_common(Tuplesortstate *state)
1512 {
1513         Assert(state->sortKeys[0].abbrev_converter != NULL);
1514         Assert(state->sortKeys[0].abbrev_abort != NULL);
1515         Assert(state->sortKeys[0].abbrev_full_comparator != NULL);
1516
1517         /*
1518          * Check effectiveness of abbreviation optimization.  Consider aborting
1519          * when still within memory limit.
1520          */
1521         if (state->status == TSS_INITIAL &&
1522                 state->memtupcount >= state->abbrevNext)
1523         {
1524                 state->abbrevNext *= 2;
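                /*
                 * Because abbrevNext doubles each time, this check fires at
                 * geometrically increasing tuple counts (abbrevNext, 2 * abbrevNext,
                 * 4 * abbrevNext, ...), so its amortized cost stays negligible no
                 * matter how many tuples are absorbed.
                 */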
1525
1526                 /*
1527                  * Check opclass-supplied abbreviation abort routine.  It may
1528                  * indicate that abbreviation should not proceed.
1529                  */
1530                 if (!state->sortKeys->abbrev_abort(state->memtupcount,
1531                                                                                    state->sortKeys))
1532                         return false;
1533
1534                 /*
1535                  * Finally, restore authoritative comparator, and indicate that
1536                  * abbreviation is not in play by setting abbrev_converter to NULL
1537                  */
1538                 state->sortKeys[0].comparator = state->sortKeys[0].abbrev_full_comparator;
1539                 state->sortKeys[0].abbrev_converter = NULL;
1540                 /* Not strictly necessary, but be tidy */
1541                 state->sortKeys[0].abbrev_abort = NULL;
1542                 state->sortKeys[0].abbrev_full_comparator = NULL;
1543
1544                 /* Give up - expect original pass-by-value representation */
1545                 return true;
1546         }
1547
1548         return false;
1549 }
1550
1551 /*
1552  * All tuples have been provided; finish the sort.
1553  */
1554 void
1555 tuplesort_performsort(Tuplesortstate *state)
1556 {
1557         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1558
1559 #ifdef TRACE_SORT
1560         if (trace_sort)
1561                 elog(LOG, "performsort starting: %s",
1562                          pg_rusage_show(&state->ru_start));
1563 #endif
1564
1565         switch (state->status)
1566         {
1567                 case TSS_INITIAL:
1568
1569                         /*
1570                          * We were able to accumulate all the tuples within the allowed
1571                          * amount of memory.  Just qsort 'em and we're done.
1572                          */
1573                         if (state->memtupcount > 1)
1574                         {
1575                                 /* Can we use the single-key sort function? */
1576                                 if (state->onlyKey != NULL)
1577                                         qsort_ssup(state->memtuples, state->memtupcount,
1578                                                            state->onlyKey);
1579                                 else
1580                                         qsort_tuple(state->memtuples,
1581                                                                 state->memtupcount,
1582                                                                 state->comparetup,
1583                                                                 state);
1584                         }
1585                         state->current = 0;
1586                         state->eof_reached = false;
1587                         state->markpos_offset = 0;
1588                         state->markpos_eof = false;
1589                         state->status = TSS_SORTEDINMEM;
1590                         break;
1591
1592                 case TSS_BOUNDED:
1593
1594                         /*
1595                          * We were able to accumulate all the tuples required for output
1596                          * in memory, using a heap to eliminate excess tuples.  Now we
1597                          * have to transform the heap to a properly-sorted array.
1598                          */
1599                         sort_bounded_heap(state);
1600                         state->current = 0;
1601                         state->eof_reached = false;
1602                         state->markpos_offset = 0;
1603                         state->markpos_eof = false;
1604                         state->status = TSS_SORTEDINMEM;
1605                         break;
1606
1607                 case TSS_BUILDRUNS:
1608
1609                         /*
1610                          * Finish tape-based sort.  First, flush all tuples remaining in
1611                          * memory out to tape; then merge until we have a single remaining
1612                          * run (or, if !randomAccess, one run per tape). Note that
1613                          * mergeruns sets the correct state->status.
1614                          */
1615                         dumptuples(state, true);
1616                         mergeruns(state);
1617                         state->eof_reached = false;
1618                         state->markpos_block = 0L;
1619                         state->markpos_offset = 0;
1620                         state->markpos_eof = false;
1621                         break;
1622
1623                 default:
1624                         elog(ERROR, "invalid tuplesort state");
1625                         break;
1626         }
1627
1628 #ifdef TRACE_SORT
1629         if (trace_sort)
1630         {
1631                 if (state->status == TSS_FINALMERGE)
1632                         elog(LOG, "performsort done (except %d-way final merge): %s",
1633                                  state->activeTapes,
1634                                  pg_rusage_show(&state->ru_start));
1635                 else
1636                         elog(LOG, "performsort done: %s",
1637                                  pg_rusage_show(&state->ru_start));
1638         }
1639 #endif
1640
1641         MemoryContextSwitchTo(oldcontext);
1642 }
1643
1644 /*
1645  * Internal routine to fetch the next tuple in either forward or back
1646  * direction into *stup.  Returns FALSE if no more tuples.
1647  * If *should_free is set, the caller must pfree stup.tuple when done with it.
1648  */
1649 static bool
1650 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
1651                                                   SortTuple *stup, bool *should_free)
1652 {
1653         unsigned int tuplen;
1654
1655         switch (state->status)
1656         {
1657                 case TSS_SORTEDINMEM:
1658                         Assert(forward || state->randomAccess);
1659                         *should_free = false;
1660                         if (forward)
1661                         {
1662                                 if (state->current < state->memtupcount)
1663                                 {
1664                                         *stup = state->memtuples[state->current++];
1665                                         return true;
1666                                 }
1667                                 state->eof_reached = true;
1668
1669                                 /*
1670                                  * Complain if caller tries to retrieve more tuples than
1671                                  * originally asked for in a bounded sort.  This is because
1672                                  * returning EOF here might be the wrong thing.
1673                                  */
1674                                 if (state->bounded && state->current >= state->bound)
1675                                         elog(ERROR, "retrieved too many tuples in a bounded sort");
1676
1677                                 return false;
1678                         }
1679                         else
1680                         {
1681                                 if (state->current <= 0)
1682                                         return false;
1683
1684                                 /*
1685                                  * If all tuples have already been fetched, return the last
1686                                  * tuple; otherwise, return the tuple before the last one returned.
1687                                  */
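                                /*
                                 * Worked example (illustrative): with memtupcount == 5, a
                                 * completed forward scan leaves current == 5 with eof_reached
                                 * set.  The first backward call then returns memtuples[4]
                                 * (the tuple most recently returned going forward), the next
                                 * call returns memtuples[3], and so on.
                                 */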
1688                                 if (state->eof_reached)
1689                                         state->eof_reached = false;
1690                                 else
1691                                 {
1692                                         state->current--;       /* last returned tuple */
1693                                         if (state->current <= 0)
1694                                                 return false;
1695                                 }
1696                                 *stup = state->memtuples[state->current - 1];
1697                                 return true;
1698                         }
1699                         break;
1700
1701                 case TSS_SORTEDONTAPE:
1702                         Assert(forward || state->randomAccess);
1703                         *should_free = true;
1704                         if (forward)
1705                         {
1706                                 if (state->eof_reached)
1707                                         return false;
1708                                 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1709                                 {
1710                                         READTUP(state, stup, state->result_tape, tuplen);
1711                                         return true;
1712                                 }
1713                                 else
1714                                 {
1715                                         state->eof_reached = true;
1716                                         return false;
1717                                 }
1718                         }
1719
1720                         /*
1721                          * Backward.
1722                          *
1723                          * If all tuples have already been fetched, return the last tuple;
1724                          * otherwise, return the tuple before the last one returned.
1725                          */
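                        /*
                         * On-tape record layout assumed here (as written by WRITETUP when
                         * randomAccess is true): a leading length word, the tuple body,
                         * and a trailing copy of the length word, with a zero length word
                         * marking end of run.  That layout is what lets the code below
                         * locate the previous tuple by backing up over length words alone.
                         */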
1726                         if (state->eof_reached)
1727                         {
1728                                 /*
1729                                  * Seek position is pointing just past the zero tuplen at the
1730                                  * end of file; back up to fetch last tuple's ending length
1731                                  * word.  If seek fails we must have a completely empty file.
1732                                  */
1733                                 if (!LogicalTapeBackspace(state->tapeset,
1734                                                                                   state->result_tape,
1735                                                                                   2 * sizeof(unsigned int)))
1736                                         return false;
1737                                 state->eof_reached = false;
1738                         }
1739                         else
1740                         {
1741                                 /*
1742                                  * Back up and fetch previously-returned tuple's ending length
1743                                  * word.  If seek fails, assume we are at start of file.
1744                                  */
1745                                 if (!LogicalTapeBackspace(state->tapeset,
1746                                                                                   state->result_tape,
1747                                                                                   sizeof(unsigned int)))
1748                                         return false;
1749                                 tuplen = getlen(state, state->result_tape, false);
1750
1751                                 /*
1752                                  * Back up to get ending length word of tuple before it.
1753                                  */
1754                                 if (!LogicalTapeBackspace(state->tapeset,
1755                                                                                   state->result_tape,
1756                                                                                   tuplen + 2 * sizeof(unsigned int)))
1757                                 {
1758                                         /*
1759                                          * If that fails, presumably the prev tuple is the first
1760                                          * in the file.  Back up so that it becomes the next tuple
1761                                          * to read in the forward direction (not obviously right,
1762                                          * but that is what the in-memory case does).
1763                                          */
1764                                         if (!LogicalTapeBackspace(state->tapeset,
1765                                                                                           state->result_tape,
1766                                                                                           tuplen + sizeof(unsigned int)))
1767                                                 elog(ERROR, "bogus tuple length in backward scan");
1768                                         return false;
1769                                 }
1770                         }
1771
1772                         tuplen = getlen(state, state->result_tape, false);
1773
1774                         /*
1775                          * Now we have the length of the prior tuple, back up and read it.
1776                          * Note: READTUP expects we are positioned after the initial
1777                          * length word of the tuple, so back up to that point.
1778                          */
1779                         if (!LogicalTapeBackspace(state->tapeset,
1780                                                                           state->result_tape,
1781                                                                           tuplen))
1782                                 elog(ERROR, "bogus tuple length in backward scan");
1783                         READTUP(state, stup, state->result_tape, tuplen);
1784                         return true;
1785
1786                 case TSS_FINALMERGE:
1787                         Assert(forward);
1788                         *should_free = true;
1789
1790                         /*
1791                          * This code should match the inner loop of mergeonerun().
1792                          */
1793                         if (state->memtupcount > 0)
1794                         {
1795                                 int                     srcTape = state->memtuples[0].tupindex;
1796                                 Size            tuplen;
1797                                 int                     tupIndex;
1798                                 SortTuple  *newtup;
1799
1800                                 *stup = state->memtuples[0];
1801                                 /* returned tuple is no longer counted in our memory space */
1802                                 if (stup->tuple)
1803                                 {
1804                                         tuplen = GetMemoryChunkSpace(stup->tuple);
1805                                         state->availMem += tuplen;
1806                                         state->mergeavailmem[srcTape] += tuplen;
1807                                 }
1808                                 tuplesort_heap_siftup(state, false);
1809                                 if ((tupIndex = state->mergenext[srcTape]) == 0)
1810                                 {
1811                                         /*
1812                                          * out of preloaded data on this tape, try to read more
1813                                          *
1814                                          * Unlike mergeonerun(), we only preload from the single
1815                                          * tape that's run dry.  See mergepreread() comments.
1816                                          */
1817                                         mergeprereadone(state, srcTape);
1818
1819                                         /*
1820                                          * if still no data, we've reached end of run on this tape
1821                                          */
1822                                         if ((tupIndex = state->mergenext[srcTape]) == 0)
1823                                                 return true;
1824                                 }
1825                                 /* pull next preread tuple from list, insert in heap */
1826                                 newtup = &state->memtuples[tupIndex];
1827                                 state->mergenext[srcTape] = newtup->tupindex;
1828                                 if (state->mergenext[srcTape] == 0)
1829                                         state->mergelast[srcTape] = 0;
1830                                 tuplesort_heap_insert(state, newtup, srcTape, false);
1831                                 /* put the now-unused memtuples entry on the freelist */
1832                                 newtup->tupindex = state->mergefreelist;
1833                                 state->mergefreelist = tupIndex;
1834                                 state->mergeavailslots[srcTape]++;
1835                                 return true;
1836                         }
1837                         return false;
1838
1839                 default:
1840                         elog(ERROR, "invalid tuplesort state");
1841                         return false;           /* keep compiler quiet */
1842         }
1843 }
1844
1845 /*
1846  * Fetch the next tuple in either forward or back direction.
1847  * If successful, put tuple in slot and return TRUE; else, clear the slot
1848  * and return FALSE.
1849  */
1850 bool
1851 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
1852                                            TupleTableSlot *slot)
1853 {
1854         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1855         SortTuple       stup;
1856         bool            should_free;
1857
1858         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
1859                 stup.tuple = NULL;
1860
1861         MemoryContextSwitchTo(oldcontext);
1862
1863         if (stup.tuple)
1864         {
1865                 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
1866                 return true;
1867         }
1868         else
1869         {
1870                 ExecClearTuple(slot);
1871                 return false;
1872         }
1873 }
1874
1875 /*
1876  * Fetch the next tuple in either forward or back direction.
1877  * Returns NULL if no more tuples.  If *should_free is set, the
1878  * caller must pfree the returned tuple when done with it.
1879  */
1880 HeapTuple
1881 tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
1882 {
1883         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1884         SortTuple       stup;
1885
1886         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
1887                 stup.tuple = NULL;
1888
1889         MemoryContextSwitchTo(oldcontext);
1890
1891         return stup.tuple;
1892 }
1893
1894 /*
1895  * Fetch the next index tuple in either forward or back direction.
1896  * Returns NULL if no more tuples.  If *should_free is set, the
1897  * caller must pfree the returned tuple when done with it.
1898  */
1899 IndexTuple
1900 tuplesort_getindextuple(Tuplesortstate *state, bool forward,
1901                                                 bool *should_free)
1902 {
1903         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1904         SortTuple       stup;
1905
1906         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
1907                 stup.tuple = NULL;
1908
1909         MemoryContextSwitchTo(oldcontext);
1910
1911         return (IndexTuple) stup.tuple;
1912 }
1913
1914 /*
1915  * Fetch the next Datum in either forward or back direction.
1916  * Returns FALSE if no more datums.
1917  *
1918  * If the Datum is of a pass-by-reference type, the returned value is freshly
1919  * palloc'd and is now owned by the caller.
1920  */
1921 bool
1922 tuplesort_getdatum(Tuplesortstate *state, bool forward,
1923                                    Datum *val, bool *isNull)
1924 {
1925         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1926         SortTuple       stup;
1927         bool            should_free;
1928
1929         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
1930         {
1931                 MemoryContextSwitchTo(oldcontext);
1932                 return false;
1933         }
1934
1935         if (stup.isnull1 || state->datumTypeByVal)
1936         {
1937                 *val = stup.datum1;
1938                 *isNull = stup.isnull1;
1939         }
1940         else
1941         {
1942                 /* use stup.tuple because stup.datum1 may be an abbreviation */
1943
1944                 if (should_free)
1945                         *val = PointerGetDatum(stup.tuple);
1946                 else
1947                         *val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen);
1948                 *isNull = false;
1949         }
1950
1951         MemoryContextSwitchTo(oldcontext);
1952
1953         return true;
1954 }
1955
1956 /*
1957  * Advance over N tuples in either forward or back direction,
1958  * without returning any data.  N==0 is a no-op.
1959  * Returns TRUE if successful, FALSE if ran out of tuples.
1960  */
1961 bool
1962 tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
1963 {
1964         MemoryContext oldcontext;
1965
1966         /*
1967          * We don't actually support backwards skip yet, because no callers need
1968          * it.  The API is designed to allow for that later, though.
1969          */
1970         Assert(forward);
1971         Assert(ntuples >= 0);
1972
1973         switch (state->status)
1974         {
1975                 case TSS_SORTEDINMEM:
1976                         if (state->memtupcount - state->current >= ntuples)
1977                         {
1978                                 state->current += ntuples;
1979                                 return true;
1980                         }
1981                         state->current = state->memtupcount;
1982                         state->eof_reached = true;
1983
1984                         /*
1985                          * Complain if caller tries to retrieve more tuples than
1986                          * originally asked for in a bounded sort.  This is because
1987                          * returning EOF here might be the wrong thing.
1988                          */
1989                         if (state->bounded && state->current >= state->bound)
1990                                 elog(ERROR, "retrieved too many tuples in a bounded sort");
1991
1992                         return false;
1993
1994                 case TSS_SORTEDONTAPE:
1995                 case TSS_FINALMERGE:
1996
1997                         /*
1998                          * We could probably optimize these cases better, but for now it's
1999                          * not worth the trouble.
2000                          */
2001                         oldcontext = MemoryContextSwitchTo(state->sortcontext);
2002                         while (ntuples-- > 0)
2003                         {
2004                                 SortTuple       stup;
2005                                 bool            should_free;
2006
2007                                 if (!tuplesort_gettuple_common(state, forward,
2008                                                                                            &stup, &should_free))
2009                                 {
2010                                         MemoryContextSwitchTo(oldcontext);
2011                                         return false;
2012                                 }
2013                                 if (should_free && stup.tuple)
2014                                         pfree(stup.tuple);
2015                                 CHECK_FOR_INTERRUPTS();
2016                         }
2017                         MemoryContextSwitchTo(oldcontext);
2018                         return true;
2019
2020                 default:
2021                         elog(ERROR, "invalid tuplesort state");
2022                         return false;           /* keep compiler quiet */
2023         }
2024 }
2025
2026 /*
2027  * tuplesort_merge_order - report merge order we'll use for given memory
2028  * (note: "merge order" just means the number of input tapes in the merge).
2029  *
2030  * This is exported for use by the planner.  allowedMem is in bytes.
2031  */
2032 int
2033 tuplesort_merge_order(int64 allowedMem)
2034 {
2035         int                     mOrder;
2036
2037         /*
2038          * We need one tape for each merge input, plus another one for the output,
2039          * and each of these tapes needs buffer space.  In addition we want
2040          * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
2041          * count).
2042          *
2043          * Note: you might be thinking we need to account for the memtuples[]
2044          * array in this calculation, but we effectively treat that as part of the
2045          * MERGE_BUFFER_SIZE workspace.
2046          */
2047         mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
2048                 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
2049
2050         /* Even in minimum memory, use at least a MINORDER merge */
2051         mOrder = Max(mOrder, MINORDER);
2052
2053         return mOrder;
2054 }
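/*
 * Worked example (the constants here are purely illustrative, not the actual
 * values of TAPE_BUFFER_OVERHEAD or MERGE_BUFFER_SIZE): with 32kB of tape
 * buffer overhead and 256kB of merge workspace per input tape, 4MB of
 * allowedMem yields mOrder = (4096kB - 32kB) / (256kB + 32kB) = 14, i.e. a
 * 14-way merge; the result is never allowed to fall below MINORDER.
 */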
2055
2056 /*
2057  * inittapes - initialize for tape sorting.
2058  *
2059  * This is called only if we have found we don't have room to sort in memory.
2060  */
2061 static void
2062 inittapes(Tuplesortstate *state)
2063 {
2064         int                     maxTapes,
2065                                 ntuples,
2066                                 j;
2067         int64           tapeSpace;
2068
2069         /* Compute number of tapes to use: merge order plus 1 */
2070         maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
2071
2072         /*
2073          * We must have at least 2*maxTapes slots in the memtuples[] array, else
2074          * we'd not have room for merge heap plus preread.  It seems unlikely that
2075          * this case would ever occur, but be safe.
2076          */
2077         maxTapes = Min(maxTapes, state->memtupsize / 2);
2078
2079         state->maxTapes = maxTapes;
2080         state->tapeRange = maxTapes - 1;
2081
2082 #ifdef TRACE_SORT
2083         if (trace_sort)
2084                 elog(LOG, "switching to external sort with %d tapes: %s",
2085                          maxTapes, pg_rusage_show(&state->ru_start));
2086 #endif
2087
2088         /*
2089          * Decrease availMem to reflect the space needed for tape buffers; but
2090          * don't decrease it to the point that we have no room for tuples. (That
2091          * case is only likely to occur if sorting pass-by-value Datums; in all
2092          * other scenarios the memtuples[] array is unlikely to occupy more than
2093          * half of allowedMem.  In the pass-by-value case it's not important to
2094          * account for tuple space, so we don't care if LACKMEM becomes
2095          * inaccurate.)
2096          */
2097         tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
2098
2099         if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
2100                 USEMEM(state, tapeSpace);
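        /*
         * Illustrative: with 7 tapes and a hypothetical 32kB of buffer overhead
         * per tape, 224kB would be charged against availMem here, but only if
         * that, together with the space already used by the memtuples[] array,
         * still fits within allowedMem.
         */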
2101
2102         /*
2103          * Make sure that the temp file(s) underlying the tape set are created in
2104          * suitable temp tablespaces.
2105          */
2106         PrepareTempTablespaces();
2107
2108         /*
2109          * Create the tape set and allocate the per-tape data arrays.
2110          */
2111         state->tapeset = LogicalTapeSetCreate(maxTapes);
2112
2113         state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
2114         state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
2115         state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
2116         state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
2117         state->mergeavailmem = (int64 *) palloc0(maxTapes * sizeof(int64));
2118         state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
2119         state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
2120         state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
2121         state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
2122
2123         /*
2124          * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
2125          * marked as belonging to run number zero.
2126          *
2127          * NOTE: we pass false for checkIndex since there's no point in comparing
2128          * indexes in this step, even though we do intend the indexes to be part
2129          * of the sort key...
2130          */
2131         ntuples = state->memtupcount;
2132         state->memtupcount = 0;         /* make the heap empty */
2133         for (j = 0; j < ntuples; j++)
2134         {
2135                 /* Must copy source tuple to avoid possible overwrite */
2136                 SortTuple       stup = state->memtuples[j];
2137
2138                 tuplesort_heap_insert(state, &stup, 0, false);
2139         }
2140         Assert(state->memtupcount == ntuples);
2141
2142         state->currentRun = 0;
2143
2144         /*
2145          * Initialize variables of Algorithm D (step D1).
2146          */
2147         for (j = 0; j < maxTapes; j++)
2148         {
2149                 state->tp_fib[j] = 1;
2150                 state->tp_runs[j] = 0;
2151                 state->tp_dummy[j] = 1;
2152                 state->tp_tapenum[j] = j;
2153         }
2154         state->tp_fib[state->tapeRange] = 0;
2155         state->tp_dummy[state->tapeRange] = 0;
2156
2157         state->Level = 1;
2158         state->destTape = 0;
2159
2160         state->status = TSS_BUILDRUNS;
2161 }
2162
2163 /*
2164  * selectnewtape -- select new tape for new initial run.
2165  *
2166  * This is called after finishing a run when we know another run
2167  * must be started.  This implements steps D3, D4 of Algorithm D.
2168  */
2169 static void
2170 selectnewtape(Tuplesortstate *state)
2171 {
2172         int                     j;
2173         int                     a;
2174
2175         /* Step D3: advance j (destTape) */
2176         if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
2177         {
2178                 state->destTape++;
2179                 return;
2180         }
2181         if (state->tp_dummy[state->destTape] != 0)
2182         {
2183                 state->destTape = 0;
2184                 return;
2185         }
2186
2187         /* Step D4: increase level */
2188         state->Level++;
2189         a = state->tp_fib[0];
2190         for (j = 0; j < state->tapeRange; j++)
2191         {
2192                 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
2193                 state->tp_fib[j] = a + state->tp_fib[j + 1];
2194         }
2195         state->destTape = 0;
2196 }
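/*
 * Illustrative distribution (derived from the tp_fib update above): with
 * three input tapes the target run counts per level follow the polyphase
 * "generalized Fibonacci" sequence 1:1:1, 2:2:1, 4:3:2, 7:6:4, ...; dummy
 * runs make up the difference whenever the real runs produced so far fall
 * short of the current level's targets.
 */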
2197
2198 /*
2199  * mergeruns -- merge all the completed initial runs.
2200  *
2201  * This implements steps D5, D6 of Algorithm D.  All input data has
2202  * already been written to initial runs on tape (see dumptuples).
2203  */
2204 static void
2205 mergeruns(Tuplesortstate *state)
2206 {
2207         int                     tapenum,
2208                                 svTape,
2209                                 svRuns,
2210                                 svDummy;
2211
2212         Assert(state->status == TSS_BUILDRUNS);
2213         Assert(state->memtupcount == 0);
2214
2215         /*
2216          * If we produced only one initial run (quite likely if the total data
2217          * volume is between 1X and 2X workMem), we can just use that tape as the
2218          * finished output, rather than doing a useless merge.  (This obvious
2219          * optimization is not in Knuth's algorithm.)
2220          */
2221         if (state->currentRun == 1)
2222         {
2223                 state->result_tape = state->tp_tapenum[state->destTape];
2224                 /* must freeze and rewind the finished output tape */
2225                 LogicalTapeFreeze(state->tapeset, state->result_tape);
2226                 state->status = TSS_SORTEDONTAPE;
2227                 return;
2228         }
2229
2230         if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
2231         {
2232                 /*
2233                  * If there are multiple runs to be merged, when we go to read back
2234                  * tuples from disk, abbreviated keys will not have been stored, and we
2235                  * don't care to regenerate them.  Disable abbreviation from this point
2236                  * on.
2237                  */
2238                 state->sortKeys->abbrev_converter = NULL;
2239                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
2240
2241                 /* Not strictly necessary, but be tidy */
2242                 state->sortKeys->abbrev_abort = NULL;
2243                 state->sortKeys->abbrev_full_comparator = NULL;
2244         }
2245
2246         /* End of step D2: rewind all output tapes to prepare for merging */
2247         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2248                 LogicalTapeRewind(state->tapeset, tapenum, false);
2249
2250         for (;;)
2251         {
2252                 /*
2253                  * At this point we know that tape[T] is empty.  If there's just one
2254                  * (real or dummy) run left on each input tape, then only one merge
2255                  * pass remains.  If we don't have to produce a materialized sorted
2256                  * tape, we can stop at this point and do the final merge on-the-fly.
2257                  */
2258                 if (!state->randomAccess)
2259                 {
2260                         bool            allOneRun = true;
2261
2262                         Assert(state->tp_runs[state->tapeRange] == 0);
2263                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2264                         {
2265                                 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
2266                                 {
2267                                         allOneRun = false;
2268                                         break;
2269                                 }
2270                         }
2271                         if (allOneRun)
2272                         {
2273                                 /* Tell logtape.c we won't be writing anymore */
2274                                 LogicalTapeSetForgetFreeSpace(state->tapeset);
2275                                 /* Initialize for the final merge pass */
2276                                 beginmerge(state);
2277                                 state->status = TSS_FINALMERGE;
2278                                 return;
2279                         }
2280                 }
2281
2282                 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
2283                 while (state->tp_runs[state->tapeRange - 1] ||
2284                            state->tp_dummy[state->tapeRange - 1])
2285                 {
2286                         bool            allDummy = true;
2287
2288                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2289                         {
2290                                 if (state->tp_dummy[tapenum] == 0)
2291                                 {
2292                                         allDummy = false;
2293                                         break;
2294                                 }
2295                         }
2296
2297                         if (allDummy)
2298                         {
2299                                 state->tp_dummy[state->tapeRange]++;
2300                                 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2301                                         state->tp_dummy[tapenum]--;
2302                         }
2303                         else
2304                                 mergeonerun(state);
2305                 }
2306
2307                 /* Step D6: decrease level */
2308                 if (--state->Level == 0)
2309                         break;
2310                 /* rewind output tape T to use as new input */
2311                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange],
2312                                                   false);
2313                 /* rewind used-up input tape P, and prepare it for write pass */
2314                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1],
2315                                                   true);
2316                 state->tp_runs[state->tapeRange - 1] = 0;
2317
2318                 /*
2319                  * reassign tape units per step D6; note we no longer care about A[]
2320                  */
2321                 svTape = state->tp_tapenum[state->tapeRange];
2322                 svDummy = state->tp_dummy[state->tapeRange];
2323                 svRuns = state->tp_runs[state->tapeRange];
2324                 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
2325                 {
2326                         state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
2327                         state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
2328                         state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
2329                 }
2330                 state->tp_tapenum[0] = svTape;
2331                 state->tp_dummy[0] = svDummy;
2332                 state->tp_runs[0] = svRuns;
2333         }
2334
2335         /*
2336          * Done.  Knuth says that the result is on TAPE[1], but since we exited
2337          * the loop without performing the last iteration of step D6, we have not
2338          * rearranged the tape unit assignment, and therefore the result is on
2339          * TAPE[T].  We need to do it this way so that we can freeze the final
2340          * output tape while rewinding it.  The last iteration of step D6 would be
2341          * a waste of cycles anyway...
2342          */
2343         state->result_tape = state->tp_tapenum[state->tapeRange];
2344         LogicalTapeFreeze(state->tapeset, state->result_tape);
2345         state->status = TSS_SORTEDONTAPE;
2346 }
2347
2348 /*
2349  * Merge one run from each input tape, except ones with dummy runs.
2350  *
2351  * This is the inner loop of Algorithm D step D5.  We know that the
2352  * output tape is TAPE[T].
2353  */
2354 static void
2355 mergeonerun(Tuplesortstate *state)
2356 {
2357         int                     destTape = state->tp_tapenum[state->tapeRange];
2358         int                     srcTape;
2359         int                     tupIndex;
2360         SortTuple  *tup;
2361         int64           priorAvail,
2362                                 spaceFreed;
2363
2364         /*
2365          * Start the merge by loading one tuple from each active source tape into
2366          * the heap.  We can also decrease the input run/dummy run counts.
2367          */
2368         beginmerge(state);
2369
2370         /*
2371          * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2372          * out, and replacing it with next tuple from same tape (if there is
2373          * another one).
2374          */
2375         while (state->memtupcount > 0)
2376         {
2377                 /* write the tuple to destTape */
2378                 priorAvail = state->availMem;
2379                 srcTape = state->memtuples[0].tupindex;
2380                 WRITETUP(state, destTape, &state->memtuples[0]);
2381                 /* writetup adjusted total free space, now fix per-tape space */
2382                 spaceFreed = state->availMem - priorAvail;
2383                 state->mergeavailmem[srcTape] += spaceFreed;
2384                 /* compact the heap */
2385                 tuplesort_heap_siftup(state, false);
2386                 if ((tupIndex = state->mergenext[srcTape]) == 0)
2387                 {
2388                         /* out of preloaded data on this tape, try to read more */
2389                         mergepreread(state);
2390                         /* if still no data, we've reached end of run on this tape */
2391                         if ((tupIndex = state->mergenext[srcTape]) == 0)
2392                                 continue;
2393                 }
2394                 /* pull next preread tuple from list, insert in heap */
2395                 tup = &state->memtuples[tupIndex];
2396                 state->mergenext[srcTape] = tup->tupindex;
2397                 if (state->mergenext[srcTape] == 0)
2398                         state->mergelast[srcTape] = 0;
2399                 tuplesort_heap_insert(state, tup, srcTape, false);
2400                 /* put the now-unused memtuples entry on the freelist */
2401                 tup->tupindex = state->mergefreelist;
2402                 state->mergefreelist = tupIndex;
2403                 state->mergeavailslots[srcTape]++;
2404         }
2405
2406         /*
2407          * When the heap empties, we're done.  Write an end-of-run marker on the
2408          * output tape, and increment its count of real runs.
2409          */
2410         markrunend(state, destTape);
2411         state->tp_runs[state->tapeRange]++;
2412
2413 #ifdef TRACE_SORT
2414         if (trace_sort)
2415                 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
2416                          pg_rusage_show(&state->ru_start));
2417 #endif
2418 }
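
/*
 * Illustrative aside, compiled out by default (TUPLESORT_EXAMPLES is never
 * defined): the loop above is the classic k-way merge.  A self-contained
 * sketch over in-memory int arrays, with no tapes, prereading, or memory
 * accounting, could look like this.  The names ExampleRun and
 * kway_merge_example are invented for the sketch; a linear scan stands in
 * for the binary heap that the real code maintains with the routines
 * further down in this file.
 */
#ifdef TUPLESORT_EXAMPLES

typedef struct ExampleRun
{
	const int  *vals;			/* the run's values, in ascending order */
	int			nvals;			/* number of values in the run */
	int			next;			/* next unread position */
} ExampleRun;

/* Merge nruns sorted runs into dst[]; returns the number of values written */
static int
kway_merge_example(ExampleRun *runs, int nruns, int *dst)
{
	int			nout = 0;

	for (;;)
	{
		int			best = -1;
		int			i;

		/* find the source whose next value is smallest */
		for (i = 0; i < nruns; i++)
		{
			if (runs[i].next >= runs[i].nvals)
				continue;		/* this run is exhausted */
			if (best < 0 ||
				runs[i].vals[runs[i].next] < runs[best].vals[runs[best].next])
				best = i;
		}
		if (best < 0)
			break;				/* every run is exhausted */
		/* emit it and advance that run, exactly as the loop above does */
		dst[nout++] = runs[best].vals[runs[best].next++];
	}
	return nout;
}
#endif   /* TUPLESORT_EXAMPLES */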
2419
2420 /*
2421  * beginmerge - initialize for a merge pass
2422  *
2423  * We decrease the counts of real and dummy runs for each tape, and mark
2424  * which tapes contain active input runs in mergeactive[].  Then, load
2425  * as many tuples as we can from each active input tape, and finally
2426  * fill the merge heap with the first tuple from each active tape.
2427  */
2428 static void
2429 beginmerge(Tuplesortstate *state)
2430 {
2431         int                     activeTapes;
2432         int                     tapenum;
2433         int                     srcTape;
2434         int                     slotsPerTape;
2435         int64           spacePerTape;
2436
2437         /* Heap should be empty here */
2438         Assert(state->memtupcount == 0);
2439
2440         /* Adjust run counts and mark the active tapes */
2441         memset(state->mergeactive, 0,
2442                    state->maxTapes * sizeof(*state->mergeactive));
2443         activeTapes = 0;
2444         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2445         {
2446                 if (state->tp_dummy[tapenum] > 0)
2447                         state->tp_dummy[tapenum]--;
2448                 else
2449                 {
2450                         Assert(state->tp_runs[tapenum] > 0);
2451                         state->tp_runs[tapenum]--;
2452                         srcTape = state->tp_tapenum[tapenum];
2453                         state->mergeactive[srcTape] = true;
2454                         activeTapes++;
2455                 }
2456         }
2457         state->activeTapes = activeTapes;
2458
2459         /* Clear merge-pass state variables */
2460         memset(state->mergenext, 0,
2461                    state->maxTapes * sizeof(*state->mergenext));
2462         memset(state->mergelast, 0,
2463                    state->maxTapes * sizeof(*state->mergelast));
2464         state->mergefreelist = 0;       /* nothing in the freelist */
2465         state->mergefirstfree = activeTapes;            /* 1st slot avail for preread */
2466
2467         /*
2468          * Initialize space allocation to let each active input tape have an equal
2469          * share of preread space.
2470          */
2471         Assert(activeTapes > 0);
2472         slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
2473         Assert(slotsPerTape > 0);
2474         spacePerTape = state->availMem / activeTapes;
2475         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2476         {
2477                 if (state->mergeactive[srcTape])
2478                 {
2479                         state->mergeavailslots[srcTape] = slotsPerTape;
2480                         state->mergeavailmem[srcTape] = spacePerTape;
2481                 }
2482         }
2483
2484         /*
2485          * Preread as many tuples as possible (and at least one) from each active
2486          * tape
2487          */
2488         mergepreread(state);
2489
2490         /* Load the merge heap with the first tuple from each input tape */
2491         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2492         {
2493                 int                     tupIndex = state->mergenext[srcTape];
2494                 SortTuple  *tup;
2495
2496                 if (tupIndex)
2497                 {
2498                         tup = &state->memtuples[tupIndex];
2499                         state->mergenext[srcTape] = tup->tupindex;
2500                         if (state->mergenext[srcTape] == 0)
2501                                 state->mergelast[srcTape] = 0;
2502                         tuplesort_heap_insert(state, tup, srcTape, false);
2503                         /* put the now-unused memtuples entry on the freelist */
2504                         tup->tupindex = state->mergefreelist;
2505                         state->mergefreelist = tupIndex;
2506                         state->mergeavailslots[srcTape]++;
2507                 }
2508         }
2509 }
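
/*
 * Worked example of the fair-share division above (illustrative numbers
 * only): with memtupsize = 1024, six active tapes, and mergefirstfree = 6,
 * each active tape is given (1024 - 6) / 6 = 169 preread slots and
 * availMem / 6 bytes of preread memory.
 */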
2510
2511 /*
2512  * mergepreread - load tuples from merge input tapes
2513  *
2514  * This routine exists to improve sequentiality of reads during a merge pass,
2515  * as explained in the header comments of this file.  Load tuples from each
2516  * active source tape until the tape's run is exhausted or it has used up
2517  * its fair share of available memory.  In any case, we guarantee that there
2518  * is at least one preread tuple available from each unexhausted input tape.
2519  *
2520  * We invoke this routine at the start of a merge pass for initial load,
2521  * and then whenever any tape's preread data runs out.  Note that we load
2522  * as much data as possible from all tapes, not just the one that ran out.
2523  * This is because logtape.c works best with a usage pattern that alternates
2524  * between reading a lot of data and writing a lot of data, so whenever we
2525  * are forced to read, we should fill working memory completely.
2526  *
2527  * In FINALMERGE state, we *don't* use this routine, but instead just preread
2528  * from the single tape that ran dry.  There's no read/write alternation in
2529  * that state and so no point in scanning through all the tapes to fix one.
2530  * (Moreover, there may be quite a lot of inactive tapes in that state, since
2531  * we might have had many fewer runs than tapes.  In a regular tape-to-tape
2532  * merge we can expect most of the tapes to be active.)
2533  */
2534 static void
2535 mergepreread(Tuplesortstate *state)
2536 {
2537         int                     srcTape;
2538
2539         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2540                 mergeprereadone(state, srcTape);
2541 }
2542
2543 /*
2544  * mergeprereadone - load tuples from one merge input tape
2545  *
2546  * Read tuples from the specified tape until it has used up its free memory
2547  * or array slots; but ensure that we have at least one tuple, if any are
2548  * to be had.
2549  */
2550 static void
2551 mergeprereadone(Tuplesortstate *state, int srcTape)
2552 {
2553         unsigned int tuplen;
2554         SortTuple       stup;
2555         int                     tupIndex;
2556         int64           priorAvail,
2557                                 spaceUsed;
2558
2559         if (!state->mergeactive[srcTape])
2560                 return;                                 /* tape's run is already exhausted */
2561         priorAvail = state->availMem;
2562         state->availMem = state->mergeavailmem[srcTape];
2563         while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) ||
2564                    state->mergenext[srcTape] == 0)
2565         {
2566                 /* read next tuple, if any */
2567                 if ((tuplen = getlen(state, srcTape, true)) == 0)
2568                 {
2569                         state->mergeactive[srcTape] = false;
2570                         break;
2571                 }
2572                 READTUP(state, &stup, srcTape, tuplen);
2573                 /* find a free slot in memtuples[] for it */
2574                 tupIndex = state->mergefreelist;
2575                 if (tupIndex)
2576                         state->mergefreelist = state->memtuples[tupIndex].tupindex;
2577                 else
2578                 {
2579                         tupIndex = state->mergefirstfree++;
2580                         Assert(tupIndex < state->memtupsize);
2581                 }
2582                 state->mergeavailslots[srcTape]--;
2583                 /* store tuple, append to list for its tape */
2584                 stup.tupindex = 0;
2585                 state->memtuples[tupIndex] = stup;
2586                 if (state->mergelast[srcTape])
2587                         state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
2588                 else
2589                         state->mergenext[srcTape] = tupIndex;
2590                 state->mergelast[srcTape] = tupIndex;
2591         }
2592         /* update per-tape and global availmem counts */
2593         spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
2594         state->mergeavailmem[srcTape] = state->availMem;
2595         state->availMem = priorAvail - spaceUsed;
2596 }
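
/*
 * Illustrative aside, compiled out by default (TUPLESORT_EXAMPLES is never
 * defined): the mergenext/mergelast/mergefreelist fields manipulated above
 * are singly-linked lists threaded through memtuples[] by array index, with
 * 0 meaning "end of list" (slot 0 is never handed out as a preread slot).
 * A stripped-down sketch of the same slot-allocation technique follows; the
 * names are invented for the example.
 */
#ifdef TUPLESORT_EXAMPLES

typedef struct ExampleSlot
{
	int			value;
	int			nextindex;		/* index of next slot in its list, or 0 */
} ExampleSlot;

static ExampleSlot example_slots[64];
static int	example_freelist = 0;	/* head of freelist, 0 = empty */
static int	example_firstfree = 1;	/* first never-used slot (0 is reserved) */

/* Grab a slot, preferring recycled ones */
static int
example_slot_alloc(void)
{
	int			slot;

	if (example_freelist != 0)
	{
		slot = example_freelist;
		example_freelist = example_slots[slot].nextindex;
	}
	else
		slot = example_firstfree++;
	return slot;
}

/* Return a slot to the freelist */
static void
example_slot_release(int slot)
{
	example_slots[slot].nextindex = example_freelist;
	example_freelist = slot;
}
#endif   /* TUPLESORT_EXAMPLES */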
2597
2598 /*
2599  * dumptuples - remove tuples from heap and write to tape
2600  *
2601  * This is used during initial-run building, but not during merging.
2602  *
2603  * When alltuples = false, dump only enough tuples to get under the
2604  * availMem limit (and leave at least one tuple in the heap in any case,
2605  * since puttuple assumes it always has a tuple to compare to).  We also
2606  * insist there be at least one free slot in the memtuples[] array.
2607  *
2608  * When alltuples = true, dump everything currently in memory.
2609  * (This case is only used at end of input data.)
2610  *
2611  * If we empty the heap, close out the current run and return (this should
2612  * only happen at end of input data).  If we see that the tuple run number
2613  * at the top of the heap has changed, start a new run.
2614  */
2615 static void
2616 dumptuples(Tuplesortstate *state, bool alltuples)
2617 {
2618         while (alltuples ||
2619                    (LACKMEM(state) && state->memtupcount > 1) ||
2620                    state->memtupcount >= state->memtupsize)
2621         {
2622                 /*
2623                  * Dump the heap's frontmost entry, and sift up to remove it from the
2624                  * heap.
2625                  */
2626                 Assert(state->memtupcount > 0);
2627                 WRITETUP(state, state->tp_tapenum[state->destTape],
2628                                  &state->memtuples[0]);
2629                 tuplesort_heap_siftup(state, true);
2630
2631                 /*
2632                  * If the heap is empty *or* top run number has changed, we've
2633                  * finished the current run.
2634                  */
2635                 if (state->memtupcount == 0 ||
2636                         state->currentRun != state->memtuples[0].tupindex)
2637                 {
2638                         markrunend(state, state->tp_tapenum[state->destTape]);
2639                         state->currentRun++;
2640                         state->tp_runs[state->destTape]++;
2641                         state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
2642
2643 #ifdef TRACE_SORT
2644                         if (trace_sort)
2645                                 elog(LOG, "finished writing%s run %d to tape %d: %s",
2646                                          (state->memtupcount == 0) ? " final" : "",
2647                                          state->currentRun, state->destTape,
2648                                          pg_rusage_show(&state->ru_start));
2649 #endif
2650
2651                         /*
2652                          * Done if heap is empty, else prepare for new run.
2653                          */
2654                         if (state->memtupcount == 0)
2655                                 break;
2656                         Assert(state->currentRun == state->memtuples[0].tupindex);
2657                         selectnewtape(state);
2658                 }
2659         }
2660 }
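
/*
 * Illustrative aside, compiled out by default (TUPLESORT_EXAMPLES is never
 * defined): a minimal replacement-selection sketch over plain ints.  Values
 * are tagged with the run they must go to, the smallest (run, key) pair is
 * emitted next, and a value smaller than the one just emitted cannot extend
 * the current run and so is tagged with the next run, which is the rule
 * dumptuples() relies on above.  A linear scan and a callback stand in for
 * the heap and the tape; all names are invented for the example.
 */
#ifdef TUPLESORT_EXAMPLES

typedef void (*example_emit_fn) (int runno, int value);

static void
replacement_selection_example(const int *input, int ninput,
							  int capacity, example_emit_fn emit)
{
	int		   *vals = (int *) palloc(capacity * sizeof(int));
	int		   *runs = (int *) palloc(capacity * sizeof(int));
	int			nloaded = 0;
	int			nextin = 0;

	/* absorb input until working memory is full */
	while (nloaded < capacity && nextin < ninput)
	{
		vals[nloaded] = input[nextin++];
		runs[nloaded] = 1;
		nloaded++;
	}

	while (nloaded > 0)
	{
		int			best = 0;
		int			outval;
		int			outrun;
		int			i;

		/* the smallest (run, key) pair goes out next */
		for (i = 1; i < nloaded; i++)
		{
			if (runs[i] < runs[best] ||
				(runs[i] == runs[best] && vals[i] < vals[best]))
				best = i;
		}
		outval = vals[best];
		outrun = runs[best];
		emit(outrun, outval);

		if (nextin < ninput)
		{
			/* refill the vacated slot; too-small values start the next run */
			vals[best] = input[nextin++];
			runs[best] = (vals[best] >= outval) ? outrun : outrun + 1;
		}
		else
		{
			/* no more input: just shrink working memory */
			vals[best] = vals[nloaded - 1];
			runs[best] = runs[nloaded - 1];
			nloaded--;
		}
	}

	pfree(vals);
	pfree(runs);
}
#endif   /* TUPLESORT_EXAMPLES */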
2661
2662 /*
2663  * tuplesort_rescan             - rewind and replay the scan
2664  */
2665 void
2666 tuplesort_rescan(Tuplesortstate *state)
2667 {
2668         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2669
2670         Assert(state->randomAccess);
2671
2672         switch (state->status)
2673         {
2674                 case TSS_SORTEDINMEM:
2675                         state->current = 0;
2676                         state->eof_reached = false;
2677                         state->markpos_offset = 0;
2678                         state->markpos_eof = false;
2679                         break;
2680                 case TSS_SORTEDONTAPE:
2681                         LogicalTapeRewind(state->tapeset,
2682                                                           state->result_tape,
2683                                                           false);
2684                         state->eof_reached = false;
2685                         state->markpos_block = 0L;
2686                         state->markpos_offset = 0;
2687                         state->markpos_eof = false;
2688                         break;
2689                 default:
2690                         elog(ERROR, "invalid tuplesort state");
2691                         break;
2692         }
2693
2694         MemoryContextSwitchTo(oldcontext);
2695 }
2696
2697 /*
2698  * tuplesort_markpos    - saves current position in the merged sort file
2699  */
2700 void
2701 tuplesort_markpos(Tuplesortstate *state)
2702 {
2703         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2704
2705         Assert(state->randomAccess);
2706
2707         switch (state->status)
2708         {
2709                 case TSS_SORTEDINMEM:
2710                         state->markpos_offset = state->current;
2711                         state->markpos_eof = state->eof_reached;
2712                         break;
2713                 case TSS_SORTEDONTAPE:
2714                         LogicalTapeTell(state->tapeset,
2715                                                         state->result_tape,
2716                                                         &state->markpos_block,
2717                                                         &state->markpos_offset);
2718                         state->markpos_eof = state->eof_reached;
2719                         break;
2720                 default:
2721                         elog(ERROR, "invalid tuplesort state");
2722                         break;
2723         }
2724
2725         MemoryContextSwitchTo(oldcontext);
2726 }
2727
2728 /*
2729  * tuplesort_restorepos - restores current position in merged sort file to
2730  *                                                last saved position
2731  */
2732 void
2733 tuplesort_restorepos(Tuplesortstate *state)
2734 {
2735         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2736
2737         Assert(state->randomAccess);
2738
2739         switch (state->status)
2740         {
2741                 case TSS_SORTEDINMEM:
2742                         state->current = state->markpos_offset;
2743                         state->eof_reached = state->markpos_eof;
2744                         break;
2745                 case TSS_SORTEDONTAPE:
2746                         if (!LogicalTapeSeek(state->tapeset,
2747                                                                  state->result_tape,
2748                                                                  state->markpos_block,
2749                                                                  state->markpos_offset))
2750                                 elog(ERROR, "tuplesort_restorepos failed");
2751                         state->eof_reached = state->markpos_eof;
2752                         break;
2753                 default:
2754                         elog(ERROR, "invalid tuplesort state");
2755                         break;
2756         }
2757
2758         MemoryContextSwitchTo(oldcontext);
2759 }
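
/*
 * Hedged usage sketch, compiled out by default (TUPLESORT_EXAMPLES is never
 * defined): a caller that must re-read part of the sorted output, such as
 * the inner side of a merge join, uses the two routines above like this.
 * The sort must have been created with randomAccess = true, and the
 * commented placeholder stands in for whichever tuplesort_get* routine
 * matches the kind of sort being performed.
 */
#ifdef TUPLESORT_EXAMPLES
static void
example_rescan_from_mark(Tuplesortstate *sortstate)
{
	/* remember the current read position */
	tuplesort_markpos(sortstate);

	/* ... read forward from here as far as necessary ... */

	/* return to the remembered position and read the same tuples again */
	tuplesort_restorepos(sortstate);
}
#endif   /* TUPLESORT_EXAMPLES */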
2760
2761 /*
2762  * tuplesort_get_stats - extract summary statistics
2763  *
2764  * This can be called after tuplesort_performsort() finishes to obtain
2765  * printable summary information about how the sort was performed.
2766  * spaceUsed is measured in kilobytes.
2767  */
2768 void
2769 tuplesort_get_stats(Tuplesortstate *state,
2770                                         const char **sortMethod,
2771                                         const char **spaceType,
2772                                         long *spaceUsed)
2773 {
2774         /*
2775          * Note: it might seem we should provide both memory and disk usage for a
2776          * disk-based sort.  However, the current code doesn't track memory space
2777          * accurately once we have begun to return tuples to the caller (since we
2778          * don't account for pfree's the caller is expected to do), so we cannot
2779          * rely on availMem in a disk sort.  This does not seem worth the overhead
2780          * to fix.  Is it worth creating an API for the memory context code to
2781          * tell us how much is actually used in sortcontext?
2782          */
2783         if (state->tapeset)
2784         {
2785                 *spaceType = "Disk";
2786                 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
2787         }
2788         else
2789         {
2790                 *spaceType = "Memory";
2791                 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
2792         }
2793
2794         switch (state->status)
2795         {
2796                 case TSS_SORTEDINMEM:
2797                         if (state->boundUsed)
2798                                 *sortMethod = "top-N heapsort";
2799                         else
2800                                 *sortMethod = "quicksort";
2801                         break;
2802                 case TSS_SORTEDONTAPE:
2803                         *sortMethod = "external sort";
2804                         break;
2805                 case TSS_FINALMERGE:
2806                         *sortMethod = "external merge";
2807                         break;
2808                 default:
2809                         *sortMethod = "still in progress";
2810                         break;
2811         }
2812 }
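
/*
 * Hedged usage sketch, compiled out by default (TUPLESORT_EXAMPLES is never
 * defined): reporting the summary statistics above, roughly as EXPLAIN
 * ANALYZE presents them.  Assumes the sort has already been completed with
 * tuplesort_performsort().
 */
#ifdef TUPLESORT_EXAMPLES
static void
example_report_sort_stats(Tuplesortstate *sortstate)
{
	const char *sortMethod;
	const char *spaceType;
	long		spaceUsed;

	tuplesort_get_stats(sortstate, &sortMethod, &spaceType, &spaceUsed);
	elog(LOG, "Sort Method: %s  %s: %ldkB", sortMethod, spaceType, spaceUsed);
}
#endif   /* TUPLESORT_EXAMPLES */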
2813
2814
2815 /*
2816  * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
2817  *
2818  * Compare two SortTuples.  If checkIndex is true, use the tuple index
2819  * as the front of the sort key; otherwise compare on the sort key alone.
2820  */
2821
2822 #define HEAPCOMPARE(tup1,tup2) \
2823         (checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \
2824          ((tup1)->tupindex) - ((tup2)->tupindex) : \
2825          COMPARETUP(state, tup1, tup2))
2826
2827 /*
2828  * Convert the existing unordered array of SortTuples to a bounded heap,
2829  * discarding all but the smallest "state->bound" tuples.
2830  *
2831  * When working with a bounded heap, we want to keep the largest entry
2832  * at the root (array entry zero), instead of the smallest as in the normal
2833  * sort case.  This allows us to discard the largest entry cheaply.
2834  * Therefore, we temporarily reverse the sort direction.
2835  *
2836  * We assume that all entries in a bounded heap will always have tupindex
2837  * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
2838  * the direction of comparison for tupindexes.
2839  */
2840 static void
2841 make_bounded_heap(Tuplesortstate *state)
2842 {
2843         int                     tupcount = state->memtupcount;
2844         int                     i;
2845
2846         Assert(state->status == TSS_INITIAL);
2847         Assert(state->bounded);
2848         Assert(tupcount >= state->bound);
2849
2850         /* Reverse sort direction so largest entry will be at root */
2851         reversedirection(state);
2852
2853         state->memtupcount = 0;         /* make the heap empty */
2854         for (i = 0; i < tupcount; i++)
2855         {
2856                 if (state->memtupcount >= state->bound &&
2857                   COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
2858                 {
2859                         /* New tuple would just get thrown out, so skip it */
2860                         free_sort_tuple(state, &state->memtuples[i]);
2861                         CHECK_FOR_INTERRUPTS();
2862                 }
2863                 else
2864                 {
2865                         /* Insert next tuple into heap */
2866                         /* Must copy source tuple to avoid possible overwrite */
2867                         SortTuple       stup = state->memtuples[i];
2868
2869                         tuplesort_heap_insert(state, &stup, 0, false);
2870
2871                         /* If heap too full, discard largest entry */
2872                         if (state->memtupcount > state->bound)
2873                         {
2874                                 free_sort_tuple(state, &state->memtuples[0]);
2875                                 tuplesort_heap_siftup(state, false);
2876                         }
2877                 }
2878         }
2879
2880         Assert(state->memtupcount == state->bound);
2881         state->status = TSS_BOUNDED;
2882 }
2883
2884 /*
2885  * Convert the bounded heap to a properly-sorted array
2886  */
2887 static void
2888 sort_bounded_heap(Tuplesortstate *state)
2889 {
2890         int                     tupcount = state->memtupcount;
2891
2892         Assert(state->status == TSS_BOUNDED);
2893         Assert(state->bounded);
2894         Assert(tupcount == state->bound);
2895
2896         /*
2897          * We can unheapify in place because each sift-up will remove the largest
2898          * entry, which we can promptly store in the newly freed slot at the end.
2899          * Once we're down to a single-entry heap, we're done.
2900          */
2901         while (state->memtupcount > 1)
2902         {
2903                 SortTuple       stup = state->memtuples[0];
2904
2905                 /* this sifts-up the next-largest entry and decreases memtupcount */
2906                 tuplesort_heap_siftup(state, false);
2907                 state->memtuples[state->memtupcount] = stup;
2908         }
2909         state->memtupcount = tupcount;
2910
2911         /*
2912          * Reverse sort direction back to the original state.  This is not
2913          * actually necessary but seems like a good idea for tidiness.
2914          */
2915         reversedirection(state);
2916
2917         state->status = TSS_SORTEDINMEM;
2918         state->boundUsed = true;
2919 }
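
/*
 * Illustrative aside, compiled out by default (TUPLESORT_EXAMPLES is never
 * defined): the bounded-sort technique above, sketched over plain ints.  The
 * n smallest values seen so far are kept in a max-heap, so the largest of
 * them sits at the root where it can be compared against, and replaced by,
 * each incoming value cheaply; at the end the heap is unheapified in place
 * into ascending order, as sort_bounded_heap() does.  The real code gets its
 * max-heap by temporarily reversing the sort direction and reusing the
 * min-heap routines below; this sketch just writes a max-heap directly.
 */
#ifdef TUPLESORT_EXAMPLES

static void
example_sift_down_max(int *heap, int n, int i)
{
	for (;;)
	{
		int			largest = i;
		int			left = 2 * i + 1;
		int			right = 2 * i + 2;
		int			tmp;

		if (left < n && heap[left] > heap[largest])
			largest = left;
		if (right < n && heap[right] > heap[largest])
			largest = right;
		if (largest == i)
			break;
		tmp = heap[i];
		heap[i] = heap[largest];
		heap[largest] = tmp;
		i = largest;
	}
}

/* Leave the n smallest values of input[] in result[], in ascending order */
static void
example_top_n_smallest(const int *input, int ninput, int *result, int n)
{
	int			count = 0;
	int			i;

	Assert(n > 0 && ninput >= n);

	for (i = 0; i < ninput; i++)
	{
		if (count < n)
		{
			/* heap not yet full: sift-up insertion into the max-heap */
			int			j = count++;

			result[j] = input[i];
			while (j > 0 && result[(j - 1) / 2] < result[j])
			{
				int			tmp = result[j];

				result[j] = result[(j - 1) / 2];
				result[(j - 1) / 2] = tmp;
				j = (j - 1) / 2;
			}
		}
		else if (input[i] < result[0])
		{
			/* replaces the largest of the current top n */
			result[0] = input[i];
			example_sift_down_max(result, n, 0);
		}
	}

	/* unheapify in place: repeatedly move the root to the end */
	for (i = n - 1; i > 0; i--)
	{
		int			tmp = result[0];

		result[0] = result[i];
		result[i] = tmp;
		example_sift_down_max(result, i, 0);
	}
}
#endif   /* TUPLESORT_EXAMPLES */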
2920
2921 /*
2922  * Insert a new tuple into an empty or existing heap, maintaining the
2923  * heap invariant.  Caller is responsible for ensuring there's room.
2924  *
2925  * Note: we assume *tuple is a temporary variable that can be scribbled on.
2926  * For some callers, tuple actually points to a memtuples[] entry above the
2927  * end of the heap.  This is safe as long as it's not immediately adjacent
2928  * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
2929  * is, it might get overwritten before being moved into the heap!
2930  */
2931 static void
2932 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
2933                                           int tupleindex, bool checkIndex)
2934 {
2935         SortTuple  *memtuples;
2936         int                     j;
2937
2938         /*
2939          * Save the tupleindex --- see notes above about writing on *tuple. It's a
2940          * historical artifact that tupleindex is passed as a separate argument
2941          * and not in *tuple, but it's notationally convenient so let's leave it
2942          * that way.
2943          */
2944         tuple->tupindex = tupleindex;
2945
2946         memtuples = state->memtuples;
2947         Assert(state->memtupcount < state->memtupsize);
2948
2949         CHECK_FOR_INTERRUPTS();
2950
2951         /*
2952          * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
2953          * using 1-based array indexes, not 0-based.
2954          */
2955         j = state->memtupcount++;
2956         while (j > 0)
2957         {
2958                 int                     i = (j - 1) >> 1;
2959
2960                 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
2961                         break;
2962                 memtuples[j] = memtuples[i];
2963                 j = i;
2964         }
2965         memtuples[j] = *tuple;
2966 }
2967
2968 /*
2969  * The tuple at state->memtuples[0] has been removed from the heap.
2970  * Decrement memtupcount, and sift up to maintain the heap invariant.
2971  */
2972 static void
2973 tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
2974 {
2975         SortTuple  *memtuples = state->memtuples;
2976         SortTuple  *tuple;
2977         int                     i,
2978                                 n;
2979
2980         if (--state->memtupcount <= 0)
2981                 return;
2982
2983         CHECK_FOR_INTERRUPTS();
2984
2985         n = state->memtupcount;
2986         tuple = &memtuples[n];          /* tuple that must be reinserted */
2987         i = 0;                                          /* i is where the "hole" is */
2988         for (;;)
2989         {
2990                 int                     j = 2 * i + 1;
2991
2992                 if (j >= n)
2993                         break;
2994                 if (j + 1 < n &&
2995                         HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
2996                         j++;
2997                 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
2998                         break;
2999                 memtuples[i] = memtuples[j];
3000                 i = j;
3001         }
3002         memtuples[i] = *tuple;
3003 }
3004
3005 /*
3006  * Function to reverse the sort direction from its current state
3007  *
3008  * It is not safe to call this when performing hash tuplesorts
3009  */
3010 static void
3011 reversedirection(Tuplesortstate *state)
3012 {
3013         SortSupport sortKey = state->sortKeys;
3014         int                     nkey;
3015
3016         for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
3017         {
3018                 sortKey->ssup_reverse = !sortKey->ssup_reverse;
3019                 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
3020         }
3021 }
3022
3023
3024 /*
3025  * Tape interface routines
3026  */
3027
3028 static unsigned int
3029 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
3030 {
3031         unsigned int len;
3032
3033         if (LogicalTapeRead(state->tapeset, tapenum,
3034                                                 &len, sizeof(len)) != sizeof(len))
3035                 elog(ERROR, "unexpected end of tape");
3036         if (len == 0 && !eofOK)
3037                 elog(ERROR, "unexpected end of data");
3038         return len;
3039 }
3040
3041 static void
3042 markrunend(Tuplesortstate *state, int tapenum)
3043 {
3044         unsigned int len = 0;
3045
3046         LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
3047 }
3048
3049
3050 /*
3051  * Routines specialized for HeapTuple (actually MinimalTuple) case
3052  */
3053
3054 static int
3055 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3056 {
3057         SortSupport sortKey = state->sortKeys;
3058         HeapTupleData ltup;
3059         HeapTupleData rtup;
3060         TupleDesc       tupDesc;
3061         int                     nkey;
3062         int32           compare;
3063         AttrNumber      attno;
3064         Datum           datum1,
3065                                 datum2;
3066         bool            isnull1,
3067                                 isnull2;
3068
3069
3070         /* Compare the leading sort key */
3071         compare = ApplySortComparator(a->datum1, a->isnull1,
3072                                                                   b->datum1, b->isnull1,
3073                                                                   sortKey);
3074         if (compare != 0)
3075                 return compare;
3076
3077         /* Compare additional sort keys */
3078         ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3079         ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
3080         rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3081         rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
3082         tupDesc = state->tupDesc;
3083
3084         if (sortKey->abbrev_converter)
3085         {
3086                 attno = sortKey->ssup_attno;
3087
3088                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3089                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3090
3091                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3092                                                                                                 datum2, isnull2,
3093                                                                                                 sortKey);
3094                 if (compare != 0)
3095                         return compare;
3096         }
3097
3098         sortKey++;
3099         for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
3100         {
3101                 attno = sortKey->ssup_attno;
3102
3103                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3104                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3105
3106                 compare = ApplySortComparator(datum1, isnull1,
3107                                                                           datum2, isnull2,
3108                                                                           sortKey);
3109                 if (compare != 0)
3110                         return compare;
3111         }
3112
3113         return 0;
3114 }
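
/*
 * Illustrative aside, compiled out by default (TUPLESORT_EXAMPLES is never
 * defined): the abbreviated-key pattern used above, reduced to its core.
 * Each value is summarized by a cheap fixed-size "abbreviation" that sorts
 * compatibly with the full value; abbreviations are compared first, and only
 * ties fall back to the authoritative full comparison, which is what
 * ApplySortAbbrevFullComparator() provides in the real code.  This sketch
 * abbreviates a NUL-terminated string to its first four bytes packed into a
 * uint32; it is only loosely modeled on what real opclasses do, and the
 * names are invented for the example.
 */
#ifdef TUPLESORT_EXAMPLES

static uint32
example_abbreviate_string(const char *s)
{
	uint32		abbrev = 0;
	int			i;

	/* pack big-endian so that integer order matches byte-wise string order */
	for (i = 0; i < 4; i++)
	{
		abbrev <<= 8;
		if (*s)
			abbrev |= (unsigned char) *s++;
	}
	return abbrev;
}

static int
example_compare_abbreviated(const char *a, uint32 a_abbrev,
							const char *b, uint32 b_abbrev)
{
	/* cheap comparison on the abbreviations first */
	if (a_abbrev != b_abbrev)
		return (a_abbrev < b_abbrev) ? -1 : 1;

	/* abbreviations tie: the full comparison is authoritative */
	return strcmp(a, b);
}
#endif   /* TUPLESORT_EXAMPLES */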
3115
3116 static void
3117 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
3118 {
3119         /*
3120          * We expect the passed "tup" to be a TupleTableSlot, and form a
3121          * MinimalTuple using the exported interface for that.
3122          */
3123         TupleTableSlot *slot = (TupleTableSlot *) tup;
3124         Datum                   original;
3125         MinimalTuple tuple;
3126         HeapTupleData htup;
3127
3128         /* copy the tuple into sort storage */
3129         tuple = ExecCopySlotMinimalTuple(slot);
3130         stup->tuple = (void *) tuple;
3131         USEMEM(state, GetMemoryChunkSpace(tuple));
3132         /* set up first-column key value */
3133         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3134         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3135         original = heap_getattr(&htup,
3136                                                         state->sortKeys[0].ssup_attno,
3137                                                         state->tupDesc,
3138                                                         &stup->isnull1);
3139
3140         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3141         {
3142                  * Store the ordinary Datum representation, or the NULL value.  If
3143                  * there is a converter, it does not expect NULL values and the cost
3144                  * model need not account for NULLs, so for a NULL we skip the
3145                  * converter and just store the unconverted value in datum1 (its
3146                  * contents don't matter then, since comparisons consult isnull1 first).
3147                  * consistent).
3148                  */
3149                 stup->datum1 = original;
3150         }
3151         else if (!consider_abort_common(state))
3152         {
3153                 /* Store abbreviated key representation */
3154                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3155                                                                                                                  state->sortKeys);
3156         }
3157         else
3158         {
3159                 /* Abort abbreviation */
3160                 int             i;
3161
3162                 stup->datum1 = original;
3163
3164                 /*
3165                  * Set state to be consistent with never trying abbreviation.
3166                  *
3167                  * Alter datum1 representation in already-copied tuples, so as to
3168                  * ensure a consistent representation (current tuple was just handled).
3169                  * Note that we rely on all tuples copied so far actually being
3170                  * contained within memtuples array.
3171                  */
3172                 for (i = 0; i < state->memtupcount; i++)
3173                 {
3174                         SortTuple *mtup = &state->memtuples[i];
3175
3176                         htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
3177                                                         MINIMAL_TUPLE_OFFSET;
3178                         htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
3179                                                         MINIMAL_TUPLE_OFFSET);
3180
3181                         mtup->datum1 = heap_getattr(&htup,
3182                                                                                 state->sortKeys[0].ssup_attno,
3183                                                                                 state->tupDesc,
3184                                                                                 &mtup->isnull1);
3185                 }
3186         }
3187 }
3188
3189 static void
3190 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
3191 {
3192         MinimalTuple tuple = (MinimalTuple) stup->tuple;
3193
3194         /* the part of the MinimalTuple we'll write: */
3195         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3196         unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
3197
3198         /* total on-disk footprint: */
3199         unsigned int tuplen = tupbodylen + sizeof(int);
3200
3201         LogicalTapeWrite(state->tapeset, tapenum,
3202                                          (void *) &tuplen, sizeof(tuplen));
3203         LogicalTapeWrite(state->tapeset, tapenum,
3204                                          (void *) tupbody, tupbodylen);
3205         if (state->randomAccess)        /* need trailing length word? */
3206                 LogicalTapeWrite(state->tapeset, tapenum,
3207                                                  (void *) &tuplen, sizeof(tuplen));
3208
3209         FREEMEM(state, GetMemoryChunkSpace(tuple));
3210         heap_free_minimal_tuple(tuple);
3211 }
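
/*
 * Sketch of what the routine above leaves on tape for one tuple in the
 * randomAccess case (the non-randomAccess case simply omits the trailing
 * copy of the length word):
 *
 *		tuplen | tuple body | tuplen
 *
 * tuplen counts the body plus one length word, and the trailing copy is
 * what allows the tape to be stepped through backwards.
 */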
3212
3213 static void
3214 readtup_heap(Tuplesortstate *state, SortTuple *stup,
3215                          int tapenum, unsigned int len)
3216 {
3217         unsigned int tupbodylen = len - sizeof(int);
3218         unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
3219         MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
3220         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3221         HeapTupleData htup;
3222
3223         USEMEM(state, GetMemoryChunkSpace(tuple));
3224         /* read in the tuple proper */
3225         tuple->t_len = tuplen;
3226         LogicalTapeReadExact(state->tapeset, tapenum,
3227                                                  tupbody, tupbodylen);
3228         if (state->randomAccess)        /* need trailing length word? */
3229                 LogicalTapeReadExact(state->tapeset, tapenum,
3230                                                          &tuplen, sizeof(tuplen));
3231         stup->tuple = (void *) tuple;
3232         /* set up first-column key value */
3233         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3234         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3235         stup->datum1 = heap_getattr(&htup,
3236                                                                 state->sortKeys[0].ssup_attno,
3237                                                                 state->tupDesc,
3238                                                                 &stup->isnull1);
3239 }
3240
3241 /*
3242  * Routines specialized for the CLUSTER case (HeapTuple data, with
3243  * comparisons per a btree index definition)
3244  */
3245
3246 static int
3247 comparetup_cluster(const SortTuple *a, const SortTuple *b,
3248                                    Tuplesortstate *state)
3249 {
3250         SortSupport     sortKey = state->sortKeys;
3251         HeapTuple       ltup;
3252         HeapTuple       rtup;
3253         TupleDesc       tupDesc;
3254         int                     nkey;
3255         int32           compare;
3256         Datum           datum1,
3257                                 datum2;
3258         bool            isnull1,
3259                                 isnull2;
3260         AttrNumber      leading = state->indexInfo->ii_KeyAttrNumbers[0];
3261
3262         /* Be prepared to compare additional sort keys */
3263         ltup = (HeapTuple) a->tuple;
3264         rtup = (HeapTuple) b->tuple;
3265         tupDesc = state->tupDesc;
3266
3267         /* Compare the leading sort key, if it's simple */
3268         if (leading != 0)
3269         {
3270                 compare = ApplySortComparator(a->datum1, a->isnull1,
3271                                                                           b->datum1, b->isnull1,
3272                                                                           sortKey);
3273                 if (compare != 0)
3274                         return compare;
3275
3276                 if (sortKey->abbrev_converter)
3277                 {
3278                         datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
3279                         datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
3280
3281                         compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3282                                                                                                         datum2, isnull2,
3283                                                                                                         sortKey);
3284                 }
3285                 if (compare != 0 || state->nKeys == 1)
3286                         return compare;
3287                 /* Compare additional columns the hard way */
3288                 sortKey++;
3289                 nkey = 1;
3290         }
3291         else
3292         {
3293                 /* Must compare all keys the hard way */
3294                 nkey = 0;
3295         }
3296
3297         if (state->indexInfo->ii_Expressions == NULL)
3298         {
3299                 /* If not expression index, just compare the proper heap attrs */
3300
3301                 for (; nkey < state->nKeys; nkey++, sortKey++)
3302                 {
3303                         AttrNumber      attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
3304
3305                         datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
3306                         datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
3307
3308                         compare = ApplySortComparator(datum1, isnull1,
3309                                                                                   datum2, isnull2,
3310                                                                                   sortKey);
3311                         if (compare != 0)
3312                                 return compare;
3313                 }
3314         }
3315         else
3316         {
3317                 /*
3318                  * In the expression index case, compute the whole index tuple and
3319                  * then compare values.  It would perhaps be faster to compute only as
3320                  * many columns as we need to compare, but that would require
3321                  * duplicating all the logic in FormIndexDatum.
3322                  */
3323                 Datum           l_index_values[INDEX_MAX_KEYS];
3324                 bool            l_index_isnull[INDEX_MAX_KEYS];
3325                 Datum           r_index_values[INDEX_MAX_KEYS];
3326                 bool            r_index_isnull[INDEX_MAX_KEYS];
3327                 TupleTableSlot *ecxt_scantuple;
3328
3329                 /* Reset context each time to prevent memory leakage */
3330                 ResetPerTupleExprContext(state->estate);
3331
3332                 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
3333
3334                 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
3335                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3336                                            l_index_values, l_index_isnull);
3337
3338                 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
3339                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3340                                            r_index_values, r_index_isnull);
3341
3342                 for (; nkey < state->nKeys; nkey++, sortKey++)
3343                 {
3344                         compare = ApplySortComparator(l_index_values[nkey],
3345                                                                                   l_index_isnull[nkey],
3346                                                                                   r_index_values[nkey],
3347                                                                                   r_index_isnull[nkey],
3348                                                                                   sortKey);
3349                         if (compare != 0)
3350                                 return compare;
3351                 }
3352         }
3353
3354         return 0;
3355 }
3356
3357 static void
3358 copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
3359 {
3360         HeapTuple       tuple = (HeapTuple) tup;
3361         Datum           original;
3362
3363         /* copy the tuple into sort storage */
3364         tuple = heap_copytuple(tuple);
3365         stup->tuple = (void *) tuple;
3366         USEMEM(state, GetMemoryChunkSpace(tuple));
3367         /*
3368          * set up first-column key value, and potentially abbreviate, if it's a
3369          * simple column
3370          */
3371         if (state->indexInfo->ii_KeyAttrNumbers[0] == 0)
3372                 return;
3373
3374         original = heap_getattr(tuple,
3375                                                         state->indexInfo->ii_KeyAttrNumbers[0],
3376                                                         state->tupDesc,
3377                                                         &stup->isnull1);
3378
3379         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3380         {
3381                 /*
3382                  * Store the ordinary Datum representation, or the NULL value.  If
3383                  * there is a converter, it does not expect NULL values and the cost
3384                  * model need not account for NULLs, so for a NULL we skip the
3385                  * converter and just store the unconverted value in datum1 (its
3386                  * contents don't matter then, since comparisons consult isnull1 first).
3387                  */
3388                 stup->datum1 = original;
3389         }
3390         else if (!consider_abort_common(state))
3391         {
3392                 /* Store abbreviated key representation */
3393                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3394                                                                                                                  state->sortKeys);
3395         }
3396         else
3397         {
3398                 /* Abort abbreviation */
3399                 int             i;
3400
3401                 stup->datum1 = original;
3402
3403                 /*
3404                  * Set state to be consistent with never trying abbreviation.
3405                  *
3406                  * Alter datum1 representation in already-copied tuples, so as to
3407                  * ensure a consistent representation (current tuple was just handled).
3408                  * Note that we rely on all tuples copied so far actually being
3409                  * contained within memtuples array.
3410                  */
3411                 for (i = 0; i < state->memtupcount; i++)
3412                 {
3413                         SortTuple *mtup = &state->memtuples[i];
3414
3415                         tuple = (HeapTuple) mtup->tuple;
3416                         mtup->datum1 = heap_getattr(tuple,
3417                                                                                 state->indexInfo->ii_KeyAttrNumbers[0],
3418                                                                                 state->tupDesc,
3419                                                                                 &mtup->isnull1);
3420                 }
3421         }
3422 }
3423
3424 static void
3425 writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
3426 {
3427         HeapTuple       tuple = (HeapTuple) stup->tuple;
3428         unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
3429
3430         /* We need to store t_self, but not other fields of HeapTupleData */
3431         LogicalTapeWrite(state->tapeset, tapenum,
3432                                          &tuplen, sizeof(tuplen));
3433         LogicalTapeWrite(state->tapeset, tapenum,
3434                                          &tuple->t_self, sizeof(ItemPointerData));
3435         LogicalTapeWrite(state->tapeset, tapenum,
3436                                          tuple->t_data, tuple->t_len);
3437         if (state->randomAccess)        /* need trailing length word? */
3438                 LogicalTapeWrite(state->tapeset, tapenum,
3439                                                  &tuplen, sizeof(tuplen));
3440
3441         FREEMEM(state, GetMemoryChunkSpace(tuple));
3442         heap_freetuple(tuple);
3443 }
3444
3445 static void
3446 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
3447                                 int tapenum, unsigned int tuplen)
3448 {
3449         unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
3450         HeapTuple       tuple = (HeapTuple) palloc(t_len + HEAPTUPLESIZE);
3451
3452         USEMEM(state, GetMemoryChunkSpace(tuple));
3453         /* Reconstruct the HeapTupleData header */
3454         tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
3455         tuple->t_len = t_len;
3456         LogicalTapeReadExact(state->tapeset, tapenum,
3457                                                  &tuple->t_self, sizeof(ItemPointerData));
3458         /* We don't currently bother to reconstruct t_tableOid */
3459         tuple->t_tableOid = InvalidOid;
3460         /* Read in the tuple body */
3461         LogicalTapeReadExact(state->tapeset, tapenum,
3462                                                  tuple->t_data, tuple->t_len);
3463         if (state->randomAccess)        /* need trailing length word? */
3464                 LogicalTapeReadExact(state->tapeset, tapenum,
3465                                                          &tuplen, sizeof(tuplen));
3466         stup->tuple = (void *) tuple;
3467         /* set up first-column key value, if it's a simple column */
3468         if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
3469                 stup->datum1 = heap_getattr(tuple,
3470                                                                         state->indexInfo->ii_KeyAttrNumbers[0],
3471                                                                         state->tupDesc,
3472                                                                         &stup->isnull1);
3473 }
3474
3475
3476 /*
3477  * Routines specialized for IndexTuple case
3478  *
3479  * The btree and hash cases require separate comparison functions, but the
3480  * IndexTuple representation is the same so the copy/write/read support
3481  * functions can be shared.
3482  */
3483
3484 static int
3485 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
3486                                            Tuplesortstate *state)
3487 {
3488         /*
3489          * This is similar to comparetup_heap(), but expects index tuples.  There
3490          * is also special handling for enforcing uniqueness, and special treatment
3491          * for equal keys at the end.
3492          */
3493         SortSupport     sortKey = state->sortKeys;
3494         IndexTuple      tuple1;
3495         IndexTuple      tuple2;
3496         int                     keysz;
3497         TupleDesc       tupDes;
3498         bool            equal_hasnull = false;
3499         int                     nkey;
3500         int32           compare;
3501         Datum           datum1,
3502                                 datum2;
3503         bool            isnull1,
3504                                 isnull2;
3505
3506
3507         /* Compare the leading sort key */
3508         compare = ApplySortComparator(a->datum1, a->isnull1,
3509                                                                   b->datum1, b->isnull1,
3510                                                                   sortKey);
3511         if (compare != 0)
3512                 return compare;
3513
3514         /* Compare additional sort keys */
3515         tuple1 = (IndexTuple) a->tuple;
3516         tuple2 = (IndexTuple) b->tuple;
3517         keysz = state->nKeys;
3518         tupDes = RelationGetDescr(state->indexRel);
3519
3520         if (sortKey->abbrev_converter)
3521         {
3522                 datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
3523                 datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
3524
3525                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3526                                                                                                 datum2, isnull2,
3527                                                                                                 sortKey);
3528                 if (compare != 0)
3529                         return compare;
3530         }
3531
3532         /* they are equal, so we only need to examine one null flag */
3533         if (a->isnull1)
3534                 equal_hasnull = true;
3535
3536         sortKey++;
3537         for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
3538         {
3539                 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
3540                 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
3541
3542                 compare = ApplySortComparator(datum1, isnull1,
3543                                                                           datum2, isnull2,
3544                                                                           sortKey);
3545                 if (compare != 0)
3546                         return compare;         /* done when we find unequal attributes */
3547
3548                 /* they are equal, so we only need to examine one null flag */
3549                 if (isnull1)
3550                         equal_hasnull = true;
3551         }
3552
3553         /*
3554          * If btree has asked us to enforce uniqueness, complain if two equal
3555          * tuples are detected (unless there was at least one NULL field).
3556          *
3557          * It is sufficient to make the test here, because if two tuples are equal
3558          * they *must* get compared at some stage of the sort --- otherwise the
3559          * sort algorithm wouldn't have checked whether one must appear before the
3560          * other.
3561          */
3562         if (state->enforceUnique && !equal_hasnull)
3563         {
3564                 Datum           values[INDEX_MAX_KEYS];
3565                 bool            isnull[INDEX_MAX_KEYS];
3566                 char       *key_desc;
3567
3568                 /*
3569                  * Some rather brain-dead implementations of qsort (such as the one in
3570                  * QNX 4) will sometimes call the comparison routine to compare a
3571                  * value to itself, but we always use our own implementation, which
3572                  * does not.
3573                  */
3574                 Assert(tuple1 != tuple2);
3575
3576                 index_deform_tuple(tuple1, tupDes, values, isnull);
3577
3578                 key_desc = BuildIndexValueDescription(state->indexRel, values, isnull);
3579
3580                 ereport(ERROR,
3581                                 (errcode(ERRCODE_UNIQUE_VIOLATION),
3582                                  errmsg("could not create unique index \"%s\"",
3583                                                 RelationGetRelationName(state->indexRel)),
3584                                  key_desc ? errdetail("Key %s is duplicated.", key_desc) :
3585                                                         errdetail("Duplicate keys exist."),
3586                                  errtableconstraint(state->heapRel,
3587                                                                  RelationGetRelationName(state->indexRel))));
3588         }
3589
3590         /*
3591          * If key values are equal, we sort on ItemPointer.  This does not affect
3592          * validity of the finished index, but it may be useful to have index
3593          * scans in physical order.
3594          */
3595         {
3596                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3597                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3598
3599                 if (blk1 != blk2)
3600                         return (blk1 < blk2) ? -1 : 1;
3601         }
3602         {
3603                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3604                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3605
3606                 if (pos1 != pos2)
3607                         return (pos1 < pos2) ? -1 : 1;
3608         }
3609
3610         return 0;
3611 }
3612
3613 static int
3614 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
3615                                           Tuplesortstate *state)
3616 {
3617         uint32          hash1;
3618         uint32          hash2;
3619         IndexTuple      tuple1;
3620         IndexTuple      tuple2;
3621
3622         /*
3623          * Fetch hash keys and mask off bits we don't want to sort by. We know
3624          * that the first column of the index tuple is the hash key.
3625          */
3626         Assert(!a->isnull1);
3627         hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
3628         Assert(!b->isnull1);
3629         hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
3630
3631         if (hash1 > hash2)
3632                 return 1;
3633         else if (hash1 < hash2)
3634                 return -1;
3635
3636         /*
3637          * If hash values are equal, we sort on ItemPointer.  This does not affect
3638          * validity of the finished index, but it may be useful to have index
3639          * scans in physical order.
3640          */
3641         tuple1 = (IndexTuple) a->tuple;
3642         tuple2 = (IndexTuple) b->tuple;
3643
3644         {
3645                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3646                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3647
3648                 if (blk1 != blk2)
3649                         return (blk1 < blk2) ? -1 : 1;
3650         }
3651         {
3652                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3653                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3654
3655                 if (pos1 != pos2)
3656                         return (pos1 < pos2) ? -1 : 1;
3657         }
3658
3659         return 0;
3660 }
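
#if 0
/*
 * Illustrative sketch only, with hypothetical values: for a hash index build
 * the bits that matter are those selecting the target bucket, which is why
 * the comparator above masks with state->hash_mask before comparing.  With
 * an assumed mask of 0x3FF (a 1024-bucket build), the two hash values below
 * fall in the same bucket, compare as equal, and are then ordered by the
 * ItemPointer tie-break.
 */
static void
example_hash_mask(void)
{
        uint32          hash_mask = 0x3FF;
        uint32          hash1 = 0xDEAD0007;
        uint32          hash2 = 0xBEEF0007;

        Assert((hash1 & hash_mask) == (hash2 & hash_mask));     /* both bucket 7 */
}
#endif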
3661
3662 static void
3663 copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
3664 {
3665         IndexTuple      tuple = (IndexTuple) tup;
3666         unsigned int tuplen = IndexTupleSize(tuple);
3667         IndexTuple      newtuple;
3668         Datum           original;
3669
3670         /* copy the tuple into sort storage */
3671         newtuple = (IndexTuple) palloc(tuplen);
3672         memcpy(newtuple, tuple, tuplen);
3673         USEMEM(state, GetMemoryChunkSpace(newtuple));
3674         stup->tuple = (void *) newtuple;
3675         /* set up first-column key value */
3676         original = index_getattr(newtuple,
3677                                                          1,
3678                                                          RelationGetDescr(state->indexRel),
3679                                                          &stup->isnull1);
3680
3681         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3682         {
3683                 /*
3684                  * Store ordinary Datum representation, or NULL value.  If there is a
3685                  * converter it won't expect NULL values, and the cost model is not
3686                  * required to account for NULLs, so in that case we avoid calling the
3687                  * converter and just set datum1 to the unconverted ("void")
3688                  * representation, to be consistent.
3689                  */
3690                 stup->datum1 = original;
3691         }
3692         else if (!consider_abort_common(state))
3693         {
3694                 /* Store abbreviated key representation */
3695                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3696                                                                                                                  state->sortKeys);
3697         }
3698         else
3699         {
3700                 /* Abort abbreviation */
3701                 int             i;
3702
3703                 stup->datum1 = original;
3704
3705                 /*
3706                  * Set state to be consistent with never trying abbreviation.
3707                  *
3708                  * Alter datum1 representation in already-copied tuples, so as to
3709                  * ensure a consistent representation (current tuple was just handled).
3710                  * Note that we rely on all tuples copied so far actually being
3711                  * contained within the memtuples array.
3712                  */
3713                 for (i = 0; i < state->memtupcount; i++)
3714                 {
3715                         SortTuple *mtup = &state->memtuples[i];
3716
3717                         tuple = (IndexTuple) mtup->tuple;
3718                         mtup->datum1 = index_getattr(tuple,
3719                                                                                  1,
3720                                                                                  RelationGetDescr(state->indexRel),
3721                                                                                  &mtup->isnull1);
3722                 }
3723         }
3724 }
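
#if 0
/*
 * Hypothetical abbrev_converter, sketched only to illustrate the contract
 * the abbreviated-key handling above relies on: the converter maps a full
 * key Datum to a pass-by-value Datum whose comparisons never contradict the
 * authoritative ordering (spurious ties are allowed, and are resolved later
 * by the full comparator).  Real converters live with the datatype code;
 * this one packs the leading bytes of a NUL-terminated key, most significant
 * byte first, so that an unsigned comparison of the result agrees with a
 * plain byte-wise (C locale style) ordering of the keys.
 */
static Datum
example_abbrev_convert(Datum original, SortSupport ssup)
{
        char       *key = DatumGetCString(original);
        Datum           abbrev = 0;
        int                     i;

        /* pack leading bytes most-significant-first, stopping at the NUL */
        for (i = 0; i < (int) sizeof(Datum) && key[i] != '\0'; i++)
                abbrev |= (Datum) (unsigned char) key[i] <<
                        (8 * (sizeof(Datum) - 1 - i));

        return abbrev;
}
#endif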
3725
3726 static void
3727 writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
3728 {
3729         IndexTuple      tuple = (IndexTuple) stup->tuple;
3730         unsigned int tuplen;
3731
3732         tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
3733         LogicalTapeWrite(state->tapeset, tapenum,
3734                                          (void *) &tuplen, sizeof(tuplen));
3735         LogicalTapeWrite(state->tapeset, tapenum,
3736                                          (void *) tuple, IndexTupleSize(tuple));
3737         if (state->randomAccess)        /* need trailing length word? */
3738                 LogicalTapeWrite(state->tapeset, tapenum,
3739                                                  (void *) &tuplen, sizeof(tuplen));
3740
3741         FREEMEM(state, GetMemoryChunkSpace(tuple));
3742         pfree(tuple);
3743 }
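
/*
 * Tape format note for the index case: each tuple is written as a leading
 * length word that counts itself plus the tuple body, followed by the
 * IndexTuple image, followed (only when randomAccess was requested) by a
 * trailing copy of the length word so the tape can also be read backwards.
 * readtup_index below expects exactly this layout; the "len" it receives is
 * the leading length word, which its caller has already consumed from the
 * tape.
 */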
3744
3745 static void
3746 readtup_index(Tuplesortstate *state, SortTuple *stup,
3747                           int tapenum, unsigned int len)
3748 {
3749         unsigned int tuplen = len - sizeof(unsigned int);
3750         IndexTuple      tuple = (IndexTuple) palloc(tuplen);
3751
3752         USEMEM(state, GetMemoryChunkSpace(tuple));
3753         LogicalTapeReadExact(state->tapeset, tapenum,
3754                                                  tuple, tuplen);
3755         if (state->randomAccess)        /* need trailing length word? */
3756                 LogicalTapeReadExact(state->tapeset, tapenum,
3757                                                          &tuplen, sizeof(tuplen));
3758         stup->tuple = (void *) tuple;
3759         /* set up first-column key value */
3760         stup->datum1 = index_getattr(tuple,
3761                                                                  1,
3762                                                                  RelationGetDescr(state->indexRel),
3763                                                                  &stup->isnull1);
3764 }
3765
3766 /*
3767  * Routines specialized for DatumTuple case
3768  */
3769
3770 static int
3771 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3772 {
3773         int             compare;
3774
3775         compare = ApplySortComparator(a->datum1, a->isnull1,
3776                                                                   b->datum1, b->isnull1,
3777                                                                   state->sortKeys);
3778         if (compare != 0)
3779                 return compare;
3780
3781         /* if we have abbreviations, then "tuple" has the original value */
3782
3783         if (state->sortKeys->abbrev_converter)
3784                 compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
3785                                                                                                 PointerGetDatum(b->tuple), b->isnull1,
3786                                                                                                 state->sortKeys);
3787
3788         return compare;
3789 }
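
/*
 * Note on the abbreviated-key recheck above: a comparison of abbreviated
 * keys (datum1) is trusted only when it returns non-zero.  Abbreviations may
 * report a tie for values that are actually distinct (for example, two long
 * text keys sharing the same leading bytes abbreviate identically), so a
 * zero result has to be rechecked with the authoritative comparator against
 * the original values stashed in "tuple".
 */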
3790
3791 static void
3792 copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
3793 {
3794         /* Not currently needed */
3795         elog(ERROR, "copytup_datum() should not be called");
3796 }
3797
3798 static void
3799 writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
3800 {
3801         void       *waddr;
3802         unsigned int tuplen;
3803         unsigned int writtenlen;
3804
3805         if (stup->isnull1)
3806         {
3807                 waddr = NULL;
3808                 tuplen = 0;
3809         }
3810         else if (state->datumTypeByVal)
3811         {
3812                 waddr = &stup->datum1;
3813                 tuplen = sizeof(Datum);
3814         }
3815         else
3816         {
3817                 waddr = stup->tuple;
3818                 tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, state->datumTypeLen);
3819                 Assert(tuplen != 0);
3820         }
3821
3822         writtenlen = tuplen + sizeof(unsigned int);
3823
3824         LogicalTapeWrite(state->tapeset, tapenum,
3825                                          (void *) &writtenlen, sizeof(writtenlen));
3826         LogicalTapeWrite(state->tapeset, tapenum,
3827                                          waddr, tuplen);
3828         if (state->randomAccess)        /* need trailing length word? */
3829                 LogicalTapeWrite(state->tapeset, tapenum,
3830                                                  (void *) &writtenlen, sizeof(writtenlen));
3831
3832         if (stup->tuple)
3833         {
3834                 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3835                 pfree(stup->tuple);
3836         }
3837 }
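
/*
 * Tape format note for the datum case: the payload depends on the datum
 * class.  A NULL is written as a length word with no payload at all; a
 * pass-by-value datum is written as a raw sizeof(Datum) image; a
 * pass-by-reference datum is written as the datumGetSize() bytes of the
 * pointed-to value (the original value, even when an abbreviated key is kept
 * in datum1).  readtup_datum below recognizes a NULL by the zero payload
 * length and otherwise relies on state->datumTypeByVal to choose between the
 * by-value and by-reference paths.
 */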
3838
3839 static void
3840 readtup_datum(Tuplesortstate *state, SortTuple *stup,
3841                           int tapenum, unsigned int len)
3842 {
3843         unsigned int tuplen = len - sizeof(unsigned int);
3844
3845         if (tuplen == 0)
3846         {
3847                 /* it's NULL */
3848                 stup->datum1 = (Datum) 0;
3849                 stup->isnull1 = true;
3850                 stup->tuple = NULL;
3851         }
3852         else if (state->datumTypeByVal)
3853         {
3854                 Assert(tuplen == sizeof(Datum));
3855                 LogicalTapeReadExact(state->tapeset, tapenum,
3856                                                          &stup->datum1, tuplen);
3857                 stup->isnull1 = false;
3858                 stup->tuple = NULL;
3859         }
3860         else
3861         {
3862                 void       *raddr = palloc(tuplen);
3863
3864                 LogicalTapeReadExact(state->tapeset, tapenum,
3865                                                          raddr, tuplen);
3866                 stup->datum1 = PointerGetDatum(raddr);
3867                 stup->isnull1 = false;
3868                 stup->tuple = raddr;
3869                 USEMEM(state, GetMemoryChunkSpace(raddr));
3870         }
3871
3872         if (state->randomAccess)        /* need trailing length word? */
3873                 LogicalTapeReadExact(state->tapeset, tapenum,
3874                                                          &tuplen, sizeof(tuplen));
3875 }
3876
3877 /*
3878  * Convenience routine to free a tuple previously loaded into sort memory
3879  */
3880 static void
3881 free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
3882 {
3883         FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3884         pfree(stup->tuple);
3885 }
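
#if 0
/*
 * Usage sketch only (not part of tuplesort.c, and the function name is
 * hypothetical): how a caller might drive the single-Datum sort path
 * implemented by the routines above.  The values are made up; error handling
 * and memory-context care are omitted, and the usual work_mem GUC is assumed
 * to be in scope.
 */
static void
example_datum_sort(void)
{
        Tuplesortstate *sortstate;
        Datum           val;
        bool            isnull;

        sortstate = tuplesort_begin_datum(INT4OID, Int4LessOperator,
                                                                          InvalidOid,   /* no collation */
                                                                          false,                /* nulls sort last */
                                                                          work_mem,
                                                                          false);               /* no random access */

        tuplesort_putdatum(sortstate, Int32GetDatum(42), false);
        tuplesort_putdatum(sortstate, Int32GetDatum(7), false);
        tuplesort_putdatum(sortstate, (Datum) 0, true); /* a NULL */

        tuplesort_performsort(sortstate);

        /* yields 7, then 42, then the NULL, given nulls-last ordering */
        while (tuplesort_getdatum(sortstate, true, &val, &isnull))
                ;

        tuplesort_end(sortstate);
}
#endif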