1 /*-------------------------------------------------------------------------
2  *
3  * tuplesort.c
4  *        Generalized tuple sorting routines.
5  *
6  * This module handles sorting of heap tuples, index tuples, or single
7  * Datums (and could easily support other kinds of sortable objects,
8  * if necessary).  It works efficiently for both small and large amounts
9  * of data.  Small amounts are sorted in-memory using qsort().  Large
10  * amounts are sorted using temporary files and a standard external sort
11  * algorithm.
12  *
13  * See Knuth, volume 3, for more than you want to know about the external
14  * sorting algorithm.  We divide the input into sorted runs using replacement
15  * selection, in the form of a priority queue implemented as a heap
16  * (essentially his Algorithm 5.2.3H), then merge the runs using polyphase
17  * merge, Knuth's Algorithm 5.4.2D.  The logical "tapes" used by Algorithm D
18  * are implemented by logtape.c, which avoids space wastage by recycling
19  * disk space as soon as each block is read from its "tape".
20  *
21  * We do not form the initial runs using Knuth's recommended replacement
22  * selection data structure (Algorithm 5.4.1R), because it uses a fixed
23  * number of records in memory at all times.  Since we are dealing with
24  * tuples that may vary considerably in size, we want to be able to vary
25  * the number of records kept in memory to ensure full utilization of the
26  * allowed sort memory space.  So, we keep the tuples in a variable-size
27  * heap, with the next record to go out at the top of the heap.  Like
28  * Algorithm 5.4.1R, each record is stored with the run number that it
29  * must go into, and we use (run number, key) as the ordering key for the
30  * heap.  When the run number at the top of the heap changes, we know that
31  * no more records of the prior run are left in the heap.
32  *
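 * For illustration, the effective heap ordering while building runs can be
 * sketched as below ("run_number" and "compare_keys" stand in for the run
 * number stored with each tuple and the regular key comparison):
 *
 *		if (a->run_number != b->run_number)
 *			return (a->run_number < b->run_number) ? -1 : 1;
 *		return compare_keys(a, b);
 *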
33  * The approximate amount of memory allowed for any one sort operation
34  * is specified in kilobytes by the caller (most pass work_mem).  Initially,
35  * we absorb tuples and simply store them in an unsorted array as long as
36  * we haven't exceeded workMem.  If we reach the end of the input without
37  * exceeding workMem, we sort the array using qsort() and subsequently return
38  * tuples just by scanning the tuple array sequentially.  If we do exceed
39  * workMem, we construct a heap using Algorithm H and begin to emit tuples
40  * into sorted runs in temporary tapes, emitting just enough tuples at each
41  * step to get back within the workMem limit.  Whenever the run number at
42  * the top of the heap changes, we begin a new run with a new output tape
43  * (selected per Algorithm D).  After the end of the input is reached,
44  * we dump out remaining tuples in memory into a final run (or two),
45  * then merge the runs using Algorithm D.
46  *
47  * When merging runs, we use a heap containing just the frontmost tuple from
48  * each source run; we repeatedly output the smallest tuple and insert the
49  * next tuple from its source tape (if any).  When the heap empties, the merge
50  * is complete.  The basic merge algorithm thus needs very little memory ---
51  * only M tuples for an M-way merge, and M is constrained to a small number.
52  * However, we can still make good use of our full workMem allocation by
53  * pre-reading additional tuples from each source tape.  Without prereading,
54  * our access pattern to the temporary file would be very erratic; on average
55  * we'd read one block from each of M source tapes during the same time that
56  * we're writing M blocks to the output tape, so there is no sequentiality of
57  * access at all, defeating the read-ahead methods used by most Unix kernels.
58  * Worse, the output tape gets written into a very random sequence of blocks
59  * of the temp file, ensuring that things will be even worse when it comes
60  * time to read that tape.  A straightforward merge pass thus ends up doing a
61  * lot of waiting for disk seeks.  We can improve matters by prereading from
62  * each source tape sequentially, loading about workMem/M bytes from each tape
63  * in turn.  Then we run the merge algorithm, writing but not reading until
64  * one of the preloaded tuple series runs out.  Then we switch back to preread
65  * mode, fill memory again, and repeat.  This approach helps to localize both
66  * read and write accesses.
67  *
68  * When the caller requests random access to the sort result, we form
69  * the final sorted run on a logical tape which is then "frozen", so
70  * that we can access it randomly.  When the caller does not need random
71  * access, we return from tuplesort_performsort() as soon as we are down
72  * to one run per logical tape.  The final merge is then performed
73  * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
74  * saves one cycle of writing all the data out to disk and reading it in.
75  *
76  * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
77  * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
78  * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
79  * tape drives are expensive beasts, and in particular that there will always
80  * be many more runs than tape drives.  In our implementation a "tape drive"
81  * doesn't cost much more than a few KB of memory buffers, so we can afford
82  * to have lots of them.  In particular, if we can have as many tape drives
83  * as sorted runs, we can avoid repeated I/O entirely.  In the current
84  * code we determine the number of tapes M on the basis of workMem: we want
85  * workMem/M to be large enough that we read a fair amount of data each time
86  * we preread from a tape, so as to maintain the locality of access described
87  * above.  Nonetheless, with large workMem we can have many tapes.
88  *
89  *
90  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
91  * Portions Copyright (c) 1994, Regents of the University of California
92  *
93  * IDENTIFICATION
94  *        src/backend/utils/sort/tuplesort.c
95  *
96  *-------------------------------------------------------------------------
97  */
98
99 #include "postgres.h"
100
101 #include <limits.h>
102
103 #include "access/htup_details.h"
104 #include "access/nbtree.h"
105 #include "catalog/index.h"
106 #include "commands/tablespace.h"
107 #include "executor/executor.h"
108 #include "miscadmin.h"
109 #include "pg_trace.h"
110 #include "utils/datum.h"
111 #include "utils/logtape.h"
112 #include "utils/lsyscache.h"
113 #include "utils/memutils.h"
114 #include "utils/pg_rusage.h"
115 #include "utils/rel.h"
116 #include "utils/sortsupport.h"
117 #include "utils/tuplesort.h"
118
119
120 /* sort-type codes for sort__start probes */
121 #define HEAP_SORT               0
122 #define INDEX_SORT              1
123 #define DATUM_SORT              2
124 #define CLUSTER_SORT    3
125
126 /* GUC variables */
127 #ifdef TRACE_SORT
128 bool            trace_sort = false;
129 #endif
130
131 #ifdef DEBUG_BOUNDED_SORT
132 bool            optimize_bounded_sort = true;
133 #endif
134
135
136 /*
137  * The objects we actually sort are SortTuple structs.  These contain
138  * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
139  * which is a separate palloc chunk --- we assume it is just one chunk and
140  * can be freed by a simple pfree().  SortTuples also contain the tuple's
141  * first key column in Datum/nullflag format, and an index integer.
142  *
143  * Storing the first key column lets us save heap_getattr or index_getattr
144  * calls during tuple comparisons.  We could extract and save all the key
145  * columns, not just the first, but this would increase code complexity and
146  * overhead, and wouldn't actually save any comparison cycles in the common
147  * case where the first key determines the comparison result.  Note that
148  * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
149  *
150  * When sorting single Datums, the data value is represented directly by
151  * datum1/isnull1.  If the datatype is pass-by-reference and isnull1 is false,
152  * then datum1 points to a separately palloc'd data value that is also pointed
153  * to by the "tuple" pointer; otherwise "tuple" is NULL.  There is one special
154  * case:  when the sort support infrastructure provides an "abbreviated key"
155  * representation, the value stored in datum1 is (typically) a pass-by-value
156  * proxy for a pass-by-reference key.
157  *
158  * While building initial runs, tupindex holds the tuple's run number.  During
159  * merge passes, we re-use it to hold the input tape number that each tuple in
160  * the heap was read from, or to hold the index of the next tuple pre-read
161  * from the same tape in the case of pre-read entries.  tupindex goes unused
162  * if the sort occurs entirely in memory.
163  */
164 typedef struct
165 {
166         void       *tuple;                      /* the tuple proper */
167         Datum           datum1;                 /* value of first key column */
168         bool            isnull1;                /* is first key column NULL? */
169         int                     tupindex;               /* see notes above */
170 } SortTuple;
171
172
173 /*
174  * Possible states of a Tuplesort object.  These denote the states that
175  * persist between calls of Tuplesort routines.
176  */
177 typedef enum
178 {
179         TSS_INITIAL,                            /* Loading tuples; still within memory limit */
180         TSS_BOUNDED,                            /* Loading tuples into bounded-size heap */
181         TSS_BUILDRUNS,                          /* Loading tuples; writing to tape */
182         TSS_SORTEDINMEM,                        /* Sort completed entirely in memory */
183         TSS_SORTEDONTAPE,                       /* Sort completed, final run is on tape */
184         TSS_FINALMERGE                          /* Performing final merge on-the-fly */
185 } TupSortStatus;
186
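/*
 * Typical state progressions, as a sketch (see the file header comment for
 * the full story):
 *
 *		everything fits in memory:    INITIAL -> SORTEDINMEM
 *		bounded (top-N) sort:         INITIAL -> BOUNDED -> SORTEDINMEM
 *		external, random access:      INITIAL -> BUILDRUNS -> SORTEDONTAPE
 *		external, forward-only:       INITIAL -> BUILDRUNS -> FINALMERGE
 */
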
187 /*
188  * Parameters for calculation of number of tapes to use --- see inittapes()
189  * and tuplesort_merge_order().
190  *
191  * In this calculation we assume that each tape will cost us about 3 blocks
192  * worth of buffer space (which is an underestimate for very large data
193  * volumes, but it's probably close enough --- see logtape.c).
194  *
195  * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
196  * tape during a preread cycle (see discussion at top of file).
197  */
198 #define MINORDER                6               /* minimum merge order */
199 #define TAPE_BUFFER_OVERHEAD            (BLCKSZ * 3)
200 #define MERGE_BUFFER_SIZE                       (BLCKSZ * 32)
201
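/*
 * For illustration, these parameters imply a merge order (number of input
 * tapes) of roughly
 *
 *		(allowedMem - TAPE_BUFFER_OVERHEAD) /
 *			(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD)
 *
 * clamped to at least MINORDER.  This is a sketch of the sizing idea; see
 * tuplesort_merge_order() for the authoritative calculation.
 */
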
202 typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
203                                                                                                 Tuplesortstate *state);
204
205 /*
206  * Private state of a Tuplesort operation.
207  */
208 struct Tuplesortstate
209 {
210         TupSortStatus status;           /* enumerated value as shown above */
211         int                     nKeys;                  /* number of columns in sort key */
212         bool            randomAccess;   /* did caller request random access? */
213         bool            bounded;                /* did caller specify a maximum number of
214                                                                  * tuples to return? */
215         bool            boundUsed;              /* true if we made use of a bounded heap */
216         int                     bound;                  /* if bounded, the maximum number of tuples */
217         int64           availMem;               /* remaining memory available, in bytes */
218         int64           allowedMem;             /* total memory allowed, in bytes */
219         int                     maxTapes;               /* number of tapes (Knuth's T) */
220         int                     tapeRange;              /* maxTapes-1 (Knuth's P) */
221         MemoryContext sortcontext;      /* memory context holding all sort data */
222         LogicalTapeSet *tapeset;        /* logtape.c object for tapes in a temp file */
223
224         /*
225          * These function pointers decouple the routines that must know what kind
226          * of tuple we are sorting from the routines that don't need to know it.
227          * They are set up by the tuplesort_begin_xxx routines.
228          *
229          * Function to compare two tuples; result is per qsort() convention, ie:
230          * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
231          * qsort_arg_comparator.
232          */
233         SortTupleComparator comparetup;
234
235         /*
236          * Function to copy a supplied input tuple into palloc'd space and set up
237          * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
238          * state->availMem must be decreased by the amount of space used for the
239          * tuple copy (note the SortTuple struct itself is not counted).
240          */
241         void            (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
242
243         /*
244          * Function to write a stored tuple onto tape.  The representation of the
245          * tuple on tape need not be the same as it is in memory; requirements on
246          * the tape representation are given below.  After writing the tuple,
247          * pfree() the out-of-line data (not the SortTuple struct!), and increase
248          * state->availMem by the amount of memory space thereby released.
249          */
250         void            (*writetup) (Tuplesortstate *state, int tapenum,
251                                                                                  SortTuple *stup);
252
253         /*
254          * Function to read a stored tuple from tape back into memory. 'len' is
255          * the already-read length of the stored tuple.  Create a palloc'd copy,
256          * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
257          * decrease state->availMem by the amount of memory space consumed.
258          */
259         void            (*readtup) (Tuplesortstate *state, SortTuple *stup,
260                                                                                 int tapenum, unsigned int len);
261
262         /*
263          * This array holds the tuples now in sort memory.  If we are in state
264          * INITIAL, the tuples are in no particular order; if we are in state
265          * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
266          * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
267          * H.  (Note that memtupcount only counts the tuples that are part of the
268          * heap --- during merge passes, memtuples[] entries beyond tapeRange are
269          * never in the heap and are used to hold pre-read tuples.)  In state
270          * SORTEDONTAPE, the array is not used.
271          */
272         SortTuple  *memtuples;          /* array of SortTuple structs */
273         int                     memtupcount;    /* number of tuples currently present */
274         int                     memtupsize;             /* allocated length of memtuples array */
275         bool            growmemtuples;  /* memtuples' growth still underway? */
276
277         /*
278          * While building initial runs, this is the current output run number
279          * (starting at 0).  Afterwards, it is the number of initial runs we made.
280          */
281         int                     currentRun;
282
283         /*
284          * Unless otherwise noted, all pointer variables below are pointers to
285          * arrays of length maxTapes, holding per-tape data.
286          */
287
288         /*
289          * These variables are only used during merge passes.  mergeactive[i] is
290          * true if we are reading an input run from (actual) tape number i and
291          * have not yet exhausted that run.  mergenext[i] is the memtuples index
292          * of the next pre-read tuple (next to be loaded into the heap) for tape
293          * i, or 0 if we are out of pre-read tuples.  mergelast[i] similarly
294          * points to the last pre-read tuple from each tape.  mergeavailslots[i]
295          * is the number of unused memtuples[] slots reserved for tape i, and
296          * mergeavailmem[i] is the amount of unused space allocated for tape i.
297          * mergefreelist and mergefirstfree keep track of unused locations in the
298          * memtuples[] array.  The memtuples[].tupindex fields link together
299          * pre-read tuples for each tape as well as recycled locations in
300          * mergefreelist. It is OK to use 0 as a null link in these lists, because
301          * memtuples[0] is part of the merge heap and is never a pre-read tuple.
302          */
303         bool       *mergeactive;        /* active input run source? */
304         int                *mergenext;          /* first preread tuple for each source */
305         int                *mergelast;          /* last preread tuple for each source */
306         int                *mergeavailslots;    /* slots left for prereading each tape */
307         int64      *mergeavailmem;      /* availMem for prereading each tape */
308         int                     mergefreelist;  /* head of freelist of recycled slots */
309         int                     mergefirstfree; /* first slot never used in this merge */
310
311         /*
312          * Variables for Algorithm D.  Note that destTape is a "logical" tape
313          * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
314          * "logical" and "actual" tape numbers straight!
315          */
316         int                     Level;                  /* Knuth's l */
317         int                     destTape;               /* current output tape (Knuth's j, less 1) */
318         int                *tp_fib;                     /* Target Fibonacci run counts (A[]) */
319         int                *tp_runs;            /* # of real runs on each tape */
320         int                *tp_dummy;           /* # of dummy runs for each tape (D[]) */
321         int                *tp_tapenum;         /* Actual tape numbers (TAPE[]) */
322         int                     activeTapes;    /* # of active input tapes in merge pass */
323
324         /*
325          * These variables are used after completion of sorting to keep track of
326          * the next tuple to return.  (In the tape case, the tape's current read
327          * position is also critical state.)
328          */
329         int                     result_tape;    /* actual tape number of finished output */
330         int                     current;                /* array index (only used if SORTEDINMEM) */
331         bool            eof_reached;    /* reached EOF (needed for cursors) */
332
333         /* markpos_xxx holds marked position for mark and restore */
334         long            markpos_block;  /* tape block# (only used if SORTEDONTAPE) */
335         int                     markpos_offset; /* saved "current", or offset in tape block */
336         bool            markpos_eof;    /* saved "eof_reached" */
337
338         /*
339          * The sortKeys variable is used by every case other than the hash index
340          * case; it is set by tuplesort_begin_xxx.  tupDesc is only used by the
341          * MinimalTuple and CLUSTER routines, though.
342          */
343         TupleDesc       tupDesc;
344         SortSupport sortKeys;           /* array of length nKeys */
345
346         /*
347          * This variable is shared by the single-key MinimalTuple case and the
348          * Datum case (which both use qsort_ssup()).  Otherwise it's NULL.
349          */
350         SortSupport onlyKey;
351
352         /*
353          * Additional state for managing "abbreviated key" sortsupport routines
354          * (which currently may be used by all cases except the Datum sort case and
355          * hash index case).  Tracks the intervals at which the optimization's
356          * effectiveness is tested.
357          */
358         int64           abbrevNext;             /* Tuple # at which to next check applicability */
359
360         /*
361          * These variables are specific to the CLUSTER case; they are set by
362          * tuplesort_begin_cluster.
363          */
364         IndexInfo  *indexInfo;          /* info about index being used for reference */
365         EState     *estate;                     /* for evaluating index expressions */
366
367         /*
368          * These variables are specific to the IndexTuple case; they are set by
369          * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
370          */
371         Relation        heapRel;                /* table the index is being built on */
372         Relation        indexRel;               /* index being built */
373
374         /* These are specific to the index_btree subcase: */
375         bool            enforceUnique;  /* complain if we find duplicate tuples */
376
377         /* These are specific to the index_hash subcase: */
378         uint32          hash_mask;              /* mask for sortable part of hash code */
379
380         /*
381          * These variables are specific to the Datum case; they are set by
382          * tuplesort_begin_datum and used only by the DatumTuple routines.
383          */
384         Oid                     datumType;
385         /* we need typelen and byval in order to know how to copy the Datums. */
386         int                     datumTypeLen;
387         bool            datumTypeByVal;
388
389         /*
390          * Resource snapshot for time of sort start.
391          */
392 #ifdef TRACE_SORT
393         PGRUsage        ru_start;
394 #endif
395 };
396
397 #define COMPARETUP(state,a,b)   ((*(state)->comparetup) (a, b, state))
398 #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
399 #define WRITETUP(state,tape,stup)       ((*(state)->writetup) (state, tape, stup))
400 #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
401 #define LACKMEM(state)          ((state)->availMem < 0)
402 #define USEMEM(state,amt)       ((state)->availMem -= (amt))
403 #define FREEMEM(state,amt)      ((state)->availMem += (amt))
404
405 /*
406  * NOTES about on-tape representation of tuples:
407  *
408  * We require the first "unsigned int" of a stored tuple to be the total size
409  * on-tape of the tuple, including itself (so it is never zero; an all-zero
410  * unsigned int is used to delimit runs).  The remainder of the stored tuple
411  * may or may not match the in-memory representation of the tuple ---
412  * any conversion needed is the job of the writetup and readtup routines.
413  *
414  * If state->randomAccess is true, then the stored representation of the
415  * tuple must be followed by another "unsigned int" that is a copy of the
416  * length --- so the total tape space used is actually sizeof(unsigned int)
417  * more than the stored length value.  This allows read-backwards.  When
418  * randomAccess is not true, the write/read routines may omit the extra
419  * length word.
420  *
421  * writetup is expected to write both length words as well as the tuple
422  * data.  When readtup is called, the tape is positioned just after the
423  * front length word; readtup must read the tuple data and advance past
424  * the back length word (if present).
425  *
426  * The write/read routines can make use of the tuple description data
427  * stored in the Tuplesortstate record, if needed.  They are also expected
428  * to adjust state->availMem by the amount of memory space (not tape space!)
429  * released or consumed.  There is no error return from either writetup
430  * or readtup; they should ereport() on failure.
431  *
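 * As a sketch of the expected pattern (details vary per tuple kind; here
 * "tupbody" and "tupbodylen" stand in for whatever data a particular
 * writetup routine chooses to store), a writetup routine does roughly:
 *
 *		unsigned int tuplen = tupbodylen + sizeof(unsigned int);
 *
 *		LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
 *		LogicalTapeWrite(state->tapeset, tapenum, tupbody, tupbodylen);
 *		if (state->randomAccess)	/* trailing copy of length word */
 *			LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
 *
 *		FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
 *		pfree(stup->tuple);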
432  *
433  * NOTES about memory consumption calculations:
434  *
435  * We count space allocated for tuples against the workMem limit, plus
436  * the space used by the variable-size memtuples array.  Fixed-size space
437  * is not counted; it's small enough to not be interesting.
438  *
439  * Note that we count actual space used (as shown by GetMemoryChunkSpace)
440  * rather than the originally-requested size.  This is important since
441  * palloc can add substantial overhead.  It's not a complete answer since
442  * we won't count any wasted space in palloc allocation blocks, but it's
443  * a lot better than what we were doing before 7.3.
444  */
445
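/*
 * For example (a sketch of the accounting pattern described above; "tup"
 * stands for any palloc'd tuple copy):
 *
 *		tup = palloc(tuplen);
 *		... fill in the copy ...
 *		USEMEM(state, GetMemoryChunkSpace(tup));
 *
 * and when the copy is released:
 *
 *		FREEMEM(state, GetMemoryChunkSpace(tup));
 *		pfree(tup);
 */
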
446 /* When using this macro, beware of double evaluation of len */
447 #define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
448         do { \
449                 if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
450                         elog(ERROR, "unexpected end of data"); \
451         } while(0)
452
453
454 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
455 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
456 static bool consider_abort_common(Tuplesortstate *state);
457 static void inittapes(Tuplesortstate *state);
458 static void selectnewtape(Tuplesortstate *state);
459 static void mergeruns(Tuplesortstate *state);
460 static void mergeonerun(Tuplesortstate *state);
461 static void beginmerge(Tuplesortstate *state);
462 static void mergepreread(Tuplesortstate *state);
463 static void mergeprereadone(Tuplesortstate *state, int srcTape);
464 static void dumptuples(Tuplesortstate *state, bool alltuples);
465 static void make_bounded_heap(Tuplesortstate *state);
466 static void sort_bounded_heap(Tuplesortstate *state);
467 static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
468                                           int tupleindex, bool checkIndex);
469 static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
470 static void reversedirection(Tuplesortstate *state);
471 static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
472 static void markrunend(Tuplesortstate *state, int tapenum);
473 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
474                                 Tuplesortstate *state);
475 static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
476 static void writetup_heap(Tuplesortstate *state, int tapenum,
477                           SortTuple *stup);
478 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
479                          int tapenum, unsigned int len);
480 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
481                                    Tuplesortstate *state);
482 static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
483 static void writetup_cluster(Tuplesortstate *state, int tapenum,
484                                  SortTuple *stup);
485 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
486                                 int tapenum, unsigned int len);
487 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
488                                            Tuplesortstate *state);
489 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
490                                           Tuplesortstate *state);
491 static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
492 static void writetup_index(Tuplesortstate *state, int tapenum,
493                            SortTuple *stup);
494 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
495                           int tapenum, unsigned int len);
496 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
497                                  Tuplesortstate *state);
498 static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
499 static void writetup_datum(Tuplesortstate *state, int tapenum,
500                            SortTuple *stup);
501 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
502                           int tapenum, unsigned int len);
503 static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
504
505 /*
506  * Special versions of qsort just for SortTuple objects.  qsort_tuple() sorts
507  * any variant of SortTuples, using the appropriate comparetup function.
508  * qsort_ssup() is specialized for the case where the comparetup function
509  * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts
510  * and Datum sorts.
511  */
512 #include "qsort_tuple.c"
513
514
515 /*
516  *              tuplesort_begin_xxx
517  *
518  * Initialize for a tuple sort operation.
519  *
520  * After calling tuplesort_begin, the caller should call tuplesort_putXXX
521  * zero or more times, then call tuplesort_performsort when all the tuples
522  * have been supplied.  After performsort, retrieve the tuples in sorted
523  * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
524  * access was requested, rescan, markpos, and restorepos can also be called.)
525  * Call tuplesort_end to terminate the operation and release memory/disk space.
526  *
527  * Each variant of tuplesort_begin has a workMem parameter specifying the
528  * maximum number of kilobytes of RAM to use before spilling data to disk.
529  * (The normal value of this parameter is work_mem, but some callers use
530  * other values.)  Each variant also has a randomAccess parameter specifying
531  * whether the caller needs non-sequential access to the sort result.
532  */
533
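/*
 * A typical calling sequence, as a sketch only (error handling omitted;
 * get_next_input_slot() and process_output_slot() are hypothetical
 * caller-side helpers, and out_slot is a caller-provided TupleTableSlot):
 *
 *		Tuplesortstate *sortstate;
 *		TupleTableSlot *slot;
 *
 *		sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums,
 *										 sortOperators, sortCollations,
 *										 nullsFirstFlags,
 *										 work_mem, false);
 *		while ((slot = get_next_input_slot()) != NULL)
 *			tuplesort_puttupleslot(sortstate, slot);
 *		tuplesort_performsort(sortstate);
 *		while (tuplesort_gettupleslot(sortstate, true, out_slot))
 *			process_output_slot(out_slot);
 *		tuplesort_end(sortstate);
 */
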
534 static Tuplesortstate *
535 tuplesort_begin_common(int workMem, bool randomAccess)
536 {
537         Tuplesortstate *state;
538         MemoryContext sortcontext;
539         MemoryContext oldcontext;
540
541         /*
542          * Create a working memory context for this sort operation. All data
543          * needed by the sort will live inside this context.
544          */
545         sortcontext = AllocSetContextCreate(CurrentMemoryContext,
546                                                                                 "TupleSort",
547                                                                                 ALLOCSET_DEFAULT_MINSIZE,
548                                                                                 ALLOCSET_DEFAULT_INITSIZE,
549                                                                                 ALLOCSET_DEFAULT_MAXSIZE);
550
551         /*
552          * Make the Tuplesortstate within the per-sort context.  This way, we
553          * don't need a separate pfree() operation for it at shutdown.
554          */
555         oldcontext = MemoryContextSwitchTo(sortcontext);
556
557         state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
558
559 #ifdef TRACE_SORT
560         if (trace_sort)
561                 pg_rusage_init(&state->ru_start);
562 #endif
563
564         state->status = TSS_INITIAL;
565         state->randomAccess = randomAccess;
566         state->bounded = false;
567         state->boundUsed = false;
568         state->allowedMem = workMem * (int64) 1024;
569         state->availMem = state->allowedMem;
570         state->sortcontext = sortcontext;
571         state->tapeset = NULL;
572
573         state->memtupcount = 0;
574         state->memtupsize = 1024;       /* initial guess */
575         state->growmemtuples = true;
576         state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
577
578         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
579
580         /* workMem must be large enough for the minimal memtuples array */
581         if (LACKMEM(state))
582                 elog(ERROR, "insufficient memory allowed for sort");
583
584         state->currentRun = 0;
585
586         /*
587          * maxTapes, tapeRange, and Algorithm D variables will be initialized by
588          * inittapes(), if needed
589          */
590
591         state->result_tape = -1;        /* flag that result tape has not been formed */
592
593         MemoryContextSwitchTo(oldcontext);
594
595         return state;
596 }
597
598 Tuplesortstate *
599 tuplesort_begin_heap(TupleDesc tupDesc,
600                                          int nkeys, AttrNumber *attNums,
601                                          Oid *sortOperators, Oid *sortCollations,
602                                          bool *nullsFirstFlags,
603                                          int workMem, bool randomAccess)
604 {
605         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
606         MemoryContext oldcontext;
607         int                     i;
608
609         oldcontext = MemoryContextSwitchTo(state->sortcontext);
610
611         AssertArg(nkeys > 0);
612
613 #ifdef TRACE_SORT
614         if (trace_sort)
615                 elog(LOG,
616                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
617                          nkeys, workMem, randomAccess ? 't' : 'f');
618 #endif
619
620         state->nKeys = nkeys;
621
622         TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
623                                                                 false,  /* no unique check */
624                                                                 nkeys,
625                                                                 workMem,
626                                                                 randomAccess);
627
628         state->comparetup = comparetup_heap;
629         state->copytup = copytup_heap;
630         state->writetup = writetup_heap;
631         state->readtup = readtup_heap;
632
633         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
634         state->abbrevNext = 10;
635
636         /* Prepare SortSupport data for each column */
637         state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
638
639         for (i = 0; i < nkeys; i++)
640         {
641                 SortSupport sortKey = state->sortKeys + i;
642
643                 AssertArg(attNums[i] != 0);
644                 AssertArg(sortOperators[i] != 0);
645
646                 sortKey->ssup_cxt = CurrentMemoryContext;
647                 sortKey->ssup_collation = sortCollations[i];
648                 sortKey->ssup_nulls_first = nullsFirstFlags[i];
649                 sortKey->ssup_attno = attNums[i];
650                 /* Convey if abbreviation optimization is applicable in principle */
651                 sortKey->abbreviate = (i == 0);
652
653                 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
654         }
655
656         /*
657          * The "onlyKey" optimization cannot be used with abbreviated keys, since
658  * tie-breaker comparisons may be required.  In any case, the optimization
659  * is mainly of value for pass-by-value types, whereas abbreviated keys are
660  * mainly of value for pass-by-reference types.
661          */
662         if (nkeys == 1 && !state->sortKeys->abbrev_converter)
663                 state->onlyKey = state->sortKeys;
664
665         MemoryContextSwitchTo(oldcontext);
666
667         return state;
668 }
669
670 Tuplesortstate *
671 tuplesort_begin_cluster(TupleDesc tupDesc,
672                                                 Relation indexRel,
673                                                 int workMem, bool randomAccess)
674 {
675         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
676         ScanKey                 indexScanKey;
677         MemoryContext oldcontext;
678         int                             i;
679
680         Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
681
682         oldcontext = MemoryContextSwitchTo(state->sortcontext);
683
684 #ifdef TRACE_SORT
685         if (trace_sort)
686                 elog(LOG,
687                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
688                          RelationGetNumberOfAttributes(indexRel),
689                          workMem, randomAccess ? 't' : 'f');
690 #endif
691
692         state->nKeys = RelationGetNumberOfAttributes(indexRel);
693
694         TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
695                                                                 false,  /* no unique check */
696                                                                 state->nKeys,
697                                                                 workMem,
698                                                                 randomAccess);
699
700         state->comparetup = comparetup_cluster;
701         state->copytup = copytup_cluster;
702         state->writetup = writetup_cluster;
703         state->readtup = readtup_cluster;
704         state->abbrevNext = 10;
705
706         state->indexInfo = BuildIndexInfo(indexRel);
707
708         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
709
710         indexScanKey = _bt_mkscankey_nodata(indexRel);
711
712         if (state->indexInfo->ii_Expressions != NULL)
713         {
714                 TupleTableSlot *slot;
715                 ExprContext *econtext;
716
717                 /*
718                  * We will need to use FormIndexDatum to evaluate the index
719                  * expressions.  To do that, we need an EState, as well as a
720                  * TupleTableSlot to put the table tuples into.  The econtext's
721                  * scantuple has to point to that slot, too.
722                  */
723                 state->estate = CreateExecutorState();
724                 slot = MakeSingleTupleTableSlot(tupDesc);
725                 econtext = GetPerTupleExprContext(state->estate);
726                 econtext->ecxt_scantuple = slot;
727         }
728
729         /* Prepare SortSupport data for each column */
730         state->sortKeys = (SortSupport) palloc0(state->nKeys *
731                                                                                         sizeof(SortSupportData));
732
733         for (i = 0; i < state->nKeys; i++)
734         {
735                 SortSupport sortKey = state->sortKeys + i;
736                 ScanKey         scanKey = indexScanKey + i;
737                 int16           strategy;
738
739                 sortKey->ssup_cxt = CurrentMemoryContext;
740                 sortKey->ssup_collation = scanKey->sk_collation;
741                 sortKey->ssup_nulls_first =
742                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
743                 sortKey->ssup_attno = scanKey->sk_attno;
744                 /* Convey if abbreviation optimization is applicable in principle */
745                 sortKey->abbreviate = (i == 0);
746
747                 AssertState(sortKey->ssup_attno != 0);
748
749                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
750                         BTGreaterStrategyNumber : BTLessStrategyNumber;
751
752                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
753         }
754
755         _bt_freeskey(indexScanKey);
756
757         MemoryContextSwitchTo(oldcontext);
758
759         return state;
760 }
761
762 Tuplesortstate *
763 tuplesort_begin_index_btree(Relation heapRel,
764                                                         Relation indexRel,
765                                                         bool enforceUnique,
766                                                         int workMem, bool randomAccess)
767 {
768         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
769         ScanKey                 indexScanKey;
770         MemoryContext oldcontext;
771         int                             i;
772
773         oldcontext = MemoryContextSwitchTo(state->sortcontext);
774
775 #ifdef TRACE_SORT
776         if (trace_sort)
777                 elog(LOG,
778                          "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
779                          enforceUnique ? 't' : 'f',
780                          workMem, randomAccess ? 't' : 'f');
781 #endif
782
783         state->nKeys = RelationGetNumberOfAttributes(indexRel);
784
785         TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
786                                                                 enforceUnique,
787                                                                 state->nKeys,
788                                                                 workMem,
789                                                                 randomAccess);
790
791         state->comparetup = comparetup_index_btree;
792         state->copytup = copytup_index;
793         state->writetup = writetup_index;
794         state->readtup = readtup_index;
795         state->abbrevNext = 10;
796
797         state->heapRel = heapRel;
798         state->indexRel = indexRel;
799         state->enforceUnique = enforceUnique;
800
801         indexScanKey = _bt_mkscankey_nodata(indexRel);
802         state->nKeys = RelationGetNumberOfAttributes(indexRel);
803
804         /* Prepare SortSupport data for each column */
805         state->sortKeys = (SortSupport) palloc0(state->nKeys *
806                                                                                         sizeof(SortSupportData));
807
808         for (i = 0; i < state->nKeys; i++)
809         {
810                 SortSupport sortKey = state->sortKeys + i;
811                 ScanKey         scanKey = indexScanKey + i;
812                 int16           strategy;
813
814                 sortKey->ssup_cxt = CurrentMemoryContext;
815                 sortKey->ssup_collation = scanKey->sk_collation;
816                 sortKey->ssup_nulls_first =
817                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
818                 sortKey->ssup_attno = scanKey->sk_attno;
819                 /* Convey if abbreviation optimization is applicable in principle */
820                 sortKey->abbreviate = (i == 0);
821
822                 AssertState(sortKey->ssup_attno != 0);
823
824                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
825                         BTGreaterStrategyNumber : BTLessStrategyNumber;
826
827                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
828         }
829
830         _bt_freeskey(indexScanKey);
831
832         MemoryContextSwitchTo(oldcontext);
833
834         return state;
835 }
836
837 Tuplesortstate *
838 tuplesort_begin_index_hash(Relation heapRel,
839                                                    Relation indexRel,
840                                                    uint32 hash_mask,
841                                                    int workMem, bool randomAccess)
842 {
843         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
844         MemoryContext oldcontext;
845
846         oldcontext = MemoryContextSwitchTo(state->sortcontext);
847
848 #ifdef TRACE_SORT
849         if (trace_sort)
850                 elog(LOG,
851                 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
852                          hash_mask,
853                          workMem, randomAccess ? 't' : 'f');
854 #endif
855
856         state->nKeys = 1;                       /* Only one sort column, the hash code */
857
858         state->comparetup = comparetup_index_hash;
859         state->copytup = copytup_index;
860         state->writetup = writetup_index;
861         state->readtup = readtup_index;
862
863         state->heapRel = heapRel;
864         state->indexRel = indexRel;
865
866         state->hash_mask = hash_mask;
867
868         MemoryContextSwitchTo(oldcontext);
869
870         return state;
871 }
872
873 Tuplesortstate *
874 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
875                                           bool nullsFirstFlag,
876                                           int workMem, bool randomAccess)
877 {
878         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
879         MemoryContext oldcontext;
880         int16           typlen;
881         bool            typbyval;
882
883         oldcontext = MemoryContextSwitchTo(state->sortcontext);
884
885 #ifdef TRACE_SORT
886         if (trace_sort)
887                 elog(LOG,
888                          "begin datum sort: workMem = %d, randomAccess = %c",
889                          workMem, randomAccess ? 't' : 'f');
890 #endif
891
892         state->nKeys = 1;                       /* always a one-column sort */
893
894         TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
895                                                                 false,  /* no unique check */
896                                                                 1,
897                                                                 workMem,
898                                                                 randomAccess);
899
900         state->comparetup = comparetup_datum;
901         state->copytup = copytup_datum;
902         state->writetup = writetup_datum;
903         state->readtup = readtup_datum;
904
905         state->datumType = datumType;
906
907         /* Prepare SortSupport data */
908         state->onlyKey = (SortSupport) palloc0(sizeof(SortSupportData));
909
910         state->onlyKey->ssup_cxt = CurrentMemoryContext;
911         state->onlyKey->ssup_collation = sortCollation;
912         state->onlyKey->ssup_nulls_first = nullsFirstFlag;
913         /*
914          * Conversion to an abbreviated representation is infeasible in the Datum
915          * case.  It must be possible to subsequently fetch the original datum
916          * values within tuplesort_getdatum(), which would require special-case
917          * preservation of the original values.
918          */
919         state->onlyKey->abbreviate = false;
920
921         PrepareSortSupportFromOrderingOp(sortOperator, state->onlyKey);
922
923         /* lookup necessary attributes of the datum type */
924         get_typlenbyval(datumType, &typlen, &typbyval);
925         state->datumTypeLen = typlen;
926         state->datumTypeByVal = typbyval;
927
928         MemoryContextSwitchTo(oldcontext);
929
930         return state;
931 }
932
933 /*
934  * tuplesort_set_bound
935  *
936  *      Advise tuplesort that at most the first N result tuples are required.
937  *
938  * Must be called before inserting any tuples.  (Actually, we could allow it
939  * as long as the sort hasn't spilled to disk, but there seems no need for
940  * delayed calls at the moment.)
941  *
942  * This is a hint only. The tuplesort may still return more tuples than
943  * requested.
944  */
945 void
946 tuplesort_set_bound(Tuplesortstate *state, int64 bound)
947 {
948         /* Assert we're called before loading any tuples */
949         Assert(state->status == TSS_INITIAL);
950         Assert(state->memtupcount == 0);
951         Assert(!state->bounded);
952
953 #ifdef DEBUG_BOUNDED_SORT
954         /* Honor GUC setting that disables the feature (for easy testing) */
955         if (!optimize_bounded_sort)
956                 return;
957 #endif
958
959         /* We want to be able to compute bound * 2, so limit the setting */
960         if (bound > (int64) (INT_MAX / 2))
961                 return;
962
963         state->bounded = true;
964         state->bound = (int) bound;
965
966         /*
967          * Bounded sorts are not an effective target for abbreviated key
968          * optimization.  Disable by setting state to be consistent with no
969          * abbreviation support.
970          */
971         state->sortKeys->abbrev_converter = NULL;
972         if (state->sortKeys->abbrev_full_comparator)
973                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
974
975         /* Not strictly necessary, but be tidy */
976         state->sortKeys->abbrev_abort = NULL;
977         state->sortKeys->abbrev_full_comparator = NULL;
978 }
979
980 /*
981  * tuplesort_end
982  *
983  *      Release resources and clean up.
984  *
985  * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
986  * pointing to garbage.  Be careful not to attempt to use or free such
987  * pointers afterwards!
988  */
989 void
990 tuplesort_end(Tuplesortstate *state)
991 {
992         /* context swap probably not needed, but let's be safe */
993         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
994
995 #ifdef TRACE_SORT
996         long            spaceUsed;
997
998         if (state->tapeset)
999                 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
1000         else
1001                 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
1002 #endif
1003
1004         /*
1005          * Delete temporary "tape" files, if any.
1006          *
1007          * Note: want to include this in reported total cost of sort, hence need
1008          * for two #ifdef TRACE_SORT sections.
1009          */
1010         if (state->tapeset)
1011                 LogicalTapeSetClose(state->tapeset);
1012
1013 #ifdef TRACE_SORT
1014         if (trace_sort)
1015         {
1016                 if (state->tapeset)
1017                         elog(LOG, "external sort ended, %ld disk blocks used: %s",
1018                                  spaceUsed, pg_rusage_show(&state->ru_start));
1019                 else
1020                         elog(LOG, "internal sort ended, %ld KB used: %s",
1021                                  spaceUsed, pg_rusage_show(&state->ru_start));
1022         }
1023
1024         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
1025 #else
1026
1027         /*
1028          * If you disabled TRACE_SORT, you can still probe sort__done, but you
1029          * ain't getting space-used stats.
1030          */
1031         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
1032 #endif
1033
1034         /* Free any execution state created for CLUSTER case */
1035         if (state->estate != NULL)
1036         {
1037                 ExprContext *econtext = GetPerTupleExprContext(state->estate);
1038
1039                 ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
1040                 FreeExecutorState(state->estate);
1041         }
1042
1043         MemoryContextSwitchTo(oldcontext);
1044
1045         /*
1046          * Free the per-sort memory context, thereby releasing all working memory,
1047          * including the Tuplesortstate struct itself.
1048          */
1049         MemoryContextDelete(state->sortcontext);
1050 }
1051
1052 /*
1053  * Grow the memtuples[] array, if possible within our memory constraint.  We
1054  * must not exceed INT_MAX tuples in memory or the caller-provided memory
1055  * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
1056  *
1057  * Normally, at each increment we double the size of the array.  When doing
1058  * that would exceed a limit, we attempt one last, smaller increase (and then
1059  * clear the growmemtuples flag so we don't try any more).  That allows us to
1060  * use memory as fully as permitted; sticking to the pure doubling rule could
1061  * result in almost half going unused.  Because availMem moves around with
1062  * tuple addition/removal, we need some rule to prevent making repeated small
1063  * increases in memtupsize, which would just be useless thrashing.  The
1064  * growmemtuples flag accomplishes that and also prevents useless
1065  * recalculations in this function.
1066  */
1067 static bool
1068 grow_memtuples(Tuplesortstate *state)
1069 {
1070         int                     newmemtupsize;
1071         int                     memtupsize = state->memtupsize;
1072         int64           memNowUsed = state->allowedMem - state->availMem;
1073
1074         /* Forget it if we've already maxed out memtuples, per comment above */
1075         if (!state->growmemtuples)
1076                 return false;
1077
1078         /* Select new value of memtupsize */
1079         if (memNowUsed <= state->availMem)
1080         {
1081                 /*
1082                  * We've used no more than half of allowedMem; double our usage,
1083                  * clamping at INT_MAX tuples.
1084                  */
1085                 if (memtupsize < INT_MAX / 2)
1086                         newmemtupsize = memtupsize * 2;
1087                 else
1088                 {
1089                         newmemtupsize = INT_MAX;
1090                         state->growmemtuples = false;
1091                 }
1092         }
1093         else
1094         {
1095                 /*
1096                  * This will be the last increment of memtupsize.  Abandon doubling
1097                  * strategy and instead increase as much as we safely can.
1098                  *
1099                  * To stay within allowedMem, we can't increase memtupsize by more
1100                  * than availMem / sizeof(SortTuple) elements.  In practice, we want
1101                  * to increase it by considerably less, because we need to leave some
1102                  * space for the tuples to which the new array slots will refer.  We
1103                  * assume the new tuples will be about the same size as the tuples
1104                  * we've already seen, and thus we can extrapolate from the space
1105                  * consumption so far to estimate an appropriate new size for the
1106                  * memtuples array.  The optimal value might be higher or lower than
1107                  * this estimate, but it's hard to know that in advance.  We again
1108                  * clamp at INT_MAX tuples.
1109                  *
1110                  * This calculation is safe against enlarging the array so much that
1111                  * LACKMEM becomes true, because the memory currently used includes
1112                  * the present array; thus, there would be enough allowedMem for the
1113                  * new array elements even if no other memory were currently used.
1114                  *
1115                  * We do the arithmetic in float8, because otherwise the product of
1116                  * memtupsize and allowedMem could overflow.  Any inaccuracy in the
1117                  * result should be insignificant; but even if we computed a
1118                  * completely insane result, the checks below will prevent anything
1119                  * really bad from happening.
1120                  */
1121                 double          grow_ratio;
1122
1123                 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1124                 if (memtupsize * grow_ratio < INT_MAX)
1125                         newmemtupsize = (int) (memtupsize * grow_ratio);
1126                 else
1127                         newmemtupsize = INT_MAX;
1128
1129                 /* We won't make any further enlargement attempts */
1130                 state->growmemtuples = false;
1131         }
1132
1133         /* Must enlarge array by at least one element, else report failure */
1134         if (newmemtupsize <= memtupsize)
1135                 goto noalloc;
1136
1137         /*
1138          * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
1139          * to ensure our request won't be rejected.  Note that we can easily
1140          * exhaust address space before facing this outcome.  (This is presently
1141          * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1142          * don't rely on that at this distance.)
1143          */
1144         if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1145         {
1146                 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1147                 state->growmemtuples = false;   /* can't grow any more */
1148         }
1149
1150         /*
1151          * We need to be sure that we do not cause LACKMEM to become true, else
1152          * the space management algorithm will go nuts.  The code above should
1153          * never generate a dangerous request, but to be safe, check explicitly
1154          * that the array growth fits within availMem.  (We could still cause
1155          * LACKMEM if the memory chunk overhead associated with the memtuples
1156          * array were to increase.  That shouldn't happen with any sane value of
1157          * allowedMem, because at any array size large enough to risk LACKMEM,
1158          * palloc would be treating both old and new arrays as separate chunks.
1159          * But we'll check LACKMEM explicitly below just in case.)
1160          */
1161         if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1162                 goto noalloc;
1163
1164         /* OK, do it */
1165         FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1166         state->memtupsize = newmemtupsize;
1167         state->memtuples = (SortTuple *)
1168                 repalloc_huge(state->memtuples,
1169                                           state->memtupsize * sizeof(SortTuple));
1170         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1171         if (LACKMEM(state))
1172                 elog(ERROR, "unexpected out-of-memory situation during sort");
1173         return true;
1174
1175 noalloc:
1176         /* If for any reason we didn't realloc, shut off future attempts */
1177         state->growmemtuples = false;
1178         return false;
1179 }
1180
1181 /*
1182  * Accept one tuple while collecting input data for sort.
1183  *
1184  * Note that the input data is always copied; the caller need not save it.
1185  */
1186 void
1187 tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
1188 {
1189         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1190         SortTuple       stup;
1191
1192         /*
1193          * Copy the given tuple into memory we control, and decrease availMem.
1194          * Then call the common code.
1195          */
1196         COPYTUP(state, &stup, (void *) slot);
1197
1198         puttuple_common(state, &stup);
1199
1200         MemoryContextSwitchTo(oldcontext);
1201 }
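
/*
 * Illustrative caller pattern (a sketch only --- real callers such as the
 * executor's Sort node differ in detail, and tuplesort.h has the
 * authoritative declarations).  get_next_input_slot() and process() are
 * hypothetical stand-ins for the caller's own input and output handling:
 *
 *		Tuplesortstate *ts = tuplesort_begin_heap(tupDesc, nkeys, attNums,
 *												  sortOperators, sortCollations,
 *												  nullsFirstFlags,
 *												  work_mem, false);
 *
 *		while ((slot = get_next_input_slot()) != NULL)
 *			tuplesort_puttupleslot(ts, slot);
 *		tuplesort_performsort(ts);
 *		while (tuplesort_gettupleslot(ts, true, outslot))
 *			process(outslot);
 *		tuplesort_end(ts);
 */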
1202
1203 /*
1204  * Accept one tuple while collecting input data for sort.
1205  *
1206  * Note that the input data is always copied; the caller need not save it.
1207  */
1208 void
1209 tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
1210 {
1211         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1212         SortTuple       stup;
1213
1214         /*
1215          * Copy the given tuple into memory we control, and decrease availMem.
1216          * Then call the common code.
1217          */
1218         COPYTUP(state, &stup, (void *) tup);
1219
1220         puttuple_common(state, &stup);
1221
1222         MemoryContextSwitchTo(oldcontext);
1223 }
1224
1225 /*
1226  * Collect one index tuple while collecting input data for sort, building
1227  * it from caller-supplied values.
1228  */
1229 void
1230 tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
1231                                                           ItemPointer self, Datum *values,
1232                                                           bool *isnull)
1233 {
1234         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1235         SortTuple       stup;
1236         Datum           original;
1237         IndexTuple      tuple;
1238
1239         stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull);
1240         tuple = ((IndexTuple) stup.tuple);
1241         tuple->t_tid = *self;
1242         USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1243         /* set up first-column key value */
1244         original = index_getattr(tuple,
1245                                                          1,
1246                                                          RelationGetDescr(state->indexRel),
1247                                                          &stup.isnull1);
1248
1249         if (!state->sortKeys->abbrev_converter || stup.isnull1)
1250         {
1251                 /*
1252                  * Store ordinary Datum representation, or NULL value.  If there is a
1253                  * converter it won't expect NULL values, and the cost model is not
1254                  * required to account for NULLs, so in that case we avoid calling the
1255                  * converter and just set datum1 to the "void" representation (to be
1256                  * consistent).
1257                  */
1258                 stup.datum1 = original;
1259         }
1260         else if (!consider_abort_common(state))
1261         {
1262                 /* Store abbreviated key representation */
1263                 stup.datum1 = state->sortKeys->abbrev_converter(original,
1264                                                                                                                 state->sortKeys);
1265         }
1266         else
1267         {
1268                 /* Abort abbreviation */
1269                 int             i;
1270
1271                 stup.datum1 = original;
1272
1273                 /*
1274                  * Set state to be consistent with never trying abbreviation.
1275                  *
1276                  * Alter datum1 representation in already-copied tuples, so as to
1277                  * ensure a consistent representation (current tuple was just handled).
1278                  * Note that we rely on all tuples copied so far actually being
1279                  * contained within the memtuples array.
1280                  */
1281                 for (i = 0; i < state->memtupcount; i++)
1282                 {
1283                         SortTuple *mtup = &state->memtuples[i];
1284
1285                         tuple = mtup->tuple;
1286                         mtup->datum1 = index_getattr(tuple,
1287                                                                                  1,
1288                                                                                  RelationGetDescr(state->indexRel),
1289                                                                                  &mtup->isnull1);
1290                 }
1291         }
1292
1293         puttuple_common(state, &stup);
1294
1295         MemoryContextSwitchTo(oldcontext);
1296 }
1297
1298 /*
1299  * Accept one Datum while collecting input data for sort.
1300  *
1301  * If the Datum is pass-by-ref type, the value will be copied.
1302  */
1303 void
1304 tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
1305 {
1306         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1307         SortTuple       stup;
1308
1309         /*
1310          * If it's a pass-by-reference value, copy it into memory we control, and
1311          * decrease availMem.  Then call the common code.
1312          */
1313         if (isNull || state->datumTypeByVal)
1314         {
1315                 stup.datum1 = val;
1316                 stup.isnull1 = isNull;
1317                 stup.tuple = NULL;              /* no separate storage */
1318         }
1319         else
1320         {
1321                 stup.datum1 = datumCopy(val, false, state->datumTypeLen);
1322                 stup.isnull1 = false;
1323                 stup.tuple = DatumGetPointer(stup.datum1);
1324                 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1325         }
1326
1327         puttuple_common(state, &stup);
1328
1329         MemoryContextSwitchTo(oldcontext);
1330 }
1331
1332 /*
1333  * Shared code for tuple and datum cases.
1334  */
1335 static void
1336 puttuple_common(Tuplesortstate *state, SortTuple *tuple)
1337 {
1338         switch (state->status)
1339         {
1340                 case TSS_INITIAL:
1341
1342                         /*
1343                          * Save the tuple into the unsorted array.  First, grow the array
1344                          * as needed.  Note that we try to grow the array when there is
1345                          * still one free slot remaining --- if we fail, there'll still be
1346                          * room to store the incoming tuple, and then we'll switch to
1347                          * tape-based operation.
1348                          */
1349                         if (state->memtupcount >= state->memtupsize - 1)
1350                         {
1351                                 (void) grow_memtuples(state);
1352                                 Assert(state->memtupcount < state->memtupsize);
1353                         }
1354                         state->memtuples[state->memtupcount++] = *tuple;
1355
1356                         /*
1357                          * Check if it's time to switch over to a bounded heapsort. We do
1358                          * so if the input tuple count exceeds twice the desired tuple
1359                          * count (this is a heuristic for where heapsort becomes cheaper
1360                          * than a quicksort), or if we've just filled workMem and have
1361                          * enough tuples to meet the bound.
1362                          *
1363                          * Note that once we enter TSS_BOUNDED state we will always try to
1364                          * complete the sort that way.  In the worst case, if later input
1365                          * tuples are larger than earlier ones, this might cause us to
1366                          * exceed workMem significantly.
1367                          */
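                        /*
                         * For example (illustrative only): a query such as ORDER BY x
                         * LIMIT 100 would typically set bound = 100, so we switch to
                         * the bounded heap once more than 200 tuples have been
                         * absorbed, or earlier if workMem fills up while we already
                         * hold at least 100 tuples.
                         */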
1368                         if (state->bounded &&
1369                                 (state->memtupcount > state->bound * 2 ||
1370                                  (state->memtupcount > state->bound && LACKMEM(state))))
1371                         {
1372 #ifdef TRACE_SORT
1373                                 if (trace_sort)
1374                                         elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1375                                                  state->memtupcount,
1376                                                  pg_rusage_show(&state->ru_start));
1377 #endif
1378                                 make_bounded_heap(state);
1379                                 return;
1380                         }
1381
1382                         /*
1383                          * Done if we still fit in available memory and have array slots.
1384                          */
1385                         if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1386                                 return;
1387
1388                         /*
1389                          * Nope; time to switch to tape-based operation.
1390                          */
1391                         inittapes(state);
1392
1393                         /*
1394                          * Dump tuples until we are back under the limit.
1395                          */
1396                         dumptuples(state, false);
1397                         break;
1398
1399                 case TSS_BOUNDED:
1400
1401                         /*
1402                          * We don't want to grow the array here, so check whether the new
1403                          * tuple can be discarded before putting it in.  This should be a
1404                          * good speed optimization, too, since when there are many more
1405                          * input tuples than the bound, most input tuples can be discarded
1406                          * with just this one comparison.  Note that because we currently
1407                          * have the sort direction reversed, we must check for <= not >=.
1408                          */
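                        /*
                         * Example (illustrative): for ORDER BY x LIMIT 10, the heap
                         * holds the 10 smallest x values seen so far, with the largest
                         * of them at the top because the comparison direction is
                         * reversed.  An incoming tuple whose x is no smaller than that
                         * top value can never appear in the first 10 output rows, so
                         * it is discarded without disturbing the rest of the heap.
                         */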
1409                         if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1410                         {
1411                                 /* new tuple <= top of the heap, so we can discard it */
1412                                 free_sort_tuple(state, tuple);
1413                                 CHECK_FOR_INTERRUPTS();
1414                         }
1415                         else
1416                         {
1417                                 /* discard top of heap, sift up, insert new tuple */
1418                                 free_sort_tuple(state, &state->memtuples[0]);
1419                                 tuplesort_heap_siftup(state, false);
1420                                 tuplesort_heap_insert(state, tuple, 0, false);
1421                         }
1422                         break;
1423
1424                 case TSS_BUILDRUNS:
1425
1426                         /*
1427                          * Insert the tuple into the heap, with run number currentRun if
1428                          * it can go into the current run, else run number currentRun+1.
1429                          * The tuple can go into the current run if it is >= the first
1430                          * not-yet-output tuple.  (Actually, it could go into the current
1431                          * run if it is >= the most recently output tuple ... but that
1432                          * would require keeping around the tuple we last output, and it's
1433                          * simplest to let writetup free each tuple as soon as it's
1434                          * written.)
1435                          *
1436                          * Note there will always be at least one tuple in the heap at
1437                          * this point; see dumptuples.
1438                          */
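                        /*
                         * For instance (illustrative values): if the smallest
                         * not-yet-output key in the heap is 42, an incoming tuple with
                         * key 50 can still be emitted as part of the current ascending
                         * run, while one with key 40 cannot and is tagged with run
                         * number currentRun + 1, to be held back until the next run is
                         * started.
                         */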
1439                         Assert(state->memtupcount > 0);
1440                         if (COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1441                                 tuplesort_heap_insert(state, tuple, state->currentRun, true);
1442                         else
1443                                 tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);
1444
1445                         /*
1446                          * If we are over the memory limit, dump tuples till we're under.
1447                          */
1448                         dumptuples(state, false);
1449                         break;
1450
1451                 default:
1452                         elog(ERROR, "invalid tuplesort state");
1453                         break;
1454         }
1455 }
1456
1457 static bool
1458 consider_abort_common(Tuplesortstate *state)
1459 {
1460         Assert(state->sortKeys[0].abbrev_converter != NULL);
1461         Assert(state->sortKeys[0].abbrev_abort != NULL);
1462         Assert(state->sortKeys[0].abbrev_full_comparator != NULL);
1463
1464         /*
1465          * Check effectiveness of abbreviation optimization.  Consider aborting
1466          * when still within memory limit.
1467          */
1468         if (state->status == TSS_INITIAL &&
1469                 state->memtupcount >= state->abbrevNext)
1470         {
1471                 state->abbrevNext *= 2;
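                /*
                 * Doubling abbrevNext means the abort check is made at
                 * geometrically spaced points, e.g. after roughly 10, 20, 40,
                 * 80, ... tuples if abbrevNext starts at 10 (its initial value
                 * at the time of writing), keeping the cost of these checks
                 * negligible relative to the sort itself.
                 */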
1472
1473                 /*
1474                  * Check opclass-supplied abbreviation abort routine.  It may
1475                  * indicate that abbreviation should not proceed.
1476                  */
1477                 if (!state->sortKeys->abbrev_abort(state->memtupcount,
1478                                                                                    state->sortKeys))
1479                         return false;
1480
1481                 /*
1482                  * Finally, restore authoritative comparator, and indicate that
1483                  * abbreviation is not in play by setting abbrev_converter to NULL
1484                  */
1485                 state->sortKeys[0].comparator = state->sortKeys[0].abbrev_full_comparator;
1486                 state->sortKeys[0].abbrev_converter = NULL;
1487                 /* Not strictly necessary, but be tidy */
1488                 state->sortKeys[0].abbrev_abort = NULL;
1489                 state->sortKeys[0].abbrev_full_comparator = NULL;
1490
1491                 /* Give up - expect original pass-by-value representation */
1492                 return true;
1493         }
1494
1495         return false;
1496 }
1497
1498 /*
1499  * All tuples have been provided; finish the sort.
1500  */
1501 void
1502 tuplesort_performsort(Tuplesortstate *state)
1503 {
1504         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1505
1506 #ifdef TRACE_SORT
1507         if (trace_sort)
1508                 elog(LOG, "performsort starting: %s",
1509                          pg_rusage_show(&state->ru_start));
1510 #endif
1511
1512         switch (state->status)
1513         {
1514                 case TSS_INITIAL:
1515
1516                         /*
1517                          * We were able to accumulate all the tuples within the allowed
1518                          * amount of memory.  Just qsort 'em and we're done.
1519                          */
1520                         if (state->memtupcount > 1)
1521                         {
1522                                 /* Can we use the single-key sort function? */
1523                                 if (state->onlyKey != NULL)
1524                                         qsort_ssup(state->memtuples, state->memtupcount,
1525                                                            state->onlyKey);
1526                                 else
1527                                         qsort_tuple(state->memtuples,
1528                                                                 state->memtupcount,
1529                                                                 state->comparetup,
1530                                                                 state);
1531                         }
1532                         state->current = 0;
1533                         state->eof_reached = false;
1534                         state->markpos_offset = 0;
1535                         state->markpos_eof = false;
1536                         state->status = TSS_SORTEDINMEM;
1537                         break;
1538
1539                 case TSS_BOUNDED:
1540
1541                         /*
1542                          * We were able to accumulate all the tuples required for output
1543                          * in memory, using a heap to eliminate excess tuples.  Now we
1544                          * have to transform the heap to a properly-sorted array.
1545                          */
1546                         sort_bounded_heap(state);
1547                         state->current = 0;
1548                         state->eof_reached = false;
1549                         state->markpos_offset = 0;
1550                         state->markpos_eof = false;
1551                         state->status = TSS_SORTEDINMEM;
1552                         break;
1553
1554                 case TSS_BUILDRUNS:
1555
1556                         /*
1557                          * Finish tape-based sort.  First, flush all tuples remaining in
1558                          * memory out to tape; then merge until we have a single remaining
1559                          * run (or, if !randomAccess, one run per tape). Note that
1560                          * mergeruns sets the correct state->status.
1561                          */
1562                         dumptuples(state, true);
1563                         mergeruns(state);
1564                         state->eof_reached = false;
1565                         state->markpos_block = 0L;
1566                         state->markpos_offset = 0;
1567                         state->markpos_eof = false;
1568                         break;
1569
1570                 default:
1571                         elog(ERROR, "invalid tuplesort state");
1572                         break;
1573         }
1574
1575 #ifdef TRACE_SORT
1576         if (trace_sort)
1577         {
1578                 if (state->status == TSS_FINALMERGE)
1579                         elog(LOG, "performsort done (except %d-way final merge): %s",
1580                                  state->activeTapes,
1581                                  pg_rusage_show(&state->ru_start));
1582                 else
1583                         elog(LOG, "performsort done: %s",
1584                                  pg_rusage_show(&state->ru_start));
1585         }
1586 #endif
1587
1588         MemoryContextSwitchTo(oldcontext);
1589 }
1590
1591 /*
1592  * Internal routine to fetch the next tuple in either forward or back
1593  * direction into *stup.  Returns FALSE if no more tuples.
1594  * If *should_free is set, the caller must pfree stup.tuple when done with it.
1595  */
1596 static bool
1597 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
1598                                                   SortTuple *stup, bool *should_free)
1599 {
1600         unsigned int tuplen;
1601
1602         switch (state->status)
1603         {
1604                 case TSS_SORTEDINMEM:
1605                         Assert(forward || state->randomAccess);
1606                         *should_free = false;
1607                         if (forward)
1608                         {
1609                                 if (state->current < state->memtupcount)
1610                                 {
1611                                         *stup = state->memtuples[state->current++];
1612                                         return true;
1613                                 }
1614                                 state->eof_reached = true;
1615
1616                                 /*
1617                                  * Complain if caller tries to retrieve more tuples than
1618                                  * originally asked for in a bounded sort.  This is because
1619                                  * returning EOF here might be the wrong thing.
1620                                  */
1621                                 if (state->bounded && state->current >= state->bound)
1622                                         elog(ERROR, "retrieved too many tuples in a bounded sort");
1623
1624                                 return false;
1625                         }
1626                         else
1627                         {
1628                                 if (state->current <= 0)
1629                                         return false;
1630
1631                                 /*
1632                                  * If all tuples were already fetched, return the last tuple;
1633                                  * otherwise, return the tuple before the last one returned.
1634                                  */
1635                                 if (state->eof_reached)
1636                                         state->eof_reached = false;
1637                                 else
1638                                 {
1639                                         state->current--;       /* last returned tuple */
1640                                         if (state->current <= 0)
1641                                                 return false;
1642                                 }
1643                                 *stup = state->memtuples[state->current - 1];
1644                                 return true;
1645                         }
1646                         break;
1647
1648                 case TSS_SORTEDONTAPE:
1649                         Assert(forward || state->randomAccess);
1650                         *should_free = true;
1651                         if (forward)
1652                         {
1653                                 if (state->eof_reached)
1654                                         return false;
1655                                 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1656                                 {
1657                                         READTUP(state, stup, state->result_tape, tuplen);
1658                                         return true;
1659                                 }
1660                                 else
1661                                 {
1662                                         state->eof_reached = true;
1663                                         return false;
1664                                 }
1665                         }
1666
1667                         /*
1668                          * Backward.
1669                          *
1670                          * If all tuples were already fetched, return the last tuple;
1671                          * otherwise, return the tuple before the last one returned.
1672                          */
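                        /*
                         * Sketch of the on-tape layout relied on below (randomAccess
                         * sorts also write a trailing copy of each tuple's length
                         * word, and each run ends with a zero length word):
                         *
                         *		[len][tuple][len] [len][tuple][len] ... [0]
                         *
                         * so moving backward means repeatedly backspacing over a
                         * trailing length word, reading it, and then backspacing over
                         * the tuple it describes.
                         */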
1673                         if (state->eof_reached)
1674                         {
1675                                 /*
1676                                  * Seek position is pointing just past the zero tuplen at the
1677                                  * end of file; back up to fetch last tuple's ending length
1678                                  * word.  If seek fails we must have a completely empty file.
1679                                  */
1680                                 if (!LogicalTapeBackspace(state->tapeset,
1681                                                                                   state->result_tape,
1682                                                                                   2 * sizeof(unsigned int)))
1683                                         return false;
1684                                 state->eof_reached = false;
1685                         }
1686                         else
1687                         {
1688                                 /*
1689                                  * Back up and fetch previously-returned tuple's ending length
1690                                  * word.  If seek fails, assume we are at start of file.
1691                                  */
1692                                 if (!LogicalTapeBackspace(state->tapeset,
1693                                                                                   state->result_tape,
1694                                                                                   sizeof(unsigned int)))
1695                                         return false;
1696                                 tuplen = getlen(state, state->result_tape, false);
1697
1698                                 /*
1699                                  * Back up to get ending length word of tuple before it.
1700                                  */
1701                                 if (!LogicalTapeBackspace(state->tapeset,
1702                                                                                   state->result_tape,
1703                                                                                   tuplen + 2 * sizeof(unsigned int)))
1704                                 {
1705                                         /*
1706                                          * If that fails, presumably the prev tuple is the first
1707                                          * in the file.  Back up so that it becomes next to read
1708                                          * in forward direction (not obviously right, but that is
1709                                          * what the in-memory case does).
1710                                          */
1711                                         if (!LogicalTapeBackspace(state->tapeset,
1712                                                                                           state->result_tape,
1713                                                                                           tuplen + sizeof(unsigned int)))
1714                                                 elog(ERROR, "bogus tuple length in backward scan");
1715                                         return false;
1716                                 }
1717                         }
1718
1719                         tuplen = getlen(state, state->result_tape, false);
1720
1721                         /*
1722                          * Now we have the length of the prior tuple, back up and read it.
1723                          * Note: READTUP expects we are positioned after the initial
1724                          * length word of the tuple, so back up to that point.
1725                          */
1726                         if (!LogicalTapeBackspace(state->tapeset,
1727                                                                           state->result_tape,
1728                                                                           tuplen))
1729                                 elog(ERROR, "bogus tuple length in backward scan");
1730                         READTUP(state, stup, state->result_tape, tuplen);
1731                         return true;
1732
1733                 case TSS_FINALMERGE:
1734                         Assert(forward);
1735                         *should_free = true;
1736
1737                         /*
1738                          * This code should match the inner loop of mergeonerun().
1739                          */
1740                         if (state->memtupcount > 0)
1741                         {
1742                                 int                     srcTape = state->memtuples[0].tupindex;
1743                                 Size            tuplen;
1744                                 int                     tupIndex;
1745                                 SortTuple  *newtup;
1746
1747                                 *stup = state->memtuples[0];
1748                                 /* returned tuple is no longer counted in our memory space */
1749                                 if (stup->tuple)
1750                                 {
1751                                         tuplen = GetMemoryChunkSpace(stup->tuple);
1752                                         state->availMem += tuplen;
1753                                         state->mergeavailmem[srcTape] += tuplen;
1754                                 }
1755                                 tuplesort_heap_siftup(state, false);
1756                                 if ((tupIndex = state->mergenext[srcTape]) == 0)
1757                                 {
1758                                         /*
1759                                          * out of preloaded data on this tape, try to read more
1760                                          *
1761                                          * Unlike mergeonerun(), we only preload from the single
1762                                          * tape that's run dry.  See mergepreread() comments.
1763                                          */
1764                                         mergeprereadone(state, srcTape);
1765
1766                                         /*
1767                                          * if still no data, we've reached end of run on this tape
1768                                          */
1769                                         if ((tupIndex = state->mergenext[srcTape]) == 0)
1770                                                 return true;
1771                                 }
1772                                 /* pull next preread tuple from list, insert in heap */
1773                                 newtup = &state->memtuples[tupIndex];
1774                                 state->mergenext[srcTape] = newtup->tupindex;
1775                                 if (state->mergenext[srcTape] == 0)
1776                                         state->mergelast[srcTape] = 0;
1777                                 tuplesort_heap_insert(state, newtup, srcTape, false);
1778                                 /* put the now-unused memtuples entry on the freelist */
1779                                 newtup->tupindex = state->mergefreelist;
1780                                 state->mergefreelist = tupIndex;
1781                                 state->mergeavailslots[srcTape]++;
1782                                 return true;
1783                         }
1784                         return false;
1785
1786                 default:
1787                         elog(ERROR, "invalid tuplesort state");
1788                         return false;           /* keep compiler quiet */
1789         }
1790 }
1791
1792 /*
1793  * Fetch the next tuple in either forward or back direction.
1794  * If successful, put tuple in slot and return TRUE; else, clear the slot
1795  * and return FALSE.
1796  */
1797 bool
1798 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
1799                                            TupleTableSlot *slot)
1800 {
1801         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1802         SortTuple       stup;
1803         bool            should_free;
1804
1805         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
1806                 stup.tuple = NULL;
1807
1808         MemoryContextSwitchTo(oldcontext);
1809
1810         if (stup.tuple)
1811         {
1812                 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
1813                 return true;
1814         }
1815         else
1816         {
1817                 ExecClearTuple(slot);
1818                 return false;
1819         }
1820 }
1821
1822 /*
1823  * Fetch the next tuple in either forward or back direction.
1824  * Returns NULL if no more tuples.  If *should_free is set, the
1825  * caller must pfree the returned tuple when done with it.
1826  */
1827 HeapTuple
1828 tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
1829 {
1830         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1831         SortTuple       stup;
1832
1833         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
1834                 stup.tuple = NULL;
1835
1836         MemoryContextSwitchTo(oldcontext);
1837
1838         return stup.tuple;
1839 }
1840
1841 /*
1842  * Fetch the next index tuple in either forward or back direction.
1843  * Returns NULL if no more tuples.  If *should_free is set, the
1844  * caller must pfree the returned tuple when done with it.
1845  */
1846 IndexTuple
1847 tuplesort_getindextuple(Tuplesortstate *state, bool forward,
1848                                                 bool *should_free)
1849 {
1850         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1851         SortTuple       stup;
1852
1853         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
1854                 stup.tuple = NULL;
1855
1856         MemoryContextSwitchTo(oldcontext);
1857
1858         return (IndexTuple) stup.tuple;
1859 }
1860
1861 /*
1862  * Fetch the next Datum in either forward or back direction.
1863  * Returns FALSE if no more datums.
1864  *
1865  * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
1866  * and is now owned by the caller.
1867  */
1868 bool
1869 tuplesort_getdatum(Tuplesortstate *state, bool forward,
1870                                    Datum *val, bool *isNull)
1871 {
1872         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1873         SortTuple       stup;
1874         bool            should_free;
1875
1876         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
1877         {
1878                 MemoryContextSwitchTo(oldcontext);
1879                 return false;
1880         }
1881
1882         if (stup.isnull1 || state->datumTypeByVal)
1883         {
1884                 *val = stup.datum1;
1885                 *isNull = stup.isnull1;
1886         }
1887         else
1888         {
1889                 if (should_free)
1890                         *val = stup.datum1;
1891                 else
1892                         *val = datumCopy(stup.datum1, false, state->datumTypeLen);
1893                 *isNull = false;
1894         }
1895
1896         MemoryContextSwitchTo(oldcontext);
1897
1898         return true;
1899 }
1900
1901 /*
1902  * Advance over N tuples in either forward or back direction,
1903  * without returning any data.  N==0 is a no-op.
1904  * Returns TRUE if successful, FALSE if ran out of tuples.
1905  */
1906 bool
1907 tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
1908 {
1909         MemoryContext oldcontext;
1910
1911         /*
1912          * We don't actually support backwards skip yet, because no callers need
1913          * it.  The API is designed to allow for that later, though.
1914          */
1915         Assert(forward);
1916         Assert(ntuples >= 0);
1917
1918         switch (state->status)
1919         {
1920                 case TSS_SORTEDINMEM:
1921                         if (state->memtupcount - state->current >= ntuples)
1922                         {
1923                                 state->current += ntuples;
1924                                 return true;
1925                         }
1926                         state->current = state->memtupcount;
1927                         state->eof_reached = true;
1928
1929                         /*
1930                          * Complain if caller tries to retrieve more tuples than
1931                          * originally asked for in a bounded sort.  This is because
1932                          * returning EOF here might be the wrong thing.
1933                          */
1934                         if (state->bounded && state->current >= state->bound)
1935                                 elog(ERROR, "retrieved too many tuples in a bounded sort");
1936
1937                         return false;
1938
1939                 case TSS_SORTEDONTAPE:
1940                 case TSS_FINALMERGE:
1941
1942                         /*
1943                          * We could probably optimize these cases better, but for now it's
1944                          * not worth the trouble.
1945                          */
1946                         oldcontext = MemoryContextSwitchTo(state->sortcontext);
1947                         while (ntuples-- > 0)
1948                         {
1949                                 SortTuple       stup;
1950                                 bool            should_free;
1951
1952                                 if (!tuplesort_gettuple_common(state, forward,
1953                                                                                            &stup, &should_free))
1954                                 {
1955                                         MemoryContextSwitchTo(oldcontext);
1956                                         return false;
1957                                 }
1958                                 if (should_free && stup.tuple)
1959                                         pfree(stup.tuple);
1960                                 CHECK_FOR_INTERRUPTS();
1961                         }
1962                         MemoryContextSwitchTo(oldcontext);
1963                         return true;
1964
1965                 default:
1966                         elog(ERROR, "invalid tuplesort state");
1967                         return false;           /* keep compiler quiet */
1968         }
1969 }
1970
1971 /*
1972  * tuplesort_merge_order - report merge order we'll use for given memory
1973  * (note: "merge order" just means the number of input tapes in the merge).
1974  *
1975  * This is exported for use by the planner.  allowedMem is in bytes.
1976  */
1977 int
1978 tuplesort_merge_order(int64 allowedMem)
1979 {
1980         int                     mOrder;
1981
1982         /*
1983          * We need one tape for each merge input, plus another one for the output,
1984          * and each of these tapes needs buffer space.  In addition we want
1985          * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
1986          * count).
1987          *
1988          * Note: you might be thinking we need to account for the memtuples[]
1989          * array in this calculation, but we effectively treat that as part of the
1990          * MERGE_BUFFER_SIZE workspace.
1991          */
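        /*
         * Worked example (illustrative, assuming the default 8kB BLCKSZ, so that
         * TAPE_BUFFER_OVERHEAD (3 blocks) is 24kB and MERGE_BUFFER_SIZE (32
         * blocks) is 256kB, per the definitions earlier in this file): with
         * allowedMem = 4MB, the computation below gives (4MB - 24kB) / (256kB +
         * 24kB), i.e. a merge order of 14.
         */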
1992         mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
1993                 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
1994
1995         /* Even in minimum memory, use at least a MINORDER merge */
1996         mOrder = Max(mOrder, MINORDER);
1997
1998         return mOrder;
1999 }
2000
2001 /*
2002  * inittapes - initialize for tape sorting.
2003  *
2004  * This is called only if we have found we don't have room to sort in memory.
2005  */
2006 static void
2007 inittapes(Tuplesortstate *state)
2008 {
2009         int                     maxTapes,
2010                                 ntuples,
2011                                 j;
2012         int64           tapeSpace;
2013
2014         /* Compute number of tapes to use: merge order plus 1 */
2015         maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
2016
2017         /*
2018          * We must have at least 2*maxTapes slots in the memtuples[] array, else
2019          * we'd not have room for merge heap plus preread.  It seems unlikely that
2020          * this case would ever occur, but be safe.
2021          */
2022         maxTapes = Min(maxTapes, state->memtupsize / 2);
2023
2024         state->maxTapes = maxTapes;
2025         state->tapeRange = maxTapes - 1;
2026
2027 #ifdef TRACE_SORT
2028         if (trace_sort)
2029                 elog(LOG, "switching to external sort with %d tapes: %s",
2030                          maxTapes, pg_rusage_show(&state->ru_start));
2031 #endif
2032
2033         /*
2034          * Decrease availMem to reflect the space needed for tape buffers; but
2035          * don't decrease it to the point that we have no room for tuples. (That
2036          * case is only likely to occur if sorting pass-by-value Datums; in all
2037          * other scenarios the memtuples[] array is unlikely to occupy more than
2038          * half of allowedMem.  In the pass-by-value case it's not important to
2039          * account for tuple space, so we don't care if LACKMEM becomes
2040          * inaccurate.)
2041          */
2042         tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
2043
2044         if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
2045                 USEMEM(state, tapeSpace);
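
        /*
         * Continuing the illustrative 4MB example from tuplesort_merge_order:
         * maxTapes would be 15, so tapeSpace comes to 15 * 24kB = 360kB, leaving
         * the rest of allowedMem for the tuples themselves (numbers are
         * illustrative only).
         */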
2046
2047         /*
2048          * Make sure that the temp file(s) underlying the tape set are created in
2049          * suitable temp tablespaces.
2050          */
2051         PrepareTempTablespaces();
2052
2053         /*
2054          * Create the tape set and allocate the per-tape data arrays.
2055          */
2056         state->tapeset = LogicalTapeSetCreate(maxTapes);
2057
2058         state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
2059         state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
2060         state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
2061         state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
2062         state->mergeavailmem = (int64 *) palloc0(maxTapes * sizeof(int64));
2063         state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
2064         state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
2065         state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
2066         state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
2067
2068         /*
2069          * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
2070          * marked as belonging to run number zero.
2071          *
2072          * NOTE: we pass false for checkIndex since there's no point in comparing
2073          * indexes in this step, even though we do intend the indexes to be part
2074          * of the sort key...
2075          */
2076         ntuples = state->memtupcount;
2077         state->memtupcount = 0;         /* make the heap empty */
2078         for (j = 0; j < ntuples; j++)
2079         {
2080                 /* Must copy source tuple to avoid possible overwrite */
2081                 SortTuple       stup = state->memtuples[j];
2082
2083                 tuplesort_heap_insert(state, &stup, 0, false);
2084         }
2085         Assert(state->memtupcount == ntuples);
2086
2087         state->currentRun = 0;
2088
2089         /*
2090          * Initialize variables of Algorithm D (step D1).
2091          */
2092         for (j = 0; j < maxTapes; j++)
2093         {
2094                 state->tp_fib[j] = 1;
2095                 state->tp_runs[j] = 0;
2096                 state->tp_dummy[j] = 1;
2097                 state->tp_tapenum[j] = j;
2098         }
2099         state->tp_fib[state->tapeRange] = 0;
2100         state->tp_dummy[state->tapeRange] = 0;
2101
2102         state->Level = 1;
2103         state->destTape = 0;
2104
2105         state->status = TSS_BUILDRUNS;
2106 }
2107
2108 /*
2109  * selectnewtape -- select new tape for new initial run.
2110  *
2111  * This is called after finishing a run when we know another run
2112  * must be started.  This implements steps D3, D4 of Algorithm D.
2113  */
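/*
 * Illustrative note (not from Knuth verbatim): with only two input tapes the
 * target run counts in tp_fib[] advance 1,1 -> 2,1 -> 3,2 -> 5,3 -> ..., i.e.
 * consecutive Fibonacci numbers; with more input tapes they follow the
 * corresponding higher-order Fibonacci generalization, which is what lets the
 * polyphase merge consume its input tapes evenly.
 */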
2114 static void
2115 selectnewtape(Tuplesortstate *state)
2116 {
2117         int                     j;
2118         int                     a;
2119
2120         /* Step D3: advance j (destTape) */
2121         if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
2122         {
2123                 state->destTape++;
2124                 return;
2125         }
2126         if (state->tp_dummy[state->destTape] != 0)
2127         {
2128                 state->destTape = 0;
2129                 return;
2130         }
2131
2132         /* Step D4: increase level */
2133         state->Level++;
2134         a = state->tp_fib[0];
2135         for (j = 0; j < state->tapeRange; j++)
2136         {
2137                 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
2138                 state->tp_fib[j] = a + state->tp_fib[j + 1];
2139         }
2140         state->destTape = 0;
2141 }
2142
2143 /*
2144  * mergeruns -- merge all the completed initial runs.
2145  *
2146  * This implements steps D5, D6 of Algorithm D.  All input data has
2147  * already been written to initial runs on tape (see dumptuples).
2148  */
2149 static void
2150 mergeruns(Tuplesortstate *state)
2151 {
2152         int                     tapenum,
2153                                 svTape,
2154                                 svRuns,
2155                                 svDummy;
2156
2157         Assert(state->status == TSS_BUILDRUNS);
2158         Assert(state->memtupcount == 0);
2159
2160         /*
2161          * If we produced only one initial run (quite likely if the total data
2162          * volume is between 1X and 2X workMem), we can just use that tape as the
2163          * finished output, rather than doing a useless merge.  (This obvious
2164          * optimization is not in Knuth's algorithm.)
2165          */
2166         if (state->currentRun == 1)
2167         {
2168                 state->result_tape = state->tp_tapenum[state->destTape];
2169                 /* must freeze and rewind the finished output tape */
2170                 LogicalTapeFreeze(state->tapeset, state->result_tape);
2171                 state->status = TSS_SORTEDONTAPE;
2172                 return;
2173         }
2174
2175         if (state->sortKeys->abbrev_converter)
2176         {
2177                 /*
2178                  * If there are multiple runs to be merged, when we go to read back
2179                  * tuples from disk, abbreviated keys will not have been stored, and we
2180                  * don't care to regenerate them.  Disable abbreviation from this point
2181                  * on.
2182                  */
2183                 state->sortKeys->abbrev_converter = NULL;
2184                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
2185
2186                 /* Not strictly necessary, but be tidy */
2187                 state->sortKeys->abbrev_abort = NULL;
2188                 state->sortKeys->abbrev_full_comparator = NULL;
2189         }
2190
2191         /* End of step D2: rewind all output tapes to prepare for merging */
2192         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2193                 LogicalTapeRewind(state->tapeset, tapenum, false);
2194
2195         for (;;)
2196         {
2197                 /*
2198                  * At this point we know that tape[T] is empty.  If there's just one
2199                  * (real or dummy) run left on each input tape, then only one merge
2200                  * pass remains.  If we don't have to produce a materialized sorted
2201                  * tape, we can stop at this point and do the final merge on-the-fly.
2202                  */
2203                 if (!state->randomAccess)
2204                 {
2205                         bool            allOneRun = true;
2206
2207                         Assert(state->tp_runs[state->tapeRange] == 0);
2208                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2209                         {
2210                                 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
2211                                 {
2212                                         allOneRun = false;
2213                                         break;
2214                                 }
2215                         }
2216                         if (allOneRun)
2217                         {
2218                                 /* Tell logtape.c we won't be writing anymore */
2219                                 LogicalTapeSetForgetFreeSpace(state->tapeset);
2220                                 /* Initialize for the final merge pass */
2221                                 beginmerge(state);
2222                                 state->status = TSS_FINALMERGE;
2223                                 return;
2224                         }
2225                 }
2226
2227                 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
2228                 while (state->tp_runs[state->tapeRange - 1] ||
2229                            state->tp_dummy[state->tapeRange - 1])
2230                 {
2231                         bool            allDummy = true;
2232
2233                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2234                         {
2235                                 if (state->tp_dummy[tapenum] == 0)
2236                                 {
2237                                         allDummy = false;
2238                                         break;
2239                                 }
2240                         }
2241
2242                         if (allDummy)
2243                         {
2244                                 state->tp_dummy[state->tapeRange]++;
2245                                 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2246                                         state->tp_dummy[tapenum]--;
2247                         }
2248                         else
2249                                 mergeonerun(state);
2250                 }
2251
2252                 /* Step D6: decrease level */
2253                 if (--state->Level == 0)
2254                         break;
2255                 /* rewind output tape T to use as new input */
2256                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange],
2257                                                   false);
2258                 /* rewind used-up input tape P, and prepare it for write pass */
2259                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1],
2260                                                   true);
2261                 state->tp_runs[state->tapeRange - 1] = 0;
2262
2263                 /*
2264                  * reassign tape units per step D6; note we no longer care about A[]
2265                  */
2266                 svTape = state->tp_tapenum[state->tapeRange];
2267                 svDummy = state->tp_dummy[state->tapeRange];
2268                 svRuns = state->tp_runs[state->tapeRange];
2269                 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
2270                 {
2271                         state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
2272                         state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
2273                         state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
2274                 }
2275                 state->tp_tapenum[0] = svTape;
2276                 state->tp_dummy[0] = svDummy;
2277                 state->tp_runs[0] = svRuns;
2278         }
2279
2280         /*
2281          * Done.  Knuth says that the result is on TAPE[1], but since we exited
2282          * the loop without performing the last iteration of step D6, we have not
2283          * rearranged the tape unit assignment, and therefore the result is on
2284          * TAPE[T].  We need to do it this way so that we can freeze the final
2285          * output tape while rewinding it.  The last iteration of step D6 would be
2286          * a waste of cycles anyway...
2287          */
2288         state->result_tape = state->tp_tapenum[state->tapeRange];
2289         LogicalTapeFreeze(state->tapeset, state->result_tape);
2290         state->status = TSS_SORTEDONTAPE;
2291 }
2292
2293 /*
2294  * Merge one run from each input tape, except ones with dummy runs.
2295  *
2296  * This is the inner loop of Algorithm D step D5.  We know that the
2297  * output tape is TAPE[T].
2298  */
2299 static void
2300 mergeonerun(Tuplesortstate *state)
2301 {
2302         int                     destTape = state->tp_tapenum[state->tapeRange];
2303         int                     srcTape;
2304         int                     tupIndex;
2305         SortTuple  *tup;
2306         int64           priorAvail,
2307                                 spaceFreed;
2308
2309         /*
2310          * Start the merge by loading one tuple from each active source tape into
2311          * the heap.  We can also decrease the input run/dummy run counts.
2312          */
2313         beginmerge(state);
2314
2315         /*
2316          * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2317          * out, and replacing it with next tuple from same tape (if there is
2318          * another one).
2319          */
2320         while (state->memtupcount > 0)
2321         {
2322                 /* write the tuple to destTape */
2323                 priorAvail = state->availMem;
2324                 srcTape = state->memtuples[0].tupindex;
2325                 WRITETUP(state, destTape, &state->memtuples[0]);
2326                 /* writetup adjusted total free space, now fix per-tape space */
2327                 spaceFreed = state->availMem - priorAvail;
2328                 state->mergeavailmem[srcTape] += spaceFreed;
2329                 /* compact the heap */
2330                 tuplesort_heap_siftup(state, false);
2331                 if ((tupIndex = state->mergenext[srcTape]) == 0)
2332                 {
2333                         /* out of preloaded data on this tape, try to read more */
2334                         mergepreread(state);
2335                         /* if still no data, we've reached end of run on this tape */
2336                         if ((tupIndex = state->mergenext[srcTape]) == 0)
2337                                 continue;
2338                 }
2339                 /* pull next preread tuple from list, insert in heap */
2340                 tup = &state->memtuples[tupIndex];
2341                 state->mergenext[srcTape] = tup->tupindex;
2342                 if (state->mergenext[srcTape] == 0)
2343                         state->mergelast[srcTape] = 0;
2344                 tuplesort_heap_insert(state, tup, srcTape, false);
2345                 /* put the now-unused memtuples entry on the freelist */
2346                 tup->tupindex = state->mergefreelist;
2347                 state->mergefreelist = tupIndex;
2348                 state->mergeavailslots[srcTape]++;
2349         }
2350
2351         /*
2352          * When the heap empties, we're done.  Write an end-of-run marker on the
2353          * output tape, and increment its count of real runs.
2354          */
2355         markrunend(state, destTape);
2356         state->tp_runs[state->tapeRange]++;
2357
2358 #ifdef TRACE_SORT
2359         if (trace_sort)
2360                 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
2361                          pg_rusage_show(&state->ru_start));
2362 #endif
2363 }
2364
2365 /*
2366  * beginmerge - initialize for a merge pass
2367  *
2368  * We decrease the counts of real and dummy runs for each tape, and mark
2369  * which tapes contain active input runs in mergeactive[].  Then, load
2370  * as many tuples as we can from each active input tape, and finally
2371  * fill the merge heap with the first tuple from each active tape.
2372  */
2373 static void
2374 beginmerge(Tuplesortstate *state)
2375 {
2376         int                     activeTapes;
2377         int                     tapenum;
2378         int                     srcTape;
2379         int                     slotsPerTape;
2380         int64           spacePerTape;
2381
2382         /* Heap should be empty here */
2383         Assert(state->memtupcount == 0);
2384
2385         /* Adjust run counts and mark the active tapes */
2386         memset(state->mergeactive, 0,
2387                    state->maxTapes * sizeof(*state->mergeactive));
2388         activeTapes = 0;
2389         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2390         {
2391                 if (state->tp_dummy[tapenum] > 0)
2392                         state->tp_dummy[tapenum]--;
2393                 else
2394                 {
2395                         Assert(state->tp_runs[tapenum] > 0);
2396                         state->tp_runs[tapenum]--;
2397                         srcTape = state->tp_tapenum[tapenum];
2398                         state->mergeactive[srcTape] = true;
2399                         activeTapes++;
2400                 }
2401         }
2402         state->activeTapes = activeTapes;
2403
2404         /* Clear merge-pass state variables */
2405         memset(state->mergenext, 0,
2406                    state->maxTapes * sizeof(*state->mergenext));
2407         memset(state->mergelast, 0,
2408                    state->maxTapes * sizeof(*state->mergelast));
2409         state->mergefreelist = 0;       /* nothing in the freelist */
2410         state->mergefirstfree = activeTapes;            /* 1st slot avail for preread */
2411
2412         /*
2413          * Initialize space allocation to let each active input tape have an equal
2414          * share of preread space.
2415          */
2416         Assert(activeTapes > 0);
2417         slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
2418         Assert(slotsPerTape > 0);
2419         spacePerTape = state->availMem / activeTapes;
2420         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2421         {
2422                 if (state->mergeactive[srcTape])
2423                 {
2424                         state->mergeavailslots[srcTape] = slotsPerTape;
2425                         state->mergeavailmem[srcTape] = spacePerTape;
2426                 }
2427         }
2428
2429         /*
2430          * Preread as many tuples as possible (and at least one) from each active
2431          * tape
2432          */
2433         mergepreread(state);
2434
2435         /* Load the merge heap with the first tuple from each input tape */
2436         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2437         {
2438                 int                     tupIndex = state->mergenext[srcTape];
2439                 SortTuple  *tup;
2440
2441                 if (tupIndex)
2442                 {
2443                         tup = &state->memtuples[tupIndex];
2444                         state->mergenext[srcTape] = tup->tupindex;
2445                         if (state->mergenext[srcTape] == 0)
2446                                 state->mergelast[srcTape] = 0;
2447                         tuplesort_heap_insert(state, tup, srcTape, false);
2448                         /* put the now-unused memtuples entry on the freelist */
2449                         tup->tupindex = state->mergefreelist;
2450                         state->mergefreelist = tupIndex;
2451                         state->mergeavailslots[srcTape]++;
2452                 }
2453         }
2454 }
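
/*
 * As a rough illustration of the allocation above: with three active input
 * tapes, memtupsize = 1024 and mergefirstfree = 3, each tape gets
 * slotsPerTape = (1024 - 3) / 3 = 340 preread slots and availMem / 3 bytes
 * of preread budget.  The figures are hypothetical; the point is simply
 * that the split is equal, with any remainder slots left unassigned.
 */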
2455
2456 /*
2457  * mergepreread - load tuples from merge input tapes
2458  *
2459  * This routine exists to improve sequentiality of reads during a merge pass,
2460  * as explained in the header comments of this file.  Load tuples from each
2461  * active source tape until the tape's run is exhausted or it has used up
2462  * its fair share of available memory.  In any case, we guarantee that there
2463  * is at least one preread tuple available from each unexhausted input tape.
2464  *
2465  * We invoke this routine at the start of a merge pass for initial load,
2466  * and then whenever any tape's preread data runs out.  Note that we load
2467  * as much data as possible from all tapes, not just the one that ran out.
2468  * This is because logtape.c works best with a usage pattern that alternates
2469  * between reading a lot of data and writing a lot of data, so whenever we
2470  * are forced to read, we should fill working memory completely.
2471  *
2472  * In FINALMERGE state, we *don't* use this routine, but instead just preread
2473  * from the single tape that ran dry.  There's no read/write alternation in
2474  * that state and so no point in scanning through all the tapes to fix one.
2475  * (Moreover, there may be quite a lot of inactive tapes in that state, since
2476  * we might have had many fewer runs than tapes.  In a regular tape-to-tape
2477  * merge we can expect most of the tapes to be active.)
2478  */
2479 static void
2480 mergepreread(Tuplesortstate *state)
2481 {
2482         int                     srcTape;
2483
2484         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2485                 mergeprereadone(state, srcTape);
2486 }
2487
2488 /*
2489  * mergeprereadone - load tuples from one merge input tape
2490  *
2491  * Read tuples from the specified tape until it has used up its free memory
2492  * or array slots; but ensure that we have at least one tuple, if any are
2493  * to be had.
2494  */
2495 static void
2496 mergeprereadone(Tuplesortstate *state, int srcTape)
2497 {
2498         unsigned int tuplen;
2499         SortTuple       stup;
2500         int                     tupIndex;
2501         int64           priorAvail,
2502                                 spaceUsed;
2503
2504         if (!state->mergeactive[srcTape])
2505                 return;                                 /* tape's run is already exhausted */
2506         priorAvail = state->availMem;
2507         state->availMem = state->mergeavailmem[srcTape];
2508         while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) ||
2509                    state->mergenext[srcTape] == 0)
2510         {
2511                 /* read next tuple, if any */
2512                 if ((tuplen = getlen(state, srcTape, true)) == 0)
2513                 {
2514                         state->mergeactive[srcTape] = false;
2515                         break;
2516                 }
2517                 READTUP(state, &stup, srcTape, tuplen);
2518                 /* find a free slot in memtuples[] for it */
2519                 tupIndex = state->mergefreelist;
2520                 if (tupIndex)
2521                         state->mergefreelist = state->memtuples[tupIndex].tupindex;
2522                 else
2523                 {
2524                         tupIndex = state->mergefirstfree++;
2525                         Assert(tupIndex < state->memtupsize);
2526                 }
2527                 state->mergeavailslots[srcTape]--;
2528                 /* store tuple, append to list for its tape */
2529                 stup.tupindex = 0;
2530                 state->memtuples[tupIndex] = stup;
2531                 if (state->mergelast[srcTape])
2532                         state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
2533                 else
2534                         state->mergenext[srcTape] = tupIndex;
2535                 state->mergelast[srcTape] = tupIndex;
2536         }
2537         /* update per-tape and global availmem counts */
2538         spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
2539         state->mergeavailmem[srcTape] = state->availMem;
2540         state->availMem = priorAvail - spaceUsed;
2541 }
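
/*
 * The availMem shuffle above is easy to misread, so a sketch with made-up
 * numbers may help: suppose availMem is 900kB globally and tape 4 has a
 * 300kB preread budget.  We stash priorAvail = 900kB, set availMem to
 * 300kB so that LACKMEM() tests against this tape's share, and read until
 * the share (or the slot quota) runs out.  If reading consumed 250kB, then
 * spaceUsed = 250kB, the tape's budget drops to 50kB, and the global count
 * becomes 900kB - 250kB = 650kB.
 */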
2542
2543 /*
2544  * dumptuples - remove tuples from heap and write to tape
2545  *
2546  * This is used during initial-run building, but not during merging.
2547  *
2548  * When alltuples = false, dump only enough tuples to get under the
2549  * availMem limit (and leave at least one tuple in the heap in any case,
2550  * since puttuple assumes it always has a tuple to compare to).  We also
2551  * insist there be at least one free slot in the memtuples[] array.
2552  *
2553  * When alltuples = true, dump everything currently in memory.
2554  * (This case is only used at end of input data.)
2555  *
2556  * If we empty the heap, close out the current run and return (this should
2557  * only happen at end of input data).  If we see that the tuple run number
2558  * at the top of the heap has changed, start a new run.
2559  */
2560 static void
2561 dumptuples(Tuplesortstate *state, bool alltuples)
2562 {
2563         while (alltuples ||
2564                    (LACKMEM(state) && state->memtupcount > 1) ||
2565                    state->memtupcount >= state->memtupsize)
2566         {
2567                 /*
2568                  * Dump the heap's frontmost entry, and sift up to remove it from the
2569                  * heap.
2570                  */
2571                 Assert(state->memtupcount > 0);
2572                 WRITETUP(state, state->tp_tapenum[state->destTape],
2573                                  &state->memtuples[0]);
2574                 tuplesort_heap_siftup(state, true);
2575
2576                 /*
2577                  * If the heap is empty *or* top run number has changed, we've
2578                  * finished the current run.
2579                  */
2580                 if (state->memtupcount == 0 ||
2581                         state->currentRun != state->memtuples[0].tupindex)
2582                 {
2583                         markrunend(state, state->tp_tapenum[state->destTape]);
2584                         state->currentRun++;
2585                         state->tp_runs[state->destTape]++;
2586                         state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
2587
2588 #ifdef TRACE_SORT
2589                         if (trace_sort)
2590                                 elog(LOG, "finished writing%s run %d to tape %d: %s",
2591                                          (state->memtupcount == 0) ? " final" : "",
2592                                          state->currentRun, state->destTape,
2593                                          pg_rusage_show(&state->ru_start));
2594 #endif
2595
2596                         /*
2597                          * Done if heap is empty, else prepare for new run.
2598                          */
2599                         if (state->memtupcount == 0)
2600                                 break;
2601                         Assert(state->currentRun == state->memtuples[0].tupindex);
2602                         selectnewtape(state);
2603                 }
2604         }
2605 }
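
/*
 * For example (run numbers invented): while currentRun is 3 and the heap
 * root's tupindex still reads 3, tuples keep going to the current
 * destination tape.  As soon as the root's tupindex reads 4, every run-3
 * tuple has been written, so we emit the end-of-run marker, bump
 * currentRun to 4, credit tp_runs[destTape], consume one dummy run per
 * step D2, and let selectnewtape() pick the output tape for run 4.
 */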
2606
2607 /*
2608  * tuplesort_rescan             - rewind and replay the scan
2609  */
2610 void
2611 tuplesort_rescan(Tuplesortstate *state)
2612 {
2613         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2614
2615         Assert(state->randomAccess);
2616
2617         switch (state->status)
2618         {
2619                 case TSS_SORTEDINMEM:
2620                         state->current = 0;
2621                         state->eof_reached = false;
2622                         state->markpos_offset = 0;
2623                         state->markpos_eof = false;
2624                         break;
2625                 case TSS_SORTEDONTAPE:
2626                         LogicalTapeRewind(state->tapeset,
2627                                                           state->result_tape,
2628                                                           false);
2629                         state->eof_reached = false;
2630                         state->markpos_block = 0L;
2631                         state->markpos_offset = 0;
2632                         state->markpos_eof = false;
2633                         break;
2634                 default:
2635                         elog(ERROR, "invalid tuplesort state");
2636                         break;
2637         }
2638
2639         MemoryContextSwitchTo(oldcontext);
2640 }
2641
2642 /*
2643  * tuplesort_markpos    - saves current position in the merged sort file
2644  */
2645 void
2646 tuplesort_markpos(Tuplesortstate *state)
2647 {
2648         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2649
2650         Assert(state->randomAccess);
2651
2652         switch (state->status)
2653         {
2654                 case TSS_SORTEDINMEM:
2655                         state->markpos_offset = state->current;
2656                         state->markpos_eof = state->eof_reached;
2657                         break;
2658                 case TSS_SORTEDONTAPE:
2659                         LogicalTapeTell(state->tapeset,
2660                                                         state->result_tape,
2661                                                         &state->markpos_block,
2662                                                         &state->markpos_offset);
2663                         state->markpos_eof = state->eof_reached;
2664                         break;
2665                 default:
2666                         elog(ERROR, "invalid tuplesort state");
2667                         break;
2668         }
2669
2670         MemoryContextSwitchTo(oldcontext);
2671 }
2672
2673 /*
2674  * tuplesort_restorepos - restores current position in merged sort file to
2675  *                                                last saved position
2676  */
2677 void
2678 tuplesort_restorepos(Tuplesortstate *state)
2679 {
2680         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2681
2682         Assert(state->randomAccess);
2683
2684         switch (state->status)
2685         {
2686                 case TSS_SORTEDINMEM:
2687                         state->current = state->markpos_offset;
2688                         state->eof_reached = state->markpos_eof;
2689                         break;
2690                 case TSS_SORTEDONTAPE:
2691                         if (!LogicalTapeSeek(state->tapeset,
2692                                                                  state->result_tape,
2693                                                                  state->markpos_block,
2694                                                                  state->markpos_offset))
2695                                 elog(ERROR, "tuplesort_restorepos failed");
2696                         state->eof_reached = state->markpos_eof;
2697                         break;
2698                 default:
2699                         elog(ERROR, "invalid tuplesort state");
2700                         break;
2701         }
2702
2703         MemoryContextSwitchTo(oldcontext);
2704 }
2705
2706 /*
2707  * tuplesort_get_stats - extract summary statistics
2708  *
2709  * This can be called after tuplesort_performsort() finishes to obtain
2710  * printable summary information about how the sort was performed.
2711  * spaceUsed is measured in kilobytes.
2712  */
2713 void
2714 tuplesort_get_stats(Tuplesortstate *state,
2715                                         const char **sortMethod,
2716                                         const char **spaceType,
2717                                         long *spaceUsed)
2718 {
2719         /*
2720          * Note: it might seem we should provide both memory and disk usage for a
2721          * disk-based sort.  However, the current code doesn't track memory space
2722          * accurately once we have begun to return tuples to the caller (since we
2723          * don't account for pfree's the caller is expected to do), so we cannot
2724          * rely on availMem in a disk sort.  This does not seem worth the overhead
2725          * to fix.  Is it worth creating an API for the memory context code to
2726          * tell us how much is actually used in sortcontext?
2727          */
2728         if (state->tapeset)
2729         {
2730                 *spaceType = "Disk";
2731                 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
2732         }
2733         else
2734         {
2735                 *spaceType = "Memory";
2736                 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
2737         }
2738
2739         switch (state->status)
2740         {
2741                 case TSS_SORTEDINMEM:
2742                         if (state->boundUsed)
2743                                 *sortMethod = "top-N heapsort";
2744                         else
2745                                 *sortMethod = "quicksort";
2746                         break;
2747                 case TSS_SORTEDONTAPE:
2748                         *sortMethod = "external sort";
2749                         break;
2750                 case TSS_FINALMERGE:
2751                         *sortMethod = "external merge";
2752                         break;
2753                 default:
2754                         *sortMethod = "still in progress";
2755                         break;
2756         }
2757 }
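
/*
 * Space arithmetic, with illustrative numbers: a disk sort that used 1280
 * logical-tape blocks with BLCKSZ = 8192 reports 1280 * 8 = 10240kB, while
 * an in-memory sort with allowedMem = 4MB and 1MB of availMem left reports
 * (4194304 - 1048576 + 1023) / 1024 = 3072kB, i.e. bytes actually
 * consumed, rounded up to the next kilobyte.
 */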
2758
2759
2760 /*
2761  * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
2762  *
2763  * Compare two SortTuples.  If checkIndex is true, use the tuple index
2764  * as the front of the sort key; otherwise, compare on the sort key alone.
2765  */
2766
2767 #define HEAPCOMPARE(tup1,tup2) \
2768         (checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \
2769          ((tup1)->tupindex) - ((tup2)->tupindex) : \
2770          COMPARETUP(state, tup1, tup2))
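
/*
 * So, during run building (checkIndex = true), a tuple tagged with run 1
 * always sorts ahead of a tuple tagged with run 2, however their keys
 * compare; only within the same run does COMPARETUP decide.  For instance
 * (keys invented), the heap would emit (run 1, "m"), then (run 1, "z"),
 * then (run 2, "a").
 */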
2771
2772 /*
2773  * Convert the existing unordered array of SortTuples to a bounded heap,
2774  * discarding all but the smallest "state->bound" tuples.
2775  *
2776  * When working with a bounded heap, we want to keep the largest entry
2777  * at the root (array entry zero), instead of the smallest as in the normal
2778  * sort case.  This allows us to discard the largest entry cheaply.
2779  * Therefore, we temporarily reverse the sort direction.
2780  *
2781  * We assume that all entries in a bounded heap will always have tupindex
2782  * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
2783  * the direction of comparison for tupindexes.
2784  */
2785 static void
2786 make_bounded_heap(Tuplesortstate *state)
2787 {
2788         int                     tupcount = state->memtupcount;
2789         int                     i;
2790
2791         Assert(state->status == TSS_INITIAL);
2792         Assert(state->bounded);
2793         Assert(tupcount >= state->bound);
2794
2795         /* Reverse sort direction so largest entry will be at root */
2796         reversedirection(state);
2797
2798         state->memtupcount = 0;         /* make the heap empty */
2799         for (i = 0; i < tupcount; i++)
2800         {
2801                 if (state->memtupcount >= state->bound &&
2802                   COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
2803                 {
2804                         /* New tuple would just get thrown out, so skip it */
2805                         free_sort_tuple(state, &state->memtuples[i]);
2806                         CHECK_FOR_INTERRUPTS();
2807                 }
2808                 else
2809                 {
2810                         /* Insert next tuple into heap */
2811                         /* Must copy source tuple to avoid possible overwrite */
2812                         SortTuple       stup = state->memtuples[i];
2813
2814                         tuplesort_heap_insert(state, &stup, 0, false);
2815
2816                         /* If heap too full, discard largest entry */
2817                         if (state->memtupcount > state->bound)
2818                         {
2819                                 free_sort_tuple(state, &state->memtuples[0]);
2820                                 tuplesort_heap_siftup(state, false);
2821                         }
2822                 }
2823         }
2824
2825         Assert(state->memtupcount == state->bound);
2826         state->status = TSS_BOUNDED;
2827 }
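
/*
 * A small worked example (keys invented): with bound = 3, the reversed
 * direction keeps the largest of the retained keys at the root.  Feeding
 * 5, 9, 2, 7, 1 first builds the heap {5, 9, 2} with 9 at the root; then
 * 7 is inserted and 9 sifted out, then 1 is inserted and 7 sifted out,
 * leaving the three smallest keys 5, 2, 1, as required.
 */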
2828
2829 /*
2830  * Convert the bounded heap to a properly-sorted array
2831  */
2832 static void
2833 sort_bounded_heap(Tuplesortstate *state)
2834 {
2835         int                     tupcount = state->memtupcount;
2836
2837         Assert(state->status == TSS_BOUNDED);
2838         Assert(state->bounded);
2839         Assert(tupcount == state->bound);
2840
2841         /*
2842          * We can unheapify in place because each sift-up will remove the largest
2843          * entry, which we can promptly store in the newly freed slot at the end.
2844          * Once we're down to a single-entry heap, we're done.
2845          */
2846         while (state->memtupcount > 1)
2847         {
2848                 SortTuple       stup = state->memtuples[0];
2849
2850                 /* this sifts-up the next-largest entry and decreases memtupcount */
2851                 tuplesort_heap_siftup(state, false);
2852                 state->memtuples[state->memtupcount] = stup;
2853         }
2854         state->memtupcount = tupcount;
2855
2856         /*
2857          * Reverse sort direction back to the original state.  This is not
2858          * actually necessary but seems like a good idea for tidiness.
2859          */
2860         reversedirection(state);
2861
2862         state->status = TSS_SORTEDINMEM;
2863         state->boundUsed = true;
2864 }
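
/*
 * Concretely (values invented): for a three-entry reversed-direction heap
 * holding 2, 5 and 7, the root 7 is copied into the slot freed at the end
 * of the array, then 5 into the slot before it, so the array fills from
 * the back and finishes in ascending order 2, 5, 7.
 */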
2865
2866 /*
2867  * Insert a new tuple into an empty or existing heap, maintaining the
2868  * heap invariant.  Caller is responsible for ensuring there's room.
2869  *
2870  * Note: we assume *tuple is a temporary variable that can be scribbled on.
2871  * For some callers, tuple actually points to a memtuples[] entry above the
2872  * end of the heap.  This is safe as long as it's not immediately adjacent
2873  * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
2874  * is, it might get overwritten before being moved into the heap!
2875  */
2876 static void
2877 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
2878                                           int tupleindex, bool checkIndex)
2879 {
2880         SortTuple  *memtuples;
2881         int                     j;
2882
2883         /*
2884          * Save the tupleindex --- see notes above about writing on *tuple. It's a
2885          * historical artifact that tupleindex is passed as a separate argument
2886          * and not in *tuple, but it's notationally convenient so let's leave it
2887          * that way.
2888          */
2889         tuple->tupindex = tupleindex;
2890
2891         memtuples = state->memtuples;
2892         Assert(state->memtupcount < state->memtupsize);
2893
2894         CHECK_FOR_INTERRUPTS();
2895
2896         /*
2897          * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
2898          * using 1-based array indexes, not 0-based.
2899          */
2900         j = state->memtupcount++;
2901         while (j > 0)
2902         {
2903                 int                     i = (j - 1) >> 1;
2904
2905                 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
2906                         break;
2907                 memtuples[j] = memtuples[i];
2908                 j = i;
2909         }
2910         memtuples[j] = *tuple;
2911 }
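
/*
 * The index arithmetic used above, shown explicitly: with 0-based slots
 * the parent of entry j is (j - 1) / 2 and its children are 2*j + 1 and
 * 2*j + 2.  For instance, inserting into slot j = 8 walks the chain
 * 8 -> 3 -> 1 -> 0, stopping at the first ancestor that does not compare
 * greater than the new entry.
 */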
2912
2913 /*
2914  * The tuple at state->memtuples[0] has been removed from the heap.
2915  * Decrement memtupcount, and sift up to maintain the heap invariant.
2916  */
2917 static void
2918 tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
2919 {
2920         SortTuple  *memtuples = state->memtuples;
2921         SortTuple  *tuple;
2922         int                     i,
2923                                 n;
2924
2925         if (--state->memtupcount <= 0)
2926                 return;
2927
2928         CHECK_FOR_INTERRUPTS();
2929
2930         n = state->memtupcount;
2931         tuple = &memtuples[n];          /* tuple that must be reinserted */
2932         i = 0;                                          /* i is where the "hole" is */
2933         for (;;)
2934         {
2935                 int                     j = 2 * i + 1;
2936
2937                 if (j >= n)
2938                         break;
2939                 if (j + 1 < n &&
2940                         HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
2941                         j++;
2942                 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
2943                         break;
2944                 memtuples[i] = memtuples[j];
2945                 i = j;
2946         }
2947         memtuples[i] = *tuple;
2948 }
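
/*
 * Continuing the 0-based arithmetic: the hole starts at slot 0, its
 * children sit at 2*i + 1 and 2*i + 2, and at each level the smaller of
 * the two children is promoted into the hole, until the displaced last
 * entry (memtuples[n]) fits and is dropped into the hole.
 */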
2949
2950 /*
2951  * Function to reverse the sort direction from its current state
2952  *
2953  * It is not safe to call this when performing hash tuplesorts
2954  */
2955 static void
2956 reversedirection(Tuplesortstate *state)
2957 {
2958         SortSupport sortKey = state->sortKeys;
2959         int                     nkey;
2960
2961         for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
2962         {
2963                 sortKey->ssup_reverse = !sortKey->ssup_reverse;
2964                 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
2965         }
2966 }
2967
2968
2969 /*
2970  * Tape interface routines
2971  */
2972
2973 static unsigned int
2974 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
2975 {
2976         unsigned int len;
2977
2978         if (LogicalTapeRead(state->tapeset, tapenum,
2979                                                 &len, sizeof(len)) != sizeof(len))
2980                 elog(ERROR, "unexpected end of tape");
2981         if (len == 0 && !eofOK)
2982                 elog(ERROR, "unexpected end of data");
2983         return len;
2984 }
2985
2986 static void
2987 markrunend(Tuplesortstate *state, int tapenum)
2988 {
2989         unsigned int len = 0;
2990
2991         LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
2992 }
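
/*
 * The resulting on-tape framing is simply a stream of length-prefixed
 * tuples terminated by a zero length word, e.g. (lengths hypothetical):
 *
 *    [24][tuple body][18][tuple body] ... [0]
 *
 * getlen() with eofOK = true reads the final 0 and reports end of run;
 * with eofOK = false a zero length word is an error.
 */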
2993
2994
2995 /*
2996  * Routines specialized for HeapTuple (actually MinimalTuple) case
2997  */
2998
2999 static int
3000 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3001 {
3002         SortSupport sortKey = state->sortKeys;
3003         HeapTupleData ltup;
3004         HeapTupleData rtup;
3005         TupleDesc       tupDesc;
3006         int                     nkey;
3007         int32           compare;
3008         AttrNumber      attno;
3009         Datum           datum1,
3010                                 datum2;
3011         bool            isnull1,
3012                                 isnull2;
3013
3014
3015         /* Compare the leading sort key */
3016         compare = ApplySortComparator(a->datum1, a->isnull1,
3017                                                                   b->datum1, b->isnull1,
3018                                                                   sortKey);
3019         if (compare != 0)
3020                 return compare;
3021
3022         /* Compare additional sort keys */
3023         ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3024         ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
3025         rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3026         rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
3027         tupDesc = state->tupDesc;
3028
3029         if (sortKey->abbrev_converter)
3030         {
3031                 attno = sortKey->ssup_attno;
3032
3033                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3034                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3035
3036                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3037                                                                                                 datum2, isnull2,
3038                                                                                                 sortKey);
3039                 if (compare != 0)
3040                         return compare;
3041         }
3042
3043         sortKey++;
3044         for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
3045         {
3046                 attno = sortKey->ssup_attno;
3047
3048                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3049                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3050
3051                 compare = ApplySortComparator(datum1, isnull1,
3052                                                                           datum2, isnull2,
3053                                                                           sortKey);
3054                 if (compare != 0)
3055                         return compare;
3056         }
3057
3058         return 0;
3059 }
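
/*
 * Sketch of the flow above when abbreviated keys are in use: datum1 holds
 * the abbreviated form of the leading column, so the first
 * ApplySortComparator call usually settles the comparison cheaply.  Only
 * when the abbreviated values compare equal do we fetch the authoritative
 * leading values with heap_getattr and run ApplySortAbbrevFullComparator,
 * and only after that are the remaining sort keys compared one by one.
 */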
3060
3061 static void
3062 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
3063 {
3064         /*
3065          * We expect the passed "tup" to be a TupleTableSlot, and form a
3066          * MinimalTuple using the exported interface for that.
3067          */
3068         TupleTableSlot *slot = (TupleTableSlot *) tup;
3069         Datum                   original;
3070         MinimalTuple tuple;
3071         HeapTupleData htup;
3072
3073         /* copy the tuple into sort storage */
3074         tuple = ExecCopySlotMinimalTuple(slot);
3075         stup->tuple = (void *) tuple;
3076         USEMEM(state, GetMemoryChunkSpace(tuple));
3077         /* set up first-column key value */
3078         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3079         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3080         original = heap_getattr(&htup,
3081                                                         state->sortKeys[0].ssup_attno,
3082                                                         state->tupDesc,
3083                                                         &stup->isnull1);
3084
3085         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3086         {
3087                 /*
3088                  * Store ordinary Datum representation, or NULL value.  If there is a
3089                  * converter it won't expect NULL values, and cost model is not
3090                  * required to account for NULL, so in that case we avoid calling
3091                  * converter and just set datum1 to "void" representation (to be
3092                  * consistent).
3093                  */
3094                 stup->datum1 = original;
3095         }
3096         else if (!consider_abort_common(state))
3097         {
3098                 /* Store abbreviated key representation */
3099                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3100                                                                                                                  state->sortKeys);
3101         }
3102         else
3103         {
3104                 /* Abort abbreviation */
3105                 int             i;
3106
3107                 stup->datum1 = original;
3108
3109                 /*
3110                  * Set state to be consistent with never trying abbreviation.
3111                  *
3112                  * Alter datum1 representation in already-copied tuples, so as to
3113                  * ensure a consistent representation (current tuple was just handled).
3114                  * Note that we rely on all tuples copied so far actually being
3115                  * contained within memtuples array.
3116                  */
3117                 for (i = 0; i < state->memtupcount; i++)
3118                 {
3119                         SortTuple *mtup = &state->memtuples[i];
3120
3121                         htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
3122                                                         MINIMAL_TUPLE_OFFSET;
3123                         htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
3124                                                         MINIMAL_TUPLE_OFFSET);
3125
3126                         mtup->datum1 = heap_getattr(&htup,
3127                                                                                 state->sortKeys[0].ssup_attno,
3128                                                                                 state->tupDesc,
3129                                                                                 &mtup->isnull1);
3130                 }
3131         }
3132 }
3133
3134 static void
3135 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
3136 {
3137         MinimalTuple tuple = (MinimalTuple) stup->tuple;
3138
3139         /* the part of the MinimalTuple we'll write: */
3140         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3141         unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
3142
3143         /* total on-disk footprint: */
3144         unsigned int tuplen = tupbodylen + sizeof(int);
3145
3146         LogicalTapeWrite(state->tapeset, tapenum,
3147                                          (void *) &tuplen, sizeof(tuplen));
3148         LogicalTapeWrite(state->tapeset, tapenum,
3149                                          (void *) tupbody, tupbodylen);
3150         if (state->randomAccess)        /* need trailing length word? */
3151                 LogicalTapeWrite(state->tapeset, tapenum,
3152                                                  (void *) &tuplen, sizeof(tuplen));
3153
3154         FREEMEM(state, GetMemoryChunkSpace(tuple));
3155         heap_free_minimal_tuple(tuple);
3156 }
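
/*
 * Layout written above, for a hypothetical tuple whose stored MinimalTuple
 * body is 52 bytes:
 *
 *    [56][52-byte body]        without randomAccess
 *    [56][52-byte body][56]    with randomAccess
 *
 * where 56 = 52 + sizeof(int) counts the leading length word itself, and
 * the trailing copy allows the tape to be read backwards.
 */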
3157
3158 static void
3159 readtup_heap(Tuplesortstate *state, SortTuple *stup,
3160                          int tapenum, unsigned int len)
3161 {
3162         unsigned int tupbodylen = len - sizeof(int);
3163         unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
3164         MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
3165         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3166         HeapTupleData htup;
3167
3168         USEMEM(state, GetMemoryChunkSpace(tuple));
3169         /* read in the tuple proper */
3170         tuple->t_len = tuplen;
3171         LogicalTapeReadExact(state->tapeset, tapenum,
3172                                                  tupbody, tupbodylen);
3173         if (state->randomAccess)        /* need trailing length word? */
3174                 LogicalTapeReadExact(state->tapeset, tapenum,
3175                                                          &tuplen, sizeof(tuplen));
3176         stup->tuple = (void *) tuple;
3177         /* set up first-column key value */
3178         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3179         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3180         stup->datum1 = heap_getattr(&htup,
3181                                                                 state->sortKeys[0].ssup_attno,
3182                                                                 state->tupDesc,
3183                                                                 &stup->isnull1);
3184 }
3185
3186 /*
3187  * Routines specialized for the CLUSTER case (HeapTuple data, with
3188  * comparisons per a btree index definition)
3189  */
3190
3191 static int
3192 comparetup_cluster(const SortTuple *a, const SortTuple *b,
3193                                    Tuplesortstate *state)
3194 {
3195         SortSupport     sortKey = state->sortKeys;
3196         HeapTuple       ltup;
3197         HeapTuple       rtup;
3198         TupleDesc       tupDesc;
3199         int                     nkey;
3200         int32           compare;
3201         Datum           datum1,
3202                                 datum2;
3203         bool            isnull1,
3204                                 isnull2;
3205         AttrNumber      leading = state->indexInfo->ii_KeyAttrNumbers[0];
3206
3207         /* Be prepared to compare additional sort keys */
3208         ltup = (HeapTuple) a->tuple;
3209         rtup = (HeapTuple) b->tuple;
3210         tupDesc = state->tupDesc;
3211
3212         /* Compare the leading sort key, if it's simple */
3213         if (leading != 0)
3214         {
3215                 compare = ApplySortComparator(a->datum1, a->isnull1,
3216                                                                           b->datum1, b->isnull1,
3217                                                                           sortKey);
3218                 if (compare != 0)
3219                         return compare;
3220
3221                 if (sortKey->abbrev_converter)
3222                 {
3223                         datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
3224                         datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
3225
3226                         compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3227                                                                                                         datum2, isnull2,
3228                                                                                                         sortKey);
3229                 }
3230                 if (compare != 0 || state->nKeys == 1)
3231                         return compare;
3232                 /* Compare additional columns the hard way */
3233                 sortKey++;
3234                 nkey = 1;
3235         }
3236         else
3237         {
3238                 /* Must compare all keys the hard way */
3239                 nkey = 0;
3240         }
3241
3242         if (state->indexInfo->ii_Expressions == NULL)
3243         {
3244                 /* If not expression index, just compare the proper heap attrs */
3245
3246                 for (; nkey < state->nKeys; nkey++, sortKey++)
3247                 {
3248                         AttrNumber      attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
3249
3250                         datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
3251                         datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
3252
3253                         compare = ApplySortComparator(datum1, isnull1,
3254                                                                                   datum2, isnull2,
3255                                                                                   sortKey);
3256                         if (compare != 0)
3257                                 return compare;
3258                 }
3259         }
3260         else
3261         {
3262                 /*
3263                  * In the expression index case, compute the whole index tuple and
3264                  * then compare values.  It would perhaps be faster to compute only as
3265                  * many columns as we need to compare, but that would require
3266                  * duplicating all the logic in FormIndexDatum.
3267                  */
3268                 Datum           l_index_values[INDEX_MAX_KEYS];
3269                 bool            l_index_isnull[INDEX_MAX_KEYS];
3270                 Datum           r_index_values[INDEX_MAX_KEYS];
3271                 bool            r_index_isnull[INDEX_MAX_KEYS];
3272                 TupleTableSlot *ecxt_scantuple;
3273
3274                 /* Reset context each time to prevent memory leakage */
3275                 ResetPerTupleExprContext(state->estate);
3276
3277                 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
3278
3279                 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
3280                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3281                                            l_index_values, l_index_isnull);
3282
3283                 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
3284                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3285                                            r_index_values, r_index_isnull);
3286
3287                 for (; nkey < state->nKeys; nkey++, sortKey++)
3288                 {
3289                         compare = ApplySortComparator(l_index_values[nkey],
3290                                                                                   l_index_isnull[nkey],
3291                                                                                   r_index_values[nkey],
3292                                                                                   r_index_isnull[nkey],
3293                                                                                   sortKey);
3294                         if (compare != 0)
3295                                 return compare;
3296                 }
3297         }
3298
3299         return 0;
3300 }
3301
3302 static void
3303 copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
3304 {
3305         HeapTuple       tuple = (HeapTuple) tup;
3306         Datum           original;
3307
3308         /* copy the tuple into sort storage */
3309         tuple = heap_copytuple(tuple);
3310         stup->tuple = (void *) tuple;
3311         USEMEM(state, GetMemoryChunkSpace(tuple));
3312         /*
3313          * set up first-column key value, and potentially abbreviate, if it's a
3314          * simple column
3315          */
3316         if (state->indexInfo->ii_KeyAttrNumbers[0] == 0)
3317                 return;
3318
3319         original = heap_getattr(tuple,
3320                                                         state->indexInfo->ii_KeyAttrNumbers[0],
3321                                                         state->tupDesc,
3322                                                         &stup->isnull1);
3323
3324         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3325         {
3326                 /*
3327                  * Store ordinary Datum representation, or NULL value.  If there is a
3328                  * converter it won't expect NULL values, and cost model is not
3329                  * required to account for NULL, so in that case we avoid calling
3330                  * converter and just set datum1 to "void" representation (to be
3331                  * consistent).
3332                  */
3333                 stup->datum1 = original;
3334         }
3335         else if (!consider_abort_common(state))
3336         {
3337                 /* Store abbreviated key representation */
3338                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3339                                                                                                                  state->sortKeys);
3340         }
3341         else
3342         {
3343                 /* Abort abbreviation */
3344                 int             i;
3345
3346                 stup->datum1 = original;
3347
3348                 /*
3349                  * Set state to be consistent with never trying abbreviation.
3350                  *
3351                  * Alter datum1 representation in already-copied tuples, so as to
3352                  * ensure a consistent representation (current tuple was just handled).
3353                  * Note that we rely on all tuples copied so far actually being
3354                  * contained within memtuples array.
3355                  */
3356                 for (i = 0; i < state->memtupcount; i++)
3357                 {
3358                         SortTuple *mtup = &state->memtuples[i];
3359
3360                         tuple = (HeapTuple) mtup->tuple;
3361                         mtup->datum1 = heap_getattr(tuple,
3362                                                                                 state->indexInfo->ii_KeyAttrNumbers[0],
3363                                                                                 state->tupDesc,
3364                                                                                 &mtup->isnull1);
3365                 }
3366         }
3367 }
3368
3369 static void
3370 writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
3371 {
3372         HeapTuple       tuple = (HeapTuple) stup->tuple;
3373         unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
3374
3375         /* We need to store t_self, but not other fields of HeapTupleData */
3376         LogicalTapeWrite(state->tapeset, tapenum,
3377                                          &tuplen, sizeof(tuplen));
3378         LogicalTapeWrite(state->tapeset, tapenum,
3379                                          &tuple->t_self, sizeof(ItemPointerData));
3380         LogicalTapeWrite(state->tapeset, tapenum,
3381                                          tuple->t_data, tuple->t_len);
3382         if (state->randomAccess)        /* need trailing length word? */
3383                 LogicalTapeWrite(state->tapeset, tapenum,
3384                                                  &tuplen, sizeof(tuplen));
3385
3386         FREEMEM(state, GetMemoryChunkSpace(tuple));
3387         heap_freetuple(tuple);
3388 }
3389
3390 static void
3391 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
3392                                 int tapenum, unsigned int tuplen)
3393 {
3394         unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
3395         HeapTuple       tuple = (HeapTuple) palloc(t_len + HEAPTUPLESIZE);
3396
3397         USEMEM(state, GetMemoryChunkSpace(tuple));
3398         /* Reconstruct the HeapTupleData header */
3399         tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
3400         tuple->t_len = t_len;
3401         LogicalTapeReadExact(state->tapeset, tapenum,
3402                                                  &tuple->t_self, sizeof(ItemPointerData));
3403         /* We don't currently bother to reconstruct t_tableOid */
3404         tuple->t_tableOid = InvalidOid;
3405         /* Read in the tuple body */
3406         LogicalTapeReadExact(state->tapeset, tapenum,
3407                                                  tuple->t_data, tuple->t_len);
3408         if (state->randomAccess)        /* need trailing length word? */
3409                 LogicalTapeReadExact(state->tapeset, tapenum,
3410                                                          &tuplen, sizeof(tuplen));
3411         stup->tuple = (void *) tuple;
3412         /* set up first-column key value, if it's a simple column */
3413         if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
3414                 stup->datum1 = heap_getattr(tuple,
3415                                                                         state->indexInfo->ii_KeyAttrNumbers[0],
3416                                                                         state->tupDesc,
3417                                                                         &stup->isnull1);
3418 }
3419
3420
3421 /*
3422  * Routines specialized for IndexTuple case
3423  *
3424  * The btree and hash cases require separate comparison functions, but the
3425  * IndexTuple representation is the same so the copy/write/read support
3426  * functions can be shared.
3427  */
3428
3429 static int
3430 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
3431                                            Tuplesortstate *state)
3432 {
3433         /*
3434          * This is similar to comparetup_heap(), but expects index tuples.  There
3435          * is also special handling for enforcing uniqueness, and special treatment
3436          * for equal keys at the end.
3437          */
3438         SortSupport     sortKey = state->sortKeys;
3439         IndexTuple      tuple1;
3440         IndexTuple      tuple2;
3441         int                     keysz;
3442         TupleDesc       tupDes;
3443         bool            equal_hasnull = false;
3444         int                     nkey;
3445         int32           compare;
3446         Datum           datum1,
3447                                 datum2;
3448         bool            isnull1,
3449                                 isnull2;
3450
3451
3452         /* Compare the leading sort key */
3453         compare = ApplySortComparator(a->datum1, a->isnull1,
3454                                                                   b->datum1, b->isnull1,
3455                                                                   sortKey);
3456         if (compare != 0)
3457                 return compare;
3458
3459         /* Compare additional sort keys */
3460         tuple1 = (IndexTuple) a->tuple;
3461         tuple2 = (IndexTuple) b->tuple;
3462         keysz = state->nKeys;
3463         tupDes = RelationGetDescr(state->indexRel);
3464
3465         if (sortKey->abbrev_converter)
3466         {
3467                 datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
3468                 datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
3469
3470                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3471                                                                                                 datum2, isnull2,
3472                                                                                                 sortKey);
3473                 if (compare != 0)
3474                         return compare;
3475         }
3476
3477         /* they are equal, so we only need to examine one null flag */
3478         if (a->isnull1)
3479                 equal_hasnull = true;
3480
3481         sortKey++;
3482         for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
3483         {
3484                 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
3485                 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
3486
3487                 compare = ApplySortComparator(datum1, isnull1,
3488                                                                           datum2, isnull2,
3489                                                                           sortKey);
3490                 if (compare != 0)
3491                         return compare;         /* done when we find unequal attributes */
3492
3493                 /* they are equal, so we only need to examine one null flag */
3494                 if (isnull1)
3495                         equal_hasnull = true;
3496         }
3497
3498         /*
3499          * If btree has asked us to enforce uniqueness, complain if two equal
3500          * tuples are detected (unless there was at least one NULL field).
3501          *
3502          * It is sufficient to make the test here, because if two tuples are equal
3503          * they *must* get compared at some stage of the sort --- otherwise the
3504          * sort algorithm wouldn't have checked whether one must appear before the
3505          * other.
3506          */
3507         if (state->enforceUnique && !equal_hasnull)
3508         {
3509                 Datum           values[INDEX_MAX_KEYS];
3510                 bool            isnull[INDEX_MAX_KEYS];
3511
3512                 /*
3513                  * Some rather brain-dead implementations of qsort (such as the one in
3514                  * QNX 4) will sometimes call the comparison routine to compare a
3515                  * value to itself, but we always use our own implementation, which
3516                  * does not.
3517                  */
3518                 Assert(tuple1 != tuple2);
3519
3520                 index_deform_tuple(tuple1, tupDes, values, isnull);
3521                 ereport(ERROR,
3522                                 (errcode(ERRCODE_UNIQUE_VIOLATION),
3523                                  errmsg("could not create unique index \"%s\"",
3524                                                 RelationGetRelationName(state->indexRel)),
3525                                  errdetail("Key %s is duplicated.",
3526                                                    BuildIndexValueDescription(state->indexRel,
3527                                                                                                           values, isnull)),
3528                                  errtableconstraint(state->heapRel,
3529                                                                  RelationGetRelationName(state->indexRel))));
3530         }
3531
3532         /*
3533          * If key values are equal, we sort on ItemPointer.  This does not affect
3534          * validity of the finished index, but it may be useful to have index
3535          * scans in physical order.
3536          */
3537         {
3538                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3539                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3540
3541                 if (blk1 != blk2)
3542                         return (blk1 < blk2) ? -1 : 1;
3543         }
3544         {
3545                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3546                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3547
3548                 if (pos1 != pos2)
3549                         return (pos1 < pos2) ? -1 : 1;
3550         }
3551
3552         return 0;
3553 }
3554
3555 static int
3556 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
3557                                           Tuplesortstate *state)
3558 {
3559         uint32          hash1;
3560         uint32          hash2;
3561         IndexTuple      tuple1;
3562         IndexTuple      tuple2;
3563
3564         /*
3565          * Fetch hash keys and mask off bits we don't want to sort by. We know
3566          * that the first column of the index tuple is the hash key.
3567          */
3568         Assert(!a->isnull1);
3569         hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
3570         Assert(!b->isnull1);
3571         hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
3572
3573         if (hash1 > hash2)
3574                 return 1;
3575         else if (hash1 < hash2)
3576                 return -1;
3577
3578         /*
3579          * If hash values are equal, we sort on ItemPointer.  This does not affect
3580          * validity of the finished index, but it may be useful to have index
3581          * scans in physical order.
3582          */
3583         tuple1 = (IndexTuple) a->tuple;
3584         tuple2 = (IndexTuple) b->tuple;
3585
3586         {
3587                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3588                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3589
3590                 if (blk1 != blk2)
3591                         return (blk1 < blk2) ? -1 : 1;
3592         }
3593         {
3594                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3595                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3596
3597                 if (pos1 != pos2)
3598                         return (pos1 < pos2) ? -1 : 1;
3599         }
3600
3601         return 0;
3602 }
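
/*
 * For instance, if hash_mask were 0x3ff, only the low 10 bits of each hash
 * value would participate, so any two tuples bound for the same one of
 * 1024 buckets compare equal here and are ordered by their ItemPointers
 * instead.  The particular mask value is hypothetical.
 */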
3603
3604 static void
3605 copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
3606 {
3607         IndexTuple      tuple = (IndexTuple) tup;
3608         unsigned int tuplen = IndexTupleSize(tuple);
3609         IndexTuple      newtuple;
3610         Datum           original;
3611
3612         /* copy the tuple into sort storage */
3613         newtuple = (IndexTuple) palloc(tuplen);
3614         memcpy(newtuple, tuple, tuplen);
3615         USEMEM(state, GetMemoryChunkSpace(newtuple));
3616         stup->tuple = (void *) newtuple;
3617         /* set up first-column key value */
3618         original = index_getattr(newtuple,
3619                                                          1,
3620                                                          RelationGetDescr(state->indexRel),
3621                                                          &stup->isnull1);
3622
3623         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3624         {
3625                 /*
3626                  * Store ordinary Datum representation, or NULL value.  If there is a
3627                  * converter it won't expect NULL values, and cost model is not
3628                  * required to account for NULL, so in that case we avoid calling
3629                  * converter and just set datum1 to "void" representation (to be
3630                  * consistent).
3631                  */
3632                 stup->datum1 = original;
3633         }
        else if (!consider_abort_common(state))
        {
                /* Store abbreviated key representation */
                stup->datum1 = state->sortKeys->abbrev_converter(original,
                                                                                                                 state->sortKeys);
        }
        else
        {
                /* Abort abbreviation */
                int                     i;

                stup->datum1 = original;

                /*
                 * Set state to be consistent with never trying abbreviation.
                 *
                 * Alter the datum1 representation of tuples already copied into the
                 * memtuples array, so that every tuple uses the ordinary
                 * (unabbreviated) representation; the current tuple was handled just
                 * above.  Note that we rely on all tuples copied so far actually
                 * being contained within the memtuples array.
                 */
                for (i = 0; i < state->memtupcount; i++)
                {
                        SortTuple  *mtup = &state->memtuples[i];

                        tuple = (IndexTuple) mtup->tuple;
                        mtup->datum1 = index_getattr(tuple,
                                                                                 1,
                                                                                 RelationGetDescr(state->indexRel),
                                                                                 &mtup->isnull1);
                }
        }
}

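/*
 * Write an index tuple to tape.  The on-tape format is a leading length word
 * (counting the tuple plus one length word), the tuple body, and, when random
 * access to the result is requested, a duplicate trailing length word so the
 * tape can also be read backwards.  The in-memory copy is released once
 * written.
 */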
static void
writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
        IndexTuple      tuple = (IndexTuple) stup->tuple;
        unsigned int tuplen;

        tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
        LogicalTapeWrite(state->tapeset, tapenum,
                                         (void *) &tuplen, sizeof(tuplen));
        LogicalTapeWrite(state->tapeset, tapenum,
                                         (void *) tuple, IndexTupleSize(tuple));
        if (state->randomAccess)        /* need trailing length word? */
                LogicalTapeWrite(state->tapeset, tapenum,
                                                 (void *) &tuplen, sizeof(tuplen));

        FREEMEM(state, GetMemoryChunkSpace(tuple));
        pfree(tuple);
}

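/*
 * Read an index tuple back from tape.  The caller has already consumed the
 * leading length word, so "len" is the total length including that word and
 * the payload is len - sizeof(unsigned int) bytes.  datum1 is re-derived from
 * the first index column in its ordinary (unabbreviated) form.
 */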
static void
readtup_index(Tuplesortstate *state, SortTuple *stup,
                          int tapenum, unsigned int len)
{
        unsigned int tuplen = len - sizeof(unsigned int);
        IndexTuple      tuple = (IndexTuple) palloc(tuplen);

        USEMEM(state, GetMemoryChunkSpace(tuple));
        LogicalTapeReadExact(state->tapeset, tapenum,
                                                 tuple, tuplen);
        if (state->randomAccess)        /* need trailing length word? */
                LogicalTapeReadExact(state->tapeset, tapenum,
                                                         &tuplen, sizeof(tuplen));
        stup->tuple = (void *) tuple;
        /* set up first-column key value */
        stup->datum1 = index_getattr(tuple,
                                                                 1,
                                                                 RelationGetDescr(state->indexRel),
                                                                 &stup->isnull1);
}

/*
 * Routines specialized for DatumTuple case
 */

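/*
 * In the routines below, the value being sorted lives entirely in
 * datum1/isnull1.  stup->tuple is used only for pass-by-reference types,
 * where it points at a palloc'd copy of the value; for pass-by-value types
 * and NULLs it is left NULL.
 */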
static int
comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
        return ApplySortComparator(a->datum1, a->isnull1,
                                                           b->datum1, b->isnull1,
                                                           state->onlyKey);
}

static void
copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
{
        /* Not currently needed */
        elog(ERROR, "copytup_datum() should not be called");
}

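/*
 * Write a datum to tape.  Three cases share one framing scheme of a leading
 * length word (payload size plus the length word itself) and an optional
 * trailing copy under randomAccess: a NULL is written as a zero-length
 * payload, a pass-by-value datum as the raw Datum bytes, and a
 * pass-by-reference datum as the datumGetSize() bytes it points to.
 */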
static void
writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
        void       *waddr;
        unsigned int tuplen;
        unsigned int writtenlen;

        if (stup->isnull1)
        {
                waddr = NULL;
                tuplen = 0;
        }
        else if (state->datumTypeByVal)
        {
                waddr = &stup->datum1;
                tuplen = sizeof(Datum);
        }
        else
        {
                waddr = DatumGetPointer(stup->datum1);
                tuplen = datumGetSize(stup->datum1, false, state->datumTypeLen);
                Assert(tuplen != 0);
        }

        writtenlen = tuplen + sizeof(unsigned int);

        LogicalTapeWrite(state->tapeset, tapenum,
                                         (void *) &writtenlen, sizeof(writtenlen));
        LogicalTapeWrite(state->tapeset, tapenum,
                                         waddr, tuplen);
        if (state->randomAccess)        /* need trailing length word? */
                LogicalTapeWrite(state->tapeset, tapenum,
                                                 (void *) &writtenlen, sizeof(writtenlen));

        if (stup->tuple)
        {
                FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
                pfree(stup->tuple);
        }
}

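/*
 * Read a datum back from tape, reversing writetup_datum: a zero-length
 * payload becomes a NULL, a sizeof(Datum) payload is read directly into
 * datum1 for pass-by-value types, and anything else is palloc'd and
 * referenced through datum1 (and stup->tuple) for pass-by-reference types.
 */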
static void
readtup_datum(Tuplesortstate *state, SortTuple *stup,
                          int tapenum, unsigned int len)
{
        unsigned int tuplen = len - sizeof(unsigned int);

        if (tuplen == 0)
        {
                /* it's NULL */
                stup->datum1 = (Datum) 0;
                stup->isnull1 = true;
                stup->tuple = NULL;
        }
        else if (state->datumTypeByVal)
        {
                Assert(tuplen == sizeof(Datum));
                LogicalTapeReadExact(state->tapeset, tapenum,
                                                         &stup->datum1, tuplen);
                stup->isnull1 = false;
                stup->tuple = NULL;
        }
        else
        {
                void       *raddr = palloc(tuplen);

                LogicalTapeReadExact(state->tapeset, tapenum,
                                                         raddr, tuplen);
                stup->datum1 = PointerGetDatum(raddr);
                stup->isnull1 = false;
                stup->tuple = raddr;
                USEMEM(state, GetMemoryChunkSpace(raddr));
        }

        if (state->randomAccess)        /* need trailing length word? */
                LogicalTapeReadExact(state->tapeset, tapenum,
                                                         &tuplen, sizeof(tuplen));
}

/*
 * Convenience routine to free a tuple previously loaded into sort memory
 */
static void
free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
{
        FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
        pfree(stup->tuple);
}