1 /*-------------------------------------------------------------------------
2  *
3  * tuplesort.c
4  *        Generalized tuple sorting routines.
5  *
6  * This module handles sorting of heap tuples, index tuples, or single
7  * Datums (and could easily support other kinds of sortable objects,
8  * if necessary).  It works efficiently for both small and large amounts
9  * of data.  Small amounts are sorted in-memory using qsort().  Large
10  * amounts are sorted using temporary files and a standard external sort
11  * algorithm.
12  *
13  * See Knuth, volume 3, for more than you want to know about the external
14  * sorting algorithm.  Historically, we divided the input into sorted runs
15  * using replacement selection, in the form of a priority tree implemented
16  * as a heap (essentially his Algorithm 5.2.3H), but now we only do that
17  * for the first run, and only if the run would otherwise end up being very
18  * short.  We merge the runs using polyphase merge, Knuth's Algorithm
19  * 5.4.2D.  The logical "tapes" used by Algorithm D are implemented by
20  * logtape.c, which avoids space wastage by recycling disk space as soon
21  * as each block is read from its "tape".
22  *
23  * We do not use Knuth's recommended data structure (Algorithm 5.4.1R) for
24  * the replacement selection, because it uses a fixed number of records
25  * in memory at all times.  Since we are dealing with tuples that may vary
26  * considerably in size, we want to be able to vary the number of records
27  * kept in memory to ensure full utilization of the allowed sort memory
28  * space.  So, we keep the tuples in a variable-size heap, with the next
29  * record to go out at the top of the heap.  Like Algorithm 5.4.1R, each
30  * record is stored with the run number that it must go into, and we use
31  * (run number, key) as the ordering key for the heap.  When the run number
32  * at the top of the heap changes, we know that no more records of the prior
33  * run are left in the heap.  Note that there are in practice only ever two
34  * distinct run numbers, because, since PostgreSQL 9.6, we only use
35  * replacement selection to form the first run.
36  *
37  * In PostgreSQL 9.6, a heap (based on Knuth's Algorithm H, with some small
38  * customizations) is only used with the aim of producing just one run,
39  * thereby avoiding all merging.  Only the first run can use replacement
40  * selection, which is why there are now only two possible valid run
41  * numbers, and why heapification is customized to not distinguish between
42  * tuples in the second run (those will be quicksorted).  We generally
43  * prefer a simple hybrid sort-merge strategy, where runs are sorted in much
44  * the same way as the entire input of an internal sort is sorted (using
45  * qsort()).  The replacement_sort_tuples GUC controls the limited remaining
46  * use of replacement selection for the first run.
47  *
48  * There are several reasons to favor a hybrid sort-merge strategy.
49  * Maintaining a priority tree/heap has poor CPU cache characteristics.
50  * Furthermore, the growth in main memory sizes has greatly diminished the
51  * value of having runs that are larger than available memory, even in the
52  * case where there is partially sorted input and runs can be made far
53  * larger by using a heap.  In most cases, a single-pass merge step is all
54  * that is required even when runs are no larger than available memory.
55  * Avoiding multiple merge passes was traditionally considered to be the
56  * major advantage of using replacement selection.
57  *
58  * The approximate amount of memory allowed for any one sort operation
59  * is specified in kilobytes by the caller (most pass work_mem).  Initially,
60  * we absorb tuples and simply store them in an unsorted array as long as
61  * we haven't exceeded workMem.  If we reach the end of the input without
62  * exceeding workMem, we sort the array using qsort() and subsequently return
63  * tuples just by scanning the tuple array sequentially.  If we do exceed
64  * workMem, we begin to emit tuples into sorted runs in temporary tapes.
65  * When tuples are dumped in batch after quicksorting, we begin a new run
66  * with a new output tape (selected per Algorithm D).  After the end of the
67  * input is reached, we dump out remaining tuples in memory into a final run
68  * (or two, when replacement selection is still used), then merge the runs
69  * using Algorithm D.
70  *
71  * When merging runs, we use a heap containing just the frontmost tuple from
72  * each source run; we repeatedly output the smallest tuple and replace it
73  * with the next tuple from its source tape (if any).  When the heap empties,
74  * the merge is complete.  The basic merge algorithm thus needs very little
75  * memory --- only M tuples for an M-way merge, and M is constrained to a
76  * small number.  However, we can still make good use of our full workMem
77  * allocation by pre-reading additional blocks from each source tape.  Without
78  * prereading, our access pattern to the temporary file would be very erratic;
79  * on average we'd read one block from each of M source tapes during the same
80  * time that we're writing M blocks to the output tape, so there is no
81  * sequentiality of access at all, defeating the read-ahead methods used by
82  * most Unix kernels.  Worse, the output tape gets written into a very random
83  * sequence of blocks of the temp file, ensuring that things will be even
84  * worse when it comes time to read that tape.  A straightforward merge pass
85  * thus ends up doing a lot of waiting for disk seeks.  We can improve matters
86  * by prereading from each source tape sequentially, loading about workMem/M
87  * bytes from each tape in turn, and making the sequential blocks immediately
88  * available for reuse.  This approach helps to localize both read and write
89  * accesses.  The pre-reading is handled by logtape.c; we just tell it how
90  * much memory to use for the buffers.
91  *
92  * When the caller requests random access to the sort result, we form
93  * the final sorted run on a logical tape which is then "frozen", so
94  * that we can access it randomly.  When the caller does not need random
95  * access, we return from tuplesort_performsort() as soon as we are down
96  * to one run per logical tape.  The final merge is then performed
97  * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
98  * saves one cycle of writing all the data out to disk and reading it in.
99  *
100  * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
101  * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
102  * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
103  * tape drives are expensive beasts, and in particular that there will always
104  * be many more runs than tape drives.  In our implementation a "tape drive"
105  * doesn't cost much more than a few Kb of memory buffers, so we can afford
106  * to have lots of them.  In particular, if we can have as many tape drives
107  * as sorted runs, we can eliminate repeated I/O entirely.  In the current
108  * code we determine the number of tapes M on the basis of workMem: we want
109  * workMem/M to be large enough that we read a fair amount of data each time
110  * we preread from a tape, so as to maintain the locality of access described
111  * above.  Nonetheless, with large workMem we can have many tapes (but not
112  * too many -- see the comments in tuplesort_merge_order).
113  *
114  *
115  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
116  * Portions Copyright (c) 1994, Regents of the University of California
117  *
118  * IDENTIFICATION
119  *        src/backend/utils/sort/tuplesort.c
120  *
121  *-------------------------------------------------------------------------
122  */
123
124 #include "postgres.h"
125
126 #include <limits.h>
127
128 #include "access/htup_details.h"
129 #include "access/nbtree.h"
130 #include "catalog/index.h"
131 #include "catalog/pg_am.h"
132 #include "commands/tablespace.h"
133 #include "executor/executor.h"
134 #include "miscadmin.h"
135 #include "pg_trace.h"
136 #include "utils/datum.h"
137 #include "utils/logtape.h"
138 #include "utils/lsyscache.h"
139 #include "utils/memutils.h"
140 #include "utils/pg_rusage.h"
141 #include "utils/rel.h"
142 #include "utils/sortsupport.h"
143 #include "utils/tuplesort.h"
144
145
146 /* sort-type codes for sort__start probes */
147 #define HEAP_SORT               0
148 #define INDEX_SORT              1
149 #define DATUM_SORT              2
150 #define CLUSTER_SORT    3
151
152 /* GUC variables */
153 #ifdef TRACE_SORT
154 bool            trace_sort = false;
155 #endif
156
157 #ifdef DEBUG_BOUNDED_SORT
158 bool            optimize_bounded_sort = true;
159 #endif
160
161
162 /*
163  * The objects we actually sort are SortTuple structs.  These contain
164  * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
165  * which is a separate palloc chunk --- we assume it is just one chunk and
166  * can be freed by a simple pfree() (except during merge, when we use a
167  * simple slab allocator).  SortTuples also contain the tuple's first key
168  * column in Datum/nullflag format, and an index integer.
169  *
170  * Storing the first key column lets us save heap_getattr or index_getattr
171  * calls during tuple comparisons.  We could extract and save all the key
172  * columns not just the first, but this would increase code complexity and
173  * overhead, and wouldn't actually save any comparison cycles in the common
174  * case where the first key determines the comparison result.  Note that
175  * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
176  *
177  * There is one special case: when the sort support infrastructure provides an
178  * "abbreviated key" representation, where the key is (typically) a pass by
179  * value proxy for a pass by reference type.  In this case, the abbreviated key
180  * is stored in datum1 in place of the actual first key column.
181  *
182  * When sorting single Datums, the data value is represented directly by
183  * datum1/isnull1 for pass by value types (or null values).  If the datatype is
184  * pass-by-reference and isnull1 is false, then "tuple" points to a separately
185  * palloc'd data value, otherwise "tuple" is NULL.  The value of datum1 is then
186  * either the same pointer as "tuple", or is an abbreviated key value as
187  * described above.  Accordingly, "tuple" is always used in preference to
188  * datum1 as the authoritative value for pass-by-reference cases.
189  *
190  * While building initial runs, tupindex holds the tuple's run number.
191  * Historically, the run number could meaningfully distinguish many runs, but
192  * it now only distinguishes RUN_FIRST and HEAP_RUN_NEXT, since replacement
193  * selection is always abandoned after the first run; no other run number
194  * should be represented here.  During merge passes, we re-use it to hold the
195  * input tape number that each tuple in the heap was read from.  tupindex goes
196  * unused if the sort occurs entirely in memory.
197  */
198 typedef struct
199 {
200         void       *tuple;                      /* the tuple itself */
201         Datum           datum1;                 /* value of first key column */
202         bool            isnull1;                /* is first key column NULL? */
203         int                     tupindex;               /* see notes above */
204 } SortTuple;
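
/*
 * Illustrative sketch only (assumed helper, not part of the sort code): the
 * point of caching the leading key in datum1/isnull1 is that most
 * comparisons can be decided without touching the tuple proper.  The real
 * comparetup_* routines start out in this shape, and fall back to the
 * remaining key columns --- or to the full, authoritative value when datum1
 * holds an abbreviated key --- only if the cached leading key compares equal.
 */
static inline int
example_compare_leading_key(const SortTuple *a, const SortTuple *b,
							SortSupport ssup)
{
	/* usually resolves the comparison; no heap_getattr/index_getattr call */
	return ApplySortComparator(a->datum1, a->isnull1,
							   b->datum1, b->isnull1,
							   ssup);
}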
205
206 /*
207  * During merge, we use a pre-allocated set of fixed-size slots to hold
208  * tuples, to avoid palloc/pfree overhead.
209  *
210  * Merge doesn't require a lot of memory, so we can afford to waste some
211  * by using gratuitously-sized slots.  If a tuple is larger than 1 kB, the
212  * palloc() overhead is no longer significant.
213  *
214  * 'nextfree' is valid when this chunk is in the free list.  When in use, the
215  * slot holds a tuple.
216  */
217 #define SLAB_SLOT_SIZE 1024
218
219 typedef union SlabSlot
220 {
221         union SlabSlot *nextfree;
222         char            buffer[SLAB_SLOT_SIZE];
223 } SlabSlot;
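
/*
 * Illustrative sketch only (assumed helper, not part of the sort code): how
 * an incoming tuple is matched to a slab slot, per the comment above.  The
 * free-list head is passed in explicitly here to keep the sketch
 * self-contained; the real logic of this kind lives in readtup_alloc(),
 * which works off the Tuplesortstate fields declared further down.
 */
static inline void *
example_slab_slot_or_palloc(SlabSlot **freelist, Size tuplen)
{
	if (tuplen <= SLAB_SLOT_SIZE && *freelist != NULL)
	{
		/* pop the first free slot and reuse its buffer */
		SlabSlot   *slot = *freelist;

		*freelist = slot->nextfree;
		return slot->buffer;
	}

	/* oversized tuple (or no free slot): fall back to an ordinary palloc */
	return palloc(tuplen);
}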
224
225 /*
226  * Possible states of a Tuplesort object.  These denote the states that
227  * persist between calls of Tuplesort routines.
228  */
229 typedef enum
230 {
231         TSS_INITIAL,                            /* Loading tuples; still within memory limit */
232         TSS_BOUNDED,                            /* Loading tuples into bounded-size heap */
233         TSS_BUILDRUNS,                          /* Loading tuples; writing to tape */
234         TSS_SORTEDINMEM,                        /* Sort completed entirely in memory */
235         TSS_SORTEDONTAPE,                       /* Sort completed, final run is on tape */
236         TSS_FINALMERGE                          /* Performing final merge on-the-fly */
237 } TupSortStatus;
238
239 /*
240  * Parameters for calculation of number of tapes to use --- see inittapes()
241  * and tuplesort_merge_order().
242  *
243  * In this calculation we assume that each tape will cost us about 3 blocks
244  * worth of buffer space (which is an underestimate for very large data
245  * volumes, but it's probably close enough --- see logtape.c).
246  *
247  * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
248  * tape during a preread cycle (see discussion at top of file).
249  */
250 #define MINORDER                6               /* minimum merge order */
251 #define MAXORDER                500             /* maximum merge order */
252 #define TAPE_BUFFER_OVERHEAD            (BLCKSZ * 3)
253 #define MERGE_BUFFER_SIZE                       (BLCKSZ * 32)
254
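/*
 * Illustrative sketch only (not the real function): roughly how a merge
 * order can be derived from the memory budget using the constants above,
 * charging each input tape MERGE_BUFFER_SIZE of preread data plus
 * TAPE_BUFFER_OVERHEAD of buffer space and clamping the result to
 * [MINORDER, MAXORDER].  The authoritative calculation is
 * tuplesort_merge_order(), elsewhere in this file.
 */
static inline int
example_merge_order(int64 allowedMem)
{
	int64		mOrder;

	mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
		(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);

	if (mOrder < MINORDER)
		mOrder = MINORDER;
	if (mOrder > MAXORDER)
		mOrder = MAXORDER;

	return (int) mOrder;
}
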
255  /*
256   * Run numbers, used during external sort operations.
257   *
258   * HEAP_RUN_NEXT is only used for SortTuple.tupindex, never state.currentRun.
259   */
260 #define RUN_FIRST               0
261 #define HEAP_RUN_NEXT   INT_MAX
262 #define RUN_SECOND              1
263
264 typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
265                                                                                                 Tuplesortstate *state);
266
267 /*
268  * Private state of a Tuplesort operation.
269  */
270 struct Tuplesortstate
271 {
272         TupSortStatus status;           /* enumerated value as shown above */
273         int                     nKeys;                  /* number of columns in sort key */
274         bool            randomAccess;   /* did caller request random access? */
275         bool            bounded;                /* did caller specify a maximum number of
276                                                                  * tuples to return? */
277         bool            boundUsed;              /* true if we made use of a bounded heap */
278         int                     bound;                  /* if bounded, the maximum number of tuples */
279         bool            tuples;                 /* Can SortTuple.tuple ever be set? */
280         int64           availMem;               /* remaining memory available, in bytes */
281         int64           allowedMem;             /* total memory allowed, in bytes */
282         int                     maxTapes;               /* number of tapes (Knuth's T) */
283         int                     tapeRange;              /* maxTapes-1 (Knuth's P) */
284         MemoryContext sortcontext;      /* memory context holding most sort data */
285         MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
286         LogicalTapeSet *tapeset;        /* logtape.c object for tapes in a temp file */
287
288         /*
289          * These function pointers decouple the routines that must know what kind
290          * of tuple we are sorting from the routines that don't need to know it.
291          * They are set up by the tuplesort_begin_xxx routines.
292          *
293          * Function to compare two tuples; result is per qsort() convention, ie:
294          * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
295          * qsort_arg_comparator.
296          */
297         SortTupleComparator comparetup;
298
299         /*
300          * Function to copy a supplied input tuple into palloc'd space and set up
301          * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
302          * state->availMem must be decreased by the amount of space used for the
303          * tuple copy (note the SortTuple struct itself is not counted).
304          */
305         void            (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
306
307         /*
308          * Function to write a stored tuple onto tape.  The representation of the
309          * tuple on tape need not be the same as it is in memory; requirements on
310          * the tape representation are given below.  Unless the slab allocator is
311          * used, after writing the tuple, pfree() the out-of-line data (not the
312          * SortTuple struct!), and increase state->availMem by the amount of
313          * memory space thereby released.
314          */
315         void            (*writetup) (Tuplesortstate *state, int tapenum,
316                                                                                  SortTuple *stup);
317
318         /*
319          * Function to read a stored tuple from tape back into memory. 'len' is
320          * the already-read length of the stored tuple.  The tuple is allocated
321          * from the slab memory arena, or is palloc'd, see readtup_alloc().
322          */
323         void            (*readtup) (Tuplesortstate *state, SortTuple *stup,
324                                                                                 int tapenum, unsigned int len);
325
326         /*
327          * This array holds the tuples now in sort memory.  If we are in state
328          * INITIAL, the tuples are in no particular order; if we are in state
329          * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
330          * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
331          * H.  In state SORTEDONTAPE, the array is not used.
332          */
333         SortTuple  *memtuples;          /* array of SortTuple structs */
334         int                     memtupcount;    /* number of tuples currently present */
335         int                     memtupsize;             /* allocated length of memtuples array */
336         bool            growmemtuples;  /* memtuples' growth still underway? */
337
338         /*
339          * Memory for tuples is sometimes allocated using a simple slab allocator,
340          * rather than with palloc().  Currently, we switch to slab allocation
341          * when we start merging.  Merging only needs to keep a small, fixed
342          * number of tuples in memory at any time, so we can avoid the
343          * palloc/pfree overhead by recycling a fixed number of fixed-size slots
344          * to hold the tuples.
345          *
346          * For the slab, we use one large allocation, divided into SLAB_SLOT_SIZE
347          * slots.  The allocation is sized to have one slot per tape, plus one
348          * additional slot.  We need that many slots to hold all the tuples kept
349  * in the heap during merge, plus the one most recently returned from the
350  * sort by tuplesort_gettuple.
351          *
352          * Initially, all the slots are kept in a linked list of free slots.  When
353  * a tuple is read from a tape, it is put into the next available slot, if
354          * it fits.  If the tuple is larger than SLAB_SLOT_SIZE, it is palloc'd
355          * instead.
356          *
357          * When we're done processing a tuple, we return the slot back to the free
358  * list, or pfree() it if it was palloc'd.  We know that a tuple was
359  * allocated from the slab if its pointer value is between
360  * slabMemoryBegin and slabMemoryEnd.
361          *
362          * When the slab allocator is used, the USEMEM/LACKMEM mechanism of
363          * tracking memory usage is not used.
364          */
365         bool            slabAllocatorUsed;
366
367         char       *slabMemoryBegin;    /* beginning of slab memory arena */
368         char       *slabMemoryEnd;      /* end of slab memory arena */
369         SlabSlot   *slabFreeHead;       /* head of free list */
370
371         /* Buffer size to use for reading input tapes, during merge. */
372         size_t          read_buffer_size;
373
374         /*
375          * When we return a tuple to the caller in tuplesort_gettuple_XXX, that
376          * came from a tape (that is, in TSS_SORTEDONTAPE or TSS_FINALMERGE
377          * modes), we remember the tuple in 'lastReturnedTuple', so that we can
378          * recycle the memory on next gettuple call.
379          */
380         void       *lastReturnedTuple;
381
382         /*
383          * While building initial runs, this indicates if the replacement
384          * selection strategy is in use.  When it isn't, then a simple hybrid
385          * sort-merge strategy is in use instead (runs are quicksorted).
386          */
387         bool            replaceActive;
388
389         /*
390          * While building initial runs, this is the current output run number
391          * (starting at RUN_FIRST).  Afterwards, it is the number of initial runs
392          * we made.
393          */
394         int                     currentRun;
395
396         /*
397          * Unless otherwise noted, all pointer variables below are pointers to
398          * arrays of length maxTapes, holding per-tape data.
399          */
400
401         /*
402          * This variable is only used during merge passes.  mergeactive[i] is true
403          * if we are reading an input run from (actual) tape number i and have not
404          * yet exhausted that run.
405          */
406         bool       *mergeactive;        /* active input run source? */
407
408         /*
409          * Variables for Algorithm D.  Note that destTape is a "logical" tape
410          * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
411          * "logical" and "actual" tape numbers straight!
412          */
413         int                     Level;                  /* Knuth's l */
414         int                     destTape;               /* current output tape (Knuth's j, less 1) */
415         int                *tp_fib;                     /* Target Fibonacci run counts (A[]) */
416         int                *tp_runs;            /* # of real runs on each tape */
417         int                *tp_dummy;           /* # of dummy runs for each tape (D[]) */
418         int                *tp_tapenum;         /* Actual tape numbers (TAPE[]) */
419         int                     activeTapes;    /* # of active input tapes in merge pass */
420
421         /*
422          * These variables are used after completion of sorting to keep track of
423          * the next tuple to return.  (In the tape case, the tape's current read
424          * position is also critical state.)
425          */
426         int                     result_tape;    /* actual tape number of finished output */
427         int                     current;                /* array index (only used if SORTEDINMEM) */
428         bool            eof_reached;    /* reached EOF (needed for cursors) */
429
430         /* markpos_xxx holds marked position for mark and restore */
431         long            markpos_block;  /* tape block# (only used if SORTEDONTAPE) */
432         int                     markpos_offset; /* saved "current", or offset in tape block */
433         bool            markpos_eof;    /* saved "eof_reached" */
434
435         /*
436          * The sortKeys variable is used by every case other than the hash index
437          * case; it is set by tuplesort_begin_xxx.  tupDesc is only used by the
438          * MinimalTuple and CLUSTER routines, though.
439          */
440         TupleDesc       tupDesc;
441         SortSupport sortKeys;           /* array of length nKeys */
442
443         /*
444          * This variable is shared by the single-key MinimalTuple case and the
445          * Datum case (which both use qsort_ssup()).  Otherwise it's NULL.
446          */
447         SortSupport onlyKey;
448
449         /*
450          * Additional state for managing "abbreviated key" sortsupport routines
451          * (which currently may be used by all cases except the hash index case).
452          * Tracks the intervals at which the optimization's effectiveness is
453          * tested.
454          */
455         int64           abbrevNext;             /* Tuple # at which to next check
456                                                                  * applicability */
457
458         /*
459          * These variables are specific to the CLUSTER case; they are set by
460          * tuplesort_begin_cluster.
461          */
462         IndexInfo  *indexInfo;          /* info about index being used for reference */
463         EState     *estate;                     /* for evaluating index expressions */
464
465         /*
466          * These variables are specific to the IndexTuple case; they are set by
467          * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
468          */
469         Relation        heapRel;                /* table the index is being built on */
470         Relation        indexRel;               /* index being built */
471
472         /* These are specific to the index_btree subcase: */
473         bool            enforceUnique;  /* complain if we find duplicate tuples */
474
475         /* These are specific to the index_hash subcase: */
476         uint32          hash_mask;              /* mask for sortable part of hash code */
477
478         /*
479          * These variables are specific to the Datum case; they are set by
480          * tuplesort_begin_datum and used only by the DatumTuple routines.
481          */
482         Oid                     datumType;
483         /* we need typelen in order to know how to copy the Datums. */
484         int                     datumTypeLen;
485
486         /*
487          * Resource snapshot for time of sort start.
488          */
489 #ifdef TRACE_SORT
490         PGRUsage        ru_start;
491 #endif
492 };
493
494 /*
495  * Is the given tuple allocated from the slab memory arena?
496  */
497 #define IS_SLAB_SLOT(state, tuple) \
498         ((char *) (tuple) >= (state)->slabMemoryBegin && \
499          (char *) (tuple) < (state)->slabMemoryEnd)
500
501 /*
502  * Return the given tuple to the slab memory free list, or free it
503  * if it was palloc'd.
504  */
505 #define RELEASE_SLAB_SLOT(state, tuple) \
506         do { \
507                 SlabSlot *buf = (SlabSlot *) tuple; \
508                 \
509                 if (IS_SLAB_SLOT((state), buf)) \
510                 { \
511                         buf->nextfree = (state)->slabFreeHead; \
512                         (state)->slabFreeHead = buf; \
513                 } else \
514                         pfree(buf); \
515         } while(0)
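
/*
 * Illustrative sketch only (assumed helper, not part of the sort code): when
 * tuples are being returned from tape, each gettuple call first recycles the
 * tuple handed out on the previous call, using the macro above, before
 * remembering the new one in lastReturnedTuple.
 */
static inline void
example_recycle_last_returned(Tuplesortstate *state)
{
	if (state->lastReturnedTuple)
	{
		RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
		state->lastReturnedTuple = NULL;
	}
}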
516
517 #define COMPARETUP(state,a,b)   ((*(state)->comparetup) (a, b, state))
518 #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
519 #define WRITETUP(state,tape,stup)       ((*(state)->writetup) (state, tape, stup))
520 #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
521 #define LACKMEM(state)          ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
522 #define USEMEM(state,amt)       ((state)->availMem -= (amt))
523 #define FREEMEM(state,amt)      ((state)->availMem += (amt))
524
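/*
 * Illustrative sketch only (assumed helper, not part of the sort code): the
 * shape of the memory accounting that copytup_* routines are expected to
 * perform.  The copy is made in tuplecontext and charged against availMem
 * via USEMEM; GetMemoryChunkSpace() is used so that palloc overhead is
 * included in the accounting.  writetup_* routines later give the space back
 * with FREEMEM (except when the slab allocator is in use).
 */
static inline void *
example_copy_with_accounting(Tuplesortstate *state, void *src, Size len)
{
	void	   *copy = MemoryContextAlloc(state->tuplecontext, len);

	memcpy(copy, src, len);
	USEMEM(state, GetMemoryChunkSpace(copy));

	return copy;
}
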
525 /*
526  * NOTES about on-tape representation of tuples:
527  *
528  * We require the first "unsigned int" of a stored tuple to be the total size
529  * on-tape of the tuple, including itself (so it is never zero; an all-zero
530  * unsigned int is used to delimit runs).  The remainder of the stored tuple
531  * may or may not match the in-memory representation of the tuple ---
532  * any conversion needed is the job of the writetup and readtup routines.
533  *
534  * If state->randomAccess is true, then the stored representation of the
535  * tuple must be followed by another "unsigned int" that is a copy of the
536  * length --- so the total tape space used is actually sizeof(unsigned int)
537  * more than the stored length value.  This allows read-backwards.  When
538  * randomAccess is not true, the write/read routines may omit the extra
539  * length word.
540  *
541  * writetup is expected to write both length words as well as the tuple
542  * data.  When readtup is called, the tape is positioned just after the
543  * front length word; readtup must read the tuple data and advance past
544  * the back length word (if present).
545  *
546  * The write/read routines can make use of the tuple description data
547  * stored in the Tuplesortstate record, if needed.  They are also expected
548  * to adjust state->availMem by the amount of memory space (not tape space!)
549  * released or consumed.  There is no error return from either writetup
550  * or readtup; they should ereport() on failure.
551  *
552  *
553  * NOTES about memory consumption calculations:
554  *
555  * We count space allocated for tuples against the workMem limit, plus
556  * the space used by the variable-size memtuples array.  Fixed-size space
557  * is not counted; it's small enough to not be interesting.
558  *
559  * Note that we count actual space used (as shown by GetMemoryChunkSpace)
560  * rather than the originally-requested size.  This is important since
561  * palloc can add substantial overhead.  It's not a complete answer since
562  * we won't count any wasted space in palloc allocation blocks, but it's
563  * a lot better than what we were doing before 7.3.  As of 9.6, a
564  * separate memory context is used for caller-passed tuples.  Resetting
565  * it at certain key increments significantly ameliorates fragmentation.
566  * Note that this places a responsibility on readtup and copytup routines
567  * to use the right memory context for these tuples (and to not use the
568  * reset context for anything whose lifetime needs to span multiple
569  * external sort runs).
570  */
571
572 /* When using this macro, beware of double evaluation of len */
573 #define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
574         do { \
575                 if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
576                         elog(ERROR, "unexpected end of data"); \
577         } while(0)
578
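/*
 * Illustrative sketch only (assumed helper, not one of the real writetup_*
 * routines): the on-tape layout described in the notes above is a leading
 * length word covering the payload plus itself, then the tuple data, then ---
 * only when randomAccess was requested --- a trailing copy of the length word
 * to permit reading backwards.
 */
static inline void
example_writetup_raw(Tuplesortstate *state, int tapenum,
					 void *data, unsigned int datalen)
{
	unsigned int tuplen = datalen + sizeof(tuplen);

	LogicalTapeWrite(state->tapeset, tapenum, (void *) &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum, data, datalen);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum, (void *) &tuplen, sizeof(tuplen));
}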
579
580 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
581 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
582 static bool consider_abort_common(Tuplesortstate *state);
583 static bool useselection(Tuplesortstate *state);
584 static void inittapes(Tuplesortstate *state);
585 static void selectnewtape(Tuplesortstate *state);
586 static void init_slab_allocator(Tuplesortstate *state, int numSlots);
587 static void mergeruns(Tuplesortstate *state);
588 static void mergeonerun(Tuplesortstate *state);
589 static void beginmerge(Tuplesortstate *state);
590 static bool mergereadnext(Tuplesortstate *state, int srcTape, SortTuple *stup);
591 static void dumptuples(Tuplesortstate *state, bool alltuples);
592 static void dumpbatch(Tuplesortstate *state, bool alltuples);
593 static void make_bounded_heap(Tuplesortstate *state);
594 static void sort_bounded_heap(Tuplesortstate *state);
595 static void tuplesort_sort_memtuples(Tuplesortstate *state);
596 static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
597                                           bool checkIndex);
598 static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple,
599                                                    bool checkIndex);
600 static void tuplesort_heap_delete_top(Tuplesortstate *state, bool checkIndex);
601 static void reversedirection(Tuplesortstate *state);
602 static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
603 static void markrunend(Tuplesortstate *state, int tapenum);
604 static void *readtup_alloc(Tuplesortstate *state, Size tuplen);
605 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
606                                 Tuplesortstate *state);
607 static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
608 static void writetup_heap(Tuplesortstate *state, int tapenum,
609                           SortTuple *stup);
610 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
611                          int tapenum, unsigned int len);
612 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
613                                    Tuplesortstate *state);
614 static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
615 static void writetup_cluster(Tuplesortstate *state, int tapenum,
616                                  SortTuple *stup);
617 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
618                                 int tapenum, unsigned int len);
619 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
620                                            Tuplesortstate *state);
621 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
622                                           Tuplesortstate *state);
623 static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
624 static void writetup_index(Tuplesortstate *state, int tapenum,
625                            SortTuple *stup);
626 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
627                           int tapenum, unsigned int len);
628 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
629                                  Tuplesortstate *state);
630 static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
631 static void writetup_datum(Tuplesortstate *state, int tapenum,
632                            SortTuple *stup);
633 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
634                           int tapenum, unsigned int len);
635 static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
636
637 /*
638  * Special versions of qsort just for SortTuple objects.  qsort_tuple() sorts
639  * any variant of SortTuples, using the appropriate comparetup function.
640  * qsort_ssup() is specialized for the case where the comparetup function
641  * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts
642  * and Datum sorts.
643  */
644 #include "qsort_tuple.c"
645
646
647 /*
648  *              tuplesort_begin_xxx
649  *
650  * Initialize for a tuple sort operation.
651  *
652  * After calling tuplesort_begin, the caller should call tuplesort_putXXX
653  * zero or more times, then call tuplesort_performsort when all the tuples
654  * have been supplied.  After performsort, retrieve the tuples in sorted
655  * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
656  * access was requested, rescan, markpos, and restorepos can also be called.)
657  * Call tuplesort_end to terminate the operation and release memory/disk space.
658  *
659  * Each variant of tuplesort_begin has a workMem parameter specifying the
660  * maximum number of kilobytes of RAM to use before spilling data to disk.
661  * (The normal value of this parameter is work_mem, but some callers use
662  * other values.)  Each variant also has a randomAccess parameter specifying
663  * whether the caller needs non-sequential access to the sort result.
664  */
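
/*
 * Illustrative sketch only (assumed example, not part of this file): the
 * life cycle described above, for a caller sorting heap tuples on a single
 * column.  The tuple-feeding and tuple-fetching loops are elided, since they
 * depend on where the caller's tuples come from; see tuplesort.h for the
 * exact tuplesort_putXXX/tuplesort_getXXX entry points.
 */
static inline void
example_sort_lifecycle(TupleDesc tupDesc, AttrNumber attNum,
					   Oid sortOperator, Oid collation)
{
	bool		nullsFirst = false;
	Tuplesortstate *sortstate;

	sortstate = tuplesort_begin_heap(tupDesc, 1, &attNum,
									 &sortOperator, &collation,
									 &nullsFirst,
									 work_mem, false);

	/* ... call tuplesort_puttupleslot() once per input tuple ... */

	tuplesort_performsort(sortstate);

	/* ... call tuplesort_gettupleslot() until it reports no more tuples ... */

	tuplesort_end(sortstate);
}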
665
666 static Tuplesortstate *
667 tuplesort_begin_common(int workMem, bool randomAccess)
668 {
669         Tuplesortstate *state;
670         MemoryContext sortcontext;
671         MemoryContext tuplecontext;
672         MemoryContext oldcontext;
673
674         /*
675          * Create a working memory context for this sort operation. All data
676          * needed by the sort will live inside this context.
677          */
678         sortcontext = AllocSetContextCreate(CurrentMemoryContext,
679                                                                                 "TupleSort main",
680                                                                                 ALLOCSET_DEFAULT_SIZES);
681
682         /*
683          * Caller tuple (e.g. IndexTuple) memory context.
684          *
685  * A dedicated child context used exclusively for caller-passed tuples
686          * eases memory management.  Resetting at key points reduces
687          * fragmentation. Note that the memtuples array of SortTuples is allocated
688          * in the parent context, not this context, because there is no need to
689          * free memtuples early.
690          */
691         tuplecontext = AllocSetContextCreate(sortcontext,
692                                                                                  "Caller tuples",
693                                                                                  ALLOCSET_DEFAULT_SIZES);
694
695         /*
696          * Make the Tuplesortstate within the per-sort context.  This way, we
697          * don't need a separate pfree() operation for it at shutdown.
698          */
699         oldcontext = MemoryContextSwitchTo(sortcontext);
700
701         state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
702
703 #ifdef TRACE_SORT
704         if (trace_sort)
705                 pg_rusage_init(&state->ru_start);
706 #endif
707
708         state->status = TSS_INITIAL;
709         state->randomAccess = randomAccess;
710         state->bounded = false;
711         state->tuples = true;
712         state->boundUsed = false;
713         state->allowedMem = workMem * (int64) 1024;
714         state->availMem = state->allowedMem;
715         state->sortcontext = sortcontext;
716         state->tuplecontext = tuplecontext;
717         state->tapeset = NULL;
718
719         state->memtupcount = 0;
720
721         /*
722          * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
723          * see comments in grow_memtuples().
724          */
725         state->memtupsize = Max(1024,
726                                                 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1);
727
728         state->growmemtuples = true;
729         state->slabAllocatorUsed = false;
730         state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
731
732         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
733
734         /* workMem must be large enough for the minimal memtuples array */
735         if (LACKMEM(state))
736                 elog(ERROR, "insufficient memory allowed for sort");
737
738         state->currentRun = RUN_FIRST;
739
740         /*
741          * maxTapes, tapeRange, and Algorithm D variables will be initialized by
742          * inittapes(), if needed
743          */
744
745         state->result_tape = -1;        /* flag that result tape has not been formed */
746
747         MemoryContextSwitchTo(oldcontext);
748
749         return state;
750 }
751
752 Tuplesortstate *
753 tuplesort_begin_heap(TupleDesc tupDesc,
754                                          int nkeys, AttrNumber *attNums,
755                                          Oid *sortOperators, Oid *sortCollations,
756                                          bool *nullsFirstFlags,
757                                          int workMem, bool randomAccess)
758 {
759         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
760         MemoryContext oldcontext;
761         int                     i;
762
763         oldcontext = MemoryContextSwitchTo(state->sortcontext);
764
765         AssertArg(nkeys > 0);
766
767 #ifdef TRACE_SORT
768         if (trace_sort)
769                 elog(LOG,
770                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
771                          nkeys, workMem, randomAccess ? 't' : 'f');
772 #endif
773
774         state->nKeys = nkeys;
775
776         TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
777                                                                 false,  /* no unique check */
778                                                                 nkeys,
779                                                                 workMem,
780                                                                 randomAccess);
781
782         state->comparetup = comparetup_heap;
783         state->copytup = copytup_heap;
784         state->writetup = writetup_heap;
785         state->readtup = readtup_heap;
786
787         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
788         state->abbrevNext = 10;
789
790         /* Prepare SortSupport data for each column */
791         state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
792
793         for (i = 0; i < nkeys; i++)
794         {
795                 SortSupport sortKey = state->sortKeys + i;
796
797                 AssertArg(attNums[i] != 0);
798                 AssertArg(sortOperators[i] != 0);
799
800                 sortKey->ssup_cxt = CurrentMemoryContext;
801                 sortKey->ssup_collation = sortCollations[i];
802                 sortKey->ssup_nulls_first = nullsFirstFlags[i];
803                 sortKey->ssup_attno = attNums[i];
804                 /* Convey if abbreviation optimization is applicable in principle */
805                 sortKey->abbreviate = (i == 0);
806
807                 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
808         }
809
810         /*
811          * The "onlyKey" optimization cannot be used with abbreviated keys, since
812          * tie-breaker comparisons may be required.  Typically, the optimization
813          * is only of value to pass-by-value types anyway, whereas abbreviated
814          * keys are typically only of value to pass-by-reference types.
815          */
816         if (nkeys == 1 && !state->sortKeys->abbrev_converter)
817                 state->onlyKey = state->sortKeys;
818
819         MemoryContextSwitchTo(oldcontext);
820
821         return state;
822 }
823
824 Tuplesortstate *
825 tuplesort_begin_cluster(TupleDesc tupDesc,
826                                                 Relation indexRel,
827                                                 int workMem, bool randomAccess)
828 {
829         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
830         ScanKey         indexScanKey;
831         MemoryContext oldcontext;
832         int                     i;
833
834         Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
835
836         oldcontext = MemoryContextSwitchTo(state->sortcontext);
837
838 #ifdef TRACE_SORT
839         if (trace_sort)
840                 elog(LOG,
841                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
842                          RelationGetNumberOfAttributes(indexRel),
843                          workMem, randomAccess ? 't' : 'f');
844 #endif
845
846         state->nKeys = RelationGetNumberOfAttributes(indexRel);
847
848         TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
849                                                                 false,  /* no unique check */
850                                                                 state->nKeys,
851                                                                 workMem,
852                                                                 randomAccess);
853
854         state->comparetup = comparetup_cluster;
855         state->copytup = copytup_cluster;
856         state->writetup = writetup_cluster;
857         state->readtup = readtup_cluster;
858         state->abbrevNext = 10;
859
860         state->indexInfo = BuildIndexInfo(indexRel);
861
862         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
863
864         indexScanKey = _bt_mkscankey_nodata(indexRel);
865
866         if (state->indexInfo->ii_Expressions != NULL)
867         {
868                 TupleTableSlot *slot;
869                 ExprContext *econtext;
870
871                 /*
872                  * We will need to use FormIndexDatum to evaluate the index
873                  * expressions.  To do that, we need an EState, as well as a
874                  * TupleTableSlot to put the table tuples into.  The econtext's
875                  * scantuple has to point to that slot, too.
876                  */
877                 state->estate = CreateExecutorState();
878                 slot = MakeSingleTupleTableSlot(tupDesc);
879                 econtext = GetPerTupleExprContext(state->estate);
880                 econtext->ecxt_scantuple = slot;
881         }
882
883         /* Prepare SortSupport data for each column */
884         state->sortKeys = (SortSupport) palloc0(state->nKeys *
885                                                                                         sizeof(SortSupportData));
886
887         for (i = 0; i < state->nKeys; i++)
888         {
889                 SortSupport sortKey = state->sortKeys + i;
890                 ScanKey         scanKey = indexScanKey + i;
891                 int16           strategy;
892
893                 sortKey->ssup_cxt = CurrentMemoryContext;
894                 sortKey->ssup_collation = scanKey->sk_collation;
895                 sortKey->ssup_nulls_first =
896                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
897                 sortKey->ssup_attno = scanKey->sk_attno;
898                 /* Convey if abbreviation optimization is applicable in principle */
899                 sortKey->abbreviate = (i == 0);
900
901                 AssertState(sortKey->ssup_attno != 0);
902
903                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
904                         BTGreaterStrategyNumber : BTLessStrategyNumber;
905
906                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
907         }
908
909         _bt_freeskey(indexScanKey);
910
911         MemoryContextSwitchTo(oldcontext);
912
913         return state;
914 }
915
916 Tuplesortstate *
917 tuplesort_begin_index_btree(Relation heapRel,
918                                                         Relation indexRel,
919                                                         bool enforceUnique,
920                                                         int workMem, bool randomAccess)
921 {
922         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
923         ScanKey         indexScanKey;
924         MemoryContext oldcontext;
925         int                     i;
926
927         oldcontext = MemoryContextSwitchTo(state->sortcontext);
928
929 #ifdef TRACE_SORT
930         if (trace_sort)
931                 elog(LOG,
932                          "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
933                          enforceUnique ? 't' : 'f',
934                          workMem, randomAccess ? 't' : 'f');
935 #endif
936
937         state->nKeys = RelationGetNumberOfAttributes(indexRel);
938
939         TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
940                                                                 enforceUnique,
941                                                                 state->nKeys,
942                                                                 workMem,
943                                                                 randomAccess);
944
945         state->comparetup = comparetup_index_btree;
946         state->copytup = copytup_index;
947         state->writetup = writetup_index;
948         state->readtup = readtup_index;
949         state->abbrevNext = 10;
950
951         state->heapRel = heapRel;
952         state->indexRel = indexRel;
953         state->enforceUnique = enforceUnique;
954
955         indexScanKey = _bt_mkscankey_nodata(indexRel);
956         state->nKeys = RelationGetNumberOfAttributes(indexRel);
957
958         /* Prepare SortSupport data for each column */
959         state->sortKeys = (SortSupport) palloc0(state->nKeys *
960                                                                                         sizeof(SortSupportData));
961
962         for (i = 0; i < state->nKeys; i++)
963         {
964                 SortSupport sortKey = state->sortKeys + i;
965                 ScanKey         scanKey = indexScanKey + i;
966                 int16           strategy;
967
968                 sortKey->ssup_cxt = CurrentMemoryContext;
969                 sortKey->ssup_collation = scanKey->sk_collation;
970                 sortKey->ssup_nulls_first =
971                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
972                 sortKey->ssup_attno = scanKey->sk_attno;
973                 /* Convey if abbreviation optimization is applicable in principle */
974                 sortKey->abbreviate = (i == 0);
975
976                 AssertState(sortKey->ssup_attno != 0);
977
978                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
979                         BTGreaterStrategyNumber : BTLessStrategyNumber;
980
981                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
982         }
983
984         _bt_freeskey(indexScanKey);
985
986         MemoryContextSwitchTo(oldcontext);
987
988         return state;
989 }
990
991 Tuplesortstate *
992 tuplesort_begin_index_hash(Relation heapRel,
993                                                    Relation indexRel,
994                                                    uint32 hash_mask,
995                                                    int workMem, bool randomAccess)
996 {
997         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
998         MemoryContext oldcontext;
999
1000         oldcontext = MemoryContextSwitchTo(state->sortcontext);
1001
1002 #ifdef TRACE_SORT
1003         if (trace_sort)
1004                 elog(LOG,
1005                 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
1006                          hash_mask,
1007                          workMem, randomAccess ? 't' : 'f');
1008 #endif
1009
1010         state->nKeys = 1;                       /* Only one sort column, the hash code */
1011
1012         state->comparetup = comparetup_index_hash;
1013         state->copytup = copytup_index;
1014         state->writetup = writetup_index;
1015         state->readtup = readtup_index;
1016
1017         state->heapRel = heapRel;
1018         state->indexRel = indexRel;
1019
1020         state->hash_mask = hash_mask;
1021
1022         MemoryContextSwitchTo(oldcontext);
1023
1024         return state;
1025 }
1026
1027 Tuplesortstate *
1028 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
1029                                           bool nullsFirstFlag,
1030                                           int workMem, bool randomAccess)
1031 {
1032         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
1033         MemoryContext oldcontext;
1034         int16           typlen;
1035         bool            typbyval;
1036
1037         oldcontext = MemoryContextSwitchTo(state->sortcontext);
1038
1039 #ifdef TRACE_SORT
1040         if (trace_sort)
1041                 elog(LOG,
1042                          "begin datum sort: workMem = %d, randomAccess = %c",
1043                          workMem, randomAccess ? 't' : 'f');
1044 #endif
1045
1046         state->nKeys = 1;                       /* always a one-column sort */
1047
1048         TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
1049                                                                 false,  /* no unique check */
1050                                                                 1,
1051                                                                 workMem,
1052                                                                 randomAccess);
1053
1054         state->comparetup = comparetup_datum;
1055         state->copytup = copytup_datum;
1056         state->writetup = writetup_datum;
1057         state->readtup = readtup_datum;
1058         state->abbrevNext = 10;
1059
1060         state->datumType = datumType;
1061
1062         /* lookup necessary attributes of the datum type */
1063         get_typlenbyval(datumType, &typlen, &typbyval);
1064         state->datumTypeLen = typlen;
1065         state->tuples = !typbyval;
1066
1067         /* Prepare SortSupport data */
1068         state->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
1069
1070         state->sortKeys->ssup_cxt = CurrentMemoryContext;
1071         state->sortKeys->ssup_collation = sortCollation;
1072         state->sortKeys->ssup_nulls_first = nullsFirstFlag;
1073
1074         /*
1075          * Abbreviation is possible here only for by-reference types.  In theory,
1076          * a pass-by-value datatype could have an abbreviated form that is cheaper
1077          * to compare.  In a tuple sort, we could support that, because we can
1078          * always extract the original datum from the tuple if needed.  Here, we
1079          * can't, because a datum sort only stores a single copy of the datum; the
1080          * "tuple" field of each sortTuple is NULL.
1081          */
1082         state->sortKeys->abbreviate = !typbyval;
1083
1084         PrepareSortSupportFromOrderingOp(sortOperator, state->sortKeys);
1085
1086         /*
1087          * The "onlyKey" optimization cannot be used with abbreviated keys, since
1088          * tie-breaker comparisons may be required.  Typically, the optimization
1089          * is only of value to pass-by-value types anyway, whereas abbreviated
1090          * keys are typically only of value to pass-by-reference types.
1091          */
1092         if (!state->sortKeys->abbrev_converter)
1093                 state->onlyKey = state->sortKeys;
1094
1095         MemoryContextSwitchTo(oldcontext);
1096
1097         return state;
1098 }
1099
1100 /*
1101  * tuplesort_set_bound
1102  *
1103  *      Advise tuplesort that at most the first N result tuples are required.
1104  *
1105  * Must be called before inserting any tuples.  (Actually, we could allow it
1106  * as long as the sort hasn't spilled to disk, but there seems no need for
1107  * delayed calls at the moment.)
1108  *
1109  * This is a hint only. The tuplesort may still return more tuples than
1110  * requested.
1111  */
1112 void
1113 tuplesort_set_bound(Tuplesortstate *state, int64 bound)
1114 {
1115         /* Assert we're called before loading any tuples */
1116         Assert(state->status == TSS_INITIAL);
1117         Assert(state->memtupcount == 0);
1118         Assert(!state->bounded);
1119
1120 #ifdef DEBUG_BOUNDED_SORT
1121         /* Honor GUC setting that disables the feature (for easy testing) */
1122         if (!optimize_bounded_sort)
1123                 return;
1124 #endif
1125
1126         /* We want to be able to compute bound * 2, so limit the setting */
1127         if (bound > (int64) (INT_MAX / 2))
1128                 return;
1129
1130         state->bounded = true;
1131         state->bound = (int) bound;
1132
1133         /*
1134          * Bounded sorts are not an effective target for abbreviated key
1135          * optimization.  Disable by setting state to be consistent with no
1136          * abbreviation support.
1137          */
1138         state->sortKeys->abbrev_converter = NULL;
1139         if (state->sortKeys->abbrev_full_comparator)
1140                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
1141
1142         /* Not strictly necessary, but be tidy */
1143         state->sortKeys->abbrev_abort = NULL;
1144         state->sortKeys->abbrev_full_comparator = NULL;
1145 }
1146
1147 /*
1148  * tuplesort_end
1149  *
1150  *      Release resources and clean up.
1151  *
1152  * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
1153  * pointing to garbage.  Be careful not to attempt to use or free such
1154  * pointers afterwards!
1155  */
1156 void
1157 tuplesort_end(Tuplesortstate *state)
1158 {
1159         /* context swap probably not needed, but let's be safe */
1160         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1161
1162 #ifdef TRACE_SORT
1163         long            spaceUsed;
1164
1165         if (state->tapeset)
1166                 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
1167         else
1168                 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
1169 #endif
1170
1171         /*
1172          * Delete temporary "tape" files, if any.
1173          *
1174          * Note: want to include this in reported total cost of sort, hence need
1175          * for two #ifdef TRACE_SORT sections.
1176          */
1177         if (state->tapeset)
1178                 LogicalTapeSetClose(state->tapeset);
1179
1180 #ifdef TRACE_SORT
1181         if (trace_sort)
1182         {
1183                 if (state->tapeset)
1184                         elog(LOG, "external sort ended, %ld disk blocks used: %s",
1185                                  spaceUsed, pg_rusage_show(&state->ru_start));
1186                 else
1187                         elog(LOG, "internal sort ended, %ld KB used: %s",
1188                                  spaceUsed, pg_rusage_show(&state->ru_start));
1189         }
1190
1191         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
1192 #else
1193
1194         /*
1195          * If you disabled TRACE_SORT, you can still probe sort__done, but you
1196          * ain't getting space-used stats.
1197          */
1198         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
1199 #endif
1200
1201         /* Free any execution state created for CLUSTER case */
1202         if (state->estate != NULL)
1203         {
1204                 ExprContext *econtext = GetPerTupleExprContext(state->estate);
1205
1206                 ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
1207                 FreeExecutorState(state->estate);
1208         }
1209
1210         MemoryContextSwitchTo(oldcontext);
1211
1212         /*
1213          * Free the per-sort memory context, thereby releasing all working memory,
1214          * including the Tuplesortstate struct itself.
1215          */
1216         MemoryContextDelete(state->sortcontext);
1217 }
1218
1219 /*
1220  * Grow the memtuples[] array, if possible within our memory constraint.  We
1221  * must not exceed INT_MAX tuples in memory or the caller-provided memory
1222  * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
1223  *
1224  * Normally, at each increment we double the size of the array.  When doing
1225  * that would exceed a limit, we attempt one last, smaller increase (and then
1226  * clear the growmemtuples flag so we don't try any more).  That allows us to
1227  * use memory as fully as permitted; sticking to the pure doubling rule could
1228  * result in almost half going unused.  Because availMem moves around with
1229  * tuple addition/removal, we need some rule to prevent making repeated small
1230  * increases in memtupsize, which would just be useless thrashing.  The
1231  * growmemtuples flag accomplishes that and also prevents useless
1232  * recalculations in this function.
1233  */
1234 static bool
1235 grow_memtuples(Tuplesortstate *state)
1236 {
1237         int                     newmemtupsize;
1238         int                     memtupsize = state->memtupsize;
1239         int64           memNowUsed = state->allowedMem - state->availMem;
1240
1241         /* Forget it if we've already maxed out memtuples, per comment above */
1242         if (!state->growmemtuples)
1243                 return false;
1244
1245         /* Select new value of memtupsize */
1246         if (memNowUsed <= state->availMem)
1247         {
1248                 /*
1249                  * We've used no more than half of allowedMem; double our usage,
1250                  * clamping at INT_MAX tuples.
1251                  */
1252                 if (memtupsize < INT_MAX / 2)
1253                         newmemtupsize = memtupsize * 2;
1254                 else
1255                 {
1256                         newmemtupsize = INT_MAX;
1257                         state->growmemtuples = false;
1258                 }
1259         }
1260         else
1261         {
1262                 /*
1263                  * This will be the last increment of memtupsize.  Abandon doubling
1264                  * strategy and instead increase as much as we safely can.
1265                  *
1266                  * To stay within allowedMem, we can't increase memtupsize by more
1267                  * than availMem / sizeof(SortTuple) elements.  In practice, we want
1268                  * to increase it by considerably less, because we need to leave some
1269                  * space for the tuples to which the new array slots will refer.  We
1270                  * assume the new tuples will be about the same size as the tuples
1271                  * we've already seen, and thus we can extrapolate from the space
1272                  * consumption so far to estimate an appropriate new size for the
1273                  * memtuples array.  The optimal value might be higher or lower than
1274                  * this estimate, but it's hard to know that in advance.  We again
1275                  * clamp at INT_MAX tuples.
1276                  *
1277                  * This calculation is safe against enlarging the array so much that
1278                  * LACKMEM becomes true, because the memory currently used includes
1279                  * the present array; thus, there would be enough allowedMem for the
1280                  * new array elements even if no other memory were currently used.
1281                  *
1282                  * We do the arithmetic in float8, because otherwise the product of
1283                  * memtupsize and allowedMem could overflow.  Any inaccuracy in the
1284                  * result should be insignificant; but even if we computed a
1285                  * completely insane result, the checks below will prevent anything
1286                  * really bad from happening.
1287                  */
1288                 double          grow_ratio;
1289
1290                 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1291                 if (memtupsize * grow_ratio < INT_MAX)
1292                         newmemtupsize = (int) (memtupsize * grow_ratio);
1293                 else
1294                         newmemtupsize = INT_MAX;
1295
1296                 /* We won't make any further enlargement attempts */
1297                 state->growmemtuples = false;
1298         }
1299
1300         /* Must enlarge array by at least one element, else report failure */
1301         if (newmemtupsize <= memtupsize)
1302                 goto noalloc;
1303
1304         /*
1305          * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
1306          * to ensure our request won't be rejected.  Note that we can easily
1307          * exhaust address space before facing this outcome.  (This is presently
1308          * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1309          * don't rely on that at this distance.)
1310          */
1311         if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1312         {
1313                 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1314                 state->growmemtuples = false;   /* can't grow any more */
1315         }
1316
1317         /*
1318          * We need to be sure that we do not cause LACKMEM to become true, else
1319          * the space management algorithm will go nuts.  The code above should
1320          * never generate a dangerous request, but to be safe, check explicitly
1321          * that the array growth fits within availMem.  (We could still cause
1322          * LACKMEM if the memory chunk overhead associated with the memtuples
1323          * array were to increase.  That shouldn't happen because we chose the
1324          * initial array size large enough to ensure that palloc will be treating
1325          * both old and new arrays as separate chunks.  But we'll check LACKMEM
1326          * explicitly below just in case.)
1327          */
1328         if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1329                 goto noalloc;
1330
1331         /* OK, do it */
1332         FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1333         state->memtupsize = newmemtupsize;
1334         state->memtuples = (SortTuple *)
1335                 repalloc_huge(state->memtuples,
1336                                           state->memtupsize * sizeof(SortTuple));
1337         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1338         if (LACKMEM(state))
1339                 elog(ERROR, "unexpected out-of-memory situation in tuplesort");
1340         return true;
1341
1342 noalloc:
1343         /* If for any reason we didn't realloc, shut off future attempts */
1344         state->growmemtuples = false;
1345         return false;
1346 }
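
/*
 * Worked example of the final growth step above, with purely illustrative
 * numbers: if allowedMem is 64 MB, 40 MB of it is currently in use, and
 * memtupsize is 100,000 slots, then grow_ratio = 64/40 = 1.6 and the array
 * is enlarged to roughly 160,000 slots, on the assumption that future
 * tuples will be about the same size as those already seen.  growmemtuples
 * is cleared at the same time, so no further enlargement is attempted.
 */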
1347
1348 /*
1349  * Accept one tuple while collecting input data for sort.
1350  *
1351  * Note that the input data is always copied; the caller need not save it.
1352  */
1353 void
1354 tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
1355 {
1356         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1357         SortTuple       stup;
1358
1359         /*
1360          * Copy the given tuple into memory we control, and decrease availMem.
1361          * Then call the common code.
1362          */
1363         COPYTUP(state, &stup, (void *) slot);
1364
1365         puttuple_common(state, &stup);
1366
1367         MemoryContextSwitchTo(oldcontext);
1368 }
1369
1370 /*
1371  * Accept one tuple while collecting input data for sort.
1372  *
1373  * Note that the input data is always copied; the caller need not save it.
1374  */
1375 void
1376 tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
1377 {
1378         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1379         SortTuple       stup;
1380
1381         /*
1382          * Copy the given tuple into memory we control, and decrease availMem.
1383          * Then call the common code.
1384          */
1385         COPYTUP(state, &stup, (void *) tup);
1386
1387         puttuple_common(state, &stup);
1388
1389         MemoryContextSwitchTo(oldcontext);
1390 }
1391
1392 /*
1393  * Collect one index tuple while collecting input data for sort, building
1394  * it from caller-supplied values.
1395  */
1396 void
1397 tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
1398                                                           ItemPointer self, Datum *values,
1399                                                           bool *isnull)
1400 {
1401         MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
1402         SortTuple       stup;
1403         Datum           original;
1404         IndexTuple      tuple;
1405
1406         stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull);
1407         tuple = ((IndexTuple) stup.tuple);
1408         tuple->t_tid = *self;
1409         USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1410         /* set up first-column key value */
1411         original = index_getattr(tuple,
1412                                                          1,
1413                                                          RelationGetDescr(state->indexRel),
1414                                                          &stup.isnull1);
1415
1416         MemoryContextSwitchTo(state->sortcontext);
1417
1418         if (!state->sortKeys || !state->sortKeys->abbrev_converter || stup.isnull1)
1419         {
1420                 /*
1421                  * Store the ordinary Datum representation, or the NULL value.  If
1422                  * there is a converter it won't expect NULL values, and the cost
1423                  * model is not required to account for NULLs, so in that case we
1424                  * avoid calling the converter and just set datum1 to the zeroed
1425                  * representation (to be consistent, and to support cheap inequality
1426                  * tests for NULL abbreviated keys).
1427                  */
1428                 stup.datum1 = original;
1429         }
1430         else if (!consider_abort_common(state))
1431         {
1432                 /* Store abbreviated key representation */
1433                 stup.datum1 = state->sortKeys->abbrev_converter(original,
1434                                                                                                                 state->sortKeys);
1435         }
1436         else
1437         {
1438                 /* Abort abbreviation */
1439                 int                     i;
1440
1441                 stup.datum1 = original;
1442
1443                 /*
1444                  * Set state to be consistent with never trying abbreviation.
1445                  *
1446                  * Alter datum1 representation in already-copied tuples, so as to
1447                  * ensure a consistent representation (current tuple was just
1448                  * handled).  It does not matter if some dumped tuples are already
1449                  * sorted on tape, since serialized tuples lack abbreviated keys
1450                  * (TSS_BUILDRUNS state prevents control reaching here in any case).
1451                  */
1452                 for (i = 0; i < state->memtupcount; i++)
1453                 {
1454                         SortTuple  *mtup = &state->memtuples[i];
1455
1456                         tuple = mtup->tuple;
1457                         mtup->datum1 = index_getattr(tuple,
1458                                                                                  1,
1459                                                                                  RelationGetDescr(state->indexRel),
1460                                                                                  &mtup->isnull1);
1461                 }
1462         }
1463
1464         puttuple_common(state, &stup);
1465
1466         MemoryContextSwitchTo(oldcontext);
1467 }
1468
1469 /*
1470  * Accept one Datum while collecting input data for sort.
1471  *
1472  * If the Datum is pass-by-ref type, the value will be copied.
1473  */
1474 void
1475 tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
1476 {
1477         MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
1478         SortTuple       stup;
1479
1480         /*
1481          * Pass-by-value types or null values are just stored directly in
1482          * stup.datum1 (and stup.tuple is not used and set to NULL).
1483          *
1484          * Non-null pass-by-reference values need to be copied into memory we
1485          * control, and possibly abbreviated. The copied value is pointed to by
1486          * stup.tuple and is treated as the canonical copy (e.g. to return via
1487          * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
1488          * abbreviated value if abbreviation is happening, otherwise it's
1489          * identical to stup.tuple.
1490          */
1491
1492         if (isNull || !state->tuples)
1493         {
1494                 /*
1495                  * Set datum1 to zeroed representation for NULLs (to be consistent,
1496                  * and to support cheap inequality tests for NULL abbreviated keys).
1497                  */
1498                 stup.datum1 = !isNull ? val : (Datum) 0;
1499                 stup.isnull1 = isNull;
1500                 stup.tuple = NULL;              /* no separate storage */
1501                 MemoryContextSwitchTo(state->sortcontext);
1502         }
1503         else
1504         {
1505                 Datum           original = datumCopy(val, false, state->datumTypeLen);
1506
1507                 stup.isnull1 = false;
1508                 stup.tuple = DatumGetPointer(original);
1509                 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1510                 MemoryContextSwitchTo(state->sortcontext);
1511
1512                 if (!state->sortKeys->abbrev_converter)
1513                 {
1514                         stup.datum1 = original;
1515                 }
1516                 else if (!consider_abort_common(state))
1517                 {
1518                         /* Store abbreviated key representation */
1519                         stup.datum1 = state->sortKeys->abbrev_converter(original,
1520                                                                                                                         state->sortKeys);
1521                 }
1522                 else
1523                 {
1524                         /* Abort abbreviation */
1525                         int                     i;
1526
1527                         stup.datum1 = original;
1528
1529                         /*
1530                          * Set state to be consistent with never trying abbreviation.
1531                          *
1532                          * Alter datum1 representation in already-copied tuples, so as to
1533                          * ensure a consistent representation (current tuple was just
1534                          * handled).  It does not matter if some dumped tuples are already
1535                          * sorted on tape, since serialized tuples lack abbreviated keys
1536                          * (TSS_BUILDRUNS state prevents control reaching here in any
1537                          * case).
1538                          */
1539                         for (i = 0; i < state->memtupcount; i++)
1540                         {
1541                                 SortTuple  *mtup = &state->memtuples[i];
1542
1543                                 mtup->datum1 = PointerGetDatum(mtup->tuple);
1544                         }
1545                 }
1546         }
1547
1548         puttuple_common(state, &stup);
1549
1550         MemoryContextSwitchTo(oldcontext);
1551 }
1552
1553 /*
1554  * Shared code for tuple and datum cases.
1555  */
1556 static void
1557 puttuple_common(Tuplesortstate *state, SortTuple *tuple)
1558 {
1559         switch (state->status)
1560         {
1561                 case TSS_INITIAL:
1562
1563                         /*
1564                          * Save the tuple into the unsorted array.  First, grow the array
1565                          * as needed.  Note that we try to grow the array when there is
1566                          * still one free slot remaining --- if we fail, there'll still be
1567                          * room to store the incoming tuple, and then we'll switch to
1568                          * tape-based operation.
1569                          */
1570                         if (state->memtupcount >= state->memtupsize - 1)
1571                         {
1572                                 (void) grow_memtuples(state);
1573                                 Assert(state->memtupcount < state->memtupsize);
1574                         }
1575                         state->memtuples[state->memtupcount++] = *tuple;
1576
1577                         /*
1578                          * Check if it's time to switch over to a bounded heapsort. We do
1579                          * so if the input tuple count exceeds twice the desired tuple
1580                          * count (this is a heuristic for where heapsort becomes cheaper
1581                          * than a quicksort), or if we've just filled workMem and have
1582                          * enough tuples to meet the bound.
1583                          *
1584                          * Note that once we enter TSS_BOUNDED state we will always try to
1585                          * complete the sort that way.  In the worst case, if later input
1586                          * tuples are larger than earlier ones, this might cause us to
1587                          * exceed workMem significantly.
1588                          */
1589                         if (state->bounded &&
1590                                 (state->memtupcount > state->bound * 2 ||
1591                                  (state->memtupcount > state->bound && LACKMEM(state))))
1592                         {
1593 #ifdef TRACE_SORT
1594                                 if (trace_sort)
1595                                         elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1596                                                  state->memtupcount,
1597                                                  pg_rusage_show(&state->ru_start));
1598 #endif
1599                                 make_bounded_heap(state);
1600                                 return;
1601                         }
1602
1603                         /*
1604                          * Done if we still fit in available memory and have array slots.
1605                          */
1606                         if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1607                                 return;
1608
1609                         /*
1610                          * Nope; time to switch to tape-based operation.
1611                          */
1612                         inittapes(state);
1613
1614                         /*
1615                          * Dump tuples until we are back under the limit.
1616                          */
1617                         dumptuples(state, false);
1618                         break;
1619
1620                 case TSS_BOUNDED:
1621
1622                         /*
1623                          * We don't want to grow the array here, so check whether the new
1624                          * tuple can be discarded before putting it in.  This should be a
1625                          * good speed optimization, too, since when there are many more
1626                          * input tuples than the bound, most input tuples can be discarded
1627                          * with just this one comparison.  Note that because we currently
1628                          * have the sort direction reversed, we must check for <= not >=.
1629                          */
1630                         if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1631                         {
1632                                 /* new tuple <= top of the heap, so we can discard it */
1633                                 free_sort_tuple(state, tuple);
1634                                 CHECK_FOR_INTERRUPTS();
1635                         }
1636                         else
1637                         {
1638                                 /* discard top of heap, replacing it with the new tuple */
1639                                 free_sort_tuple(state, &state->memtuples[0]);
1640                                 tuple->tupindex = 0;    /* not used */
1641                                 tuplesort_heap_replace_top(state, tuple, false);
1642                         }
1643                         break;
1644
1645                 case TSS_BUILDRUNS:
1646
1647                         /*
1648                          * Insert the tuple into the heap, with run number currentRun if
1649                          * it can go into the current run, else HEAP_RUN_NEXT.  The tuple
1650                          * can go into the current run if it is >= the first
1651                          * not-yet-output tuple.  (Actually, it could go into the current
1652                          * run if it is >= the most recently output tuple ... but that
1653                          * would require keeping around the tuple we last output, and it's
1654                          * simplest to let writetup free each tuple as soon as it's
1655                          * written.)
1656                          *
1657                          * Note that this only applies when:
1658                          *
1659                          * - currentRun is RUN_FIRST
1660                          *
1661                          * - Replacement selection is in use (typically it is never used).
1662                          *
1663                          * When these two conditions are not both true, all tuples are
1664                          * appended indifferently, much like the TSS_INITIAL case.
1665                          *
1666                          * There should always be room to store the incoming tuple.
1667                          */
1668                         Assert(!state->replaceActive || state->memtupcount > 0);
1669                         if (state->replaceActive &&
1670                                 COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1671                         {
1672                                 Assert(state->currentRun == RUN_FIRST);
1673
1674                                 /*
1675                                  * Insert tuple into first, fully heapified run.
1676                                  *
1677                                  * Unlike classic replacement selection, which this module was
1678                                  * previously based on, only RUN_FIRST tuples are fully
1679                                  * heapified.  Any second/next run tuples are appended
1680                                  * indifferently.  While HEAP_RUN_NEXT tuples may be sifted
1681                                  * out of the way of first run tuples, COMPARETUP() will never
1682                                  * be called for the run's tuples during sifting (only our
1683                                  * initial COMPARETUP() call is required for the tuple, to
1684                                  * determine that the tuple does not belong in RUN_FIRST).
1685                                  */
1686                                 tuple->tupindex = state->currentRun;
1687                                 tuplesort_heap_insert(state, tuple, true);
1688                         }
1689                         else
1690                         {
1691                                 /*
1692                                  * Tuple was determined to not belong to heapified RUN_FIRST,
1693                                  * or replacement selection not in play.  Append the tuple to
1694                                  * memtuples indifferently.
1695                                  *
1696                                  * dumptuples() does not trust that the next run's tuples are
1697                                  * heapified.  Anything past the first run will always be
1698                                  * quicksorted even when replacement selection is initially
1699                                  * used.  (When it's never used, every tuple still takes this
1700                                  * path.)
1701                                  */
1702                                 tuple->tupindex = HEAP_RUN_NEXT;
1703                                 state->memtuples[state->memtupcount++] = *tuple;
1704                         }
1705
1706                         /*
1707                          * If we are over the memory limit, dump tuples till we're under.
1708                          */
1709                         dumptuples(state, false);
1710                         break;
1711
1712                 default:
1713                         elog(ERROR, "invalid tuplesort state");
1714                         break;
1715         }
1716 }
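
/*
 * Summary of the transitions driven above: a sort begins in TSS_INITIAL and
 * accumulates tuples in memtuples[].  If a bound was set and the input
 * overshoots it, make_bounded_heap() switches us to TSS_BOUNDED, where each
 * new tuple either replaces the heap top or is discarded.  If instead we
 * run out of memory or array slots, inittapes() switches us to
 * TSS_BUILDRUNS, and dumptuples() spills tuples to tape from then on.
 */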
1717
1718 static bool
1719 consider_abort_common(Tuplesortstate *state)
1720 {
1721         Assert(state->sortKeys[0].abbrev_converter != NULL);
1722         Assert(state->sortKeys[0].abbrev_abort != NULL);
1723         Assert(state->sortKeys[0].abbrev_full_comparator != NULL);
1724
1725         /*
1726          * Check effectiveness of abbreviation optimization.  Consider aborting
1727          * when still within memory limit.
1728          */
1729         if (state->status == TSS_INITIAL &&
1730                 state->memtupcount >= state->abbrevNext)
1731         {
1732                 state->abbrevNext *= 2;
1733
1734                 /*
1735                  * Check opclass-supplied abbreviation abort routine.  It may indicate
1736                  * that abbreviation should not proceed.
1737                  */
1738                 if (!state->sortKeys->abbrev_abort(state->memtupcount,
1739                                                                                    state->sortKeys))
1740                         return false;
1741
1742                 /*
1743                  * Finally, restore authoritative comparator, and indicate that
1744                  * abbreviation is not in play by setting abbrev_converter to NULL
1745                  */
1746                 state->sortKeys[0].comparator = state->sortKeys[0].abbrev_full_comparator;
1747                 state->sortKeys[0].abbrev_converter = NULL;
1748                 /* Not strictly necessary, but be tidy */
1749                 state->sortKeys[0].abbrev_abort = NULL;
1750                 state->sortKeys[0].abbrev_full_comparator = NULL;
1751
1752                 /* Give up - expect original pass-by-value representation */
1753                 return true;
1754         }
1755
1756         return false;
1757 }
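
/*
 * Because abbrevNext doubles each time the check above runs, the abort
 * decision is revisited only at a geometric series of tuple counts (for
 * example 10, 20, 40, ... when abbrevNext starts out at the small constant
 * used by the tuplesort_begin_* routines), keeping the amortized cost of
 * re-evaluating abbreviation effectiveness negligible.
 */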
1758
1759 /*
1760  * All tuples have been provided; finish the sort.
1761  */
1762 void
1763 tuplesort_performsort(Tuplesortstate *state)
1764 {
1765         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1766
1767 #ifdef TRACE_SORT
1768         if (trace_sort)
1769                 elog(LOG, "performsort starting: %s",
1770                          pg_rusage_show(&state->ru_start));
1771 #endif
1772
1773         switch (state->status)
1774         {
1775                 case TSS_INITIAL:
1776
1777                         /*
1778                          * We were able to accumulate all the tuples within the allowed
1779                          * amount of memory.  Just qsort 'em and we're done.
1780                          */
1781                         tuplesort_sort_memtuples(state);
1782                         state->current = 0;
1783                         state->eof_reached = false;
1784                         state->markpos_offset = 0;
1785                         state->markpos_eof = false;
1786                         state->status = TSS_SORTEDINMEM;
1787                         break;
1788
1789                 case TSS_BOUNDED:
1790
1791                         /*
1792                          * We were able to accumulate all the tuples required for output
1793                          * in memory, using a heap to eliminate excess tuples.  Now we
1794                          * have to transform the heap to a properly-sorted array.
1795                          */
1796                         sort_bounded_heap(state);
1797                         state->current = 0;
1798                         state->eof_reached = false;
1799                         state->markpos_offset = 0;
1800                         state->markpos_eof = false;
1801                         state->status = TSS_SORTEDINMEM;
1802                         break;
1803
1804                 case TSS_BUILDRUNS:
1805
1806                         /*
1807                          * Finish tape-based sort.  First, flush all tuples remaining in
1808                          * memory out to tape; then merge until we have a single remaining
1809                          * run (or, if !randomAccess, one run per tape). Note that
1810                          * mergeruns sets the correct state->status.
1811                          */
1812                         dumptuples(state, true);
1813                         mergeruns(state);
1814                         state->eof_reached = false;
1815                         state->markpos_block = 0L;
1816                         state->markpos_offset = 0;
1817                         state->markpos_eof = false;
1818                         break;
1819
1820                 default:
1821                         elog(ERROR, "invalid tuplesort state");
1822                         break;
1823         }
1824
1825 #ifdef TRACE_SORT
1826         if (trace_sort)
1827         {
1828                 if (state->status == TSS_FINALMERGE)
1829                         elog(LOG, "performsort done (except %d-way final merge): %s",
1830                                  state->activeTapes,
1831                                  pg_rusage_show(&state->ru_start));
1832                 else
1833                         elog(LOG, "performsort done: %s",
1834                                  pg_rusage_show(&state->ru_start));
1835         }
1836 #endif
1837
1838         MemoryContextSwitchTo(oldcontext);
1839 }
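
/*
 * After tuplesort_performsort() returns, status is TSS_SORTEDINMEM for an
 * in-memory (or bounded) sort; for an external sort, mergeruns() has left
 * it at TSS_SORTEDONTAPE (all tuples in a single run on tape) or
 * TSS_FINALMERGE (the final merge is performed on the fly as tuples are
 * fetched).  The gettuple routines below dispatch on exactly these states.
 */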
1840
1841 /*
1842  * Internal routine to fetch the next tuple in either forward or back
1843  * direction into *stup.  Returns FALSE if no more tuples.
1844  * If *should_free is set, the caller must pfree stup.tuple when done with it.
1845  * Otherwise, caller should not use tuple following next call here.
1846  */
1847 static bool
1848 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
1849                                                   SortTuple *stup, bool *should_free)
1850 {
1851         unsigned int tuplen;
1852
1853         switch (state->status)
1854         {
1855                 case TSS_SORTEDINMEM:
1856                         Assert(forward || state->randomAccess);
1857                         Assert(!state->slabAllocatorUsed);
1858                         *should_free = false;
1859                         if (forward)
1860                         {
1861                                 if (state->current < state->memtupcount)
1862                                 {
1863                                         *stup = state->memtuples[state->current++];
1864                                         return true;
1865                                 }
1866                                 state->eof_reached = true;
1867
1868                                 /*
1869                                  * Complain if caller tries to retrieve more tuples than
1870                                  * originally asked for in a bounded sort.  This is because
1871                                  * returning EOF here might be the wrong thing.
1872                                  */
1873                                 if (state->bounded && state->current >= state->bound)
1874                                         elog(ERROR, "retrieved too many tuples in a bounded sort");
1875
1876                                 return false;
1877                         }
1878                         else
1879                         {
1880                                 if (state->current <= 0)
1881                                         return false;
1882
1883                                 /*
1884                                  * If all tuples have already been fetched, return the last
1885                                  * tuple; otherwise, the tuple before the last one returned.
1886                                  */
1887                                 if (state->eof_reached)
1888                                         state->eof_reached = false;
1889                                 else
1890                                 {
1891                                         state->current--;       /* last returned tuple */
1892                                         if (state->current <= 0)
1893                                                 return false;
1894                                 }
1895                                 *stup = state->memtuples[state->current - 1];
1896                                 return true;
1897                         }
1898                         break;
1899
1900                 case TSS_SORTEDONTAPE:
1901                         Assert(forward || state->randomAccess);
1902                         Assert(state->slabAllocatorUsed);
1903
1904                         /*
1905                          * The slot that held the tuple that we returned in previous
1906                          * gettuple call can now be reused.
1907                          */
1908                         if (state->lastReturnedTuple)
1909                         {
1910                                 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1911                                 state->lastReturnedTuple = NULL;
1912                         }
1913
1914                         if (forward)
1915                         {
1916                                 if (state->eof_reached)
1917                                         return false;
1918
1919                                 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1920                                 {
1921                                         READTUP(state, stup, state->result_tape, tuplen);
1922
1923                                         /*
1924                                          * Remember the tuple we return, so that we can recycle
1925                                          * its memory on next call.  (This can be NULL, in the
1926                                          * !state->tuples case).
1927                                          */
1928                                         state->lastReturnedTuple = stup->tuple;
1929
1930                                         *should_free = false;
1931                                         return true;
1932                                 }
1933                                 else
1934                                 {
1935                                         state->eof_reached = true;
1936                                         return false;
1937                                 }
1938                         }
1939
1940                         /*
1941                          * Backward.
1942                          *
1943                          * If all tuples have already been fetched, return the last tuple;
1944                          * otherwise, the tuple before the last one returned.
1945                          */
1946                         if (state->eof_reached)
1947                         {
1948                                 /*
1949                                  * Seek position is pointing just past the zero tuplen at the
1950                                  * end of file; back up to fetch last tuple's ending length
1951                                  * word.  If seek fails we must have a completely empty file.
1952                                  */
1953                                 if (!LogicalTapeBackspace(state->tapeset,
1954                                                                                   state->result_tape,
1955                                                                                   2 * sizeof(unsigned int)))
1956                                         return false;
1957                                 state->eof_reached = false;
1958                         }
1959                         else
1960                         {
1961                                 /*
1962                                  * Back up and fetch previously-returned tuple's ending length
1963                                  * word.  If seek fails, assume we are at start of file.
1964                                  */
1965                                 if (!LogicalTapeBackspace(state->tapeset,
1966                                                                                   state->result_tape,
1967                                                                                   sizeof(unsigned int)))
1968                                         return false;
1969                                 tuplen = getlen(state, state->result_tape, false);
1970
1971                                 /*
1972                                  * Back up to get ending length word of tuple before it.
1973                                  */
1974                                 if (!LogicalTapeBackspace(state->tapeset,
1975                                                                                   state->result_tape,
1976                                                                                   tuplen + 2 * sizeof(unsigned int)))
1977                                 {
1978                                         /*
1979                                          * If that fails, presumably the prev tuple is the first
1980                                          * in the file.  Back up so that it becomes the next to read
1981                                          * in forward direction (not obviously right, but that is
1982                                          * what the in-memory case does).
1983                                          */
1984                                         if (!LogicalTapeBackspace(state->tapeset,
1985                                                                                           state->result_tape,
1986                                                                                           tuplen + sizeof(unsigned int)))
1987                                                 elog(ERROR, "bogus tuple length in backward scan");
1988                                         return false;
1989                                 }
1990                         }
1991
1992                         tuplen = getlen(state, state->result_tape, false);
1993
1994                         /*
1995                          * Now we have the length of the prior tuple, back up and read it.
1996                          * Note: READTUP expects we are positioned after the initial
1997                          * length word of the tuple, so back up to that point.
1998                          */
1999                         if (!LogicalTapeBackspace(state->tapeset,
2000                                                                           state->result_tape,
2001                                                                           tuplen))
2002                                 elog(ERROR, "bogus tuple length in backward scan");
2003                         READTUP(state, stup, state->result_tape, tuplen);
2004
2005                         /*
2006                          * Remember the tuple we return, so that we can recycle its memory
2007                          * on next call. (This can be NULL, in the Datum case).
2008                          */
2009                         state->lastReturnedTuple = stup->tuple;
2010
2011                         *should_free = false;
2012                         return true;
2013
2014                 case TSS_FINALMERGE:
2015                         Assert(forward);
2016                         /* We are managing memory ourselves, with the slab allocator. */
2017                         Assert(state->slabAllocatorUsed);
2018                         *should_free = false;
2019
2020                         /*
2021                          * The slab slot holding the tuple that we returned in previous
2022                          * gettuple call can now be reused.
2023                          */
2024                         if (state->lastReturnedTuple)
2025                         {
2026                                 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
2027                                 state->lastReturnedTuple = NULL;
2028                         }
2029
2030                         /*
2031                          * This code should match the inner loop of mergeonerun().
2032                          */
2033                         if (state->memtupcount > 0)
2034                         {
2035                                 int                     srcTape = state->memtuples[0].tupindex;
2036                                 SortTuple       newtup;
2037
2038                                 *stup = state->memtuples[0];
2039
2040                                 /*
2041                                  * Remember the tuple we return, so that we can recycle its
2042                                  * memory on next call. (This can be NULL, in the Datum case).
2043                                  */
2044                                 state->lastReturnedTuple = stup->tuple;
2045
2046                                 /*
2047                                  * Pull next tuple from tape, and replace the returned tuple
2048                                  * at top of the heap with it.
2049                                  */
2050                                 if (!mergereadnext(state, srcTape, &newtup))
2051                                 {
2052                                         /*
2053                                          * If no more data, we've reached end of run on this tape.
2054                                          * Remove the top node from the heap.
2055                                          */
2056                                         tuplesort_heap_delete_top(state, false);
2057
2058                                         /*
2059                                          * Rewind to free the read buffer.  It'd go away at the
2060                                          * end of the sort anyway, but better to release the
2061                                          * memory early.
2062                                          */
2063                                         LogicalTapeRewindForWrite(state->tapeset, srcTape);
2064                                         return true;
2065                                 }
2066                                 newtup.tupindex = srcTape;
2067                                 tuplesort_heap_replace_top(state, &newtup, false);
2068                                 return true;
2069                         }
2070                         return false;
2071
2072                 default:
2073                         elog(ERROR, "invalid tuplesort state");
2074                         return false;           /* keep compiler quiet */
2075         }
2076 }
2077
2078 /*
2079  * Fetch the next tuple in either forward or back direction.
2080  * If successful, put tuple in slot and return TRUE; else, clear the slot
2081  * and return FALSE.
2082  *
2083  * Caller may optionally be passed back abbreviated value (on TRUE return
2084  * value) when abbreviation was used, which can be used to cheaply avoid
2085  * equality checks that might otherwise be required.  Caller can safely make a
2086  * determination of "non-equal tuple" based on simple binary inequality.  A
2087  * NULL value in leading attribute will set abbreviated value to zeroed
2088  * representation, which caller may rely on in abbreviated inequality check.
2089  *
2090  * The slot receives a copied tuple (sometimes allocated in caller memory
2091  * context) that will stay valid regardless of future manipulations of the
2092  * tuplesort's state.
2093  */
2094 bool
2095 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
2096                                            TupleTableSlot *slot, Datum *abbrev)
2097 {
2098         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2099         SortTuple       stup;
2100         bool            should_free;
2101
2102         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
2103                 stup.tuple = NULL;
2104
2105         MemoryContextSwitchTo(oldcontext);
2106
2107         if (stup.tuple)
2108         {
2109                 /* Record abbreviated key for caller */
2110                 if (state->sortKeys->abbrev_converter && abbrev)
2111                         *abbrev = stup.datum1;
2112
2113                 if (!should_free)
2114                 {
2115                         stup.tuple = heap_copy_minimal_tuple((MinimalTuple) stup.tuple);
2116                         should_free = true;
2117                 }
2118                 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
2119                 return true;
2120         }
2121         else
2122         {
2123                 ExecClearTuple(slot);
2124                 return false;
2125         }
2126 }
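
/*
 * Typical consumption loop (sketch; "slot" is a caller-provided
 * TupleTableSlot, and passing NULL for the abbreviated-key pointer simply
 * forgoes that extra information):
 *
 *     while (tuplesort_gettupleslot(state, true, slot, NULL))
 *     {
 *         process the slot here; the copied tuple stays valid regardless
 *         of further manipulation of the tuplesort state
 *     }
 */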
2127
2128 /*
2129  * Fetch the next tuple in either forward or back direction.
2130  * Returns NULL if no more tuples.  If *should_free is set, the
2131  * caller must pfree the returned tuple when done with it.
2132  * If it is not set, caller should not use tuple following next
2133  * call here.
2134  */
2135 HeapTuple
2136 tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
2137 {
2138         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2139         SortTuple       stup;
2140
2141         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
2142                 stup.tuple = NULL;
2143
2144         MemoryContextSwitchTo(oldcontext);
2145
2146         return stup.tuple;
2147 }
2148
2149 /*
2150  * Fetch the next index tuple in either forward or back direction.
2151  * Returns NULL if no more tuples.  If *should_free is set, the
2152  * caller must pfree the returned tuple when done with it.
2153  * If it is not set, caller should not use tuple following next
2154  * call here.
2155  */
2156 IndexTuple
2157 tuplesort_getindextuple(Tuplesortstate *state, bool forward,
2158                                                 bool *should_free)
2159 {
2160         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2161         SortTuple       stup;
2162
2163         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
2164                 stup.tuple = NULL;
2165
2166         MemoryContextSwitchTo(oldcontext);
2167
2168         return (IndexTuple) stup.tuple;
2169 }
2170
2171 /*
2172  * Fetch the next Datum in either forward or back direction.
2173  * Returns FALSE if no more datums.
2174  *
2175  * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
2176  * and is now owned by the caller.
2177  *
2178  * Caller may optionally be passed back abbreviated value (on TRUE return
2179  * value) when abbreviation was used, which can be used to cheaply avoid
2180  * equality checks that might otherwise be required.  Caller can safely make a
2181  * determination of "non-equal tuple" based on simple binary inequality.  A
2182  * NULL value will have a zeroed abbreviated value representation, which caller
2183  * may rely on in abbreviated inequality check.
2184  */
2185 bool
2186 tuplesort_getdatum(Tuplesortstate *state, bool forward,
2187                                    Datum *val, bool *isNull, Datum *abbrev)
2188 {
2189         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2190         SortTuple       stup;
2191         bool            should_free;
2192
2193         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
2194         {
2195                 MemoryContextSwitchTo(oldcontext);
2196                 return false;
2197         }
2198
2199         /* Record abbreviated key for caller */
2200         if (state->sortKeys->abbrev_converter && abbrev)
2201                 *abbrev = stup.datum1;
2202
2203         if (stup.isnull1 || !state->tuples)
2204         {
2205                 *val = stup.datum1;
2206                 *isNull = stup.isnull1;
2207         }
2208         else
2209         {
2210                 /* use stup.tuple because stup.datum1 may be an abbreviation */
2211
2212                 if (should_free)
2213                         *val = PointerGetDatum(stup.tuple);
2214                 else
2215                         *val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen);
2216                 *isNull = false;
2217         }
2218
2219         MemoryContextSwitchTo(oldcontext);
2220
2221         return true;
2222 }
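
#ifdef TUPLESORT_DATUM_EXAMPLE
/*
 * Minimal sketch of a complete datum-sort round trip, not compiled by
 * default.  The type and operator OIDs used below (INT4OID,
 * Int4LessOperator) are illustrative assumptions about what a caller would
 * supply, and assume the usual catalog headers (catalog/pg_type.h,
 * catalog/pg_operator.h); any sortable type with a suitable "<" operator
 * works the same way.
 */
static void
tuplesort_datum_example(void)
{
        Tuplesortstate *ts;
        Datum           val;
        bool            isnull;
        int                     i;

        /* begin a datum sort in ascending order, no random access needed */
        ts = tuplesort_begin_datum(INT4OID, Int4LessOperator, InvalidOid,
                                                           false, work_mem, false);

        /* feed values in descending order */
        for (i = 100; i > 0; i--)
                tuplesort_putdatum(ts, Int32GetDatum(i), false);

        tuplesort_performsort(ts);

        /* values now come back ascending: 1, 2, ..., 100 */
        while (tuplesort_getdatum(ts, true, &val, &isnull, NULL))
        {
                Assert(!isnull);
        }

        tuplesort_end(ts);
}
#endif   /* TUPLESORT_DATUM_EXAMPLE */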
2223
2224 /*
2225  * Advance over N tuples in either forward or back direction,
2226  * without returning any data.  N==0 is a no-op.
2227  * Returns TRUE if successful, FALSE if ran out of tuples.
2228  */
2229 bool
2230 tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
2231 {
2232         MemoryContext oldcontext;
2233
2234         /*
2235          * We don't actually support backwards skip yet, because no callers need
2236          * it.  The API is designed to allow for that later, though.
2237          */
2238         Assert(forward);
2239         Assert(ntuples >= 0);
2240
2241         switch (state->status)
2242         {
2243                 case TSS_SORTEDINMEM:
2244                         if (state->memtupcount - state->current >= ntuples)
2245                         {
2246                                 state->current += ntuples;
2247                                 return true;
2248                         }
2249                         state->current = state->memtupcount;
2250                         state->eof_reached = true;
2251
2252                         /*
2253                          * Complain if caller tries to retrieve more tuples than
2254                          * originally asked for in a bounded sort.  This is because
2255                          * returning EOF here might be the wrong thing.
2256                          */
2257                         if (state->bounded && state->current >= state->bound)
2258                                 elog(ERROR, "retrieved too many tuples in a bounded sort");
2259
2260                         return false;
2261
2262                 case TSS_SORTEDONTAPE:
2263                 case TSS_FINALMERGE:
2264
2265                         /*
2266                          * We could probably optimize these cases better, but for now it's
2267                          * not worth the trouble.
2268                          */
2269                         oldcontext = MemoryContextSwitchTo(state->sortcontext);
2270                         while (ntuples-- > 0)
2271                         {
2272                                 SortTuple       stup;
2273                                 bool            should_free;
2274
2275                                 if (!tuplesort_gettuple_common(state, forward,
2276                                                                                            &stup, &should_free))
2277                                 {
2278                                         MemoryContextSwitchTo(oldcontext);
2279                                         return false;
2280                                 }
2281                                 if (should_free && stup.tuple)
2282                                         pfree(stup.tuple);
2283                                 CHECK_FOR_INTERRUPTS();
2284                         }
2285                         MemoryContextSwitchTo(oldcontext);
2286                         return true;
2287
2288                 default:
2289                         elog(ERROR, "invalid tuplesort state");
2290                         return false;           /* keep compiler quiet */
2291         }
2292 }
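
/*
 * Example use of tuplesort_skiptuples(): a caller that needs only the K-th
 * smallest value (say, an ordered-set aggregate computing a discrete
 * percentile) can perform the sort, call tuplesort_skiptuples(state, K - 1,
 * true), and then fetch a single tuple.  In the in-memory case this just
 * advances the read position; on tape, the skipped tuples are still read
 * but immediately discarded.
 */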
2293
2294 /*
2295  * tuplesort_merge_order - report merge order we'll use for given memory
2296  * (note: "merge order" just means the number of input tapes in the merge).
2297  *
2298  * This is exported for use by the planner.  allowedMem is in bytes.
2299  */
2300 int
2301 tuplesort_merge_order(int64 allowedMem)
2302 {
2303         int                     mOrder;
2304
2305         /*
2306          * We need one tape for each merge input, plus another one for the output,
2307          * and each of these tapes needs buffer space.  In addition we want
2308          * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
2309          * count).
2310          *
2311          * Note: you might be thinking we need to account for the memtuples[]
2312          * array in this calculation, but we effectively treat that as part of the
2313          * MERGE_BUFFER_SIZE workspace.
2314          */
2315         mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
2316                 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
2317
2318         /*
2319          * Even in minimum memory, use at least a MINORDER merge.  On the other
2320          * hand, even when we have lots of memory, do not use more than a MAXORDER
2321          * merge.  Tapes are pretty cheap, but they're not entirely free.  Each
2322          * additional tape reduces the amount of memory available to build runs,
2323          * which in turn can cause the same sort to need more runs, which makes
2324          * merging slower even if it can still be done in a single pass.  Also,
2325          * high order merges are quite slow due to CPU cache effects; it can be
2326          * faster to pay the I/O cost of a polyphase merge than to perform a single
2327          * merge pass across many hundreds of tapes.
2328          */
2329         mOrder = Max(mOrder, MINORDER);
2330         mOrder = Min(mOrder, MAXORDER);
2331
2332         return mOrder;
2333 }
2334
2335 /*
2336  * useselection - determine algorithm to use to sort first run.
2337  *
2338  * It can sometimes be useful to use the replacement selection algorithm if it
2339  * results in one large run, and there is little available workMem.  See
2340  * remarks on RUN_SECOND optimization within dumptuples().
2341  */
2342 static bool
2343 useselection(Tuplesortstate *state)
2344 {
2345         /*
2346          * memtupsize might be noticeably higher than memtupcount here in atypical
2347          * cases.  It seems slightly preferable to not allow recent outliers to
2348          * impact this determination.  Note that caller's trace_sort output
2349          * reports memtupcount instead.
2350          */
2351         if (state->memtupsize <= replacement_sort_tuples)
2352                 return true;
2353
2354         return false;
2355 }
2356
2357 /*
2358  * inittapes - initialize for tape sorting.
2359  *
2360  * This is called only if we have found we don't have room to sort in memory.
2361  */
2362 static void
2363 inittapes(Tuplesortstate *state)
2364 {
2365         int                     maxTapes,
2366                                 j;
2367         int64           tapeSpace;
2368
2369         /* Compute number of tapes to use: merge order plus 1 */
2370         maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
2371
2372         state->maxTapes = maxTapes;
2373         state->tapeRange = maxTapes - 1;
2374
2375 #ifdef TRACE_SORT
2376         if (trace_sort)
2377                 elog(LOG, "switching to external sort with %d tapes: %s",
2378                          maxTapes, pg_rusage_show(&state->ru_start));
2379 #endif
2380
2381         /*
2382          * Decrease availMem to reflect the space needed for tape buffers, when
2383          * writing the initial runs; but don't decrease it to the point that we
2384          * have no room for tuples.  (That case is only likely to occur if sorting
2385          * pass-by-value Datums; in all other scenarios the memtuples[] array is
2386          * unlikely to occupy more than half of allowedMem.  In the pass-by-value
2387          * case it's not important to account for tuple space, so we don't care if
2388          * LACKMEM becomes inaccurate.)
2389          */
2390         tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
2391
2392         if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
2393                 USEMEM(state, tapeSpace);
2394
2395         /*
2396          * Make sure that the temp file(s) underlying the tape set are created in
2397          * suitable temp tablespaces.
2398          */
2399         PrepareTempTablespaces();
2400
2401         /*
2402          * Create the tape set and allocate the per-tape data arrays.
2403          */
2404         state->tapeset = LogicalTapeSetCreate(maxTapes);
2405
2406         state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
2407         state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
2408         state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
2409         state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
2410         state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
2411
2412         /*
2413          * Give replacement selection a try based on user setting.  There will be
2414          * a switch to a simple hybrid sort-merge strategy after the first run
2415          * (iff we could not output one long run).
2416          */
2417         state->replaceActive = useselection(state);
2418
2419         if (state->replaceActive)
2420         {
2421                 /*
2422                  * Convert the unsorted contents of memtuples[] into a heap. Each
2423                  * tuple is marked as belonging to run number zero.
2424                  *
2425                  * NOTE: we pass false for checkIndex since there's no point in
2426                  * comparing indexes in this step, even though we do intend the
2427                  * indexes to be part of the sort key...
2428                  */
2429                 int                     ntuples = state->memtupcount;
2430
2431 #ifdef TRACE_SORT
2432                 if (trace_sort)
2433                         elog(LOG, "replacement selection will sort %d first run tuples",
2434                                  state->memtupcount);
2435 #endif
2436                 state->memtupcount = 0; /* make the heap empty */
2437
2438                 for (j = 0; j < ntuples; j++)
2439                 {
2440                         /* Must copy source tuple to avoid possible overwrite */
2441                         SortTuple       stup = state->memtuples[j];
2442
2443                         stup.tupindex = RUN_FIRST;
2444                         tuplesort_heap_insert(state, &stup, false);
2445                 }
2446                 Assert(state->memtupcount == ntuples);
2447         }
2448
2449         state->currentRun = RUN_FIRST;
2450
2451         /*
2452          * Initialize variables of Algorithm D (step D1).
2453          */
2454         for (j = 0; j < maxTapes; j++)
2455         {
2456                 state->tp_fib[j] = 1;
2457                 state->tp_runs[j] = 0;
2458                 state->tp_dummy[j] = 1;
2459                 state->tp_tapenum[j] = j;
2460         }
2461         state->tp_fib[state->tapeRange] = 0;
2462         state->tp_dummy[state->tapeRange] = 0;
2463
2464         state->Level = 1;
2465         state->destTape = 0;
2466
2467         state->status = TSS_BUILDRUNS;
2468 }
2469
2470 /*
2471  * selectnewtape -- select new tape for new initial run.
2472  *
2473  * This is called after finishing a run when we know another run
2474  * must be started.  This implements steps D3, D4 of Algorithm D.
2475  */
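/*
 * Illustrative note: with three input tapes, the successive "perfect"
 * distributions that the dummy-run bookkeeping aims for are 1:1:1, 2:2:1,
 * 4:3:2, 7:6:4, ... (generalized Fibonacci numbers); each execution of step
 * D4 below advances tp_fib[]/tp_dummy[] from one such level to the next.
 */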
2476 static void
2477 selectnewtape(Tuplesortstate *state)
2478 {
2479         int                     j;
2480         int                     a;
2481
2482         /* Step D3: advance j (destTape) */
2483         if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
2484         {
2485                 state->destTape++;
2486                 return;
2487         }
2488         if (state->tp_dummy[state->destTape] != 0)
2489         {
2490                 state->destTape = 0;
2491                 return;
2492         }
2493
2494         /* Step D4: increase level */
2495         state->Level++;
2496         a = state->tp_fib[0];
2497         for (j = 0; j < state->tapeRange; j++)
2498         {
2499                 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
2500                 state->tp_fib[j] = a + state->tp_fib[j + 1];
2501         }
2502         state->destTape = 0;
2503 }
2504
2505 /*
2506  * Initialize the slab allocation arena, for the given number of slots.
2507  */
2508 static void
2509 init_slab_allocator(Tuplesortstate *state, int numSlots)
2510 {
2511         if (numSlots > 0)
2512         {
2513                 char       *p;
2514                 int                     i;
2515
2516                 state->slabMemoryBegin = palloc(numSlots * SLAB_SLOT_SIZE);
2517                 state->slabMemoryEnd = state->slabMemoryBegin +
2518                         numSlots * SLAB_SLOT_SIZE;
2519                 state->slabFreeHead = (SlabSlot *) state->slabMemoryBegin;
2520                 USEMEM(state, numSlots * SLAB_SLOT_SIZE);
2521
2522                 p = state->slabMemoryBegin;
2523                 for (i = 0; i < numSlots - 1; i++)
2524                 {
2525                         ((SlabSlot *) p)->nextfree = (SlabSlot *) (p + SLAB_SLOT_SIZE);
2526                         p += SLAB_SLOT_SIZE;
2527                 }
2528                 ((SlabSlot *) p)->nextfree = NULL;
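                /*
                 * The arena is now a singly-linked free list:
                 * slot[0] -> slot[1] -> ... -> slot[numSlots - 1] -> NULL,
                 * with slabFreeHead pointing at slot[0].
                 */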
2529         }
2530         else
2531         {
2532                 state->slabMemoryBegin = state->slabMemoryEnd = NULL;
2533                 state->slabFreeHead = NULL;
2534         }
2535         state->slabAllocatorUsed = true;
2536 }
2537
2538 /*
2539  * mergeruns -- merge all the completed initial runs.
2540  *
2541  * This implements steps D5, D6 of Algorithm D.  All input data has
2542  * already been written to initial runs on tape (see dumptuples).
2543  */
2544 static void
2545 mergeruns(Tuplesortstate *state)
2546 {
2547         int                     tapenum,
2548                                 svTape,
2549                                 svRuns,
2550                                 svDummy;
2551         int                     numTapes;
2552         int                     numInputTapes;
2553
2554         Assert(state->status == TSS_BUILDRUNS);
2555         Assert(state->memtupcount == 0);
2556
2557         if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
2558         {
2559                 /*
2560                  * If there are multiple runs to be merged, when we go to read back
2561                  * tuples from disk, abbreviated keys will not have been stored, and
2562                  * we don't care to regenerate them.  Disable abbreviation from this
2563                  * point on.
2564                  */
2565                 state->sortKeys->abbrev_converter = NULL;
2566                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
2567
2568                 /* Not strictly necessary, but be tidy */
2569                 state->sortKeys->abbrev_abort = NULL;
2570                 state->sortKeys->abbrev_full_comparator = NULL;
2571         }
2572
2573         /*
2574          * Reset tuple memory.  We've freed all the tuples that we previously
2575          * allocated.  We will use the slab allocator from now on.
2576          */
2577         MemoryContextDelete(state->tuplecontext);
2578         state->tuplecontext = NULL;
2579
2580         /*
2581          * We no longer need a large memtuples array.  (We will allocate a smaller
2582          * one for the heap later.)
2583          */
2584         FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
2585         pfree(state->memtuples);
2586         state->memtuples = NULL;
2587
2588         /*
2589          * If we had fewer runs than tapes, refund the memory that we imagined we
2590          * would need for the tape buffers of the unused tapes.
2591          *
2592          * numTapes and numInputTapes reflect the actual number of tapes we will
2593          * use.  Note that the output tape's tape number is maxTapes - 1, so the
2594          * tape numbers of the used tapes are not consecutive, and you cannot just
2595          * loop from 0 to numTapes to visit all used tapes!
2596          */
2597         if (state->Level == 1)
2598         {
2599                 numInputTapes = state->currentRun;
2600                 numTapes = numInputTapes + 1;
2601                 FREEMEM(state, (state->maxTapes - numTapes) * TAPE_BUFFER_OVERHEAD);
2602         }
2603         else
2604         {
2605                 numInputTapes = state->tapeRange;
2606                 numTapes = state->maxTapes;
2607         }
2608
2609         /*
2610          * Initialize the slab allocator.  We need one slab slot per input tape,
2611          * for the tuples in the heap, plus one to hold the tuple last returned
2612          * from tuplesort_gettuple.  (If we're sorting pass-by-val Datums,
2613          * however, we don't need to allocate anything.)
2614          *
2615          * From this point on, we no longer use the USEMEM()/LACKMEM() mechanism
2616          * to track memory usage of individual tuples.
2617          */
2618         if (state->tuples)
2619                 init_slab_allocator(state, numInputTapes + 1);
2620         else
2621                 init_slab_allocator(state, 0);
2622
2623         /*
2624          * If we produced only one initial run (quite likely if the total data
2625          * volume is between 1X and 2X workMem when replacement selection is used,
2626          * but something we particularly count on when input is presorted), we can
2627          * just use that tape as the finished output, rather than doing a useless
2628          * merge.  (This obvious optimization is not in Knuth's algorithm.)
2629          */
2630         if (state->currentRun == RUN_SECOND)
2631         {
2632                 state->result_tape = state->tp_tapenum[state->destTape];
2633                 /* must freeze and rewind the finished output tape */
2634                 LogicalTapeFreeze(state->tapeset, state->result_tape);
2635                 state->status = TSS_SORTEDONTAPE;
2636                 return;
2637         }
2638
2639         /*
2640          * Allocate a new 'memtuples' array, for the heap.  It will hold one tuple
2641          * from each input tape.
2642          */
2643         state->memtupsize = numInputTapes;
2644         state->memtuples = (SortTuple *) palloc(numInputTapes * sizeof(SortTuple));
2645         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
2646
2647         /*
2648          * Use all the remaining memory we have available for read buffers among
2649          * the input tapes.
2650          *
2651          * We do this only after checking for the case that we produced only one
2652          * initial run, because there is no need to use a large read buffer when
2653          * we're reading from a single tape.  With one tape, the I/O pattern will
2654          * be the same regardless of the buffer size.
2655          *
2656          * We don't try to "rebalance" the memory among tapes, when we start a new
2657          * merge phase, even if some tapes are inactive in the new phase.  That
2658          * would be hard, because logtape.c doesn't know where one run ends and
2659          * another begins.  When a new merge phase begins, and a tape doesn't
2660          * participate in it, its buffer nevertheless already contains tuples from
2661          * the next run on the same tape, so we cannot release the buffer.  That's
2662          * OK in practice; merge performance isn't that sensitive to the amount of
2663          * buffer space used, and most merge phases use all or almost all tapes
2664          * anyway.
2665          */
2666 #ifdef TRACE_SORT
2667         if (trace_sort)
2668                 elog(LOG, "using " INT64_FORMAT " KB of memory for read buffers among %d input tapes",
2669                          (state->availMem) / 1024, numInputTapes);
2670 #endif
2671
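        /*
         * Illustrative arithmetic only: if roughly 1 MB of availMem remains
         * here and there are 8 input tapes, each tape gets a ~128 kB read
         * buffer; the real numbers depend on work_mem and on how much memory
         * the run-building phase left over.
         */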
2672         state->read_buffer_size = Max(state->availMem / numInputTapes, 0);
2673         USEMEM(state, state->availMem);
2674
2675         /* End of step D2: rewind all output tapes to prepare for merging */
2676         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2677                 LogicalTapeRewindForRead(state->tapeset, tapenum, state->read_buffer_size);
2678
2679         for (;;)
2680         {
2681                 /*
2682                  * At this point we know that tape[T] is empty.  If there's just one
2683                  * (real or dummy) run left on each input tape, then only one merge
2684                  * pass remains.  If we don't have to produce a materialized sorted
2685                  * tape, we can stop at this point and do the final merge on-the-fly.
2686                  */
2687                 if (!state->randomAccess)
2688                 {
2689                         bool            allOneRun = true;
2690
2691                         Assert(state->tp_runs[state->tapeRange] == 0);
2692                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2693                         {
2694                                 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
2695                                 {
2696                                         allOneRun = false;
2697                                         break;
2698                                 }
2699                         }
2700                         if (allOneRun)
2701                         {
2702                                 /* Tell logtape.c we won't be writing anymore */
2703                                 LogicalTapeSetForgetFreeSpace(state->tapeset);
2704                                 /* Initialize for the final merge pass */
2705                                 beginmerge(state);
2706                                 state->status = TSS_FINALMERGE;
2707                                 return;
2708                         }
2709                 }
2710
2711                 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
2712                 while (state->tp_runs[state->tapeRange - 1] ||
2713                            state->tp_dummy[state->tapeRange - 1])
2714                 {
2715                         bool            allDummy = true;
2716
2717                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2718                         {
2719                                 if (state->tp_dummy[tapenum] == 0)
2720                                 {
2721                                         allDummy = false;
2722                                         break;
2723                                 }
2724                         }
2725
2726                         if (allDummy)
2727                         {
2728                                 state->tp_dummy[state->tapeRange]++;
2729                                 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2730                                         state->tp_dummy[tapenum]--;
2731                         }
2732                         else
2733                                 mergeonerun(state);
2734                 }
2735
2736                 /* Step D6: decrease level */
2737                 if (--state->Level == 0)
2738                         break;
2739                 /* rewind output tape T to use as new input */
2740                 LogicalTapeRewindForRead(state->tapeset, state->tp_tapenum[state->tapeRange],
2741                                                                  state->read_buffer_size);
2742                 /* rewind used-up input tape P, and prepare it for write pass */
2743                 LogicalTapeRewindForWrite(state->tapeset, state->tp_tapenum[state->tapeRange - 1]);
2744                 state->tp_runs[state->tapeRange - 1] = 0;
2745
2746                 /*
2747                  * reassign tape units per step D6; note we no longer care about A[]
2748                  */
2749                 svTape = state->tp_tapenum[state->tapeRange];
2750                 svDummy = state->tp_dummy[state->tapeRange];
2751                 svRuns = state->tp_runs[state->tapeRange];
2752                 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
2753                 {
2754                         state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
2755                         state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
2756                         state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
2757                 }
2758                 state->tp_tapenum[0] = svTape;
2759                 state->tp_dummy[0] = svDummy;
2760                 state->tp_runs[0] = svRuns;
2761         }
2762
2763         /*
2764          * Done.  Knuth says that the result is on TAPE[1], but since we exited
2765          * the loop without performing the last iteration of step D6, we have not
2766          * rearranged the tape unit assignment, and therefore the result is on
2767          * TAPE[T].  We need to do it this way so that we can freeze the final
2768          * output tape while rewinding it.  The last iteration of step D6 would be
2769          * a waste of cycles anyway...
2770          */
2771         state->result_tape = state->tp_tapenum[state->tapeRange];
2772         LogicalTapeFreeze(state->tapeset, state->result_tape);
2773         state->status = TSS_SORTEDONTAPE;
2774
2775         /* Release the read buffers of all the other tapes, by rewinding them. */
2776         for (tapenum = 0; tapenum < state->maxTapes; tapenum++)
2777         {
2778                 if (tapenum != state->result_tape)
2779                         LogicalTapeRewindForWrite(state->tapeset, tapenum);
2780         }
2781 }
2782
2783 /*
2784  * Merge one run from each input tape, except ones with dummy runs.
2785  *
2786  * This is the inner loop of Algorithm D step D5.  We know that the
2787  * output tape is TAPE[T].
2788  */
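/*
 * Note that the merge heap holds at most one tuple per active input tape at
 * any instant (beginmerge() loads one apiece, and each extraction pulls at
 * most one replacement from the same tape), which is why the slab arena
 * sized in mergeruns() never runs out of slots.
 */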
2789 static void
2790 mergeonerun(Tuplesortstate *state)
2791 {
2792         int                     destTape = state->tp_tapenum[state->tapeRange];
2793         int                     srcTape;
2794
2795         /*
2796          * Start the merge by loading one tuple from each active source tape into
2797          * the heap.  We can also decrease the input run/dummy run counts.
2798          */
2799         beginmerge(state);
2800
2801         /*
2802          * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2803          * out, and replacing it with next tuple from same tape (if there is
2804          * another one).
2805          */
2806         while (state->memtupcount > 0)
2807         {
2808                 SortTuple       stup;
2809
2810                 /* write the tuple to destTape */
2811                 srcTape = state->memtuples[0].tupindex;
2812                 WRITETUP(state, destTape, &state->memtuples[0]);
2813
2814                 /* recycle the slot of the tuple we just wrote out, for the next read */
2815                 RELEASE_SLAB_SLOT(state, state->memtuples[0].tuple);
2816
2817                 /*
2818                  * pull next tuple from the tape, and replace the written-out tuple in
2819                  * the heap with it.
2820                  */
2821                 if (mergereadnext(state, srcTape, &stup))
2822                 {
2823                         stup.tupindex = srcTape;
2824                         tuplesort_heap_replace_top(state, &stup, false);
2826                 }
2827                 else
2828                         tuplesort_heap_delete_top(state, false);
2829         }
2830
2831         /*
2832          * When the heap empties, we're done.  Write an end-of-run marker on the
2833          * output tape, and increment its count of real runs.
2834          */
2835         markrunend(state, destTape);
2836         state->tp_runs[state->tapeRange]++;
2837
2838 #ifdef TRACE_SORT
2839         if (trace_sort)
2840                 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
2841                          pg_rusage_show(&state->ru_start));
2842 #endif
2843 }
2844
2845 /*
2846  * beginmerge - initialize for a merge pass
2847  *
2848  * We decrease the counts of real and dummy runs for each tape, and mark
2849  * which tapes contain active input runs in mergeactive[].  Then, fill the
2850  * merge heap with the first tuple from each active tape.
2851  */
2852 static void
2853 beginmerge(Tuplesortstate *state)
2854 {
2855         int                     activeTapes;
2856         int                     tapenum;
2857         int                     srcTape;
2858
2859         /* Heap should be empty here */
2860         Assert(state->memtupcount == 0);
2861
2862         /* Adjust run counts and mark the active tapes */
2863         memset(state->mergeactive, 0,
2864                    state->maxTapes * sizeof(*state->mergeactive));
2865         activeTapes = 0;
2866         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2867         {
2868                 if (state->tp_dummy[tapenum] > 0)
2869                         state->tp_dummy[tapenum]--;
2870                 else
2871                 {
2872                         Assert(state->tp_runs[tapenum] > 0);
2873                         state->tp_runs[tapenum]--;
2874                         srcTape = state->tp_tapenum[tapenum];
2875                         state->mergeactive[srcTape] = true;
2876                         activeTapes++;
2877                 }
2878         }
2879         Assert(activeTapes > 0);
2880         state->activeTapes = activeTapes;
2881
2882         /* Load the merge heap with the first tuple from each input tape */
2883         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2884         {
2885                 SortTuple       tup;
2886
2887                 if (mergereadnext(state, srcTape, &tup))
2888                 {
2889                         tup.tupindex = srcTape;
2890                         tuplesort_heap_insert(state, &tup, false);
2891                 }
2892         }
2893 }
2894
2895 /*
2896  * mergereadnext - read next tuple from one merge input tape
2897  *
2898  * Returns false on EOF.
2899  */
2900 static bool
2901 mergereadnext(Tuplesortstate *state, int srcTape, SortTuple *stup)
2902 {
2903         unsigned int tuplen;
2904
2905         if (!state->mergeactive[srcTape])
2906                 return false;                   /* tape's run is already exhausted */
2907
2908         /* read next tuple, if any */
2909         if ((tuplen = getlen(state, srcTape, true)) == 0)
2910         {
2911                 state->mergeactive[srcTape] = false;
2912                 return false;
2913         }
2914         READTUP(state, stup, srcTape, tuplen);
2915
2916         return true;
2917 }
2918
2919 /*
2920  * dumptuples - remove tuples from memtuples and write to tape
2921  *
2922  * This is used during initial-run building, but not during merging.
2923  *
2924  * When alltuples = false and replacement selection is still active, dump
2925  * only enough tuples to get under the availMem limit (and leave at least
2926  * one tuple in memtuples, since puttuple will then assume it is a heap that
2927  * has a tuple to compare to).  We always insist there be at least one free
2928  * slot in the memtuples[] array.
2929  *
2930  * When alltuples = true, dump everything currently in memory.  (This
2931  * case is only used at end of input data, although in practice only the
2932  * first run could fail to dump all tuples when we LACKMEM(), and only
2933  * when replacement selection is active.)
2934  *
2935  * If, when replacement selection is active, we see that the tuple run
2936  * number at the top of the heap has changed, start a new run.  This must be
2937  * the first run, because replacement selection is always abandoned for all
2938  * further runs.
2939  */
2940 static void
2941 dumptuples(Tuplesortstate *state, bool alltuples)
2942 {
2943         while (alltuples ||
2944                    (LACKMEM(state) && state->memtupcount > 1) ||
2945                    state->memtupcount >= state->memtupsize)
2946         {
2947                 if (state->replaceActive)
2948                 {
2949                         /*
2950                          * Still holding out for a case favorable to replacement
2951                          * selection. Still incrementally spilling using heap.
2952                          *
2953                          * Dump the heap's frontmost entry, and remove it from the heap.
2954                          */
2955                         Assert(state->memtupcount > 0);
2956                         WRITETUP(state, state->tp_tapenum[state->destTape],
2957                                          &state->memtuples[0]);
2958                         tuplesort_heap_delete_top(state, true);
2959                 }
2960                 else
2961                 {
2962                         /*
2963                          * Once committed to quicksorting runs, never incrementally spill
2964                          */
2965                         dumpbatch(state, alltuples);
2966                         break;
2967                 }
2968
2969                 /*
2970                  * If top run number has changed, we've finished the current run (this
2971                  * can only be the first run), and will no longer spill incrementally.
2972                  */
2973                 if (state->memtupcount == 0 ||
2974                         state->memtuples[0].tupindex == HEAP_RUN_NEXT)
2975                 {
2976                         markrunend(state, state->tp_tapenum[state->destTape]);
2977                         Assert(state->currentRun == RUN_FIRST);
2978                         state->currentRun++;
2979                         state->tp_runs[state->destTape]++;
2980                         state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
2981
2982 #ifdef TRACE_SORT
2983                         if (trace_sort)
2984                                 elog(LOG, "finished incrementally writing %s run %d to tape %d: %s",
2985                                          (state->memtupcount == 0) ? "only" : "first",
2986                                          state->currentRun, state->destTape,
2987                                          pg_rusage_show(&state->ru_start));
2988 #endif
2989
2990                         /*
2991                          * Done if heap is empty, which is possible when there is only one
2992                          * long run.
2993                          */
2994                         Assert(state->currentRun == RUN_SECOND);
2995                         if (state->memtupcount == 0)
2996                         {
2997                                 /*
2998                                  * Replacement selection best case; no final merge required,
2999                                  * because there was only one initial run (second run has no
3000                                  * tuples).  See RUN_SECOND case in mergeruns().
3001                                  */
3002                                 break;
3003                         }
3004
3005                         /*
3006                          * Abandon replacement selection for second run (as well as any
3007                          * subsequent runs).
3008                          */
3009                         state->replaceActive = false;
3010
3011                         /*
3012                          * First tuple of next run should not be heapified, and so will
3013                          * bear placeholder run number.  In practice this must actually be
3014                          * the second run, which just became the currentRun, so we're
3015                          * clear to quicksort and dump the tuples in batch next time
3016                          * memtuples becomes full.
3017                          */
3018                         Assert(state->memtuples[0].tupindex == HEAP_RUN_NEXT);
3019                         selectnewtape(state);
3020                 }
3021         }
3022 }
3023
3024 /*
3025  * dumpbatch - sort and dump all memtuples, forming one run on tape
3026  *
3027  * Second or subsequent runs are never heapified by this module (although
3028  * heapification still respects run number differences between the first and
3029  * second runs), and a heap (replacement selection priority queue) is often
3030  * avoided in the first place.
3031  */
3032 static void
3033 dumpbatch(Tuplesortstate *state, bool alltuples)
3034 {
3035         int                     memtupwrite;
3036         int                     i;
3037
3038         /*
3039          * Final call might require no sorting, in rare cases where we just so
3040          * happen to have previously LACKMEM()'d at the point where exactly all
3041          * remaining tuples are loaded into memory, just before input was
3042          * exhausted.
3043          *
3044          * In general, short final runs are quite possible.  Rather than allowing
3045          * a special case where there was a superfluous selectnewtape() call (i.e.
3046          * a call with no subsequent run actually written to destTape), we prefer
3047          * to write out a 0 tuple run.
3048          *
3049          * mergereadnext() is prepared for 0 tuple runs, and will reliably mark
3050          * the tape inactive for the merge when called from beginmerge().  This
3051          * case is therefore similar to the case where mergeonerun() finds a dummy
3052          * run for the tape, and so doesn't need to merge a run from the tape (or
3053          * conceptually "merges" the dummy run, if you prefer).  According to
3054          * Knuth, Algorithm D "isn't strictly optimal" in its method of
3055          * distribution and dummy run assignment; this edge case seems very
3056          * unlikely to make that appreciably worse.
3057          */
3058         Assert(state->status == TSS_BUILDRUNS);
3059
3060         /*
3061          * It seems unlikely that this limit will ever be exceeded, but take no
3062          * chances
3063          */
3064         if (state->currentRun == INT_MAX)
3065                 ereport(ERROR,
3066                                 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
3067                                  errmsg("cannot have more than %d runs for an external sort",
3068                                                 INT_MAX)));
3069
3070         state->currentRun++;
3071
3072 #ifdef TRACE_SORT
3073         if (trace_sort)
3074                 elog(LOG, "starting quicksort of run %d: %s",
3075                          state->currentRun, pg_rusage_show(&state->ru_start));
3076 #endif
3077
3078         /*
3079          * Sort all tuples accumulated within the allowed amount of memory for
3080          * this run using quicksort
3081          */
3082         tuplesort_sort_memtuples(state);
3083
3084 #ifdef TRACE_SORT
3085         if (trace_sort)
3086                 elog(LOG, "finished quicksort of run %d: %s",
3087                          state->currentRun, pg_rusage_show(&state->ru_start));
3088 #endif
3089
3090         memtupwrite = state->memtupcount;
3091         for (i = 0; i < memtupwrite; i++)
3092         {
3093                 WRITETUP(state, state->tp_tapenum[state->destTape],
3094                                  &state->memtuples[i]);
3095                 state->memtupcount--;
3096         }
3097
3098         /*
3099          * Reset tuple memory.  We've freed all of the tuples that we previously
3100          * allocated.  It's important to avoid fragmentation when there is a stark
3101          * change in the sizes of incoming tuples.  Fragmentation due to
3102          * AllocSetFree's bucketing by size class might be particularly bad if
3103          * this step wasn't taken.
3104          */
3105         MemoryContextReset(state->tuplecontext);
3106
3107         markrunend(state, state->tp_tapenum[state->destTape]);
3108         state->tp_runs[state->destTape]++;
3109         state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
3110
3111 #ifdef TRACE_SORT
3112         if (trace_sort)
3113                 elog(LOG, "finished writing run %d to tape %d: %s",
3114                          state->currentRun, state->destTape,
3115                          pg_rusage_show(&state->ru_start));
3116 #endif
3117
3118         if (!alltuples)
3119                 selectnewtape(state);
3120 }
3121
3122 /*
3123  * tuplesort_rescan             - rewind and replay the scan
3124  */
3125 void
3126 tuplesort_rescan(Tuplesortstate *state)
3127 {
3128         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3129
3130         Assert(state->randomAccess);
3131
3132         switch (state->status)
3133         {
3134                 case TSS_SORTEDINMEM:
3135                         state->current = 0;
3136                         state->eof_reached = false;
3137                         state->markpos_offset = 0;
3138                         state->markpos_eof = false;
3139                         break;
3140                 case TSS_SORTEDONTAPE:
3141                         LogicalTapeRewindForRead(state->tapeset,
3142                                                                          state->result_tape,
3143                                                                          0);
3144                         state->eof_reached = false;
3145                         state->markpos_block = 0L;
3146                         state->markpos_offset = 0;
3147                         state->markpos_eof = false;
3148                         break;
3149                 default:
3150                         elog(ERROR, "invalid tuplesort state");
3151                         break;
3152         }
3153
3154         MemoryContextSwitchTo(oldcontext);
3155 }
3156
3157 /*
3158  * tuplesort_markpos    - saves current position in the merged sort file
3159  */
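/*
 * (The expected callers are the mark/restore hooks in nodeSort.c, e.g.
 * ExecSortMarkPos(), which merge joins rely on to re-read part of the
 * sorted output; hence the randomAccess requirement.)
 */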
3160 void
3161 tuplesort_markpos(Tuplesortstate *state)
3162 {
3163         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3164
3165         Assert(state->randomAccess);
3166
3167         switch (state->status)
3168         {
3169                 case TSS_SORTEDINMEM:
3170                         state->markpos_offset = state->current;
3171                         state->markpos_eof = state->eof_reached;
3172                         break;
3173                 case TSS_SORTEDONTAPE:
3174                         LogicalTapeTell(state->tapeset,
3175                                                         state->result_tape,
3176                                                         &state->markpos_block,
3177                                                         &state->markpos_offset);
3178                         state->markpos_eof = state->eof_reached;
3179                         break;
3180                 default:
3181                         elog(ERROR, "invalid tuplesort state");
3182                         break;
3183         }
3184
3185         MemoryContextSwitchTo(oldcontext);
3186 }
3187
3188 /*
3189  * tuplesort_restorepos - restores current position in merged sort file to
3190  *                                                last saved position
3191  */
3192 void
3193 tuplesort_restorepos(Tuplesortstate *state)
3194 {
3195         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3196
3197         Assert(state->randomAccess);
3198
3199         switch (state->status)
3200         {
3201                 case TSS_SORTEDINMEM:
3202                         state->current = state->markpos_offset;
3203                         state->eof_reached = state->markpos_eof;
3204                         break;
3205                 case TSS_SORTEDONTAPE:
3206                         if (!LogicalTapeSeek(state->tapeset,
3207                                                                  state->result_tape,
3208                                                                  state->markpos_block,
3209                                                                  state->markpos_offset))
3210                                 elog(ERROR, "tuplesort_restorepos failed");
3211                         state->eof_reached = state->markpos_eof;
3212                         break;
3213                 default:
3214                         elog(ERROR, "invalid tuplesort state");
3215                         break;
3216         }
3217
3218         MemoryContextSwitchTo(oldcontext);
3219 }
3220
3221 /*
3222  * tuplesort_get_stats - extract summary statistics
3223  *
3224  * This can be called after tuplesort_performsort() finishes to obtain
3225  * printable summary information about how the sort was performed.
3226  * spaceUsed is measured in kilobytes.
3227  */
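/*
 * (EXPLAIN ANALYZE surfaces these values in lines such as
 * "Sort Method: external merge  Disk: 4096kB"; the formatting itself is up
 * to the caller.)
 */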
3228 void
3229 tuplesort_get_stats(Tuplesortstate *state,
3230                                         const char **sortMethod,
3231                                         const char **spaceType,
3232                                         long *spaceUsed)
3233 {
3234         /*
3235          * Note: it might seem we should provide both memory and disk usage for a
3236          * disk-based sort.  However, the current code doesn't track memory space
3237          * accurately once we have begun to return tuples to the caller (since we
3238          * don't account for pfree's the caller is expected to do), so we cannot
3239          * rely on availMem in a disk sort.  This does not seem worth the overhead
3240          * to fix.  Is it worth creating an API for the memory context code to
3241          * tell us how much is actually used in sortcontext?
3242          */
3243         if (state->tapeset)
3244         {
3245                 *spaceType = "Disk";
3246                 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
3247         }
3248         else
3249         {
3250                 *spaceType = "Memory";
3251                 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
3252         }
3253
3254         switch (state->status)
3255         {
3256                 case TSS_SORTEDINMEM:
3257                         if (state->boundUsed)
3258                                 *sortMethod = "top-N heapsort";
3259                         else
3260                                 *sortMethod = "quicksort";
3261                         break;
3262                 case TSS_SORTEDONTAPE:
3263                         *sortMethod = "external sort";
3264                         break;
3265                 case TSS_FINALMERGE:
3266                         *sortMethod = "external merge";
3267                         break;
3268                 default:
3269                         *sortMethod = "still in progress";
3270                         break;
3271         }
3272 }
3273
3274
3275 /*
3276  * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
3277  *
3278  * Compare two SortTuples.  If checkIndex is true, use the tuple index
3279  * as the front of the sort key; otherwise, no.
3280  *
3281  * Note that for checkIndex callers, the heap invariant is never
3282  * maintained beyond the first run, and so there are no COMPARETUP()
3283  * calls needed to distinguish tuples in HEAP_RUN_NEXT.
3284  */
3285
3286 #define HEAPCOMPARE(tup1,tup2) \
3287         (checkIndex && ((tup1)->tupindex != (tup2)->tupindex || \
3288                                         (tup1)->tupindex == HEAP_RUN_NEXT) ? \
3289          ((tup1)->tupindex) - ((tup2)->tupindex) : \
3290          COMPARETUP(state, tup1, tup2))
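/*
 * Because HEAP_RUN_NEXT is numerically larger than any real run number, the
 * tupindex subtraction above keeps every first-run tuple ahead of the
 * placeholder tuples destined for the next run, without COMPARETUP() ever
 * being applied to the latter.
 */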
3291
3292 /*
3293  * Convert the existing unordered array of SortTuples to a bounded heap,
3294  * discarding all but the smallest "state->bound" tuples.
3295  *
3296  * When working with a bounded heap, we want to keep the largest entry
3297  * at the root (array entry zero), instead of the smallest as in the normal
3298  * sort case.  This allows us to discard the largest entry cheaply.
3299  * Therefore, we temporarily reverse the sort direction.
3300  *
3301  * We assume that all entries in a bounded heap will always have tupindex
3302  * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
3303  * the direction of comparison for tupindexes.
3304  */
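/*
 * Worked example (ascending sort, bound = 3, keys arriving as 5 1 4 2 3):
 * the first three keys are heapified with the largest, 5, at the root; 2
 * then replaces 5 and 3 replaces 4, leaving the three smallest keys
 * {1, 2, 3} for sort_bounded_heap() to emit in order.
 */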
3305 static void
3306 make_bounded_heap(Tuplesortstate *state)
3307 {
3308         int                     tupcount = state->memtupcount;
3309         int                     i;
3310
3311         Assert(state->status == TSS_INITIAL);
3312         Assert(state->bounded);
3313         Assert(tupcount >= state->bound);
3314
3315         /* Reverse sort direction so largest entry will be at root */
3316         reversedirection(state);
3317
3318         state->memtupcount = 0;         /* make the heap empty */
3319         for (i = 0; i < tupcount; i++)
3320         {
3321                 if (state->memtupcount < state->bound)
3322                 {
3323                         /* Insert next tuple into heap */
3324                         /* Must copy source tuple to avoid possible overwrite */
3325                         SortTuple       stup = state->memtuples[i];
3326
3327                         stup.tupindex = 0;      /* not used */
3328                         tuplesort_heap_insert(state, &stup, false);
3329                 }
3330                 else
3331                 {
3332                         /*
3333                          * The heap is full.  Replace the largest entry with the new
3334                          * tuple, or just discard it, if it's larger than anything already
3335                          * in the heap.
3336                          */
3337                         if (COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
3338                         {
3339                                 free_sort_tuple(state, &state->memtuples[i]);
3340                                 CHECK_FOR_INTERRUPTS();
3341                         }
3342                         else
3343                                 tuplesort_heap_replace_top(state, &state->memtuples[i], false);
3344                 }
3345         }
3346
3347         Assert(state->memtupcount == state->bound);
3348         state->status = TSS_BOUNDED;
3349 }
3350
3351 /*
3352  * Convert the bounded heap to a properly-sorted array
3353  */
3354 static void
3355 sort_bounded_heap(Tuplesortstate *state)
3356 {
3357         int                     tupcount = state->memtupcount;
3358
3359         Assert(state->status == TSS_BOUNDED);
3360         Assert(state->bounded);
3361         Assert(tupcount == state->bound);
3362
3363         /*
3364          * We can unheapify in place because each delete-top call will remove the
3365          * largest entry, which we can promptly store in the newly freed slot at
3366          * the end.  Once we're down to a single-entry heap, we're done.
3367          */
3368         while (state->memtupcount > 1)
3369         {
3370                 SortTuple       stup = state->memtuples[0];
3371
3372                 /* this sifts-up the next-largest entry and decreases memtupcount */
3373                 tuplesort_heap_delete_top(state, false);
3374                 state->memtuples[state->memtupcount] = stup;
3375         }
3376         state->memtupcount = tupcount;
3377
3378         /*
3379          * Reverse sort direction back to the original state.  This is not
3380          * actually necessary but seems like a good idea for tidiness.
3381          */
3382         reversedirection(state);
3383
3384         state->status = TSS_SORTEDINMEM;
3385         state->boundUsed = true;
3386 }
3387
3388 /*
3389  * Sort all memtuples using specialized qsort() routines.
3390  *
3391  * Quicksort is used for small in-memory sorts.  Quicksort is also generally
3392  * preferred to replacement selection for generating runs during external sort
3393  * operations, although replacement selection is sometimes used for the first
3394  * run.
3395  */
3396 static void
3397 tuplesort_sort_memtuples(Tuplesortstate *state)
3398 {
3399         if (state->memtupcount > 1)
3400         {
3401                 /* Can we use the single-key sort function? */
3402                 if (state->onlyKey != NULL)
3403                         qsort_ssup(state->memtuples, state->memtupcount,
3404                                            state->onlyKey);
3405                 else
3406                         qsort_tuple(state->memtuples,
3407                                                 state->memtupcount,
3408                                                 state->comparetup,
3409                                                 state);
3410         }
3411 }
3412
3413 /*
3414  * Insert a new tuple into an empty or existing heap, maintaining the
3415  * heap invariant.  Caller is responsible for ensuring there's room.
3416  *
3417  * Note: For some callers, tuple points to a memtuples[] entry above the
3418  * end of the heap.  This is safe as long as it's not immediately adjacent
3419  * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
3420  * is, it might get overwritten before being moved into the heap!
3421  */
3422 static void
3423 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
3424                                           bool checkIndex)
3425 {
3426         SortTuple  *memtuples;
3427         int                     j;
3428
3429         memtuples = state->memtuples;
3430         Assert(state->memtupcount < state->memtupsize);
3431         Assert(!checkIndex || tuple->tupindex == RUN_FIRST);
3432
3433         CHECK_FOR_INTERRUPTS();
3434
3435         /*
3436          * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
3437          * using 1-based array indexes, not 0-based.
3438          */
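        /* With 0-based indexes, the parent of slot j is (j - 1) / 2. */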
3439         j = state->memtupcount++;
3440         while (j > 0)
3441         {
3442                 int                     i = (j - 1) >> 1;
3443
3444                 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
3445                         break;
3446                 memtuples[j] = memtuples[i];
3447                 j = i;
3448         }
3449         memtuples[j] = *tuple;
3450 }
3451
3452 /*
3453  * Remove the tuple at state->memtuples[0] from the heap.  Decrement
3454  * memtupcount, and sift up to maintain the heap invariant.
3455  *
3456  * The caller has already free'd the tuple the top node points to,
3457  * if necessary.
3458  */
3459 static void
3460 tuplesort_heap_delete_top(Tuplesortstate *state, bool checkIndex)
3461 {
3462         SortTuple  *memtuples = state->memtuples;
3463         SortTuple  *tuple;
3464
3465         Assert(!checkIndex || state->currentRun == RUN_FIRST);
3466         if (--state->memtupcount <= 0)
3467                 return;
3468
3469         /*
3470          * Remove the last tuple in the heap, and re-insert it, by replacing the
3471          * current top node with it.
3472          */
3473         tuple = &memtuples[state->memtupcount];
3474         tuplesort_heap_replace_top(state, tuple, checkIndex);
3475 }
3476
3477 /*
3478  * Replace the tuple at state->memtuples[0] with a new tuple.  Sift up to
3479  * maintain the heap invariant.
3480  *
3481  * This corresponds to Knuth's "sift-up" algorithm (Algorithm 5.2.3H,
3482  * Heapsort, steps H3-H8).
3483  */
3484 static void
3485 tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple,
3486                                                    bool checkIndex)
3487 {
3488         SortTuple  *memtuples = state->memtuples;
3489         int                     i,
3490                                 n;
3491
3492         Assert(!checkIndex || state->currentRun == RUN_FIRST);
3493         Assert(state->memtupcount >= 1);
3494
3495         CHECK_FOR_INTERRUPTS();
3496
3497         n = state->memtupcount;
3498         i = 0;                                          /* i is where the "hole" is */
3499         for (;;)
3500         {
3501                 int                     j = 2 * i + 1;
3502
3503                 if (j >= n)
3504                         break;
3505                 if (j + 1 < n &&
3506                         HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
3507                         j++;
3508                 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
3509                         break;
3510                 memtuples[i] = memtuples[j];
3511                 i = j;
3512         }
3513         memtuples[i] = *tuple;
3514 }
3515
3516 /*
3517  * Function to reverse the sort direction from its current state
3518  *
3519  * It is not safe to call this when performing hash tuplesorts
3520  */
3521 static void
3522 reversedirection(Tuplesortstate *state)
3523 {
3524         SortSupport sortKey = state->sortKeys;
3525         int                     nkey;
3526
3527         for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
3528         {
3529                 sortKey->ssup_reverse = !sortKey->ssup_reverse;
3530                 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
3531         }
3532 }
3533
3534
3535 /*
3536  * Tape interface routines
3537  */
3538
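/*
 * Each tuple on tape is preceded by an unsigned length word; a zero length
 * word (written by markrunend) marks the end of a run, and getlen() returns
 * that zero to callers that pass eofOK = true.
 */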
3539 static unsigned int
3540 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
3541 {
3542         unsigned int len;
3543
3544         if (LogicalTapeRead(state->tapeset, tapenum,
3545                                                 &len, sizeof(len)) != sizeof(len))
3546                 elog(ERROR, "unexpected end of tape");
3547         if (len == 0 && !eofOK)
3548                 elog(ERROR, "unexpected end of data");
3549         return len;
3550 }
3551
3552 static void
3553 markrunend(Tuplesortstate *state, int tapenum)
3554 {
3555         unsigned int len = 0;
3556
3557         LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
3558 }
3559
3560 /*
3561  * Get memory for tuple from within READTUP() routine.
3562  *
3563  * We use next free slot from the slab allocator, or palloc() if the tuple
3564  * is too large for that.
3565  */
3566 static void *
3567 readtup_alloc(Tuplesortstate *state, Size tuplen)
3568 {
3569         SlabSlot   *buf;
3570
3571         /*
3572          * We pre-allocate enough slots in the slab arena that we should never run
3573          * out.
3574          */
3575         Assert(state->slabFreeHead);
3576
3577         if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead)
3578                 return MemoryContextAlloc(state->sortcontext, tuplen);
3579         else
3580         {
3581                 buf = state->slabFreeHead;
3582                 /* Reuse this slot */
3583                 state->slabFreeHead = buf->nextfree;
3584
3585                 return buf;
3586         }
3587 }
3588
3589
3590 /*
3591  * Routines specialized for HeapTuple (actually MinimalTuple) case
3592  */
3593
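/*
 * comparetup_heap compares the cached datum1 values first (these may be
 * abbreviated keys); when abbreviation is in use and that comparison ties,
 * the authoritative comparator re-checks the leading column before the
 * remaining sort keys are examined.
 */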
3594 static int
3595 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3596 {
3597         SortSupport sortKey = state->sortKeys;
3598         HeapTupleData ltup;
3599         HeapTupleData rtup;
3600         TupleDesc       tupDesc;
3601         int                     nkey;
3602         int32           compare;
3603         AttrNumber      attno;
3604         Datum           datum1,
3605                                 datum2;
3606         bool            isnull1,
3607                                 isnull2;
3608
3609
3610         /* Compare the leading sort key */
3611         compare = ApplySortComparator(a->datum1, a->isnull1,
3612                                                                   b->datum1, b->isnull1,
3613                                                                   sortKey);
3614         if (compare != 0)
3615                 return compare;
3616
3617         /* Compare additional sort keys */
3618         ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3619         ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
3620         rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3621         rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
3622         tupDesc = state->tupDesc;
3623
3624         if (sortKey->abbrev_converter)
3625         {
3626                 attno = sortKey->ssup_attno;
3627
3628                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3629                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3630
3631                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3632                                                                                                 datum2, isnull2,
3633                                                                                                 sortKey);
3634                 if (compare != 0)
3635                         return compare;
3636         }
3637
3638         sortKey++;
3639         for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
3640         {
3641                 attno = sortKey->ssup_attno;
3642
3643                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3644                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3645
3646                 compare = ApplySortComparator(datum1, isnull1,
3647                                                                           datum2, isnull2,
3648                                                                           sortKey);
3649                 if (compare != 0)
3650                         return compare;
3651         }
3652
3653         return 0;
3654 }
3655
3656 static void
3657 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
3658 {
3659         /*
3660          * We expect the passed "tup" to be a TupleTableSlot, and form a
3661          * MinimalTuple using the exported interface for that.
3662          */
3663         TupleTableSlot *slot = (TupleTableSlot *) tup;
3664         Datum           original;
3665         MinimalTuple tuple;
3666         HeapTupleData htup;
3667         MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
3668
3669         /* copy the tuple into sort storage */
3670         tuple = ExecCopySlotMinimalTuple(slot);
3671         stup->tuple = (void *) tuple;
3672         USEMEM(state, GetMemoryChunkSpace(tuple));
3673         /* set up first-column key value */
3674         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3675         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3676         original = heap_getattr(&htup,
3677                                                         state->sortKeys[0].ssup_attno,
3678                                                         state->tupDesc,
3679                                                         &stup->isnull1);
3680
3681         MemoryContextSwitchTo(oldcontext);
3682
3683         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3684         {
3685                 /*
3686                  * Store the ordinary Datum representation, or the NULL value.  If
3687                  * there is a converter it won't expect NULL values, and the cost
3688                  * model need not account for NULLs, so in that case we skip the
3689                  * converter and just store the zeroed representation that a NULL
3690                  * attribute produces (keeping datum1 consistent, and supporting
3691                  * cheap inequality tests for NULL abbreviated keys).
3692                  */
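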
3693                 stup->datum1 = original;
3694         }
3695         else if (!consider_abort_common(state))
3696         {
3697                 /* Store abbreviated key representation */
3698                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3699                                                                                                                  state->sortKeys);
3700         }
3701         else
3702         {
3703                 /* Abort abbreviation */
3704                 int                     i;
3705
3706                 stup->datum1 = original;
3707
3708                 /*
3709                  * Set state to be consistent with never trying abbreviation.
3710                  *
3711                  * Alter datum1 representation in already-copied tuples, so as to
3712                  * ensure a consistent representation (current tuple was just
3713                  * handled).  It does not matter if some dumped tuples are already
3714                  * sorted on tape, since serialized tuples lack abbreviated keys
3715                  * (TSS_BUILDRUNS state prevents control reaching here in any case).
3716                  */
3717                 for (i = 0; i < state->memtupcount; i++)
3718                 {
3719                         SortTuple  *mtup = &state->memtuples[i];
3720
3721                         htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
3722                                 MINIMAL_TUPLE_OFFSET;
3723                         htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
3724                                                                                          MINIMAL_TUPLE_OFFSET);
3725
3726                         mtup->datum1 = heap_getattr(&htup,
3727                                                                                 state->sortKeys[0].ssup_attno,
3728                                                                                 state->tupDesc,
3729                                                                                 &mtup->isnull1);
3730                 }
3731         }
3732 }
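
     /*
      * Note on the abort path above (added explanation): once
      * consider_abort_common() reports that abbreviation is not paying off,
      * datum1 of every tuple already in memory must be switched back from the
      * abbreviated form to the plain first-column value, or later comparisons
      * would mix the two representations.  Conceptually:
      *
      *     for each memtuples[i]:
      *         memtuples[i].datum1 = leading column of memtuples[i].tuple
      *
      * which is what the loop at the end of this function does.
      */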
3733
3734 static void
3735 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
3736 {
3737         MinimalTuple tuple = (MinimalTuple) stup->tuple;
3738
3739         /* the part of the MinimalTuple we'll write: */
3740         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3741         unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
3742
3743         /* total on-disk footprint: */
3744         unsigned int tuplen = tupbodylen + sizeof(int);
3745
3746         LogicalTapeWrite(state->tapeset, tapenum,
3747                                          (void *) &tuplen, sizeof(tuplen));
3748         LogicalTapeWrite(state->tapeset, tapenum,
3749                                          (void *) tupbody, tupbodylen);
3750         if (state->randomAccess)        /* need trailing length word? */
3751                 LogicalTapeWrite(state->tapeset, tapenum,
3752                                                  (void *) &tuplen, sizeof(tuplen));
3753
3754         if (!state->slabAllocatorUsed)
3755         {
3756                 FREEMEM(state, GetMemoryChunkSpace(tuple));
3757                 heap_free_minimal_tuple(tuple);
3758         }
3759 }
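
     /*
      * On-tape layout produced above (illustrative diagram):
      *
      *     +-----------------+----------------------------+------------------+
      *     | tuplen (uint)   | tuple body (tupbodylen B)  | tuplen, optional |
      *     +-----------------+----------------------------+------------------+
      *
      * "tuplen" counts the body plus the leading length word itself, and the
      * body omits the first MINIMAL_TUPLE_DATA_OFFSET bytes of the
      * MinimalTuple.  The trailing copy of the length is written only for
      * randomAccess sorts, so the tuple can also be found when the tape is
      * read backwards.
      */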
3760
3761 static void
3762 readtup_heap(Tuplesortstate *state, SortTuple *stup,
3763                          int tapenum, unsigned int len)
3764 {
3765         unsigned int tupbodylen = len - sizeof(int);
3766         unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
3767         MinimalTuple tuple = (MinimalTuple) readtup_alloc(state, tuplen);
3768         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3769         HeapTupleData htup;
3770
3771         /* read in the tuple proper */
3772         tuple->t_len = tuplen;
3773         LogicalTapeReadExact(state->tapeset, tapenum,
3774                                                  tupbody, tupbodylen);
3775         if (state->randomAccess)        /* need trailing length word? */
3776                 LogicalTapeReadExact(state->tapeset, tapenum,
3777                                                          &tuplen, sizeof(tuplen));
3778         stup->tuple = (void *) tuple;
3779         /* set up first-column key value */
3780         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3781         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3782         stup->datum1 = heap_getattr(&htup,
3783                                                                 state->sortKeys[0].ssup_attno,
3784                                                                 state->tupDesc,
3785                                                                 &stup->isnull1);
3786 }
3787
3788 /*
3789  * Routines specialized for the CLUSTER case (HeapTuple data, with
3790  * comparisons per a btree index definition)
3791  */
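
     /*
      * Added note: these routines are installed by tuplesort_begin_cluster().
      * Unlike the plain heap case, SortTuple.datum1 is a valid cache of the
      * leading sort key only when the index's first column is a simple heap
      * attribute (ii_KeyAttrNumbers[0] != 0); for an expression index it is
      * left unset and every comparison evaluates the index expressions.
      */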
3792
3793 static int
3794 comparetup_cluster(const SortTuple *a, const SortTuple *b,
3795                                    Tuplesortstate *state)
3796 {
3797         SortSupport sortKey = state->sortKeys;
3798         HeapTuple       ltup;
3799         HeapTuple       rtup;
3800         TupleDesc       tupDesc;
3801         int                     nkey;
3802         int32           compare;
3803         Datum           datum1,
3804                                 datum2;
3805         bool            isnull1,
3806                                 isnull2;
3807         AttrNumber      leading = state->indexInfo->ii_KeyAttrNumbers[0];
3808
3809         /* Be prepared to compare additional sort keys */
3810         ltup = (HeapTuple) a->tuple;
3811         rtup = (HeapTuple) b->tuple;
3812         tupDesc = state->tupDesc;
3813
3814         /* Compare the leading sort key, if it's simple */
3815         if (leading != 0)
3816         {
3817                 compare = ApplySortComparator(a->datum1, a->isnull1,
3818                                                                           b->datum1, b->isnull1,
3819                                                                           sortKey);
3820                 if (compare != 0)
3821                         return compare;
3822
3823                 if (sortKey->abbrev_converter)
3824                 {
3825                         datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
3826                         datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
3827
3828                         compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3829                                                                                                         datum2, isnull2,
3830                                                                                                         sortKey);
3831                 }
3832                 if (compare != 0 || state->nKeys == 1)
3833                         return compare;
3834                 /* Compare additional columns the hard way */
3835                 sortKey++;
3836                 nkey = 1;
3837         }
3838         else
3839         {
3840                 /* Must compare all keys the hard way */
3841                 nkey = 0;
3842         }
3843
3844         if (state->indexInfo->ii_Expressions == NULL)
3845         {
3846                 /* If not expression index, just compare the proper heap attrs */
3847
3848                 for (; nkey < state->nKeys; nkey++, sortKey++)
3849                 {
3850                         AttrNumber      attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
3851
3852                         datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
3853                         datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
3854
3855                         compare = ApplySortComparator(datum1, isnull1,
3856                                                                                   datum2, isnull2,
3857                                                                                   sortKey);
3858                         if (compare != 0)
3859                                 return compare;
3860                 }
3861         }
3862         else
3863         {
3864                 /*
3865                  * In the expression index case, compute the whole index tuple and
3866                  * then compare values.  It would perhaps be faster to compute only as
3867                  * many columns as we need to compare, but that would require
3868                  * duplicating all the logic in FormIndexDatum.
3869                  */
3870                 Datum           l_index_values[INDEX_MAX_KEYS];
3871                 bool            l_index_isnull[INDEX_MAX_KEYS];
3872                 Datum           r_index_values[INDEX_MAX_KEYS];
3873                 bool            r_index_isnull[INDEX_MAX_KEYS];
3874                 TupleTableSlot *ecxt_scantuple;
3875
3876                 /* Reset context each time to prevent memory leakage */
3877                 ResetPerTupleExprContext(state->estate);
3878
3879                 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
3880
3881                 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
3882                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3883                                            l_index_values, l_index_isnull);
3884
3885                 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
3886                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3887                                            r_index_values, r_index_isnull);
3888
3889                 for (; nkey < state->nKeys; nkey++, sortKey++)
3890                 {
3891                         compare = ApplySortComparator(l_index_values[nkey],
3892                                                                                   l_index_isnull[nkey],
3893                                                                                   r_index_values[nkey],
3894                                                                                   r_index_isnull[nkey],
3895                                                                                   sortKey);
3896                         if (compare != 0)
3897                                 return compare;
3898                 }
3899         }
3900
3901         return 0;
3902 }
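
     /*
      * Illustrative example (schema invented for the sketch): clustering on
      * an expression index, e.g.
      *
      *     CREATE INDEX t_lower_idx ON t (lower(name));
      *     CLUSTER t USING t_lower_idx;
      *
      * takes the ii_Expressions branch above: each heap tuple is stored into
      * the per-tuple slot, FormIndexDatum() computes lower(name) for it, and
      * the resulting index values are compared column by column.
      */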
3903
3904 static void
3905 copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
3906 {
3907         HeapTuple       tuple = (HeapTuple) tup;
3908         Datum           original;
3909         MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
3910
3911         /* copy the tuple into sort storage */
3912         tuple = heap_copytuple(tuple);
3913         stup->tuple = (void *) tuple;
3914         USEMEM(state, GetMemoryChunkSpace(tuple));
3915
3916         MemoryContextSwitchTo(oldcontext);
3917
3918         /*
3919          * set up first-column key value, and potentially abbreviate, if it's a
3920          * simple column
3921          */
3922         if (state->indexInfo->ii_KeyAttrNumbers[0] == 0)
3923                 return;
3924
3925         original = heap_getattr(tuple,
3926                                                         state->indexInfo->ii_KeyAttrNumbers[0],
3927                                                         state->tupDesc,
3928                                                         &stup->isnull1);
3929
3930         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3931         {
3932                 /*
3933                  * Store the ordinary Datum representation, or the NULL value.  If
3934                  * there is a converter it won't expect NULL values, and the cost
3935                  * model need not account for NULLs, so in that case we skip the
3936                  * converter and just store the zeroed representation that a NULL
3937                  * attribute produces (keeping datum1 consistent, and supporting
3938                  * cheap inequality tests for NULL abbreviated keys).
3939                  */
3940                 stup->datum1 = original;
3941         }
3942         else if (!consider_abort_common(state))
3943         {
3944                 /* Store abbreviated key representation */
3945                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3946                                                                                                                  state->sortKeys);
3947         }
3948         else
3949         {
3950                 /* Abort abbreviation */
3951                 int                     i;
3952
3953                 stup->datum1 = original;
3954
3955                 /*
3956                  * Set state to be consistent with never trying abbreviation.
3957                  *
3958                  * Alter datum1 representation in already-copied tuples, so as to
3959                  * ensure a consistent representation (current tuple was just
3960                  * handled).  It does not matter if some dumped tuples are already
3961                  * sorted on tape, since serialized tuples lack abbreviated keys
3962                  * (TSS_BUILDRUNS state prevents control reaching here in any case).
3963                  */
3964                 for (i = 0; i < state->memtupcount; i++)
3965                 {
3966                         SortTuple  *mtup = &state->memtuples[i];
3967
3968                         tuple = (HeapTuple) mtup->tuple;
3969                         mtup->datum1 = heap_getattr(tuple,
3970                                                                           state->indexInfo->ii_KeyAttrNumbers[0],
3971                                                                                 state->tupDesc,
3972                                                                                 &mtup->isnull1);
3973                 }
3974         }
3975 }
3976
3977 static void
3978 writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
3979 {
3980         HeapTuple       tuple = (HeapTuple) stup->tuple;
3981         unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
3982
3983         /* We need to store t_self, but not other fields of HeapTupleData */
3984         LogicalTapeWrite(state->tapeset, tapenum,
3985                                          &tuplen, sizeof(tuplen));
3986         LogicalTapeWrite(state->tapeset, tapenum,
3987                                          &tuple->t_self, sizeof(ItemPointerData));
3988         LogicalTapeWrite(state->tapeset, tapenum,
3989                                          tuple->t_data, tuple->t_len);
3990         if (state->randomAccess)        /* need trailing length word? */
3991                 LogicalTapeWrite(state->tapeset, tapenum,
3992                                                  &tuplen, sizeof(tuplen));
3993
3994         if (!state->slabAllocatorUsed)
3995         {
3996                 FREEMEM(state, GetMemoryChunkSpace(tuple));
3997                 heap_freetuple(tuple);
3998         }
3999 }
4000
4001 static void
4002 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
4003                                 int tapenum, unsigned int tuplen)
4004 {
4005         unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
4006         HeapTuple       tuple = (HeapTuple) readtup_alloc(state,
4007                                                                                                   t_len + HEAPTUPLESIZE);
4008
4009         /* Reconstruct the HeapTupleData header */
4010         tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
4011         tuple->t_len = t_len;
4012         LogicalTapeReadExact(state->tapeset, tapenum,
4013                                                  &tuple->t_self, sizeof(ItemPointerData));
4014         /* We don't currently bother to reconstruct t_tableOid */
4015         tuple->t_tableOid = InvalidOid;
4016         /* Read in the tuple body */
4017         LogicalTapeReadExact(state->tapeset, tapenum,
4018                                                  tuple->t_data, tuple->t_len);
4019         if (state->randomAccess)        /* need trailing length word? */
4020                 LogicalTapeReadExact(state->tapeset, tapenum,
4021                                                          &tuplen, sizeof(tuplen));
4022         stup->tuple = (void *) tuple;
4023         /* set up first-column key value, if it's a simple column */
4024         if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
4025                 stup->datum1 = heap_getattr(tuple,
4026                                                                         state->indexInfo->ii_KeyAttrNumbers[0],
4027                                                                         state->tupDesc,
4028                                                                         &stup->isnull1);
4029 }
4030
4031 /*
4032  * Routines specialized for IndexTuple case
4033  *
4034  * The btree and hash cases require separate comparison functions, but the
4035  * IndexTuple representation is the same so the copy/write/read support
4036  * functions can be shared.
4037  */
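
     /*
      * Added note: comparetup_index_btree and comparetup_index_hash are
      * paired with the shared copytup_index/writetup_index/readtup_index
      * below; the btree variant is installed by tuplesort_begin_index_btree()
      * and the hash variant by tuplesort_begin_index_hash().
      */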
4038
4039 static int
4040 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
4041                                            Tuplesortstate *state)
4042 {
4043         /*
4044          * This is similar to comparetup_heap(), but expects index tuples.  There
4045          * is also special handling for enforcing uniqueness, and special
4046          * treatment for equal keys at the end.
4047          */
4048         SortSupport sortKey = state->sortKeys;
4049         IndexTuple      tuple1;
4050         IndexTuple      tuple2;
4051         int                     keysz;
4052         TupleDesc       tupDes;
4053         bool            equal_hasnull = false;
4054         int                     nkey;
4055         int32           compare;
4056         Datum           datum1,
4057                                 datum2;
4058         bool            isnull1,
4059                                 isnull2;
4060
4061
4062         /* Compare the leading sort key */
4063         compare = ApplySortComparator(a->datum1, a->isnull1,
4064                                                                   b->datum1, b->isnull1,
4065                                                                   sortKey);
4066         if (compare != 0)
4067                 return compare;
4068
4069         /* Compare additional sort keys */
4070         tuple1 = (IndexTuple) a->tuple;
4071         tuple2 = (IndexTuple) b->tuple;
4072         keysz = state->nKeys;
4073         tupDes = RelationGetDescr(state->indexRel);
4074
4075         if (sortKey->abbrev_converter)
4076         {
4077                 datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
4078                 datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
4079
4080                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
4081                                                                                                 datum2, isnull2,
4082                                                                                                 sortKey);
4083                 if (compare != 0)
4084                         return compare;
4085         }
4086
4087         /* they are equal, so we only need to examine one null flag */
4088         if (a->isnull1)
4089                 equal_hasnull = true;
4090
4091         sortKey++;
4092         for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
4093         {
4094                 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
4095                 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
4096
4097                 compare = ApplySortComparator(datum1, isnull1,
4098                                                                           datum2, isnull2,
4099                                                                           sortKey);
4100                 if (compare != 0)
4101                         return compare;         /* done when we find unequal attributes */
4102
4103                 /* they are equal, so we only need to examine one null flag */
4104                 if (isnull1)
4105                         equal_hasnull = true;
4106         }
4107
4108         /*
4109          * If btree has asked us to enforce uniqueness, complain if two equal
4110          * tuples are detected (unless there was at least one NULL field).
4111          *
4112          * It is sufficient to make the test here, because if two tuples are equal
4113          * they *must* get compared at some stage of the sort --- otherwise the
4114          * sort algorithm wouldn't have checked whether one must appear before the
4115          * other.
4116          */
4117         if (state->enforceUnique && !equal_hasnull)
4118         {
4119                 Datum           values[INDEX_MAX_KEYS];
4120                 bool            isnull[INDEX_MAX_KEYS];
4121                 char       *key_desc;
4122
4123                 /*
4124                  * Some rather brain-dead implementations of qsort (such as the one in
4125                  * QNX 4) will sometimes call the comparison routine to compare a
4126                  * value to itself, but we always use our own implementation, which
4127                  * does not.
4128                  */
4129                 Assert(tuple1 != tuple2);
4130
4131                 index_deform_tuple(tuple1, tupDes, values, isnull);
4132
4133                 key_desc = BuildIndexValueDescription(state->indexRel, values, isnull);
4134
4135                 ereport(ERROR,
4136                                 (errcode(ERRCODE_UNIQUE_VIOLATION),
4137                                  errmsg("could not create unique index \"%s\"",
4138                                                 RelationGetRelationName(state->indexRel)),
4139                                  key_desc ? errdetail("Key %s is duplicated.", key_desc) :
4140                                  errdetail("Duplicate keys exist."),
4141                                  errtableconstraint(state->heapRel,
4142                                                                  RelationGetRelationName(state->indexRel))));
4143         }
4144
4145         /*
4146          * If key values are equal, we sort on ItemPointer.  This does not affect
4147          * validity of the finished index, but it may be useful to have index
4148          * scans in physical order.
4149          */
4150         {
4151                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
4152                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
4153
4154                 if (blk1 != blk2)
4155                         return (blk1 < blk2) ? -1 : 1;
4156         }
4157         {
4158                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
4159                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
4160
4161                 if (pos1 != pos2)
4162                         return (pos1 < pos2) ? -1 : 1;
4163         }
4164
4165         return 0;
4166 }
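
     /*
      * Worked tiebreak example (illustrative): when all key columns are equal
      * (and uniqueness is not being enforced), tuples are ordered by heap
      * TID, block number first and offset second, e.g.
      *
      *     (block 5, offset 3) < (block 5, offset 7) < (block 6, offset 1)
      *
      * so equal keys end up in physical heap order in the finished index.
      */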
4167
4168 static int
4169 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
4170                                           Tuplesortstate *state)
4171 {
4172         uint32          hash1;
4173         uint32          hash2;
4174         IndexTuple      tuple1;
4175         IndexTuple      tuple2;
4176
4177         /*
4178          * Fetch hash keys and mask off bits we don't want to sort by. We know
4179          * that the first column of the index tuple is the hash key.
4180          */
4181         Assert(!a->isnull1);
4182         hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
4183         Assert(!b->isnull1);
4184         hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
4185
4186         if (hash1 > hash2)
4187                 return 1;
4188         else if (hash1 < hash2)
4189                 return -1;
4190
4191         /*
4192          * If hash values are equal, we sort on ItemPointer.  This does not affect
4193          * validity of the finished index, but it may be useful to have index
4194          * scans in physical order.
4195          */
4196         tuple1 = (IndexTuple) a->tuple;
4197         tuple2 = (IndexTuple) b->tuple;
4198
4199         {
4200                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
4201                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
4202
4203                 if (blk1 != blk2)
4204                         return (blk1 < blk2) ? -1 : 1;
4205         }
4206         {
4207                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
4208                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
4209
4210                 if (pos1 != pos2)
4211                         return (pos1 < pos2) ? -1 : 1;
4212         }
4213
4214         return 0;
4215 }
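
     /*
      * Illustrative example (mask value invented): hash_mask is supplied by
      * the hash index build so that the masked hash roughly identifies the
      * tuple's eventual bucket.  With hash_mask = 0x7, hashes 0x12345678 and
      * 0xABCDEFF8 both mask to 0, so those tuples sort next to each other and
      * can be written into the same bucket sequentially.
      */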
4216
4217 static void
4218 copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
4219 {
4220         IndexTuple      tuple = (IndexTuple) tup;
4221         unsigned int tuplen = IndexTupleSize(tuple);
4222         IndexTuple      newtuple;
4223         Datum           original;
4224
4225         /* copy the tuple into sort storage */
4226         newtuple = (IndexTuple) MemoryContextAlloc(state->tuplecontext, tuplen);
4227         memcpy(newtuple, tuple, tuplen);
4228         USEMEM(state, GetMemoryChunkSpace(newtuple));
4229         stup->tuple = (void *) newtuple;
4230         /* set up first-column key value */
4231         original = index_getattr(newtuple,
4232                                                          1,
4233                                                          RelationGetDescr(state->indexRel),
4234                                                          &stup->isnull1);
4235
4236         if (!state->sortKeys->abbrev_converter || stup->isnull1)
4237         {
4238                 /*
4239                  * Store the ordinary Datum representation, or the NULL value.  If
4240                  * there is a converter it won't expect NULL values, and the cost
4241                  * model need not account for NULLs, so in that case we skip the
4242                  * converter and just store the zeroed representation that a NULL
4243                  * attribute produces (keeping datum1 consistent, and supporting
4244                  * cheap inequality tests for NULL abbreviated keys).
4245                  */
4246                 stup->datum1 = original;
4247         }
4248         else if (!consider_abort_common(state))
4249         {
4250                 /* Store abbreviated key representation */
4251                 stup->datum1 = state->sortKeys->abbrev_converter(original,
4252                                                                                                                  state->sortKeys);
4253         }
4254         else
4255         {
4256                 /* Abort abbreviation */
4257                 int                     i;
4258
4259                 stup->datum1 = original;
4260
4261                 /*
4262                  * Set state to be consistent with never trying abbreviation.
4263                  *
4264                  * Alter datum1 representation in already-copied tuples, so as to
4265                  * ensure a consistent representation (current tuple was just
4266                  * handled).  It does not matter if some dumped tuples are already
4267                  * sorted on tape, since serialized tuples lack abbreviated keys
4268                  * (TSS_BUILDRUNS state prevents control reaching here in any case).
4269                  */
4270                 for (i = 0; i < state->memtupcount; i++)
4271                 {
4272                         SortTuple  *mtup = &state->memtuples[i];
4273
4274                         tuple = (IndexTuple) mtup->tuple;
4275                         mtup->datum1 = index_getattr(tuple,
4276                                                                                  1,
4277                                                                                  RelationGetDescr(state->indexRel),
4278                                                                                  &mtup->isnull1);
4279                 }
4280         }
4281 }
4282
4283 static void
4284 writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
4285 {
4286         IndexTuple      tuple = (IndexTuple) stup->tuple;
4287         unsigned int tuplen;
4288
4289         tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
4290         LogicalTapeWrite(state->tapeset, tapenum,
4291                                          (void *) &tuplen, sizeof(tuplen));
4292         LogicalTapeWrite(state->tapeset, tapenum,
4293                                          (void *) tuple, IndexTupleSize(tuple));
4294         if (state->randomAccess)        /* need trailing length word? */
4295                 LogicalTapeWrite(state->tapeset, tapenum,
4296                                                  (void *) &tuplen, sizeof(tuplen));
4297
4298         if (!state->slabAllocatorUsed)
4299         {
4300                 FREEMEM(state, GetMemoryChunkSpace(tuple));
4301                 pfree(tuple);
4302         }
4303 }
4304
4305 static void
4306 readtup_index(Tuplesortstate *state, SortTuple *stup,
4307                           int tapenum, unsigned int len)
4308 {
4309         unsigned int tuplen = len - sizeof(unsigned int);
4310         IndexTuple      tuple = (IndexTuple) readtup_alloc(state, tuplen);
4311
4312         LogicalTapeReadExact(state->tapeset, tapenum,
4313                                                  tuple, tuplen);
4314         if (state->randomAccess)        /* need trailing length word? */
4315                 LogicalTapeReadExact(state->tapeset, tapenum,
4316                                                          &tuplen, sizeof(tuplen));
4317         stup->tuple = (void *) tuple;
4318         /* set up first-column key value */
4319         stup->datum1 = index_getattr(tuple,
4320                                                                  1,
4321                                                                  RelationGetDescr(state->indexRel),
4322                                                                  &stup->isnull1);
4323 }
4324
4325 /*
4326  * Routines specialized for DatumTuple case
4327  */
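
     /*
      * Added note: in the datum-sort case there is no tuple proper.  For
      * pass-by-value types (state->tuples is false) the value lives entirely
      * in SortTuple.datum1 and SortTuple.tuple stays NULL; for pass-by-
      * reference types, tuple points at a palloc'd copy of the value and
      * datum1 holds either that pointer or its abbreviation.
      */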
4328
4329 static int
4330 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
4331 {
4332         int                     compare;
4333
4334         compare = ApplySortComparator(a->datum1, a->isnull1,
4335                                                                   b->datum1, b->isnull1,
4336                                                                   state->sortKeys);
4337         if (compare != 0)
4338                 return compare;
4339
4340         /* if we have abbreviations, then "tuple" has the original value */
4341
4342         if (state->sortKeys->abbrev_converter)
4343                 compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
4344                                                                            PointerGetDatum(b->tuple), b->isnull1,
4345                                                                                                 state->sortKeys);
4346
4347         return compare;
4348 }
4349
4350 static void
4351 copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
4352 {
4353         /* Not currently needed */
4354         elog(ERROR, "copytup_datum() should not be called");
4355 }
4356
4357 static void
4358 writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
4359 {
4360         void       *waddr;
4361         unsigned int tuplen;
4362         unsigned int writtenlen;
4363
4364         if (stup->isnull1)
4365         {
4366                 waddr = NULL;
4367                 tuplen = 0;
4368         }
4369         else if (!state->tuples)
4370         {
4371                 waddr = &stup->datum1;
4372                 tuplen = sizeof(Datum);
4373         }
4374         else
4375         {
4376                 waddr = stup->tuple;
4377                 tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, state->datumTypeLen);
4378                 Assert(tuplen != 0);
4379         }
4380
4381         writtenlen = tuplen + sizeof(unsigned int);
4382
4383         LogicalTapeWrite(state->tapeset, tapenum,
4384                                          (void *) &writtenlen, sizeof(writtenlen));
4385         LogicalTapeWrite(state->tapeset, tapenum,
4386                                          waddr, tuplen);
4387         if (state->randomAccess)        /* need trailing length word? */
4388                 LogicalTapeWrite(state->tapeset, tapenum,
4389                                                  (void *) &writtenlen, sizeof(writtenlen));
4390
4391         if (!state->slabAllocatorUsed && stup->tuple)
4392         {
4393                 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
4394                 pfree(stup->tuple);
4395         }
4396 }
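
     /*
      * Size example (illustrative, assuming 8-byte Datums): a NULL is written
      * as nothing but a length word of sizeof(unsigned int) bytes; a
      * pass-by-value datum is written as the raw Datum, giving writtenlen =
      * 8 + 4 = 12; a pass-by-reference value is written with its
      * datumGetSize() footprint instead.  readtup_datum() below reverses this
      * encoding exactly.
      */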
4397
4398 static void
4399 readtup_datum(Tuplesortstate *state, SortTuple *stup,
4400                           int tapenum, unsigned int len)
4401 {
4402         unsigned int tuplen = len - sizeof(unsigned int);
4403
4404         if (tuplen == 0)
4405         {
4406                 /* it's NULL */
4407                 stup->datum1 = (Datum) 0;
4408                 stup->isnull1 = true;
4409                 stup->tuple = NULL;
4410         }
4411         else if (!state->tuples)
4412         {
4413                 Assert(tuplen == sizeof(Datum));
4414                 LogicalTapeReadExact(state->tapeset, tapenum,
4415                                                          &stup->datum1, tuplen);
4416                 stup->isnull1 = false;
4417                 stup->tuple = NULL;
4418         }
4419         else
4420         {
4421                 void       *raddr = readtup_alloc(state, tuplen);
4422
4423                 LogicalTapeReadExact(state->tapeset, tapenum,
4424                                                          raddr, tuplen);
4425                 stup->datum1 = PointerGetDatum(raddr);
4426                 stup->isnull1 = false;
4427                 stup->tuple = raddr;
4428         }
4429
4430         if (state->randomAccess)        /* need trailing length word? */
4431                 LogicalTapeReadExact(state->tapeset, tapenum,
4432                                                          &tuplen, sizeof(tuplen));
4433 }
4434
4435 /*
4436  * Convenience routine to free a tuple previously loaded into sort memory
4437  */
4438 static void
4439 free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
4440 {
4441         FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
4442         pfree(stup->tuple);
4443 }