1 /*-------------------------------------------------------------------------
2  *
3  * tuplesort.c
4  *        Generalized tuple sorting routines.
5  *
6  * This module handles sorting of heap tuples, index tuples, or single
7  * Datums (and could easily support other kinds of sortable objects,
8  * if necessary).  It works efficiently for both small and large amounts
9  * of data.  Small amounts are sorted in-memory using qsort().  Large
10  * amounts are sorted using temporary files and a standard external sort
11  * algorithm.
12  *
13  * See Knuth, volume 3, for more than you want to know about the external
14  * sorting algorithm.  Historically, we divided the input into sorted runs
15  * using replacement selection, in the form of a priority tree implemented
16  * as a heap (essentially his Algorithm 5.2.3H -- although that strategy is
17  * often avoided altogether), but that can now only happen for the first
18  * run.  We merge the runs using polyphase merge, Knuth's Algorithm
19  * 5.4.2D.  The logical "tapes" used by Algorithm D are implemented by
20  * logtape.c, which avoids space wastage by recycling disk space as soon
21  * as each block is read from its "tape".
22  *
23  * We never form the initial runs using Knuth's recommended replacement
24  * selection data structure (Algorithm 5.4.1R), because it uses a fixed
25  * number of records in memory at all times.  Since we are dealing with
26  * tuples that may vary considerably in size, we want to be able to vary
27  * the number of records kept in memory to ensure full utilization of the
28  * allowed sort memory space.  So, we keep the tuples in a variable-size
29  * heap, with the next record to go out at the top of the heap.  Like
30  * Algorithm 5.4.1R, each record is stored with the run number that it
31  * must go into, and we use (run number, key) as the ordering key for the
32  * heap.  When the run number at the top of the heap changes, we know that
33  * no more records of the prior run are left in the heap.  Note that there
34  * are in practice only ever two distinct run numbers, due to the greatly
35  * reduced use of replacement selection in PostgreSQL 9.6.
36  *
37  * In PostgreSQL 9.6, a heap (based on Knuth's Algorithm H, with some small
38  * customizations) is only used with the aim of producing just one run,
39  * thereby avoiding all merging.  Only the first run can use replacement
40  * selection, which is why there are now only two possible valid run
41  * numbers, and why heapification is customized to not distinguish between
42  * tuples in the second run (those will be quicksorted).  We generally
43  * prefer a simple hybrid sort-merge strategy, where runs are sorted in much
44  * the same way as the entire input of an internal sort is sorted (using
45  * qsort()).  The replacement_sort_tuples GUC controls the limited remaining
46  * use of replacement selection for the first run.
47  *
48  * There are several reasons to favor a hybrid sort-merge strategy.
49  * Maintaining a priority tree/heap has poor CPU cache characteristics.
50  * Furthermore, the growth in main memory sizes has greatly diminished the
51  * value of having runs that are larger than available memory, even in the
52  * case where there is partially sorted input and runs can be made far
53  * larger by using a heap.  In most cases, a single-pass merge step is all
54  * that is required even when runs are no larger than available memory.
55  * Avoiding multiple merge passes was traditionally considered to be the
56  * major advantage of using replacement selection.
57  *
58  * The approximate amount of memory allowed for any one sort operation
59  * is specified in kilobytes by the caller (most pass work_mem).  Initially,
60  * we absorb tuples and simply store them in an unsorted array as long as
61  * we haven't exceeded workMem.  If we reach the end of the input without
62  * exceeding workMem, we sort the array using qsort() and subsequently return
63  * tuples just by scanning the tuple array sequentially.  If we do exceed
64  * workMem, we begin to emit tuples into sorted runs in temporary tapes.
65  * When tuples are dumped in batch after quicksorting, we begin a new run
66  * with a new output tape (selected per Algorithm D).  After the end of the
67  * input is reached, we dump out remaining tuples in memory into a final run
68  * (or two, when replacement selection is still used), then merge the runs
69  * using Algorithm D.
70  *
71  * When merging runs, we use a heap containing just the frontmost tuple from
72  * each source run; we repeatedly output the smallest tuple and insert the
73  * next tuple from its source tape (if any).  When the heap empties, the merge
74  * is complete.  The basic merge algorithm thus needs very little memory ---
75  * only M tuples for an M-way merge, and M is constrained to a small number.
76  * However, we can still make good use of our full workMem allocation by
77  * pre-reading additional tuples from each source tape.  Without prereading,
78  * our access pattern to the temporary file would be very erratic; on average
79  * we'd read one block from each of M source tapes during the same time that
80  * we're writing M blocks to the output tape, so there is no sequentiality of
81  * access at all, defeating the read-ahead methods used by most Unix kernels.
82  * Worse, the output tape gets written into a very random sequence of blocks
83  * of the temp file, ensuring that things will be even worse when it comes
84  * time to read that tape.  A straightforward merge pass thus ends up doing a
85  * lot of waiting for disk seeks.  We can improve matters by prereading from
86  * each source tape sequentially, loading about workMem/M bytes from each tape
87  * in turn.  Then we run the merge algorithm, writing but not reading until
88  * one of the preloaded tuple series runs out.  Then we switch back to preread
89  * mode, fill memory again, and repeat.  This approach helps to localize both
90  * read and write accesses.
91  *
92  * When the caller requests random access to the sort result, we form
93  * the final sorted run on a logical tape which is then "frozen", so
94  * that we can access it randomly.  When the caller does not need random
95  * access, we return from tuplesort_performsort() as soon as we are down
96  * to one run per logical tape.  The final merge is then performed
97  * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
98  * saves one cycle of writing all the data out to disk and reading it in.
99  *
100  * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
101  * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
102  * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
103  * tape drives are expensive beasts, and in particular that there will always
104  * be many more runs than tape drives.  In our implementation a "tape drive"
105  * doesn't cost much more than a few Kb of memory buffers, so we can afford
106  * to have lots of them.  In particular, if we can have as many tape drives
107  * as sorted runs, we can eliminate any repeated I/O at all.  In the current
108  * code we determine the number of tapes M on the basis of workMem: we want
109  * workMem/M to be large enough that we read a fair amount of data each time
110  * we preread from a tape, so as to maintain the locality of access described
111  * above.  Nonetheless, with large workMem we can have many tapes.
112  *
113  *
114  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
115  * Portions Copyright (c) 1994, Regents of the University of California
116  *
117  * IDENTIFICATION
118  *        src/backend/utils/sort/tuplesort.c
119  *
120  *-------------------------------------------------------------------------
121  */
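
/*
 * A concrete illustration of the preread sizing discussed above (example
 * numbers only, not taken from the code): with workMem = 64MB and M = 16
 * input tapes, each preread cycle loads roughly workMem/M = 4MB sequentially
 * from a single tape before merging resumes, rather than interleaving single
 * 8KB block reads across all 16 input tapes and the output tape.
 */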
122
123 #include "postgres.h"
124
125 #include <limits.h>
126
127 #include "access/htup_details.h"
128 #include "access/nbtree.h"
129 #include "catalog/index.h"
130 #include "catalog/pg_am.h"
131 #include "commands/tablespace.h"
132 #include "executor/executor.h"
133 #include "miscadmin.h"
134 #include "pg_trace.h"
135 #include "utils/datum.h"
136 #include "utils/logtape.h"
137 #include "utils/lsyscache.h"
138 #include "utils/memutils.h"
139 #include "utils/pg_rusage.h"
140 #include "utils/rel.h"
141 #include "utils/sortsupport.h"
142 #include "utils/tuplesort.h"
143
144
145 /* sort-type codes for sort__start probes */
146 #define HEAP_SORT               0
147 #define INDEX_SORT              1
148 #define DATUM_SORT              2
149 #define CLUSTER_SORT    3
150
151 /* GUC variables */
152 #ifdef TRACE_SORT
153 bool            trace_sort = false;
154 #endif
155
156 #ifdef DEBUG_BOUNDED_SORT
157 bool            optimize_bounded_sort = true;
158 #endif
159
160
161 /*
162  * The objects we actually sort are SortTuple structs.  These contain
163  * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
164  * which is a separate palloc chunk --- we assume it is just one chunk and
165  * can be freed by a simple pfree() (except during final on-the-fly merge,
166  * when memory is used in batch).  SortTuples also contain the tuple's
167  * first key column in Datum/nullflag format, and an index integer.
168  *
169  * Storing the first key column lets us save heap_getattr or index_getattr
170  * calls during tuple comparisons.  We could extract and save all the key
171  * columns not just the first, but this would increase code complexity and
172  * overhead, and wouldn't actually save any comparison cycles in the common
173  * case where the first key determines the comparison result.  Note that
174  * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
175  *
176  * There is one special case: when the sort support infrastructure provides an
177  * "abbreviated key" representation, where the key is (typically) a pass by
178  * value proxy for a pass by reference type.  In this case, the abbreviated key
179  * is stored in datum1 in place of the actual first key column.
180  *
181  * When sorting single Datums, the data value is represented directly by
182  * datum1/isnull1 for pass by value types (or null values).  If the datatype is
183  * pass-by-reference and isnull1 is false, then "tuple" points to a separately
184  * palloc'd data value, otherwise "tuple" is NULL.  The value of datum1 is then
185  * either the same pointer as "tuple", or is an abbreviated key value as
186  * described above.  Accordingly, "tuple" is always used in preference to
187  * datum1 as the authoritative value for pass-by-reference cases.
188  *
189  * While building initial runs, tupindex holds the tuple's run number.
190  * Historically, the run number could meaningfully distinguish many runs, but
191  * it now only distinguishes RUN_FIRST and HEAP_RUN_NEXT, since replacement
192  * selection is always abandoned after the first run; no other run number
193  * should be represented here.  During merge passes, we re-use it to hold the
194  * input tape number that each tuple in the heap was read from, or to hold the
195  * index of the next tuple pre-read from the same tape in the case of pre-read
196  * entries.  tupindex goes unused if the sort occurs entirely in memory.
197  */
198 typedef struct
199 {
200         void       *tuple;                      /* the tuple itself */
201         Datum           datum1;                 /* value of first key column */
202         bool            isnull1;                /* is first key column NULL? */
203         int                     tupindex;               /* see notes above */
204 } SortTuple;
205
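/*
 * Illustrative sketch (not one of this file's comparetup routines): how the
 * cached datum1/isnull1 fields described above are meant to be used.  The
 * leading-key comparison -- possibly on an abbreviated key -- resolves most
 * comparisons without touching the tuple proper; only on a tie would a real
 * routine deform "tuple" and compare the remaining (and authoritative) key
 * columns.
 */
static int
example_compare_leading_key(const SortTuple *a, const SortTuple *b,
							SortSupport key)
{
	int			compare;

	/* Fast path: compare the cached first key (or its abbreviation) */
	compare = ApplySortComparator(a->datum1, a->isnull1,
								  b->datum1, b->isnull1, key);

	/*
	 * A zero result here would normally fall through to a full comparison of
	 * the stored tuples (heap_getattr/index_getattr on a->tuple and
	 * b->tuple); that part is omitted in this sketch.
	 */
	return compare;
}
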
206
207 /*
208  * Possible states of a Tuplesort object.  These denote the states that
209  * persist between calls of Tuplesort routines.
210  */
211 typedef enum
212 {
213         TSS_INITIAL,                            /* Loading tuples; still within memory limit */
214         TSS_BOUNDED,                            /* Loading tuples into bounded-size heap */
215         TSS_BUILDRUNS,                          /* Loading tuples; writing to tape */
216         TSS_SORTEDINMEM,                        /* Sort completed entirely in memory */
217         TSS_SORTEDONTAPE,                       /* Sort completed, final run is on tape */
218         TSS_FINALMERGE                          /* Performing final merge on-the-fly */
219 } TupSortStatus;
220
221 /*
222  * Parameters for calculation of number of tapes to use --- see inittapes()
223  * and tuplesort_merge_order().
224  *
225  * In this calculation we assume that each tape will cost us about 3 blocks
226  * worth of buffer space (which is an underestimate for very large data
227  * volumes, but it's probably close enough --- see logtape.c).
228  *
229  * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
230  * tape during a preread cycle (see discussion at top of file).
231  */
232 #define MINORDER                6               /* minimum merge order */
233 #define TAPE_BUFFER_OVERHEAD            (BLCKSZ * 3)
234 #define MERGE_BUFFER_SIZE                       (BLCKSZ * 32)
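
/*
 * Back-of-envelope illustration of these parameters (example arithmetic
 * only; the authoritative calculation lives in tuplesort_merge_order()):
 * each input tape is budgeted roughly MERGE_BUFFER_SIZE +
 * TAPE_BUFFER_OVERHEAD = (32 + 3) * BLCKSZ, i.e. about 280KB with 8KB
 * blocks, so workMem = 64MB suggests a merge order of roughly
 * 64MB / 280KB ~= 230 tapes, clamped below by MINORDER.
 */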
235
236  /*
237   * Run numbers, used during external sort operations.
238   *
239   * HEAP_RUN_NEXT is only used for SortTuple.tupindex, never state.currentRun.
240   */
241 #define RUN_FIRST               0
242 #define HEAP_RUN_NEXT   INT_MAX
243 #define RUN_SECOND              1
244
245 typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
246                                                                                                 Tuplesortstate *state);
247
248 /*
249  * Private state of a Tuplesort operation.
250  */
251 struct Tuplesortstate
252 {
253         TupSortStatus status;           /* enumerated value as shown above */
254         int                     nKeys;                  /* number of columns in sort key */
255         bool            randomAccess;   /* did caller request random access? */
256         bool            bounded;                /* did caller specify a maximum number of
257                                                                  * tuples to return? */
258         bool            boundUsed;              /* true if we made use of a bounded heap */
259         int                     bound;                  /* if bounded, the maximum number of tuples */
260         bool            tuples;                 /* Can SortTuple.tuple ever be set? */
261         int64           availMem;               /* remaining memory available, in bytes */
262         int64           allowedMem;             /* total memory allowed, in bytes */
263         int                     maxTapes;               /* number of tapes (Knuth's T) */
264         int                     tapeRange;              /* maxTapes-1 (Knuth's P) */
265         MemoryContext sortcontext;      /* memory context holding most sort data */
266         MemoryContext tuplecontext;     /* sub-context of sortcontext for tuple data */
267         LogicalTapeSet *tapeset;        /* logtape.c object for tapes in a temp file */
268
269         /*
270          * These function pointers decouple the routines that must know what kind
271          * of tuple we are sorting from the routines that don't need to know it.
272          * They are set up by the tuplesort_begin_xxx routines.
273          *
274          * Function to compare two tuples; result is per qsort() convention, ie:
275          * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
276          * qsort_arg_comparator.
277          */
278         SortTupleComparator comparetup;
279
280         /*
281          * Function to copy a supplied input tuple into palloc'd space and set up
282          * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
283          * state->availMem must be decreased by the amount of space used for the
284          * tuple copy (note the SortTuple struct itself is not counted).
285          */
286         void            (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
287
288         /*
289          * Function to write a stored tuple onto tape.  The representation of the
290          * tuple on tape need not be the same as it is in memory; requirements on
291          * the tape representation are given below.  After writing the tuple,
292          * pfree() the out-of-line data (not the SortTuple struct!), and increase
293          * state->availMem by the amount of memory space thereby released.
294          */
295         void            (*writetup) (Tuplesortstate *state, int tapenum,
296                                                                                  SortTuple *stup);
297
298         /*
299          * Function to read a stored tuple from tape back into memory. 'len' is
300          * the already-read length of the stored tuple.  Create a palloc'd copy,
301          * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
302          * decrease state->availMem by the amount of memory space consumed.
303          */
304         void            (*readtup) (Tuplesortstate *state, SortTuple *stup,
305                                                                                 int tapenum, unsigned int len);
306
307         /*
308          * This array holds the tuples now in sort memory.  If we are in state
309          * INITIAL, the tuples are in no particular order; if we are in state
310          * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
311          * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
312          * H.  (Note that memtupcount only counts the tuples that are part of the
313          * heap --- during merge passes, memtuples[] entries beyond tapeRange are
314          * never in the heap and are used to hold pre-read tuples.)  In state
315          * SORTEDONTAPE, the array is not used.
316          */
317         SortTuple  *memtuples;          /* array of SortTuple structs */
318         int                     memtupcount;    /* number of tuples currently present */
319         int                     memtupsize;             /* allocated length of memtuples array */
320         bool            growmemtuples;  /* memtuples' growth still underway? */
321
322         /*
323          * Memory for tuples is sometimes allocated in batch, rather than
324          * incrementally.  This implies that incremental memory accounting has been
325          * abandoned.  Currently, this only happens for the final on-the-fly merge
326          * step.  Large batch allocations can store tuples (e.g. IndexTuples)
327          * without palloc() fragmentation and other overhead.
328          */
329         bool            batchUsed;
330
331         /*
332          * While building initial runs, this indicates if the replacement
333          * selection strategy is in use.  When it isn't, then a simple hybrid
334          * sort-merge strategy is in use instead (runs are quicksorted).
335          */
336         bool            replaceActive;
337
338         /*
339          * While building initial runs, this is the current output run number
340          * (starting at RUN_FIRST).  Afterwards, it is the number of initial
341          * runs we made.
342          */
343         int                     currentRun;
344
345         /*
346          * Unless otherwise noted, all pointer variables below are pointers to
347          * arrays of length maxTapes, holding per-tape data.
348          */
349
350         /*
351          * These variables are only used during merge passes.  mergeactive[i] is
352          * true if we are reading an input run from (actual) tape number i and
353          * have not yet exhausted that run.  mergenext[i] is the memtuples index
354          * of the next pre-read tuple (next to be loaded into the heap) for tape
355          * i, or 0 if we are out of pre-read tuples.  mergelast[i] similarly
356          * points to the last pre-read tuple from each tape.  mergeavailslots[i]
357          * is the number of unused memtuples[] slots reserved for tape i, and
358          * mergeavailmem[i] is the amount of unused space allocated for tape i.
359          * mergefreelist and mergefirstfree keep track of unused locations in the
360          * memtuples[] array.  The memtuples[].tupindex fields link together
361          * pre-read tuples for each tape as well as recycled locations in
362          * mergefreelist. It is OK to use 0 as a null link in these lists, because
363          * memtuples[0] is part of the merge heap and is never a pre-read tuple.
364          */
365         bool       *mergeactive;        /* active input run source? */
366         int                *mergenext;          /* first preread tuple for each source */
367         int                *mergelast;          /* last preread tuple for each source */
368         int                *mergeavailslots;    /* slots left for prereading each tape */
369         int64      *mergeavailmem;      /* availMem for prereading each tape */
370         int                     mergefreelist;  /* head of freelist of recycled slots */
371         int                     mergefirstfree; /* first slot never used in this merge */
372
373         /*
374          * Per-tape batch state, when final on-the-fly merge consumes memory from
375          * just a few large allocations.
376          *
377          * Aside from the general benefits of performing fewer individual retail
378          * palloc() calls, this also helps make merging more cache efficient, since
379          * each tape's tuples must naturally be accessed sequentially (in sorted
380          * order).
381          */
382         int64           spacePerTape;   /* Space (memory) for tuples (not slots) */
383         char      **mergetuples;        /* Each tape's memory allocation */
384         char      **mergecurrent;       /* Current offset into each tape's memory */
385         char      **mergetail;          /* Last item's start point for each tape */
386         char      **mergeoverflow;      /* Retail palloc() "overflow" for each tape */
387
388         /*
389          * Variables for Algorithm D.  Note that destTape is a "logical" tape
390          * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
391          * "logical" and "actual" tape numbers straight!
392          */
393         int                     Level;                  /* Knuth's l */
394         int                     destTape;               /* current output tape (Knuth's j, less 1) */
395         int                *tp_fib;                     /* Target Fibonacci run counts (A[]) */
396         int                *tp_runs;            /* # of real runs on each tape */
397         int                *tp_dummy;           /* # of dummy runs for each tape (D[]) */
398         int                *tp_tapenum;         /* Actual tape numbers (TAPE[]) */
399         int                     activeTapes;    /* # of active input tapes in merge pass */
400
401         /*
402          * These variables are used after completion of sorting to keep track of
403          * the next tuple to return.  (In the tape case, the tape's current read
404          * position is also critical state.)
405          */
406         int                     result_tape;    /* actual tape number of finished output */
407         int                     current;                /* array index (only used if SORTEDINMEM) */
408         bool            eof_reached;    /* reached EOF (needed for cursors) */
409
410         /* markpos_xxx holds marked position for mark and restore */
411         long            markpos_block;  /* tape block# (only used if SORTEDONTAPE) */
412         int                     markpos_offset; /* saved "current", or offset in tape block */
413         bool            markpos_eof;    /* saved "eof_reached" */
414
415         /*
416          * The sortKeys variable is used by every case other than the hash index
417          * case; it is set by tuplesort_begin_xxx.  tupDesc is only used by the
418          * MinimalTuple and CLUSTER routines, though.
419          */
420         TupleDesc       tupDesc;
421         SortSupport sortKeys;           /* array of length nKeys */
422
423         /*
424          * This variable is shared by the single-key MinimalTuple case and the
425          * Datum case (which both use qsort_ssup()).  Otherwise it's NULL.
426          */
427         SortSupport onlyKey;
428
429         /*
430          * Additional state for managing "abbreviated key" sortsupport routines
431          * (which currently may be used by all cases except the hash index case).
432          * Tracks the intervals at which the optimization's effectiveness is
433          * tested.
434          */
435         int64           abbrevNext;             /* Tuple # at which to next check
436                                                                  * applicability */
437
438         /*
439          * These variables are specific to the CLUSTER case; they are set by
440          * tuplesort_begin_cluster.
441          */
442         IndexInfo  *indexInfo;          /* info about index being used for reference */
443         EState     *estate;                     /* for evaluating index expressions */
444
445         /*
446          * These variables are specific to the IndexTuple case; they are set by
447          * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
448          */
449         Relation        heapRel;                /* table the index is being built on */
450         Relation        indexRel;               /* index being built */
451
452         /* These are specific to the index_btree subcase: */
453         bool            enforceUnique;  /* complain if we find duplicate tuples */
454
455         /* These are specific to the index_hash subcase: */
456         uint32          hash_mask;              /* mask for sortable part of hash code */
457
458         /*
459          * These variables are specific to the Datum case; they are set by
460          * tuplesort_begin_datum and used only by the DatumTuple routines.
461          */
462         Oid                     datumType;
463         /* we need typelen in order to know how to copy the Datums. */
464         int                     datumTypeLen;
465
466         /*
467          * Resource snapshot for time of sort start.
468          */
469 #ifdef TRACE_SORT
470         PGRUsage        ru_start;
471 #endif
472 };
473
474 #define COMPARETUP(state,a,b)   ((*(state)->comparetup) (a, b, state))
475 #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
476 #define WRITETUP(state,tape,stup)       ((*(state)->writetup) (state, tape, stup))
477 #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
478 #define LACKMEM(state)          ((state)->availMem < 0 && !(state)->batchUsed)
479 #define USEMEM(state,amt)       ((state)->availMem -= (amt))
480 #define FREEMEM(state,amt)      ((state)->availMem += (amt))
481
482 /*
483  * NOTES about on-tape representation of tuples:
484  *
485  * We require the first "unsigned int" of a stored tuple to be the total size
486  * on-tape of the tuple, including itself (so it is never zero; an all-zero
487  * unsigned int is used to delimit runs).  The remainder of the stored tuple
488  * may or may not match the in-memory representation of the tuple ---
489  * any conversion needed is the job of the writetup and readtup routines.
490  *
491  * If state->randomAccess is true, then the stored representation of the
492  * tuple must be followed by another "unsigned int" that is a copy of the
493  * length --- so the total tape space used is actually sizeof(unsigned int)
494  * more than the stored length value.  This allows read-backwards.  When
495  * randomAccess is not true, the write/read routines may omit the extra
496  * length word.
497  *
498  * writetup is expected to write both length words as well as the tuple
499  * data.  When readtup is called, the tape is positioned just after the
500  * front length word; readtup must read the tuple data and advance past
501  * the back length word (if present).
502  *
503  * The write/read routines can make use of the tuple description data
504  * stored in the Tuplesortstate record, if needed.  They are also expected
505  * to adjust state->availMem by the amount of memory space (not tape space!)
506  * released or consumed.  There is no error return from either writetup
507  * or readtup; they should ereport() on failure.
508  *
509  *
510  * NOTES about memory consumption calculations:
511  *
512  * We count space allocated for tuples against the workMem limit, plus
513  * the space used by the variable-size memtuples array.  Fixed-size space
514  * is not counted; it's small enough to not be interesting.
515  *
516  * Note that we count actual space used (as shown by GetMemoryChunkSpace)
517  * rather than the originally-requested size.  This is important since
518  * palloc can add substantial overhead.  It's not a complete answer since
519  * we won't count any wasted space in palloc allocation blocks, but it's
520  * a lot better than what we were doing before 7.3.  As of 9.6, a
521  * separate memory context is used for caller passed tuples.  Resetting
522  * it at certain key increments significantly ameliorates fragmentation.
523  * Note that this places a responsibility on readtup and copytup routines
524  * to use the right memory context for these tuples (and to not use the
525  * reset context for anything whose lifetime needs to span multiple
526  * external sort runs).
527  */
528
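/*
 * Hedged sketch of the on-tape protocol described above (the real writetup_*
 * routines in this file differ in how they form the tuple payload): write a
 * leading length word that counts itself plus the body, then the body, then
 * -- only when random access was requested -- a trailing copy of the length
 * word so the tape can be read backwards.
 */
static void
example_writetup(Tuplesortstate *state, int tapenum, void *data,
				 unsigned int datalen)
{
	unsigned int tuplen = datalen + sizeof(unsigned int);

	LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum, data, datalen);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
}

/*
 * Companion sketch for the memory-consumption notes: a copytup-style routine
 * charges the palloc'd copy against availMem using the actual chunk size (as
 * reported by GetMemoryChunkSpace), not the originally-requested size.
 */
static void
example_account_copy(Tuplesortstate *state, SortTuple *stup, Size len)
{
	stup->tuple = MemoryContextAlloc(state->tuplecontext, len);
	USEMEM(state, GetMemoryChunkSpace(stup->tuple));
}
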
529 /* When using this macro, beware of double evaluation of len */
530 #define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
531         do { \
532                 if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
533                         elog(ERROR, "unexpected end of data"); \
534         } while(0)
535
536
537 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
538 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
539 static bool consider_abort_common(Tuplesortstate *state);
540 static bool useselection(Tuplesortstate *state);
541 static void inittapes(Tuplesortstate *state);
542 static void selectnewtape(Tuplesortstate *state);
543 static void mergeruns(Tuplesortstate *state);
544 static void mergeonerun(Tuplesortstate *state);
545 static void beginmerge(Tuplesortstate *state, bool finalMerge);
546 static void batchmemtuples(Tuplesortstate *state);
547 static void mergebatch(Tuplesortstate *state, int64 spacePerTape);
548 static void mergebatchone(Tuplesortstate *state, int srcTape,
549                                           SortTuple *stup, bool *should_free);
550 static void mergebatchfreetape(Tuplesortstate *state, int srcTape,
551                                                            SortTuple *rtup, bool *should_free);
552 static void *mergebatchalloc(Tuplesortstate *state, int tapenum, Size tuplen);
553 static void mergepreread(Tuplesortstate *state);
554 static void mergeprereadone(Tuplesortstate *state, int srcTape);
555 static void dumptuples(Tuplesortstate *state, bool alltuples);
556 static void dumpbatch(Tuplesortstate *state, bool alltuples);
557 static void make_bounded_heap(Tuplesortstate *state);
558 static void sort_bounded_heap(Tuplesortstate *state);
559 static void tuplesort_sort_memtuples(Tuplesortstate *state);
560 static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
561                                           int tupleindex, bool checkIndex);
562 static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
563 static void reversedirection(Tuplesortstate *state);
564 static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
565 static void markrunend(Tuplesortstate *state, int tapenum);
566 static void *readtup_alloc(Tuplesortstate *state, int tapenum, Size tuplen);
567 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
568                                 Tuplesortstate *state);
569 static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
570 static void writetup_heap(Tuplesortstate *state, int tapenum,
571                           SortTuple *stup);
572 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
573                          int tapenum, unsigned int len);
574 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
575                                    Tuplesortstate *state);
576 static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
577 static void writetup_cluster(Tuplesortstate *state, int tapenum,
578                                  SortTuple *stup);
579 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
580                                 int tapenum, unsigned int len);
581 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
582                                            Tuplesortstate *state);
583 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
584                                           Tuplesortstate *state);
585 static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
586 static void writetup_index(Tuplesortstate *state, int tapenum,
587                            SortTuple *stup);
588 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
589                           int tapenum, unsigned int len);
590 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
591                                  Tuplesortstate *state);
592 static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
593 static void writetup_datum(Tuplesortstate *state, int tapenum,
594                            SortTuple *stup);
595 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
596                           int tapenum, unsigned int len);
597 static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
598
599 /*
600  * Special versions of qsort just for SortTuple objects.  qsort_tuple() sorts
601  * any variant of SortTuples, using the appropriate comparetup function.
602  * qsort_ssup() is specialized for the case where the comparetup function
603  * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts
604  * and Datum sorts.
605  */
606 #include "qsort_tuple.c"
607
608
609 /*
610  *              tuplesort_begin_xxx
611  *
612  * Initialize for a tuple sort operation.
613  *
614  * After calling tuplesort_begin, the caller should call tuplesort_putXXX
615  * zero or more times, then call tuplesort_performsort when all the tuples
616  * have been supplied.  After performsort, retrieve the tuples in sorted
617  * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
618  * access was requested, rescan, markpos, and restorepos can also be called.)
619  * Call tuplesort_end to terminate the operation and release memory/disk space.
620  *
621  * Each variant of tuplesort_begin has a workMem parameter specifying the
622  * maximum number of kilobytes of RAM to use before spilling data to disk.
623  * (The normal value of this parameter is work_mem, but some callers use
624  * other values.)  Each variant also has a randomAccess parameter specifying
625  * whether the caller needs non-sequential access to the sort result.
626  */
627
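/*
 * Minimal caller-side usage sketch of the sequence described above
 * (hypothetical example, not code used by this file): sort a handful of int4
 * Datums in ascending order.  Assumes INT4OID and Int4LessOperator from
 * catalog/pg_type.h and catalog/pg_operator.h; error handling and memory
 * context discipline are omitted for brevity.
 */
static void
example_datum_sort(void)
{
	Tuplesortstate *sortstate;
	Datum		value;
	bool		isnull;
	int			i;

	sortstate = tuplesort_begin_datum(INT4OID, Int4LessOperator,
									  InvalidOid, /* default collation */
									  false,	  /* nulls last */
									  work_mem, false);

	for (i = 0; i < 100; i++)
		tuplesort_putdatum(sortstate, Int32GetDatum(100 - i), false);

	tuplesort_performsort(sortstate);

	while (tuplesort_getdatum(sortstate, true, &value, &isnull))
		elog(DEBUG1, "next value: %d", DatumGetInt32(value));

	tuplesort_end(sortstate);
}
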
628 static Tuplesortstate *
629 tuplesort_begin_common(int workMem, bool randomAccess)
630 {
631         Tuplesortstate *state;
632         MemoryContext sortcontext;
633         MemoryContext tuplecontext;
634         MemoryContext oldcontext;
635
636         /*
637          * Create a working memory context for this sort operation. All data
638          * needed by the sort will live inside this context.
639          */
640         sortcontext = AllocSetContextCreate(CurrentMemoryContext,
641                                                                                 "TupleSort main",
642                                                                                 ALLOCSET_DEFAULT_MINSIZE,
643                                                                                 ALLOCSET_DEFAULT_INITSIZE,
644                                                                                 ALLOCSET_DEFAULT_MAXSIZE);
645
646         /*
647          * Caller tuple (e.g. IndexTuple) memory context.
648          *
649          * A dedicated child context used exclusively for caller-passed tuples
650          * eases memory management.  Resetting at key points reduces fragmentation.
651          * Note that the memtuples array of SortTuples is allocated in the parent
652          * context, not this context, because there is no need to free memtuples
653          * early.
654          */
655         tuplecontext = AllocSetContextCreate(sortcontext,
656                                                                                  "Caller tuples",
657                                                                                  ALLOCSET_DEFAULT_MINSIZE,
658                                                                                  ALLOCSET_DEFAULT_INITSIZE,
659                                                                                  ALLOCSET_DEFAULT_MAXSIZE);
660
661         /*
662          * Make the Tuplesortstate within the per-sort context.  This way, we
663          * don't need a separate pfree() operation for it at shutdown.
664          */
665         oldcontext = MemoryContextSwitchTo(sortcontext);
666
667         state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
668
669 #ifdef TRACE_SORT
670         if (trace_sort)
671                 pg_rusage_init(&state->ru_start);
672 #endif
673
674         state->status = TSS_INITIAL;
675         state->randomAccess = randomAccess;
676         state->bounded = false;
677         state->tuples = true;
678         state->boundUsed = false;
679         state->allowedMem = workMem * (int64) 1024;
680         state->availMem = state->allowedMem;
681         state->sortcontext = sortcontext;
682         state->tuplecontext = tuplecontext;
683         state->tapeset = NULL;
684
685         state->memtupcount = 0;
686
687         /*
688          * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
689          * see comments in grow_memtuples().
690          */
691         state->memtupsize = Max(1024,
692                                                 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1);
693
694         state->growmemtuples = true;
695         state->batchUsed = false;
696         state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
697
698         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
699
700         /* workMem must be large enough for the minimal memtuples array */
701         if (LACKMEM(state))
702                 elog(ERROR, "insufficient memory allowed for sort");
703
704         state->currentRun = RUN_FIRST;
705
706         /*
707          * maxTapes, tapeRange, and Algorithm D variables will be initialized by
708          * inittapes(), if needed
709          */
710
711         state->result_tape = -1;        /* flag that result tape has not been formed */
712
713         MemoryContextSwitchTo(oldcontext);
714
715         return state;
716 }
717
718 Tuplesortstate *
719 tuplesort_begin_heap(TupleDesc tupDesc,
720                                          int nkeys, AttrNumber *attNums,
721                                          Oid *sortOperators, Oid *sortCollations,
722                                          bool *nullsFirstFlags,
723                                          int workMem, bool randomAccess)
724 {
725         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
726         MemoryContext oldcontext;
727         int                     i;
728
729         oldcontext = MemoryContextSwitchTo(state->sortcontext);
730
731         AssertArg(nkeys > 0);
732
733 #ifdef TRACE_SORT
734         if (trace_sort)
735                 elog(LOG,
736                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
737                          nkeys, workMem, randomAccess ? 't' : 'f');
738 #endif
739
740         state->nKeys = nkeys;
741
742         TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
743                                                                 false,  /* no unique check */
744                                                                 nkeys,
745                                                                 workMem,
746                                                                 randomAccess);
747
748         state->comparetup = comparetup_heap;
749         state->copytup = copytup_heap;
750         state->writetup = writetup_heap;
751         state->readtup = readtup_heap;
752
753         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
754         state->abbrevNext = 10;
755
756         /* Prepare SortSupport data for each column */
757         state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
758
759         for (i = 0; i < nkeys; i++)
760         {
761                 SortSupport sortKey = state->sortKeys + i;
762
763                 AssertArg(attNums[i] != 0);
764                 AssertArg(sortOperators[i] != 0);
765
766                 sortKey->ssup_cxt = CurrentMemoryContext;
767                 sortKey->ssup_collation = sortCollations[i];
768                 sortKey->ssup_nulls_first = nullsFirstFlags[i];
769                 sortKey->ssup_attno = attNums[i];
770                 /* Convey if abbreviation optimization is applicable in principle */
771                 sortKey->abbreviate = (i == 0);
772
773                 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
774         }
775
776         /*
777          * The "onlyKey" optimization cannot be used with abbreviated keys, since
778          * tie-breaker comparisons may be required.  Typically, the optimization
779          * is only of value to pass-by-value types anyway, whereas abbreviated
780          * keys are typically only of value to pass-by-reference types.
781          */
782         if (nkeys == 1 && !state->sortKeys->abbrev_converter)
783                 state->onlyKey = state->sortKeys;
784
785         MemoryContextSwitchTo(oldcontext);
786
787         return state;
788 }
789
790 Tuplesortstate *
791 tuplesort_begin_cluster(TupleDesc tupDesc,
792                                                 Relation indexRel,
793                                                 int workMem, bool randomAccess)
794 {
795         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
796         ScanKey         indexScanKey;
797         MemoryContext oldcontext;
798         int                     i;
799
800         Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
801
802         oldcontext = MemoryContextSwitchTo(state->sortcontext);
803
804 #ifdef TRACE_SORT
805         if (trace_sort)
806                 elog(LOG,
807                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
808                          RelationGetNumberOfAttributes(indexRel),
809                          workMem, randomAccess ? 't' : 'f');
810 #endif
811
812         state->nKeys = RelationGetNumberOfAttributes(indexRel);
813
814         TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
815                                                                 false,  /* no unique check */
816                                                                 state->nKeys,
817                                                                 workMem,
818                                                                 randomAccess);
819
820         state->comparetup = comparetup_cluster;
821         state->copytup = copytup_cluster;
822         state->writetup = writetup_cluster;
823         state->readtup = readtup_cluster;
824         state->abbrevNext = 10;
825
826         state->indexInfo = BuildIndexInfo(indexRel);
827
828         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
829
830         indexScanKey = _bt_mkscankey_nodata(indexRel);
831
832         if (state->indexInfo->ii_Expressions != NULL)
833         {
834                 TupleTableSlot *slot;
835                 ExprContext *econtext;
836
837                 /*
838                  * We will need to use FormIndexDatum to evaluate the index
839                  * expressions.  To do that, we need an EState, as well as a
840                  * TupleTableSlot to put the table tuples into.  The econtext's
841                  * scantuple has to point to that slot, too.
842                  */
843                 state->estate = CreateExecutorState();
844                 slot = MakeSingleTupleTableSlot(tupDesc);
845                 econtext = GetPerTupleExprContext(state->estate);
846                 econtext->ecxt_scantuple = slot;
847         }
848
849         /* Prepare SortSupport data for each column */
850         state->sortKeys = (SortSupport) palloc0(state->nKeys *
851                                                                                         sizeof(SortSupportData));
852
853         for (i = 0; i < state->nKeys; i++)
854         {
855                 SortSupport sortKey = state->sortKeys + i;
856                 ScanKey         scanKey = indexScanKey + i;
857                 int16           strategy;
858
859                 sortKey->ssup_cxt = CurrentMemoryContext;
860                 sortKey->ssup_collation = scanKey->sk_collation;
861                 sortKey->ssup_nulls_first =
862                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
863                 sortKey->ssup_attno = scanKey->sk_attno;
864                 /* Convey if abbreviation optimization is applicable in principle */
865                 sortKey->abbreviate = (i == 0);
866
867                 AssertState(sortKey->ssup_attno != 0);
868
869                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
870                         BTGreaterStrategyNumber : BTLessStrategyNumber;
871
872                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
873         }
874
875         _bt_freeskey(indexScanKey);
876
877         MemoryContextSwitchTo(oldcontext);
878
879         return state;
880 }
881
882 Tuplesortstate *
883 tuplesort_begin_index_btree(Relation heapRel,
884                                                         Relation indexRel,
885                                                         bool enforceUnique,
886                                                         int workMem, bool randomAccess)
887 {
888         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
889         ScanKey         indexScanKey;
890         MemoryContext oldcontext;
891         int                     i;
892
893         oldcontext = MemoryContextSwitchTo(state->sortcontext);
894
895 #ifdef TRACE_SORT
896         if (trace_sort)
897                 elog(LOG,
898                          "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
899                          enforceUnique ? 't' : 'f',
900                          workMem, randomAccess ? 't' : 'f');
901 #endif
902
903         state->nKeys = RelationGetNumberOfAttributes(indexRel);
904
905         TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
906                                                                 enforceUnique,
907                                                                 state->nKeys,
908                                                                 workMem,
909                                                                 randomAccess);
910
911         state->comparetup = comparetup_index_btree;
912         state->copytup = copytup_index;
913         state->writetup = writetup_index;
914         state->readtup = readtup_index;
915         state->abbrevNext = 10;
916
917         state->heapRel = heapRel;
918         state->indexRel = indexRel;
919         state->enforceUnique = enforceUnique;
920
921         indexScanKey = _bt_mkscankey_nodata(indexRel);
922         state->nKeys = RelationGetNumberOfAttributes(indexRel);
923
924         /* Prepare SortSupport data for each column */
925         state->sortKeys = (SortSupport) palloc0(state->nKeys *
926                                                                                         sizeof(SortSupportData));
927
928         for (i = 0; i < state->nKeys; i++)
929         {
930                 SortSupport sortKey = state->sortKeys + i;
931                 ScanKey         scanKey = indexScanKey + i;
932                 int16           strategy;
933
934                 sortKey->ssup_cxt = CurrentMemoryContext;
935                 sortKey->ssup_collation = scanKey->sk_collation;
936                 sortKey->ssup_nulls_first =
937                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
938                 sortKey->ssup_attno = scanKey->sk_attno;
939                 /* Convey if abbreviation optimization is applicable in principle */
940                 sortKey->abbreviate = (i == 0);
941
942                 AssertState(sortKey->ssup_attno != 0);
943
944                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
945                         BTGreaterStrategyNumber : BTLessStrategyNumber;
946
947                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
948         }
949
950         _bt_freeskey(indexScanKey);
951
952         MemoryContextSwitchTo(oldcontext);
953
954         return state;
955 }
956
957 Tuplesortstate *
958 tuplesort_begin_index_hash(Relation heapRel,
959                                                    Relation indexRel,
960                                                    uint32 hash_mask,
961                                                    int workMem, bool randomAccess)
962 {
963         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
964         MemoryContext oldcontext;
965
966         oldcontext = MemoryContextSwitchTo(state->sortcontext);
967
968 #ifdef TRACE_SORT
969         if (trace_sort)
970                 elog(LOG,
971                 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
972                          hash_mask,
973                          workMem, randomAccess ? 't' : 'f');
974 #endif
975
976         state->nKeys = 1;                       /* Only one sort column, the hash code */
977
978         state->comparetup = comparetup_index_hash;
979         state->copytup = copytup_index;
980         state->writetup = writetup_index;
981         state->readtup = readtup_index;
982
983         state->heapRel = heapRel;
984         state->indexRel = indexRel;
985
986         state->hash_mask = hash_mask;
987
988         MemoryContextSwitchTo(oldcontext);
989
990         return state;
991 }
992
993 Tuplesortstate *
994 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
995                                           bool nullsFirstFlag,
996                                           int workMem, bool randomAccess)
997 {
998         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
999         MemoryContext oldcontext;
1000         int16           typlen;
1001         bool            typbyval;
1002
1003         oldcontext = MemoryContextSwitchTo(state->sortcontext);
1004
1005 #ifdef TRACE_SORT
1006         if (trace_sort)
1007                 elog(LOG,
1008                          "begin datum sort: workMem = %d, randomAccess = %c",
1009                          workMem, randomAccess ? 't' : 'f');
1010 #endif
1011
1012         state->nKeys = 1;                       /* always a one-column sort */
1013
1014         TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
1015                                                                 false,  /* no unique check */
1016                                                                 1,
1017                                                                 workMem,
1018                                                                 randomAccess);
1019
1020         state->comparetup = comparetup_datum;
1021         state->copytup = copytup_datum;
1022         state->writetup = writetup_datum;
1023         state->readtup = readtup_datum;
1024         state->abbrevNext = 10;
1025
1026         state->datumType = datumType;
1027
1028         /* lookup necessary attributes of the datum type */
1029         get_typlenbyval(datumType, &typlen, &typbyval);
1030         state->datumTypeLen = typlen;
1031         state->tuples = !typbyval;
1032
1033         /* Prepare SortSupport data */
1034         state->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
1035
1036         state->sortKeys->ssup_cxt = CurrentMemoryContext;
1037         state->sortKeys->ssup_collation = sortCollation;
1038         state->sortKeys->ssup_nulls_first = nullsFirstFlag;
1039
1040         /*
1041          * Abbreviation is possible here only for by-reference types.  In theory,
1042          * a pass-by-value datatype could have an abbreviated form that is cheaper
1043          * to compare.  In a tuple sort, we could support that, because we can
1044          * always extract the original datum from the tuple if needed.  Here, we
1045          * can't, because a datum sort only stores a single copy of the datum;
1046          * the "tuple" field of each sortTuple is NULL.
1047          */
1048         state->sortKeys->abbreviate = !typbyval;
1049
1050         PrepareSortSupportFromOrderingOp(sortOperator, state->sortKeys);
1051
1052         /*
1053          * The "onlyKey" optimization cannot be used with abbreviated keys, since
1054          * tie-breaker comparisons may be required.  Typically, the optimization
1055          * is only of value to pass-by-value types anyway, whereas abbreviated
1056          * keys are typically only of value to pass-by-reference types.
1057          */
1058         if (!state->sortKeys->abbrev_converter)
1059                 state->onlyKey = state->sortKeys;
1060
1061         MemoryContextSwitchTo(oldcontext);
1062
1063         return state;
1064 }
1065
1066 /*
1067  * tuplesort_set_bound
1068  *
1069  *      Advise tuplesort that at most the first N result tuples are required.
1070  *
1071  * Must be called before inserting any tuples.  (Actually, we could allow it
1072  * as long as the sort hasn't spilled to disk, but there seems no need for
1073  * delayed calls at the moment.)
1074  *
1075  * This is a hint only. The tuplesort may still return more tuples than
1076  * requested.
1077  */
1078 void
1079 tuplesort_set_bound(Tuplesortstate *state, int64 bound)
1080 {
1081         /* Assert we're called before loading any tuples */
1082         Assert(state->status == TSS_INITIAL);
1083         Assert(state->memtupcount == 0);
1084         Assert(!state->bounded);
1085
1086 #ifdef DEBUG_BOUNDED_SORT
1087         /* Honor GUC setting that disables the feature (for easy testing) */
1088         if (!optimize_bounded_sort)
1089                 return;
1090 #endif
1091
1092         /* We want to be able to compute bound * 2, so limit the setting */
1093         if (bound > (int64) (INT_MAX / 2))
1094                 return;
1095
1096         state->bounded = true;
1097         state->bound = (int) bound;
1098
1099         /*
1100          * Bounded sorts are not an effective target for abbreviated key
1101          * optimization.  Disable by setting state to be consistent with no
1102          * abbreviation support.
1103          */
1104         state->sortKeys->abbrev_converter = NULL;
1105         if (state->sortKeys->abbrev_full_comparator)
1106                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
1107
1108         /* Not strictly necessary, but be tidy */
1109         state->sortKeys->abbrev_abort = NULL;
1110         state->sortKeys->abbrev_full_comparator = NULL;
1111 }
1112
1113 /*
1114  * tuplesort_end
1115  *
1116  *      Release resources and clean up.
1117  *
1118  * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
1119  * pointing to garbage.  Be careful not to attempt to use or free such
1120  * pointers afterwards!
1121  */
1122 void
1123 tuplesort_end(Tuplesortstate *state)
1124 {
1125         /* context swap probably not needed, but let's be safe */
1126         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1127
1128 #ifdef TRACE_SORT
1129         long            spaceUsed;
1130
1131         if (state->tapeset)
1132                 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
1133         else
1134                 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
1135 #endif
1136
1137         /*
1138          * Delete temporary "tape" files, if any.
1139          *
1140          * Note: want to include this in reported total cost of sort, hence need
1141          * for two #ifdef TRACE_SORT sections.
1142          */
1143         if (state->tapeset)
1144                 LogicalTapeSetClose(state->tapeset);
1145
1146 #ifdef TRACE_SORT
1147         if (trace_sort)
1148         {
1149                 if (state->tapeset)
1150                         elog(LOG, "external sort ended, %ld disk blocks used: %s",
1151                                  spaceUsed, pg_rusage_show(&state->ru_start));
1152                 else
1153                         elog(LOG, "internal sort ended, %ld KB used: %s",
1154                                  spaceUsed, pg_rusage_show(&state->ru_start));
1155         }
1156
1157         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
1158 #else
1159
1160         /*
1161          * If TRACE_SORT is disabled, the sort__done probe can still be fired, but
1162          * no space-used statistics are reported.
1163          */
1164         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
1165 #endif
1166
1167         /* Free any execution state created for CLUSTER case */
1168         if (state->estate != NULL)
1169         {
1170                 ExprContext *econtext = GetPerTupleExprContext(state->estate);
1171
1172                 ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
1173                 FreeExecutorState(state->estate);
1174         }
1175
1176         MemoryContextSwitchTo(oldcontext);
1177
1178         /*
1179          * Free the per-sort memory context, thereby releasing all working memory,
1180          * including the Tuplesortstate struct itself.
1181          */
1182         MemoryContextDelete(state->sortcontext);
1183 }
1184
1185 /*
1186  * Grow the memtuples[] array, if possible within our memory constraint.  We
1187  * must not exceed INT_MAX tuples in memory or the caller-provided memory
1188  * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
1189  *
1190  * Normally, at each increment we double the size of the array.  When doing
1191  * that would exceed a limit, we attempt one last, smaller increase (and then
1192  * clear the growmemtuples flag so we don't try any more).  That allows us to
1193  * use memory as fully as permitted; sticking to the pure doubling rule could
1194  * result in almost half going unused.  Because availMem moves around with
1195  * tuple addition/removal, we need some rule to prevent making repeated small
1196  * increases in memtupsize, which would just be useless thrashing.  The
1197  * growmemtuples flag accomplishes that and also prevents useless
1198  * recalculations in this function.
1199  */
1200 static bool
1201 grow_memtuples(Tuplesortstate *state)
1202 {
1203         int                     newmemtupsize;
1204         int                     memtupsize = state->memtupsize;
1205         int64           memNowUsed = state->allowedMem - state->availMem;
1206
1207         /* Forget it if we've already maxed out memtuples, per comment above */
1208         if (!state->growmemtuples)
1209                 return false;
1210
1211         /* Select new value of memtupsize */
1212         if (memNowUsed <= state->availMem)
1213         {
1214                 /*
1215                  * We've used no more than half of allowedMem; double our usage,
1216                  * clamping at INT_MAX tuples.
1217                  */
1218                 if (memtupsize < INT_MAX / 2)
1219                         newmemtupsize = memtupsize * 2;
1220                 else
1221                 {
1222                         newmemtupsize = INT_MAX;
1223                         state->growmemtuples = false;
1224                 }
1225         }
1226         else
1227         {
1228                 /*
1229                  * This will be the last increment of memtupsize.  Abandon doubling
1230                  * strategy and instead increase as much as we safely can.
1231                  *
1232                  * To stay within allowedMem, we can't increase memtupsize by more
1233                  * than availMem / sizeof(SortTuple) elements.  In practice, we want
1234                  * to increase it by considerably less, because we need to leave some
1235                  * space for the tuples to which the new array slots will refer.  We
1236                  * assume the new tuples will be about the same size as the tuples
1237                  * we've already seen, and thus we can extrapolate from the space
1238                  * consumption so far to estimate an appropriate new size for the
1239                  * memtuples array.  The optimal value might be higher or lower than
1240                  * this estimate, but it's hard to know that in advance.  We again
1241                  * clamp at INT_MAX tuples.
1242                  *
1243                  * This calculation is safe against enlarging the array so much that
1244                  * LACKMEM becomes true, because the memory currently used includes
1245                  * the present array; thus, there would be enough allowedMem for the
1246                  * new array elements even if no other memory were currently used.
1247                  *
1248                  * We do the arithmetic in float8, because otherwise the product of
1249                  * memtupsize and allowedMem could overflow.  Any inaccuracy in the
1250                  * result should be insignificant; but even if we computed a
1251                  * completely insane result, the checks below will prevent anything
1252                  * really bad from happening.
1253                  */
1254                 double          grow_ratio;
1255
1256                 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1257                 if (memtupsize * grow_ratio < INT_MAX)
1258                         newmemtupsize = (int) (memtupsize * grow_ratio);
1259                 else
1260                         newmemtupsize = INT_MAX;
1261
1262                 /* We won't make any further enlargement attempts */
1263                 state->growmemtuples = false;
1264         }
1265
1266         /* Must enlarge array by at least one element, else report failure */
1267         if (newmemtupsize <= memtupsize)
1268                 goto noalloc;
1269
1270         /*
1271          * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
1272          * to ensure our request won't be rejected.  Note that we can easily
1273          * exhaust address space before facing this outcome.  (This is presently
1274          * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1275          * don't rely on that at this distance.)
1276          */
1277         if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1278         {
1279                 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1280                 state->growmemtuples = false;   /* can't grow any more */
1281         }
1282
1283         /*
1284          * We need to be sure that we do not cause LACKMEM to become true, else
1285          * the space management algorithm will go nuts.  The code above should
1286          * never generate a dangerous request, but to be safe, check explicitly
1287          * that the array growth fits within availMem.  (We could still cause
1288          * LACKMEM if the memory chunk overhead associated with the memtuples
1289          * array were to increase.  That shouldn't happen because we chose the
1290          * initial array size large enough to ensure that palloc will be treating
1291          * both old and new arrays as separate chunks.  But we'll check LACKMEM
1292          * explicitly below just in case.)
1293          */
1294         if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1295                 goto noalloc;
1296
1297         /* OK, do it */
1298         FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1299         state->memtupsize = newmemtupsize;
1300         state->memtuples = (SortTuple *)
1301                 repalloc_huge(state->memtuples,
1302                                           state->memtupsize * sizeof(SortTuple));
1303         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1304         if (LACKMEM(state))
1305                 elog(ERROR, "unexpected out-of-memory situation in tuplesort");
1306         return true;
1307
1308 noalloc:
1309         /* If for any reason we didn't realloc, shut off future attempts */
1310         state->growmemtuples = false;
1311         return false;
1312 }
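
/*
 * Editor's sketch (not part of the original source): the growth rule above,
 * distilled into a standalone helper so the doubling-then-extrapolation
 * behavior is easier to follow.  The MaxAllocHugeSize clamp, the "at least
 * one element" check and the LACKMEM recheck are omitted; INT_MAX is from
 * <limits.h>.
 */
static int
example_new_memtupsize(int memtupsize, int64 allowedMem, int64 memNowUsed)
{
	if (memNowUsed <= allowedMem - memNowUsed)
	{
		/* used no more than half of allowedMem: double, clamped at INT_MAX */
		return (memtupsize < INT_MAX / 2) ? memtupsize * 2 : INT_MAX;
	}
	else
	{
		/* final increase: extrapolate from space used so far */
		double		grow_ratio = (double) allowedMem / (double) memNowUsed;

		return (memtupsize * grow_ratio < INT_MAX) ?
			(int) (memtupsize * grow_ratio) : INT_MAX;
	}
}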
1313
1314 /*
1315  * Accept one tuple while collecting input data for sort.
1316  *
1317  * Note that the input data is always copied; the caller need not save it.
1318  */
1319 void
1320 tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
1321 {
1322         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1323         SortTuple       stup;
1324
1325         /*
1326          * Copy the given tuple into memory we control, and decrease availMem.
1327          * Then call the common code.
1328          */
1329         COPYTUP(state, &stup, (void *) slot);
1330
1331         puttuple_common(state, &stup);
1332
1333         MemoryContextSwitchTo(oldcontext);
1334 }
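
/*
 * Editor's sketch (not part of the original source): setting up a single-key
 * heap-tuple sort, as an executor-level caller might before feeding slots to
 * tuplesort_puttupleslot().  The tuple descriptor, sort operator OID and
 * collation are assumed to have been resolved by the caller.
 */
static Tuplesortstate *
example_begin_single_key_sort(TupleDesc tupDesc, AttrNumber sortColIdx,
							  Oid sortOperator, Oid collation, int workMem)
{
	bool		nullsFirst = false;

	return tuplesort_begin_heap(tupDesc, 1,
								&sortColIdx, &sortOperator,
								&collation, &nullsFirst,
								workMem, false);	/* no random access */
}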
1335
1336 /*
1337  * Accept one tuple while collecting input data for sort.
1338  *
1339  * Note that the input data is always copied; the caller need not save it.
1340  */
1341 void
1342 tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
1343 {
1344         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1345         SortTuple       stup;
1346
1347         /*
1348          * Copy the given tuple into memory we control, and decrease availMem.
1349          * Then call the common code.
1350          */
1351         COPYTUP(state, &stup, (void *) tup);
1352
1353         puttuple_common(state, &stup);
1354
1355         MemoryContextSwitchTo(oldcontext);
1356 }
1357
1358 /*
1359  * Collect one index tuple while collecting input data for sort, building
1360  * it from caller-supplied values.
1361  */
1362 void
1363 tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
1364                                                           ItemPointer self, Datum *values,
1365                                                           bool *isnull)
1366 {
1367         MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
1368         SortTuple       stup;
1369         Datum           original;
1370         IndexTuple      tuple;
1371
1372         stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull);
1373         tuple = ((IndexTuple) stup.tuple);
1374         tuple->t_tid = *self;
1375         USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1376         /* set up first-column key value */
1377         original = index_getattr(tuple,
1378                                                          1,
1379                                                          RelationGetDescr(state->indexRel),
1380                                                          &stup.isnull1);
1381
1382         MemoryContextSwitchTo(state->sortcontext);
1383
1384         if (!state->sortKeys || !state->sortKeys->abbrev_converter || stup.isnull1)
1385         {
1386                 /*
1387                  * Store the ordinary Datum representation, or the NULL value.  If
1388                  * there is a converter, it won't expect NULL values, and the cost
1389                  * model is not required to account for NULLs, so in that case we
1390                  * avoid calling the converter and just set datum1 to a zeroed
1391                  * representation (to be consistent, and to support cheap inequality
1392                  * tests for NULL abbreviated keys).
1393                  */
1394                 stup.datum1 = original;
1395         }
1396         else if (!consider_abort_common(state))
1397         {
1398                 /* Store abbreviated key representation */
1399                 stup.datum1 = state->sortKeys->abbrev_converter(original,
1400                                                                                                                 state->sortKeys);
1401         }
1402         else
1403         {
1404                 /* Abort abbreviation */
1405                 int                     i;
1406
1407                 stup.datum1 = original;
1408
1409                 /*
1410                  * Set state to be consistent with never trying abbreviation.
1411                  *
1412                  * Alter datum1 representation in already-copied tuples, so as to
1413                  * ensure a consistent representation (current tuple was just
1414                  * handled).  It does not matter if some dumped tuples are already
1415                  * sorted on tape, since serialized tuples lack abbreviated keys
1416                  * (TSS_BUILDRUNS state prevents control reaching here in any
1417                  * case).
1418                  */
1419                 for (i = 0; i < state->memtupcount; i++)
1420                 {
1421                         SortTuple  *mtup = &state->memtuples[i];
1422
1423                         tuple = mtup->tuple;
1424                         mtup->datum1 = index_getattr(tuple,
1425                                                                                  1,
1426                                                                                  RelationGetDescr(state->indexRel),
1427                                                                                  &stup.isnull1);
1428                 }
1429         }
1430
1431         puttuple_common(state, &stup);
1432
1433         MemoryContextSwitchTo(oldcontext);
1434 }
1435
1436 /*
1437  * Accept one Datum while collecting input data for sort.
1438  *
1439  * If the Datum is of a pass-by-reference type, the value will be copied.
1440  */
1441 void
1442 tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
1443 {
1444         MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
1445         SortTuple       stup;
1446
1447         /*
1448          * Pass-by-value types or null values are just stored directly in
1449          * stup.datum1 (and stup.tuple is not used and set to NULL).
1450          *
1451          * Non-null pass-by-reference values need to be copied into memory we
1452          * control, and possibly abbreviated. The copied value is pointed to by
1453          * stup.tuple and is treated as the canonical copy (e.g. to return via
1454          * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
1455          * abbreviated value if abbreviation is happening, otherwise it's
1456          * identical to stup.tuple.
1457          */
1458
1459         if (isNull || !state->tuples)
1460         {
1461                 /*
1462                  * Set datum1 to zeroed representation for NULLs (to be consistent, and
1463                  * to support cheap inequality tests for NULL abbreviated keys).
1464                  */
1465                 stup.datum1 = !isNull ? val : (Datum) 0;
1466                 stup.isnull1 = isNull;
1467                 stup.tuple = NULL;              /* no separate storage */
1468                 MemoryContextSwitchTo(state->sortcontext);
1469         }
1470         else
1471         {
1472                 Datum           original = datumCopy(val, false, state->datumTypeLen);
1473
1474                 stup.isnull1 = false;
1475                 stup.tuple = DatumGetPointer(original);
1476                 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1477                 MemoryContextSwitchTo(state->sortcontext);
1478
1479                 if (!state->sortKeys->abbrev_converter)
1480                 {
1481                         stup.datum1 = original;
1482                 }
1483                 else if (!consider_abort_common(state))
1484                 {
1485                         /* Store abbreviated key representation */
1486                         stup.datum1 = state->sortKeys->abbrev_converter(original,
1487                                                                                                                         state->sortKeys);
1488                 }
1489                 else
1490                 {
1491                         /* Abort abbreviation */
1492                         int                     i;
1493
1494                         stup.datum1 = original;
1495
1496                         /*
1497                          * Set state to be consistent with never trying abbreviation.
1498                          *
1499                          * Alter datum1 representation in already-copied tuples, so as to
1500                          * ensure a consistent representation (current tuple was just
1501                          * handled).  It does not matter if some dumped tuples are
1502                          * already sorted on tape, since serialized tuples lack
1503                          * abbreviated keys (TSS_BUILDRUNS state prevents control
1504                          * reaching here in any case).
1505                          */
1506                         for (i = 0; i < state->memtupcount; i++)
1507                         {
1508                                 SortTuple  *mtup = &state->memtuples[i];
1509
1510                                 mtup->datum1 = PointerGetDatum(mtup->tuple);
1511                         }
1512                 }
1513         }
1514
1515         puttuple_common(state, &stup);
1516
1517         MemoryContextSwitchTo(oldcontext);
1518 }
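
/*
 * Editor's sketch (not part of the original source): the two storage shapes
 * described above, from the caller's side.  A pass-by-value int4 is stored
 * directly in datum1, while a pass-by-reference text value is copied into
 * memory owned by the sort, so the caller's copy need not outlive the call.
 * The two sort states are assumed to have been created for matching datum
 * types.
 */
static void
example_put_two_datums(Tuplesortstate *int4_sort, Tuplesortstate *text_sort)
{
	Datum		textval = CStringGetTextDatum("pass-by-reference value");

	tuplesort_putdatum(int4_sort, Int32GetDatum(7), false);	/* by value */
	tuplesort_putdatum(text_sort, textval, false);		/* copied */

	/* safe: tuplesort made its own copy of the text datum */
	pfree(DatumGetPointer(textval));
}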
1519
1520 /*
1521  * Shared code for tuple and datum cases.
1522  */
1523 static void
1524 puttuple_common(Tuplesortstate *state, SortTuple *tuple)
1525 {
1526         switch (state->status)
1527         {
1528                 case TSS_INITIAL:
1529
1530                         /*
1531                          * Save the tuple into the unsorted array.  First, grow the array
1532                          * as needed.  Note that we try to grow the array when there is
1533                          * still one free slot remaining --- if we fail, there'll still be
1534                          * room to store the incoming tuple, and then we'll switch to
1535                          * tape-based operation.
1536                          */
1537                         if (state->memtupcount >= state->memtupsize - 1)
1538                         {
1539                                 (void) grow_memtuples(state);
1540                                 Assert(state->memtupcount < state->memtupsize);
1541                         }
1542                         state->memtuples[state->memtupcount++] = *tuple;
1543
1544                         /*
1545                          * Check if it's time to switch over to a bounded heapsort. We do
1546                          * so if the input tuple count exceeds twice the desired tuple
1547                          * count (this is a heuristic for where heapsort becomes cheaper
1548                          * than a quicksort), or if we've just filled workMem and have
1549                          * enough tuples to meet the bound.
1550                          *
1551                          * Note that once we enter TSS_BOUNDED state we will always try to
1552                          * complete the sort that way.  In the worst case, if later input
1553                          * tuples are larger than earlier ones, this might cause us to
1554                          * exceed workMem significantly.
1555                          */
1556                         if (state->bounded &&
1557                                 (state->memtupcount > state->bound * 2 ||
1558                                  (state->memtupcount > state->bound && LACKMEM(state))))
1559                         {
1560 #ifdef TRACE_SORT
1561                                 if (trace_sort)
1562                                         elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1563                                                  state->memtupcount,
1564                                                  pg_rusage_show(&state->ru_start));
1565 #endif
1566                                 make_bounded_heap(state);
1567                                 return;
1568                         }
1569
1570                         /*
1571                          * Done if we still fit in available memory and have array slots.
1572                          */
1573                         if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1574                                 return;
1575
1576                         /*
1577                          * Nope; time to switch to tape-based operation.
1578                          */
1579                         inittapes(state);
1580
1581                         /*
1582                          * Dump tuples until we are back under the limit.
1583                          */
1584                         dumptuples(state, false);
1585                         break;
1586
1587                 case TSS_BOUNDED:
1588
1589                         /*
1590                          * We don't want to grow the array here, so check whether the new
1591                          * tuple can be discarded before putting it in.  This should be a
1592                          * good speed optimization, too, since when there are many more
1593                          * input tuples than the bound, most input tuples can be discarded
1594                          * with just this one comparison.  Note that because we currently
1595                          * have the sort direction reversed, we must check for <= not >=.
1596                          */
1597                         if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1598                         {
1599                                 /* new tuple <= top of the heap, so we can discard it */
1600                                 free_sort_tuple(state, tuple);
1601                                 CHECK_FOR_INTERRUPTS();
1602                         }
1603                         else
1604                         {
1605                                 /* discard top of heap, sift up, insert new tuple */
1606                                 free_sort_tuple(state, &state->memtuples[0]);
1607                                 tuplesort_heap_siftup(state, false);
1608                                 tuplesort_heap_insert(state, tuple, 0, false);
1609                         }
1610                         break;
1611
1612                 case TSS_BUILDRUNS:
1613
1614                         /*
1615                          * Insert the tuple into the heap, with run number currentRun if
1616                          * it can go into the current run, else HEAP_RUN_NEXT.  The tuple
1617                          * can go into the current run if it is >= the first
1618                          * not-yet-output tuple.  (Actually, it could go into the current
1619                          * run if it is >= the most recently output tuple ... but that
1620                          * would require keeping around the tuple we last output, and it's
1621                          * simplest to let writetup free each tuple as soon as it's
1622                          * written.)
1623                          *
1624                          * Note that this only applies when:
1625                          *
1626                          * - currentRun is RUN_FIRST
1627                          *
1628                          * - Replacement selection is in use (typically it is never used).
1629                          *
1630                          * When these two conditions are not both true, all tuples are
1631                          * appended indifferently, much like the TSS_INITIAL case.
1632                          *
1633                          * There should always be room to store the incoming tuple.
1634                          */
1635                         Assert(!state->replaceActive || state->memtupcount > 0);
1636                         if (state->replaceActive &&
1637                                 COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1638                         {
1639                                 Assert(state->currentRun == RUN_FIRST);
1640
1641                                 /*
1642                                  * Insert tuple into first, fully heapified run.
1643                                  *
1644                                  * Unlike classic replacement selection, which this module was
1645                                  * previously based on, only RUN_FIRST tuples are fully
1646                                  * heapified.  Any second/next run tuples are appended
1647                                  * indifferently.  While HEAP_RUN_NEXT tuples may be sifted
1648                                  * out of the way of first run tuples, COMPARETUP() will never
1649                                  * be called for the run's tuples during sifting (only our
1650                                  * initial COMPARETUP() call is required for the tuple, to
1651                                  * determine that the tuple does not belong in RUN_FIRST).
1652                                  */
1653                                 tuplesort_heap_insert(state, tuple, state->currentRun, true);
1654                         }
1655                         else
1656                         {
1657                                 /*
1658                                  * Tuple was determined to not belong to heapified RUN_FIRST,
1659                                  * or replacement selection not in play.  Append the tuple to
1660                                  * memtuples indifferently.
1661                                  *
1662                                  * dumptuples() does not trust that the next run's tuples are
1663                                  * heapified.  Anything past the first run will always be
1664                                  * quicksorted even when replacement selection is initially
1665                                  * used.  (When it's never used, every tuple still takes this
1666                                  * path.)
1667                                  */
1668                                 tuple->tupindex = HEAP_RUN_NEXT;
1669                                 state->memtuples[state->memtupcount++] = *tuple;
1670                         }
1671
1672                         /*
1673                          * If we are over the memory limit, dump tuples till we're under.
1674                          */
1675                         dumptuples(state, false);
1676                         break;
1677
1678                 default:
1679                         elog(ERROR, "invalid tuplesort state");
1680                         break;
1681         }
1682 }
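
/*
 * Editor's note (not part of the original source): a worked instance of the
 * TSS_INITIAL switch heuristic above.  After tuplesort_set_bound(state, 100),
 * the sort flips to the bounded heap once the 201st tuple arrives (count >
 * bound * 2), or as soon as workMem is exhausted while more than 100 tuples
 * are already in memory.
 */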
1683
1684 static bool
1685 consider_abort_common(Tuplesortstate *state)
1686 {
1687         Assert(state->sortKeys[0].abbrev_converter != NULL);
1688         Assert(state->sortKeys[0].abbrev_abort != NULL);
1689         Assert(state->sortKeys[0].abbrev_full_comparator != NULL);
1690
1691         /*
1692          * Check effectiveness of abbreviation optimization.  Consider aborting
1693          * when still within memory limit.
1694          */
1695         if (state->status == TSS_INITIAL &&
1696                 state->memtupcount >= state->abbrevNext)
1697         {
1698                 state->abbrevNext *= 2;
1699
1700                 /*
1701                  * Check opclass-supplied abbreviation abort routine.  It may indicate
1702                  * that abbreviation should not proceed.
1703                  */
1704                 if (!state->sortKeys->abbrev_abort(state->memtupcount,
1705                                                                                    state->sortKeys))
1706                         return false;
1707
1708                 /*
1709                  * Finally, restore authoritative comparator, and indicate that
1710                  * abbreviation is not in play by setting abbrev_converter to NULL
1711                  */
1712                 state->sortKeys[0].comparator = state->sortKeys[0].abbrev_full_comparator;
1713                 state->sortKeys[0].abbrev_converter = NULL;
1714                 /* Not strictly necessary, but be tidy */
1715                 state->sortKeys[0].abbrev_abort = NULL;
1716                 state->sortKeys[0].abbrev_full_comparator = NULL;
1717
1718                 /* Give up - expect original pass-by-value representation */
1719                 return true;
1720         }
1721
1722         return false;
1723 }
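
/*
 * Editor's sketch (not part of the original source): the shape of an
 * opclass-supplied abbreviation abort callback that the check above invokes
 * via state->sortKeys->abbrev_abort.  The cost model shown (abort when fewer
 * than half of the converted keys appear distinct) is purely illustrative,
 * and ExampleAbbrevState is a hypothetical per-sort state hung off
 * ssup->ssup_extra by the opclass's sortsupport function.
 */
typedef struct ExampleAbbrevState
{
	int			input_count;	/* values fed to the converter so far */
	int			estimated_distinct;		/* estimated distinct abbreviated keys */
} ExampleAbbrevState;

static bool
example_abbrev_abort(int memtupcount, SortSupport ssup)
{
	ExampleAbbrevState *astate = (ExampleAbbrevState *) ssup->ssup_extra;

	/* too early to judge how well abbreviation is working */
	if (memtupcount < 100)
		return false;

	/* abort if abbreviated keys are resolving too few comparisons */
	return astate->estimated_distinct < astate->input_count / 2;
}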
1724
1725 /*
1726  * All tuples have been provided; finish the sort.
1727  */
1728 void
1729 tuplesort_performsort(Tuplesortstate *state)
1730 {
1731         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1732
1733 #ifdef TRACE_SORT
1734         if (trace_sort)
1735                 elog(LOG, "performsort starting: %s",
1736                          pg_rusage_show(&state->ru_start));
1737 #endif
1738
1739         switch (state->status)
1740         {
1741                 case TSS_INITIAL:
1742
1743                         /*
1744                          * We were able to accumulate all the tuples within the allowed
1745                          * amount of memory.  Just qsort 'em and we're done.
1746                          */
1747                         tuplesort_sort_memtuples(state);
1748                         state->current = 0;
1749                         state->eof_reached = false;
1750                         state->markpos_offset = 0;
1751                         state->markpos_eof = false;
1752                         state->status = TSS_SORTEDINMEM;
1753                         break;
1754
1755                 case TSS_BOUNDED:
1756
1757                         /*
1758                          * We were able to accumulate all the tuples required for output
1759                          * in memory, using a heap to eliminate excess tuples.  Now we
1760                          * have to transform the heap to a properly-sorted array.
1761                          */
1762                         sort_bounded_heap(state);
1763                         state->current = 0;
1764                         state->eof_reached = false;
1765                         state->markpos_offset = 0;
1766                         state->markpos_eof = false;
1767                         state->status = TSS_SORTEDINMEM;
1768                         break;
1769
1770                 case TSS_BUILDRUNS:
1771
1772                         /*
1773                          * Finish tape-based sort.  First, flush all tuples remaining in
1774                          * memory out to tape; then merge until we have a single remaining
1775                          * run (or, if !randomAccess, one run per tape). Note that
1776                          * mergeruns sets the correct state->status.
1777                          */
1778                         dumptuples(state, true);
1779                         mergeruns(state);
1780                         state->eof_reached = false;
1781                         state->markpos_block = 0L;
1782                         state->markpos_offset = 0;
1783                         state->markpos_eof = false;
1784                         break;
1785
1786                 default:
1787                         elog(ERROR, "invalid tuplesort state");
1788                         break;
1789         }
1790
1791 #ifdef TRACE_SORT
1792         if (trace_sort)
1793         {
1794                 if (state->status == TSS_FINALMERGE)
1795                         elog(LOG, "performsort done (except %d-way final merge): %s",
1796                                  state->activeTapes,
1797                                  pg_rusage_show(&state->ru_start));
1798                 else
1799                         elog(LOG, "performsort done: %s",
1800                                  pg_rusage_show(&state->ru_start));
1801         }
1802 #endif
1803
1804         MemoryContextSwitchTo(oldcontext);
1805 }
1806
1807 /*
1808  * Internal routine to fetch the next tuple in either forward or back
1809  * direction into *stup.  Returns FALSE if no more tuples.
1810  * If *should_free is set, the caller must pfree stup.tuple when done with it.
1811  * Otherwise, caller should not use tuple following next call here.
1812  */
1813 static bool
1814 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
1815                                                   SortTuple *stup, bool *should_free)
1816 {
1817         unsigned int tuplen;
1818
1819         switch (state->status)
1820         {
1821                 case TSS_SORTEDINMEM:
1822                         Assert(forward || state->randomAccess);
1823                         Assert(!state->batchUsed);
1824                         *should_free = false;
1825                         if (forward)
1826                         {
1827                                 if (state->current < state->memtupcount)
1828                                 {
1829                                         *stup = state->memtuples[state->current++];
1830                                         return true;
1831                                 }
1832                                 state->eof_reached = true;
1833
1834                                 /*
1835                                  * Complain if caller tries to retrieve more tuples than
1836                                  * originally asked for in a bounded sort.  Tuples beyond the
1837                                  * bound were discarded, so returning EOF could be wrong.
1838                                  */
1839                                 if (state->bounded && state->current >= state->bound)
1840                                         elog(ERROR, "retrieved too many tuples in a bounded sort");
1841
1842                                 return false;
1843                         }
1844                         else
1845                         {
1846                                 if (state->current <= 0)
1847                                         return false;
1848
1849                                 /*
1850                                  * If all tuples have been fetched already, return the last
1851                                  * tuple; otherwise, the tuple before the last one returned.
1852                                  */
1853                                 if (state->eof_reached)
1854                                         state->eof_reached = false;
1855                                 else
1856                                 {
1857                                         state->current--;       /* last returned tuple */
1858                                         if (state->current <= 0)
1859                                                 return false;
1860                                 }
1861                                 *stup = state->memtuples[state->current - 1];
1862                                 return true;
1863                         }
1864                         break;
1865
1866                 case TSS_SORTEDONTAPE:
1867                         Assert(forward || state->randomAccess);
1868                         Assert(!state->batchUsed);
1869                         *should_free = true;
1870                         if (forward)
1871                         {
1872                                 if (state->eof_reached)
1873                                         return false;
1874                                 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1875                                 {
1876                                         READTUP(state, stup, state->result_tape, tuplen);
1877                                         return true;
1878                                 }
1879                                 else
1880                                 {
1881                                         state->eof_reached = true;
1882                                         return false;
1883                                 }
1884                         }
1885
1886                         /*
1887                          * Backward.
1888                          *
1889                          * If all tuples have been fetched already, return the last tuple;
1890                          * otherwise, the tuple before the last one returned.
1891                          */
1892                         if (state->eof_reached)
1893                         {
1894                                 /*
1895                                  * Seek position is pointing just past the zero tuplen at the
1896                                  * end of file; back up to fetch last tuple's ending length
1897                                  * word.  If seek fails we must have a completely empty file.
1898                                  */
1899                                 if (!LogicalTapeBackspace(state->tapeset,
1900                                                                                   state->result_tape,
1901                                                                                   2 * sizeof(unsigned int)))
1902                                         return false;
1903                                 state->eof_reached = false;
1904                         }
1905                         else
1906                         {
1907                                 /*
1908                                  * Back up and fetch previously-returned tuple's ending length
1909                                  * word.  If seek fails, assume we are at start of file.
1910                                  */
1911                                 if (!LogicalTapeBackspace(state->tapeset,
1912                                                                                   state->result_tape,
1913                                                                                   sizeof(unsigned int)))
1914                                         return false;
1915                                 tuplen = getlen(state, state->result_tape, false);
1916
1917                                 /*
1918                                  * Back up to get ending length word of tuple before it.
1919                                  */
1920                                 if (!LogicalTapeBackspace(state->tapeset,
1921                                                                                   state->result_tape,
1922                                                                                   tuplen + 2 * sizeof(unsigned int)))
1923                                 {
1924                                         /*
1925                                          * If that fails, presumably the prev tuple is the first
1926                                          * in the file.  Back up so that it becomes next to read
1927                                          * in forward direction (not obviously right, but that is
1928                                          * what in-memory case does).
1929                                          */
1930                                         if (!LogicalTapeBackspace(state->tapeset,
1931                                                                                           state->result_tape,
1932                                                                                           tuplen + sizeof(unsigned int)))
1933                                                 elog(ERROR, "bogus tuple length in backward scan");
1934                                         return false;
1935                                 }
1936                         }
1937
1938                         tuplen = getlen(state, state->result_tape, false);
1939
1940                         /*
1941                          * Now we have the length of the prior tuple, back up and read it.
1942                          * Note: READTUP expects we are positioned after the initial
1943                          * length word of the tuple, so back up to that point.
1944                          */
1945                         if (!LogicalTapeBackspace(state->tapeset,
1946                                                                           state->result_tape,
1947                                                                           tuplen))
1948                                 elog(ERROR, "bogus tuple length in backward scan");
1949                         READTUP(state, stup, state->result_tape, tuplen);
1950                         return true;
1951
1952                 case TSS_FINALMERGE:
1953                         Assert(forward);
1954                         Assert(state->batchUsed || !state->tuples);
1955                         /* For now, assume tuple is stored in tape's batch memory */
1956                         *should_free = false;
1957
1958                         /*
1959                          * This code should match the inner loop of mergeonerun().
1960                          */
1961                         if (state->memtupcount > 0)
1962                         {
1963                                 int                     srcTape = state->memtuples[0].tupindex;
1964                                 int                     tupIndex;
1965                                 SortTuple  *newtup;
1966
1967                                 /*
1968                                  * Returned tuple is still counted in our memory space most
1969                                  * of the time.  See mergebatchone() for discussion of why
1970                                  * caller may occasionally be required to free returned
1971                                  * tuple, and how preread memory is managed with regard to
1972                                  * edge cases more generally.
1973                                  */
1974                                 *stup = state->memtuples[0];
1975                                 tuplesort_heap_siftup(state, false);
1976                                 if ((tupIndex = state->mergenext[srcTape]) == 0)
1977                                 {
1978                                         /*
1979                                          * out of preloaded data on this tape, try to read more
1980                                          *
1981                                          * Unlike mergeonerun(), we only preload from the single
1982                                          * tape that's run dry, though not before preparing its
1983                                          * batch memory for a new round of sequential consumption.
1984                                          * See mergepreread() comments.
1985                                          */
1986                                         if (state->batchUsed)
1987                                                 mergebatchone(state, srcTape, stup, should_free);
1988
1989                                         mergeprereadone(state, srcTape);
1990
1991                                         /*
1992                                          * if still no data, we've reached end of run on this tape
1993                                          */
1994                                         if ((tupIndex = state->mergenext[srcTape]) == 0)
1995                                         {
1996                                                 /* Free tape's buffer, avoiding dangling pointer */
1997                                                 if (state->batchUsed)
1998                                                         mergebatchfreetape(state, srcTape, stup, should_free);
1999                                                 return true;
2000                                         }
2001                                 }
2002                                 /* pull next preread tuple from list, insert in heap */
2003                                 newtup = &state->memtuples[tupIndex];
2004                                 state->mergenext[srcTape] = newtup->tupindex;
2005                                 if (state->mergenext[srcTape] == 0)
2006                                         state->mergelast[srcTape] = 0;
2007                                 tuplesort_heap_insert(state, newtup, srcTape, false);
2008                                 /* put the now-unused memtuples entry on the freelist */
2009                                 newtup->tupindex = state->mergefreelist;
2010                                 state->mergefreelist = tupIndex;
2011                                 state->mergeavailslots[srcTape]++;
2012                                 return true;
2013                         }
2014                         return false;
2015
2016                 default:
2017                         elog(ERROR, "invalid tuplesort state");
2018                         return false;           /* keep compiler quiet */
2019         }
2020 }
2021
2022 /*
2023  * Fetch the next tuple in either forward or back direction.
2024  * If successful, put tuple in slot and return TRUE; else, clear the slot
2025  * and return FALSE.
2026  *
2027  * Caller may optionally be passed back abbreviated value (on TRUE return
2028  * value) when abbreviation was used, which can be used to cheaply avoid
2029  * equality checks that might otherwise be required.  Caller can safely make a
2030  * determination of "non-equal tuple" based on simple binary inequality.  A
2031  * NULL value in leading attribute will set abbreviated value to zeroed
2032  * representation, which caller may rely on in abbreviated inequality check.
2033  */
2034 bool
2035 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
2036                                            TupleTableSlot *slot, Datum *abbrev)
2037 {
2038         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2039         SortTuple       stup;
2040         bool            should_free;
2041
2042         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
2043                 stup.tuple = NULL;
2044
2045         MemoryContextSwitchTo(oldcontext);
2046
2047         if (stup.tuple)
2048         {
2049                 /* Record abbreviated key for caller */
2050                 if (state->sortKeys->abbrev_converter && abbrev)
2051                         *abbrev = stup.datum1;
2052
2053                 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
2054                 return true;
2055         }
2056         else
2057         {
2058                 ExecClearTuple(slot);
2059                 return false;
2060         }
2061 }
2062
2063 /*
2064  * Fetch the next tuple in either forward or back direction.
2065  * Returns NULL if no more tuples.  If *should_free is set, the
2066  * caller must pfree the returned tuple when done with it.
2067  * If it is not set, caller should not use tuple following next
2068  * call here.
2069  */
2070 HeapTuple
2071 tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
2072 {
2073         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2074         SortTuple       stup;
2075
2076         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
2077                 stup.tuple = NULL;
2078
2079         MemoryContextSwitchTo(oldcontext);
2080
2081         return stup.tuple;
2082 }
2083
2084 /*
2085  * Fetch the next index tuple in either forward or back direction.
2086  * Returns NULL if no more tuples.  If *should_free is set, the
2087  * caller must pfree the returned tuple when done with it.
2088  * If it is not set, caller should not use tuple following next
2089  * call here.
2090  */
2091 IndexTuple
2092 tuplesort_getindextuple(Tuplesortstate *state, bool forward,
2093                                                 bool *should_free)
2094 {
2095         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2096         SortTuple       stup;
2097
2098         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
2099                 stup.tuple = NULL;
2100
2101         MemoryContextSwitchTo(oldcontext);
2102
2103         return (IndexTuple) stup.tuple;
2104 }
2105
2106 /*
2107  * Fetch the next Datum in either forward or back direction.
2108  * Returns FALSE if no more datums.
2109  *
2110  * If the Datum is of a pass-by-reference type, the returned value is
2111  * freshly palloc'd and is now owned by the caller.
2112  *
2113  * Caller may optionally be passed back abbreviated value (on TRUE return
2114  * value) when abbreviation was used, which can be used to cheaply avoid
2115  * equality checks that might otherwise be required.  Caller can safely make a
2116  * determination of "non-equal tuple" based on simple binary inequality.  A
2117  * NULL value will have a zeroed abbreviated value representation, which caller
2118  * may rely on in abbreviated inequality check.
2119  */
2120 bool
2121 tuplesort_getdatum(Tuplesortstate *state, bool forward,
2122                                    Datum *val, bool *isNull, Datum *abbrev)
2123 {
2124         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2125         SortTuple       stup;
2126         bool            should_free;
2127
2128         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
2129         {
2130                 MemoryContextSwitchTo(oldcontext);
2131                 return false;
2132         }
2133
2134         /* Record abbreviated key for caller */
2135         if (state->sortKeys->abbrev_converter && abbrev)
2136                 *abbrev = stup.datum1;
2137
2138         if (stup.isnull1 || !state->tuples)
2139         {
2140                 *val = stup.datum1;
2141                 *isNull = stup.isnull1;
2142         }
2143         else
2144         {
2145                 /* use stup.tuple because stup.datum1 may be an abbreviation */
2146
2147                 if (should_free)
2148                         *val = PointerGetDatum(stup.tuple);
2149                 else
2150                         *val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen);
2151                 *isNull = false;
2152         }
2153
2154         MemoryContextSwitchTo(oldcontext);
2155
2156         return true;
2157 }
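
/*
 * Editor's sketch (not part of the original source): draining a datum sort.
 * For pass-by-reference types the returned value is palloc'd and owned by the
 * caller, so it is pfree'd here once consumed; pass-by-value datums and NULLs
 * need no cleanup.  The byref flag is assumed to reflect the caller's
 * knowledge of the datum type.
 */
static void
example_drain_datums(Tuplesortstate *sortstate, bool byref)
{
	Datum		value;
	bool		isnull;

	while (tuplesort_getdatum(sortstate, true, &value, &isnull, NULL))
	{
		/* ... use (value, isnull) here ... */

		if (byref && !isnull)
			pfree(DatumGetPointer(value));
	}
}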
2158
2159 /*
2160  * Advance over N tuples in either forward or back direction,
2161  * without returning any data.  N==0 is a no-op.
2162  * Returns TRUE if successful, FALSE if ran out of tuples.
2163  */
2164 bool
2165 tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
2166 {
2167         MemoryContext oldcontext;
2168
2169         /*
2170          * We don't actually support backwards skip yet, because no callers need
2171          * it.  The API is designed to allow for that later, though.
2172          */
2173         Assert(forward);
2174         Assert(ntuples >= 0);
2175
2176         switch (state->status)
2177         {
2178                 case TSS_SORTEDINMEM:
2179                         if (state->memtupcount - state->current >= ntuples)
2180                         {
2181                                 state->current += ntuples;
2182                                 return true;
2183                         }
2184                         state->current = state->memtupcount;
2185                         state->eof_reached = true;
2186
2187                         /*
2188                          * Complain if caller tries to retrieve more tuples than
2189                          * originally asked for in a bounded sort.  Tuples beyond the bound
2190                          * were discarded, so returning EOF could be wrong.
2191                          */
2192                         if (state->bounded && state->current >= state->bound)
2193                                 elog(ERROR, "retrieved too many tuples in a bounded sort");
2194
2195                         return false;
2196
2197                 case TSS_SORTEDONTAPE:
2198                 case TSS_FINALMERGE:
2199
2200                         /*
2201                          * We could probably optimize these cases better, but for now it's
2202                          * not worth the trouble.
2203                          */
2204                         oldcontext = MemoryContextSwitchTo(state->sortcontext);
2205                         while (ntuples-- > 0)
2206                         {
2207                                 SortTuple       stup;
2208                                 bool            should_free;
2209
2210                                 if (!tuplesort_gettuple_common(state, forward,
2211                                                                                            &stup, &should_free))
2212                                 {
2213                                         MemoryContextSwitchTo(oldcontext);
2214                                         return false;
2215                                 }
2216                                 if (should_free && stup.tuple)
2217                                         pfree(stup.tuple);
2218                                 CHECK_FOR_INTERRUPTS();
2219                         }
2220                         MemoryContextSwitchTo(oldcontext);
2221                         return true;
2222
2223                 default:
2224                         elog(ERROR, "invalid tuplesort state");
2225                         return false;           /* keep compiler quiet */
2226         }
2227 }
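
/*
 * Editor's sketch (not part of the original source): an OFFSET-style caller.
 * It skips the first `offset` tuples without materializing them, then reads
 * the remainder into the supplied slot.  Only forward skipping is supported,
 * as noted above.
 */
static void
example_offset_scan(Tuplesortstate *sortstate, TupleTableSlot *slot,
					int64 offset)
{
	if (!tuplesort_skiptuples(sortstate, offset, true))
		return;					/* fewer than `offset` tuples were sorted */

	while (tuplesort_gettupleslot(sortstate, true, slot, NULL))
	{
		/* ... process slot ... */
	}
}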
2228
2229 /*
2230  * tuplesort_merge_order - report merge order we'll use for given memory
2231  * (note: "merge order" just means the number of input tapes in the merge).
2232  *
2233  * This is exported for use by the planner.  allowedMem is in bytes.
2234  */
2235 int
2236 tuplesort_merge_order(int64 allowedMem)
2237 {
2238         int                     mOrder;
2239
2240         /*
2241          * We need one tape for each merge input, plus another one for the output,
2242          * and each of these tapes needs buffer space.  In addition we want
2243          * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
2244          * count).
2245          *
2246          * Note: you might be thinking we need to account for the memtuples[]
2247          * array in this calculation, but we effectively treat that as part of the
2248          * MERGE_BUFFER_SIZE workspace.
2249          */
2250         mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
2251                 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
2252
2253         /* Even in minimum memory, use at least a MINORDER merge */
2254         mOrder = Max(mOrder, MINORDER);
2255
2256         return mOrder;
2257 }
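
/*
 * Editor's note (not part of the original source): a worked example of the
 * formula above, assuming for illustration TAPE_BUFFER_OVERHEAD = 24 kB and
 * MERGE_BUFFER_SIZE = 256 kB (both are defined near the top of this file as
 * multiples of BLCKSZ, so the real values depend on the build).  With
 * allowedMem = 4 MB:
 *
 *		mOrder = (4096 kB - 24 kB) / (256 kB + 24 kB) = 14
 *
 * so a 4 MB sort merges at most 14 input tapes at a time (plus the output
 * tape), and never fewer than MINORDER.
 */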
2258
2259 /*
2260  * useselection - determine algorithm to use to sort first run.
2261  *
2262  * It can sometimes be useful to use the replacement selection algorithm if it
2263  * results in one large run, and there is little available workMem.  See
2264  * remarks on RUN_SECOND optimization within dumptuples().
2265  */
2266 static bool
2267 useselection(Tuplesortstate *state)
2268 {
2269         /*
2270          * memtupsize might be noticeably higher than memtupcount here in atypical
2271          * cases.  It seems slightly preferable to not allow recent outliers to
2272          * impact this determination.  Note that caller's trace_sort output reports
2273          * memtupcount instead.
2274          */
2275         if (state->memtupsize <= replacement_sort_tuples)
2276                 return true;
2277
2278         return false;
2279 }
2280
2281 /*
2282  * inittapes - initialize for tape sorting.
2283  *
2284  * This is called only if we have found we don't have room to sort in memory.
2285  */
2286 static void
2287 inittapes(Tuplesortstate *state)
2288 {
2289         int                     maxTapes,
2290                                 j;
2291         int64           tapeSpace;
2292
2293         /* Compute number of tapes to use: merge order plus 1 */
2294         maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
2295
2296         /*
2297          * We must have at least 2*maxTapes slots in the memtuples[] array, else
2298          * we'd not have room for merge heap plus preread.  It seems unlikely that
2299          * this case would ever occur, but be safe.
2300          */
2301         maxTapes = Min(maxTapes, state->memtupsize / 2);
2302
2303         state->maxTapes = maxTapes;
2304         state->tapeRange = maxTapes - 1;
2305
2306 #ifdef TRACE_SORT
2307         if (trace_sort)
2308                 elog(LOG, "switching to external sort with %d tapes: %s",
2309                          maxTapes, pg_rusage_show(&state->ru_start));
2310 #endif
2311
2312         /*
2313          * Decrease availMem to reflect the space needed for tape buffers; but
2314          * don't decrease it to the point that we have no room for tuples. (That
2315          * case is only likely to occur if sorting pass-by-value Datums; in all
2316          * other scenarios the memtuples[] array is unlikely to occupy more than
2317          * half of allowedMem.  In the pass-by-value case it's not important to
2318          * account for tuple space, so we don't care if LACKMEM becomes
2319          * inaccurate.)
2320          */
2321         tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
2322
2323         if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
2324                 USEMEM(state, tapeSpace);
2325
2326         /*
2327          * Make sure that the temp file(s) underlying the tape set are created in
2328          * suitable temp tablespaces.
2329          */
2330         PrepareTempTablespaces();
2331
2332         /*
2333          * Create the tape set and allocate the per-tape data arrays.
2334          */
2335         state->tapeset = LogicalTapeSetCreate(maxTapes);
2336
2337         state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
2338         state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
2339         state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
2340         state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
2341         state->mergeavailmem = (int64 *) palloc0(maxTapes * sizeof(int64));
2342         state->mergetuples = (char **) palloc0(maxTapes * sizeof(char *));
2343         state->mergecurrent = (char **) palloc0(maxTapes * sizeof(char *));
2344         state->mergetail = (char **) palloc0(maxTapes * sizeof(char *));
2345         state->mergeoverflow = (char **) palloc0(maxTapes * sizeof(char *));
2346         state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
2347         state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
2348         state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
2349         state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
2350
2351         /*
2352          * Give replacement selection a try based on user setting.  There will
2353          * be a switch to a simple hybrid sort-merge strategy after the first
2354          * run (iff we could not output one long run).
2355          */
2356         state->replaceActive = useselection(state);
2357
2358         if (state->replaceActive)
2359         {
2360                 /*
2361                  * Convert the unsorted contents of memtuples[] into a heap. Each
2362                  * tuple is marked as belonging to run number zero.
2363                  *
2364                  * NOTE: we pass false for checkIndex since there's no point in
2365                  * comparing indexes in this step, even though we do intend the
2366                  * indexes to be part of the sort key...
2367                  */
2368                 int                     ntuples = state->memtupcount;
2369
2370 #ifdef TRACE_SORT
2371                 if (trace_sort)
2372                         elog(LOG, "replacement selection will sort %d first run tuples",
2373                                  state->memtupcount);
2374 #endif
2375                 state->memtupcount = 0;         /* make the heap empty */
2376
2377                 for (j = 0; j < ntuples; j++)
2378                 {
2379                         /* Must copy source tuple to avoid possible overwrite */
2380                         SortTuple       stup = state->memtuples[j];
2381
2382                         tuplesort_heap_insert(state, &stup, 0, false);
2383                 }
2384                 Assert(state->memtupcount == ntuples);
2385         }
2386
2387         state->currentRun = RUN_FIRST;
2388
2389         /*
2390          * Initialize variables of Algorithm D (step D1).
2391          */
2392         for (j = 0; j < maxTapes; j++)
2393         {
2394                 state->tp_fib[j] = 1;
2395                 state->tp_runs[j] = 0;
2396                 state->tp_dummy[j] = 1;
2397                 state->tp_tapenum[j] = j;
2398         }
2399         state->tp_fib[state->tapeRange] = 0;
2400         state->tp_dummy[state->tapeRange] = 0;
2401
2402         state->Level = 1;
2403         state->destTape = 0;
2404
2405         state->status = TSS_BUILDRUNS;
2406 }
2407
2408 /*
2409  * selectnewtape -- select new tape for new initial run.
2410  *
2411  * This is called after finishing a run when we know another run
2412  * must be started.  This implements steps D3, D4 of Algorithm D.
2413  */
2414 static void
2415 selectnewtape(Tuplesortstate *state)
2416 {
2417         int                     j;
2418         int                     a;
2419
2420         /* Step D3: advance j (destTape) */
2421         if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
2422         {
2423                 state->destTape++;
2424                 return;
2425         }
2426         if (state->tp_dummy[state->destTape] != 0)
2427         {
2428                 state->destTape = 0;
2429                 return;
2430         }
2431
2432         /* Step D4: increase level */
2433         state->Level++;
2434         a = state->tp_fib[0];
2435         for (j = 0; j < state->tapeRange; j++)
2436         {
2437                 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
2438                 state->tp_fib[j] = a + state->tp_fib[j + 1];
2439         }
2440         state->destTape = 0;
2441 }
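
/*
 * For illustration (numbers are hypothetical, assuming 3 input tapes plus one
 * output tape): starting from the step-D1 state tp_fib = {1,1,1} and
 * tp_dummy = {1,1,1}, successive passes through step D4 advance the per-tape
 * run targets as
 *
 *              level 1: {1,1,1}  total  3 runs
 *              level 2: {2,2,1}  total  5 runs
 *              level 3: {4,3,2}  total  9 runs
 *              level 4: {7,6,4}  total 17 runs
 *
 * which is the generalized-Fibonacci distribution that Algorithm D aims for;
 * tp_dummy[] records how far short of the current level's target each tape
 * still is.
 */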
2442
2443 /*
2444  * mergeruns -- merge all the completed initial runs.
2445  *
2446  * This implements steps D5, D6 of Algorithm D.  All input data has
2447  * already been written to initial runs on tape (see dumptuples).
2448  */
2449 static void
2450 mergeruns(Tuplesortstate *state)
2451 {
2452         int                     tapenum,
2453                                 svTape,
2454                                 svRuns,
2455                                 svDummy;
2456
2457         Assert(state->status == TSS_BUILDRUNS);
2458         Assert(state->memtupcount == 0);
2459
2460         if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
2461         {
2462                 /*
2463                  * If there are multiple runs to be merged, when we go to read back
2464                  * tuples from disk, abbreviated keys will not have been stored, and
2465                  * we don't care to regenerate them.  Disable abbreviation from this
2466                  * point on.
2467                  */
2468                 state->sortKeys->abbrev_converter = NULL;
2469                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
2470
2471                 /* Not strictly necessary, but be tidy */
2472                 state->sortKeys->abbrev_abort = NULL;
2473                 state->sortKeys->abbrev_full_comparator = NULL;
2474         }
2475
2476         /*
2477          * If we produced only one initial run (quite likely if the total data
2478          * volume is between 1X and 2X workMem when replacement selection is used,
2479          * but something we particularly count on when input is presorted), we can
2480          * just use that tape as the finished output, rather than doing a useless
2481          * merge.  (This obvious optimization is not in Knuth's algorithm.)
2482          */
2483         if (state->currentRun == RUN_SECOND)
2484         {
2485                 state->result_tape = state->tp_tapenum[state->destTape];
2486                 /* must freeze and rewind the finished output tape */
2487                 LogicalTapeFreeze(state->tapeset, state->result_tape);
2488                 state->status = TSS_SORTEDONTAPE;
2489                 return;
2490         }
2491
2492         /* End of step D2: rewind all output tapes to prepare for merging */
2493         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2494                 LogicalTapeRewind(state->tapeset, tapenum, false);
2495
2496         for (;;)
2497         {
2498                 /*
2499                  * At this point we know that tape[T] is empty.  If there's just one
2500                  * (real or dummy) run left on each input tape, then only one merge
2501                  * pass remains.  If we don't have to produce a materialized sorted
2502                  * tape, we can stop at this point and do the final merge on-the-fly.
2503                  */
2504                 if (!state->randomAccess)
2505                 {
2506                         bool            allOneRun = true;
2507
2508                         Assert(state->tp_runs[state->tapeRange] == 0);
2509                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2510                         {
2511                                 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
2512                                 {
2513                                         allOneRun = false;
2514                                         break;
2515                                 }
2516                         }
2517                         if (allOneRun)
2518                         {
2519                                 /* Tell logtape.c we won't be writing anymore */
2520                                 LogicalTapeSetForgetFreeSpace(state->tapeset);
2521                                 /* Initialize for the final merge pass */
2522                                 beginmerge(state, state->tuples);
2523                                 state->status = TSS_FINALMERGE;
2524                                 return;
2525                         }
2526                 }
2527
2528                 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
2529                 while (state->tp_runs[state->tapeRange - 1] ||
2530                            state->tp_dummy[state->tapeRange - 1])
2531                 {
2532                         bool            allDummy = true;
2533
2534                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2535                         {
2536                                 if (state->tp_dummy[tapenum] == 0)
2537                                 {
2538                                         allDummy = false;
2539                                         break;
2540                                 }
2541                         }
2542
2543                         if (allDummy)
2544                         {
2545                                 state->tp_dummy[state->tapeRange]++;
2546                                 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2547                                         state->tp_dummy[tapenum]--;
2548                         }
2549                         else
2550                                 mergeonerun(state);
2551                 }
2552
2553                 /* Step D6: decrease level */
2554                 if (--state->Level == 0)
2555                         break;
2556                 /* rewind output tape T to use as new input */
2557                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange],
2558                                                   false);
2559                 /* rewind used-up input tape P, and prepare it for write pass */
2560                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1],
2561                                                   true);
2562                 state->tp_runs[state->tapeRange - 1] = 0;
2563
2564                 /*
2565                  * reassign tape units per step D6; note we no longer care about A[]
2566                  */
2567                 svTape = state->tp_tapenum[state->tapeRange];
2568                 svDummy = state->tp_dummy[state->tapeRange];
2569                 svRuns = state->tp_runs[state->tapeRange];
2570                 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
2571                 {
2572                         state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
2573                         state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
2574                         state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
2575                 }
2576                 state->tp_tapenum[0] = svTape;
2577                 state->tp_dummy[0] = svDummy;
2578                 state->tp_runs[0] = svRuns;
2579         }
2580
2581         /*
2582          * Done.  Knuth says that the result is on TAPE[1], but since we exited
2583          * the loop without performing the last iteration of step D6, we have not
2584          * rearranged the tape unit assignment, and therefore the result is on
2585          * TAPE[T].  We need to do it this way so that we can freeze the final
2586          * output tape while rewinding it.  The last iteration of step D6 would be
2587          * a waste of cycles anyway...
2588          */
2589         state->result_tape = state->tp_tapenum[state->tapeRange];
2590         LogicalTapeFreeze(state->tapeset, state->result_tape);
2591         state->status = TSS_SORTEDONTAPE;
2592 }
2593
2594 /*
2595  * Merge one run from each input tape, except ones with dummy runs.
2596  *
2597  * This is the inner loop of Algorithm D step D5.  We know that the
2598  * output tape is TAPE[T].
2599  */
2600 static void
2601 mergeonerun(Tuplesortstate *state)
2602 {
2603         int                     destTape = state->tp_tapenum[state->tapeRange];
2604         int                     srcTape;
2605         int                     tupIndex;
2606         SortTuple  *tup;
2607         int64           priorAvail,
2608                                 spaceFreed;
2609
2610         /*
2611          * Start the merge by loading one tuple from each active source tape into
2612          * the heap.  We can also decrease the input run/dummy run counts.
2613          */
2614         beginmerge(state, false);
2615
2616         /*
2617          * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2618          * out, and replacing it with next tuple from same tape (if there is
2619          * another one).
2620          */
2621         while (state->memtupcount > 0)
2622         {
2623                 /* write the tuple to destTape */
2624                 priorAvail = state->availMem;
2625                 srcTape = state->memtuples[0].tupindex;
2626                 WRITETUP(state, destTape, &state->memtuples[0]);
2627                 /* writetup adjusted total free space, now fix per-tape space */
2628                 spaceFreed = state->availMem - priorAvail;
2629                 state->mergeavailmem[srcTape] += spaceFreed;
2630                 /* compact the heap */
2631                 tuplesort_heap_siftup(state, false);
2632                 if ((tupIndex = state->mergenext[srcTape]) == 0)
2633                 {
2634                         /* out of preloaded data on this tape, try to read more */
2635                         mergepreread(state);
2636                         /* if still no data, we've reached end of run on this tape */
2637                         if ((tupIndex = state->mergenext[srcTape]) == 0)
2638                                 continue;
2639                 }
2640                 /* pull next preread tuple from list, insert in heap */
2641                 tup = &state->memtuples[tupIndex];
2642                 state->mergenext[srcTape] = tup->tupindex;
2643                 if (state->mergenext[srcTape] == 0)
2644                         state->mergelast[srcTape] = 0;
2645                 tuplesort_heap_insert(state, tup, srcTape, false);
2646                 /* put the now-unused memtuples entry on the freelist */
2647                 tup->tupindex = state->mergefreelist;
2648                 state->mergefreelist = tupIndex;
2649                 state->mergeavailslots[srcTape]++;
2650         }
2651
2652         /*
2653          * Reset tuple memory.  We've freed all of the tuples that we previously
2654          * allocated, but AllocSetFree will have put those chunks of memory on
2655          * particular free lists, bucketed by size class.  Thus, although all of
2656          * that memory is free, it is effectively fragmented.  Resetting the
2657          * context gets us out from under that problem.
2658          */
2659         MemoryContextReset(state->tuplecontext);
2660
2661         /*
2662          * When the heap empties, we're done.  Write an end-of-run marker on the
2663          * output tape, and increment its count of real runs.
2664          */
2665         markrunend(state, destTape);
2666         state->tp_runs[state->tapeRange]++;
2667
2668 #ifdef TRACE_SORT
2669         if (trace_sort)
2670                 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
2671                          pg_rusage_show(&state->ru_start));
2672 #endif
2673 }
2674
2675 /*
2676  * beginmerge - initialize for a merge pass
2677  *
2678  * We decrease the counts of real and dummy runs for each tape, and mark
2679  * which tapes contain active input runs in mergeactive[].  Then, load
2680  * as many tuples as we can from each active input tape, and finally
2681  * fill the merge heap with the first tuple from each active tape.
2682  *
2683  * finalMergeBatch indicates if this is the beginning of a final on-the-fly
2684  * merge where a batched allocation of tuple memory is required.
2685  */
2686 static void
2687 beginmerge(Tuplesortstate *state, bool finalMergeBatch)
2688 {
2689         int                     activeTapes;
2690         int                     tapenum;
2691         int                     srcTape;
2692         int                     slotsPerTape;
2693         int64           spacePerTape;
2694
2695         /* Heap should be empty here */
2696         Assert(state->memtupcount == 0);
2697
2698         /* Adjust run counts and mark the active tapes */
2699         memset(state->mergeactive, 0,
2700                    state->maxTapes * sizeof(*state->mergeactive));
2701         activeTapes = 0;
2702         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2703         {
2704                 if (state->tp_dummy[tapenum] > 0)
2705                         state->tp_dummy[tapenum]--;
2706                 else
2707                 {
2708                         Assert(state->tp_runs[tapenum] > 0);
2709                         state->tp_runs[tapenum]--;
2710                         srcTape = state->tp_tapenum[tapenum];
2711                         state->mergeactive[srcTape] = true;
2712                         activeTapes++;
2713                 }
2714         }
2715         state->activeTapes = activeTapes;
2716
2717         /* Clear merge-pass state variables */
2718         memset(state->mergenext, 0,
2719                    state->maxTapes * sizeof(*state->mergenext));
2720         memset(state->mergelast, 0,
2721                    state->maxTapes * sizeof(*state->mergelast));
2722         state->mergefreelist = 0;       /* nothing in the freelist */
2723         state->mergefirstfree = activeTapes;            /* 1st slot avail for preread */
2724
2725         if (finalMergeBatch)
2726         {
2727                 /* Credit back buffer space for tapes never actually allocated */
2728                 FREEMEM(state, (state->maxTapes - activeTapes) * TAPE_BUFFER_OVERHEAD);
2729
2730                 /*
2731                  * Grow memtuples one last time, since the palloc() overhead that is no
2732                  * longer incurred can make a big difference
2733                  */
2734                 batchmemtuples(state);
2735         }
2736
2737         /*
2738          * Initialize space allocation to let each active input tape have an equal
2739          * share of preread space.
2740          */
2741         Assert(activeTapes > 0);
2742         slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
2743         Assert(slotsPerTape > 0);
2744         spacePerTape = MAXALIGN_DOWN(state->availMem / activeTapes);
2745         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2746         {
2747                 if (state->mergeactive[srcTape])
2748                 {
2749                         state->mergeavailslots[srcTape] = slotsPerTape;
2750                         state->mergeavailmem[srcTape] = spacePerTape;
2751                 }
2752         }
2753
2754         /*
2755          * Preallocate tuple batch memory for each tape.  This is the memory used
2756          * for tuples themselves (not SortTuples), so it's never used by
2757          * pass-by-value datum sorts.  Memory allocation is performed here at most
2758          * once per sort, just in advance of the final on-the-fly merge step.
2759          */
2760         if (finalMergeBatch)
2761                 mergebatch(state, spacePerTape);
2762
2763         /*
2764          * Preread as many tuples as possible (and at least one) from each active
2765          * tape
2766          */
2767         mergepreread(state);
2768
2769         /* Load the merge heap with the first tuple from each input tape */
2770         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2771         {
2772                 int                     tupIndex = state->mergenext[srcTape];
2773                 SortTuple  *tup;
2774
2775                 if (tupIndex)
2776                 {
2777                         tup = &state->memtuples[tupIndex];
2778                         state->mergenext[srcTape] = tup->tupindex;
2779                         if (state->mergenext[srcTape] == 0)
2780                                 state->mergelast[srcTape] = 0;
2781                         tuplesort_heap_insert(state, tup, srcTape, false);
2782                         /* put the now-unused memtuples entry on the freelist */
2783                         tup->tupindex = state->mergefreelist;
2784                         state->mergefreelist = tupIndex;
2785                         state->mergeavailslots[srcTape]++;
2786
2787 #ifdef TRACE_SORT
2788                         if (trace_sort && finalMergeBatch)
2789                         {
2790                                 int64           perTapeKB = (spacePerTape + 1023) / 1024;
2791                                 int64           usedSpaceKB;
2792                                 int                     usedSlots;
2793
2794                                 /*
2795                                  * Report how effective batchmemtuples() was in balancing
2796                                  * the number of slots against the need for memory for the
2797                                  * underlying tuples (e.g. IndexTuples).  The big preread of
2798                                  * all tapes when switching to FINALMERGE state should be
2799                                  * fairly representative of memory utilization during the
2800                                  * final merge step, and in any case is the only point at
2801                                  * which all tapes are guaranteed to have depleted either
2802                                  * their batch memory allowance or slot allowance.  Ideally,
2803                                  * both will be completely depleted for every tape by now.
2804                                  */
2805                                 usedSpaceKB = (state->mergecurrent[srcTape] -
2806                                                            state->mergetuples[srcTape] + 1023) / 1024;
2807                                 usedSlots = slotsPerTape - state->mergeavailslots[srcTape];
2808
2809                                 elog(LOG, "tape %d initially used " INT64_FORMAT " KB of "
2810                                          INT64_FORMAT " KB batch (%2.3f) and %d out of %d slots "
2811                                          "(%2.3f)", srcTape,
2812                                          usedSpaceKB, perTapeKB,
2813                                          (double) usedSpaceKB / (double) perTapeKB,
2814                                          usedSlots, slotsPerTape,
2815                                          (double) usedSlots / (double) slotsPerTape);
2816                         }
2817 #endif
2818                 }
2819         }
2820 }
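
/*
 * To make the preread budgeting above concrete (illustrative numbers only):
 * with memtupsize = 4096, 6 active tapes and mergefirstfree = 6, each tape
 * gets slotsPerTape = (4096 - 6) / 6 = 681 memtuples[] slots, and with
 * availMem = 6MB each gets spacePerTape = MAXALIGN_DOWN(6MB / 6) = 1MB of
 * tuple memory.  A tape stops prereading as soon as it exhausts either
 * budget; the trace_sort output in this function reports how close each tape
 * came to both limits.
 */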
2821
2822 /*
2823  * batchmemtuples - grow memtuples without palloc overhead
2824  *
2825  * When called, availMem should be approximately the amount of memory we'd
2826  * require to allocate memtupsize - memtupcount tuples (not SortTuples/slots)
2827  * that were allocated with palloc() overhead, and in doing so use up all
2828  * allocated slots.  However, though slots and tuple memory are in balance
2829  * following the last grow_memtuples() call, that's predicated on the observed
2830  * average tuple size for the "final" grow_memtuples() call, which includes
2831  * palloc overhead.  During the final merge pass, where we will arrange to
2832  * squeeze out the palloc overhead, we might need more slots in the memtuples
2833  * array.
2834  *
2835  * To make that happen, arrange for the amount of remaining memory to be
2836  * exactly equal to the palloc overhead multiplied by the current size of
2837  * the memtuples array, force the grow_memtuples flag back to true (it's
2838  * probably but not necessarily false on entry to this routine), and then
2839  * call grow_memtuples.  This simulates loading enough tuples to fill the
2840  * whole memtuples array and then having some space left over because of the
2841  * elided palloc overhead.  We expect that grow_memtuples() will conclude that
2842  * it can't double the size of the memtuples array but that it can increase
2843  * it by some percentage; but if it does decide to double it, that just means
2844  * that we've never managed to use many slots in the memtuples array, in which
2845  * case doubling it shouldn't hurt anything anyway.
2846  */
2847 static void
2848 batchmemtuples(Tuplesortstate *state)
2849 {
2850         int64                   refund;
2851         int64                   availMemLessRefund;
2852         int                             memtupsize = state->memtupsize;
2853
2854         /* For simplicity, assume no memtuples are actually currently counted */
2855         Assert(state->memtupcount == 0);
2856
2857         /*
2858          * Refund STANDARDCHUNKHEADERSIZE per tuple.
2859          *
2860          * This sometimes fails to make memory use perfectly balanced, but it
2861          * should never make the situation worse.  Note that Assert-enabled builds
2862          * get a larger refund, due to a varying STANDARDCHUNKHEADERSIZE.
2863          */
2864         refund = memtupsize * STANDARDCHUNKHEADERSIZE;
2865         availMemLessRefund = state->availMem - refund;
2866
2867         /*
2868          * To establish balanced memory use after refunding palloc overhead,
2869          * temporarily have our accounting indicate that we've allocated all
2870          * memory we're allowed to, less that refund, and call grow_memtuples()
2871          * to have it increase the number of slots.
2872          */
2873         state->growmemtuples = true;
2874         USEMEM(state, availMemLessRefund);
2875         (void) grow_memtuples(state);
2876         /* Should not matter, but be tidy */
2877         FREEMEM(state, availMemLessRefund);
2878         state->growmemtuples = false;
2879
2880 #ifdef TRACE_SORT
2881         if (trace_sort)
2882         {
2883                 Size    OldKb = (memtupsize * sizeof(SortTuple) + 1023) / 1024;
2884                 Size    NewKb = (state->memtupsize * sizeof(SortTuple) + 1023) / 1024;
2885
2886                 elog(LOG, "grew memtuples %1.2fx from %d (%zu KB) to %d (%zu KB) for final merge",
2887                          (double) NewKb / (double) OldKb,
2888                          memtupsize, OldKb,
2889                          state->memtupsize, NewKb);
2890         }
2891 #endif
2892 }
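
/*
 * A sketch of the accounting trick above (numbers are hypothetical;
 * STANDARDCHUNKHEADERSIZE is platform- and build-dependent): with
 * memtupsize = 100000 slots and a 16-byte chunk header, the refund is
 * 100000 * 16 = 1,600,000 bytes.  After USEMEM(availMem - refund), the
 * accounting shows exactly that refund as free memory, so grow_memtuples()
 * enlarges memtuples[] by roughly the fraction of memory that palloc chunk
 * headers would otherwise have wasted during the final merge.
 */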
2893
2894 /*
2895  * mergebatch - initialize tuple memory in batch
2896  *
2897  * This allows sequential access to sorted tuples buffered in memory from
2898  * tapes/runs on disk during a final on-the-fly merge step.  Note that the
2899  * memory is not used for SortTuples, but for the underlying tuples (e.g.
2900  * MinimalTuples).
2901  *
2902  * Note that when batch memory is used, there is a simple division of space
2903  * into large buffers (one per active tape).  The conventional incremental
2904  * memory accounting (calling USEMEM() and FREEMEM()) is abandoned.  Instead,
2905  * when each tape's memory budget is exceeded, a retail palloc() "overflow" is
2906  * performed, which is then immediately detected in a way that is analogous to
2907  * LACKMEM().  This keeps each tape's use of memory fair, which is always a
2908  * goal.
2909  */
2910 static void
2911 mergebatch(Tuplesortstate *state, int64 spacePerTape)
2912 {
2913         int             srcTape;
2914
2915         Assert(state->activeTapes > 0);
2916         Assert(state->tuples);
2917
2918         /*
2919          * For the purposes of tuplesort's memory accounting, the batch allocation
2920          * is special, and regular memory accounting through USEMEM() calls is
2921          * abandoned (see mergeprereadone()).
2922          */
2923         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2924         {
2925                 char       *mergetuples;
2926
2927                 if (!state->mergeactive[srcTape])
2928                         continue;
2929
2930                 /* Allocate buffer for each active tape */
2931                 mergetuples = MemoryContextAllocHuge(state->tuplecontext,
2932                                                                                          spacePerTape);
2933
2934                 /* Initialize state for tape */
2935                 state->mergetuples[srcTape] = mergetuples;
2936                 state->mergecurrent[srcTape] = mergetuples;
2937                 state->mergetail[srcTape] = mergetuples;
2938                 state->mergeoverflow[srcTape] = NULL;
2939         }
2940
2941         state->batchUsed = true;
2942         state->spacePerTape = spacePerTape;
2943 }
2944
2945 /*
2946  * mergebatchone - prepare batch memory for one merge input tape
2947  *
2948  * This is called following the exhaustion of preread tuples for one input
2949  * tape.  All that actually occurs is that the state for the source tape is
2950  * reset to indicate that all memory may be reused.
2951  *
2952  * This routine must deal with fixing up the tuple that is about to be returned
2953  * to the client, due to "overflow" allocations.
2954  */
2955 static void
2956 mergebatchone(Tuplesortstate *state, int srcTape, SortTuple *rtup,
2957                           bool *should_free)
2958 {
2959         Assert(state->batchUsed);
2960
2961         /*
2962          * Tuple about to be returned to caller ("stup") is final preread tuple
2963          * from tape, just removed from the top of the heap.  Special steps around
2964          * memory management must be performed for that tuple, to make sure it
2965          * isn't overwritten early.
2966          */
2967         if (!state->mergeoverflow[srcTape])
2968         {
2969                 Size    tupLen;
2970
2971                 /*
2972                  * Mark tuple buffer range for reuse, but be careful to move final,
2973                  * tail tuple to start of space for next run so that it's available
2974                  * to caller when stup is returned, and remains available at least
2975                  * until the next tuple is requested.
2976                  */
2977                 tupLen = state->mergecurrent[srcTape] - state->mergetail[srcTape];
2978                 state->mergecurrent[srcTape] = state->mergetuples[srcTape];
2979                 memmove(state->mergecurrent[srcTape], state->mergetail[srcTape],
2980                                 tupLen);
2981
2982                 /* Make SortTuple at top of the merge heap point to new tuple */
2983                 rtup->tuple = (void *) state->mergecurrent[srcTape];
2984
2985                 state->mergetail[srcTape] = state->mergecurrent[srcTape];
2986                 state->mergecurrent[srcTape] += tupLen;
2987         }
2988         else
2989         {
2990                 /*
2991                  * Handle an "overflow" retail palloc.
2992                  *
2993                  * This is needed when we run out of tuple memory for the tape.
2994                  */
2995                 state->mergecurrent[srcTape] = state->mergetuples[srcTape];
2996                 state->mergetail[srcTape] = state->mergetuples[srcTape];
2997
2998                 if (rtup->tuple)
2999                 {
3000                         Assert(rtup->tuple == (void *) state->mergeoverflow[srcTape]);
3001                         /* Caller should free palloc'd tuple */
3002                         *should_free = true;
3003                 }
3004                 state->mergeoverflow[srcTape] = NULL;
3005         }
3006 }
3007
3008 /*
3009  * mergebatchfreetape - handle final clean-up for batch memory once tape is
3010  * about to become exhausted
3011  *
3012  * All tuples are returned from tape, but a single final tuple, *rtup, is to be
3013  * passed back to caller.  Free tape's batch allocation buffer while ensuring
3014  * that the final tuple is managed appropriately.
3015  */
3016 static void
3017 mergebatchfreetape(Tuplesortstate *state, int srcTape, SortTuple *rtup,
3018                                    bool *should_free)
3019 {
3020         Assert(state->batchUsed);
3021         Assert(state->status == TSS_FINALMERGE);
3022
3023         /*
3024          * Tuple may or may not already be an overflow allocation from
3025          * mergebatchone()
3026          */
3027         if (!*should_free && rtup->tuple)
3028         {
3029                 /*
3030                  * Final tuple still in tape's batch allocation.
3031                  *
3032                  * Return palloc()'d copy to caller, and have it freed in a similar
3033                  * manner to overflow allocation.  Otherwise, we'd free batch memory
3034                  * and pass back a pointer to garbage.  Note that we deliberately
3035                  * allocate this in the parent tuplesort context, to be on the safe
3036                  * side.
3037                  */
3038                 Size            tuplen;
3039                 void       *oldTuple = rtup->tuple;
3040
3041                 tuplen = state->mergecurrent[srcTape] - state->mergetail[srcTape];
3042                 rtup->tuple = MemoryContextAlloc(state->sortcontext, tuplen);
3043                 memcpy(rtup->tuple, oldTuple, tuplen);
3044                 *should_free = true;
3045         }
3046
3047         /* Free spacePerTape-sized buffer */
3048         pfree(state->mergetuples[srcTape]);
3049 }
3050
3051 /*
3052  * mergebatchalloc - allocate memory for one tuple using a batch memory
3053  * "logical allocation".
3054  *
3055  * This is used for the final on-the-fly merge phase only.  READTUP() routines
3056  * receive memory from here in place of palloc() and USEMEM() calls.
3057  *
3058  * Tuple tapenum is passed, ensuring each tape's tuples are stored in sorted,
3059  * contiguous order (while allowing safe reuse of memory made available to
3060  * each tape).  This maximizes locality of access as tuples are returned by
3061  * final merge.
3062  *
3063  * Caller must not subsequently attempt to free memory returned here.  In
3064  * general, only mergebatch* functions know about how memory returned from
3065  * here should be freed, and this function's caller must ensure that batch
3066  * memory management code will definitely have the opportunity to do the right
3067  * thing during the final on-the-fly merge.
3068  */
3069 static void *
3070 mergebatchalloc(Tuplesortstate *state, int tapenum, Size tuplen)
3071 {
3072         Size            reserve_tuplen = MAXALIGN(tuplen);
3073         char       *ret;
3074
3075         /* Should overflow at most once before mergebatchone() call: */
3076         Assert(state->mergeoverflow[tapenum] == NULL);
3077         Assert(state->batchUsed);
3078
3079         /* It should be possible to use precisely spacePerTape memory at once */
3080         if (state->mergecurrent[tapenum] + reserve_tuplen <=
3081                 state->mergetuples[tapenum] + state->spacePerTape)
3082         {
3083                 /*
3084                  * Usual case -- caller is returned a pointer into its tape's buffer, and
3085                  * an offset from that point is recorded as how far the tape has consumed
3086                  * for the current round of preloading.
3087                  */
3088                 ret = state->mergetail[tapenum] = state->mergecurrent[tapenum];
3089                 state->mergecurrent[tapenum] += reserve_tuplen;
3090         }
3091         else
3092         {
3093                 /*
3094                  * Allocate memory, and record as tape's overflow allocation.  This
3095                  * will be detected quickly, in a similar fashion to a LACKMEM()
3096                  * condition, and should not happen again before a new round of
3097                  * preloading for caller's tape.  Note that we deliberately allocate
3098                  * this in the parent tuplesort context, to be on the safe side.
3099                  *
3100                  * Sometimes, this does not happen because merging runs out of slots
3101                  * before running out of memory.
3102                  */
3103                 ret = state->mergeoverflow[tapenum] =
3104                         MemoryContextAlloc(state->sortcontext, tuplen);
3105         }
3106
3107         return ret;
3108 }
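
/*
 * In effect this is a per-tape bump allocator (illustrative numbers): with
 * spacePerTape = 1MB and mergecurrent already 992kB into the buffer, a 24kB
 * tuple still fits and simply advances mergecurrent, while a 48kB tuple
 * would not; the latter is palloc'd and remembered in mergeoverflow[], which
 * the preread loop treats much like a LACKMEM() condition, stopping further
 * preloading for this tape until its batch buffer is reset.
 */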
3109
3110 /*
3111  * mergepreread - load tuples from merge input tapes
3112  *
3113  * This routine exists to improve sequentiality of reads during a merge pass,
3114  * as explained in the header comments of this file.  Load tuples from each
3115  * active source tape until the tape's run is exhausted or it has used up
3116  * its fair share of available memory.  In any case, we guarantee that there
3117  * is at least one preread tuple available from each unexhausted input tape.
3118  *
3119  * We invoke this routine at the start of a merge pass for initial load,
3120  * and then whenever any tape's preread data runs out.  Note that we load
3121  * as much data as possible from all tapes, not just the one that ran out.
3122  * This is because logtape.c works best with a usage pattern that alternates
3123  * between reading a lot of data and writing a lot of data, so whenever we
3124  * are forced to read, we should fill working memory completely.
3125  *
3126  * In FINALMERGE state, we *don't* use this routine, but instead just preread
3127  * from the single tape that ran dry.  There's no read/write alternation in
3128  * that state and so no point in scanning through all the tapes to fix one.
3129  * (Moreover, there may be quite a lot of inactive tapes in that state, since
3130  * we might have had many fewer runs than tapes.  In a regular tape-to-tape
3131  * merge we can expect most of the tapes to be active.  Plus, only
3132  * FINALMERGE state has to consider memory management for a batch
3133  * allocation.)
3134  */
3135 static void
3136 mergepreread(Tuplesortstate *state)
3137 {
3138         int                     srcTape;
3139
3140         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
3141                 mergeprereadone(state, srcTape);
3142 }
3143
3144 /*
3145  * mergeprereadone - load tuples from one merge input tape
3146  *
3147  * Read tuples from the specified tape until it has used up its free memory
3148  * or array slots; but ensure that we have at least one tuple, if any are
3149  * to be had.
3150  */
3151 static void
3152 mergeprereadone(Tuplesortstate *state, int srcTape)
3153 {
3154         unsigned int tuplen;
3155         SortTuple       stup;
3156         int                     tupIndex;
3157         int64           priorAvail,
3158                                 spaceUsed;
3159
3160         if (!state->mergeactive[srcTape])
3161                 return;                                 /* tape's run is already exhausted */
3162
3163         /*
3164          * Manage per-tape availMem.  Only actually matters when batch memory not
3165          * in use.
3166          */
3167         priorAvail = state->availMem;
3168         state->availMem = state->mergeavailmem[srcTape];
3169
3170         /*
3171          * When batch memory is used in the final on-the-fly merge, only the
3172          * mergeoverflow test is relevant; otherwise, only the LACKMEM() test matters.
3173          */
3174         while ((state->mergeavailslots[srcTape] > 0 &&
3175                         state->mergeoverflow[srcTape] == NULL && !LACKMEM(state)) ||
3176                    state->mergenext[srcTape] == 0)
3177         {
3178                 /* read next tuple, if any */
3179                 if ((tuplen = getlen(state, srcTape, true)) == 0)
3180                 {
3181                         state->mergeactive[srcTape] = false;
3182                         break;
3183                 }
3184                 READTUP(state, &stup, srcTape, tuplen);
3185                 /* find a free slot in memtuples[] for it */
3186                 tupIndex = state->mergefreelist;
3187                 if (tupIndex)
3188                         state->mergefreelist = state->memtuples[tupIndex].tupindex;
3189                 else
3190                 {
3191                         tupIndex = state->mergefirstfree++;
3192                         Assert(tupIndex < state->memtupsize);
3193                 }
3194                 state->mergeavailslots[srcTape]--;
3195                 /* store tuple, append to list for its tape */
3196                 stup.tupindex = 0;
3197                 state->memtuples[tupIndex] = stup;
3198                 if (state->mergelast[srcTape])
3199                         state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
3200                 else
3201                         state->mergenext[srcTape] = tupIndex;
3202                 state->mergelast[srcTape] = tupIndex;
3203         }
3204         /* update per-tape and global availmem counts */
3205         spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
3206         state->mergeavailmem[srcTape] = state->availMem;
3207         state->availMem = priorAvail - spaceUsed;
3208 }
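
/*
 * The preread tuples are threaded into singly linked lists through the
 * tupindex fields of memtuples[] entries.  As a small illustration (slot
 * numbers are arbitrary): after prereading three tuples from tape 2 into
 * slots 7, 9 and 4, the state is
 *
 *              mergenext[2] = 7
 *              memtuples[7].tupindex = 9
 *              memtuples[9].tupindex = 4
 *              memtuples[4].tupindex = 0   (end of list)
 *              mergelast[2] = 4
 *
 * beginmerge() and mergeonerun() pop from the head via mergenext[], and
 * freed slots are pushed onto mergefreelist through the same tupindex links.
 */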
3209
3210 /*
3211  * dumptuples - remove tuples from memtuples and write to tape
3212  *
3213  * This is used during initial-run building, but not during merging.
3214  *
3215  * When alltuples = false and replacement selection is still active, dump
3216  * only enough tuples to get under the availMem limit (and leave at least
3217  * one tuple in memtuples, since puttuple will then assume it is a heap that
3218  * has a tuple to compare to).  We always insist there be at least one free
3219  * slot in the memtuples[] array.
3220  *
3221  * When alltuples = true, dump everything currently in memory.  (This
3222  * case is only used at end of input data, although in practice only the
3223  * first run could fail to dump all tuples when we LACKMEM(), and only
3224  * when replacement selection is active.)
3225  *
3226  * If, when replacement selection is active, we see that the tuple run
3227  * number at the top of the heap has changed, start a new run.  This must be
3228  * the first run, because replacement selection is always abandoned for all
3229  * further runs.
3230  */
3231 static void
3232 dumptuples(Tuplesortstate *state, bool alltuples)
3233 {
3234         while (alltuples ||
3235                    (LACKMEM(state) && state->memtupcount > 1) ||
3236                    state->memtupcount >= state->memtupsize)
3237         {
3238                 if (state->replaceActive)
3239                 {
3240                         /*
3241                          * Still holding out for a case favorable to replacement selection.
3242                          * Still incrementally spilling using heap.
3243                          *
3244                          * Dump the heap's frontmost entry, and sift up to remove it from
3245                          * the heap.
3246                          */
3247                         Assert(state->memtupcount > 0);
3248                         WRITETUP(state, state->tp_tapenum[state->destTape],
3249                                          &state->memtuples[0]);
3250                         tuplesort_heap_siftup(state, true);
3251                 }
3252                 else
3253                 {
3254                         /*
3255                          * Once committed to quicksorting runs, never incrementally
3256                          * spill
3257                          */
3258                         dumpbatch(state, alltuples);
3259                         break;
3260                 }
3261
3262                 /*
3263                  * If top run number has changed, we've finished the current run
3264                  * (this can only be the first run), and will no longer spill
3265                  * incrementally.
3266                  */
3267                 if (state->memtupcount == 0 ||
3268                         state->memtuples[0].tupindex == HEAP_RUN_NEXT)
3269                 {
3270                         markrunend(state, state->tp_tapenum[state->destTape]);
3271                         Assert(state->currentRun == RUN_FIRST);
3272                         state->currentRun++;
3273                         state->tp_runs[state->destTape]++;
3274                         state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
3275
3276 #ifdef TRACE_SORT
3277                         if (trace_sort)
3278                                 elog(LOG, "finished incrementally writing %s run %d to tape %d: %s",
3279                                          (state->memtupcount == 0) ? "only" : "first",
3280                                          state->currentRun, state->destTape,
3281                                          pg_rusage_show(&state->ru_start));
3282 #endif
3283                         /*
3284                          * Done if heap is empty, which is possible when there is only one
3285                          * long run.
3286                          */
3287                         Assert(state->currentRun == RUN_SECOND);
3288                         if (state->memtupcount == 0)
3289                         {
3290                                 /*
3291                                  * Replacement selection best case; no final merge required,
3292                                  * because there was only one initial run (second run has no
3293                                  * tuples).  See RUN_SECOND case in mergeruns().
3294                                  */
3295                                 break;
3296                         }
3297
3298                         /*
3299                          * Abandon replacement selection for second run (as well as any
3300                          * subsequent runs).
3301                          */
3302                         state->replaceActive = false;
3303
3304                         /*
3305                          * First tuple of next run should not be heapified, and so will
3306                          * bear placeholder run number.  In practice this must actually be
3307                          * the second run, which just became the currentRun, so we're
3308                          * clear to quicksort and dump the tuples in batch next time
3309                          * memtuples becomes full.
3310                          */
3311                         Assert(state->memtuples[0].tupindex == HEAP_RUN_NEXT);
3312                         selectnewtape(state);
3313                 }
3314         }
3315 }
3316
3317 /*
3318  * dumpbatch - sort and dump all memtuples, forming one run on tape
3319  *
3320  * Second or subsequent runs are never heapified by this module (although
3321  * heapification still respects run number differences between the first and
3322  * second runs), and a heap (replacement selection priority queue) is often
3323  * avoided in the first place.
3324  */
3325 static void
3326 dumpbatch(Tuplesortstate *state, bool alltuples)
3327 {
3328         int                     memtupwrite;
3329         int                     i;
3330
3331         /*
3332          * Final call might require no sorting, in rare cases where we just so
3333          * happen to have previously LACKMEM()'d at the point where exactly all
3334          * remaining tuples are loaded into memory, just before input was
3335          * exhausted.
3336          *
3337          * In general, short final runs are quite possible.  Rather than
3338          * allowing a special case where there was a superfluous
3339          * selectnewtape() call (i.e. a call with no subsequent run actually
3340          * written to destTape), we prefer to write out a 0 tuple run.
3341          *
3342          * mergepreread()/mergeprereadone() are prepared for 0 tuple runs, and
3343          * will reliably mark the tape inactive for the merge when called from
3344          * beginmerge().  This case is therefore similar to the case where
3345          * mergeonerun() finds a dummy run for the tape, and so doesn't need to
3346          * merge a run from the tape (or conceptually "merges" the dummy run,
3347          * if you prefer).  According to Knuth, Algorithm D "isn't strictly
3348          * optimal" in its method of distribution and dummy run assignment;
3349          * this edge case seems very unlikely to make that appreciably worse.
3350          */
3351         Assert(state->status == TSS_BUILDRUNS);
3352
3353         /*
3354          * It seems unlikely that this limit will ever be exceeded, but take no
3355          * chances
3356          */
3357         if (state->currentRun == INT_MAX)
3358                 ereport(ERROR,
3359                                 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
3360                                  errmsg("cannot have more than %d runs for an external sort",
3361                                                 INT_MAX)));
3362
3363         state->currentRun++;
3364
3365 #ifdef TRACE_SORT
3366         if (trace_sort)
3367                 elog(LOG, "starting quicksort of run %d: %s",
3368                          state->currentRun, pg_rusage_show(&state->ru_start));
3369 #endif
3370
3371         /*
3372          * Sort all tuples accumulated within the allowed amount of memory for this
3373          * run using quicksort
3374          */
3375         tuplesort_sort_memtuples(state);
3376
3377 #ifdef TRACE_SORT
3378         if (trace_sort)
3379                 elog(LOG, "finished quicksort of run %d: %s",
3380                          state->currentRun, pg_rusage_show(&state->ru_start));
3381 #endif
3382
3383         memtupwrite = state->memtupcount;
3384         for (i = 0; i < memtupwrite; i++)
3385         {
3386                 WRITETUP(state, state->tp_tapenum[state->destTape],
3387                                  &state->memtuples[i]);
3388                 state->memtupcount--;
3389         }
3390
3391         /*
3392          * Reset tuple memory.  We've freed all of the tuples that we previously
3393          * allocated.  It's important to avoid fragmentation when there is a stark
3394          * change in allocation patterns due to the use of batch memory.
3395          * Fragmentation due to AllocSetFree's bucketing by size class might be
3396          * particularly bad if this step wasn't taken.
3397          */
3398         MemoryContextReset(state->tuplecontext);
3399
3400         markrunend(state, state->tp_tapenum[state->destTape]);
3401         state->tp_runs[state->destTape]++;
3402         state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
3403
3404 #ifdef TRACE_SORT
3405         if (trace_sort)
3406                 elog(LOG, "finished writing run %d to tape %d: %s",
3407                          state->currentRun, state->destTape,
3408                          pg_rusage_show(&state->ru_start));
3409 #endif
3410
3411         if (!alltuples)
3412                 selectnewtape(state);
3413 }
3414
3415 /*
3416  * tuplesort_rescan             - rewind and replay the scan
3417  */
3418 void
3419 tuplesort_rescan(Tuplesortstate *state)
3420 {
3421         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3422
3423         Assert(state->randomAccess);
3424
3425         switch (state->status)
3426         {
3427                 case TSS_SORTEDINMEM:
3428                         state->current = 0;
3429                         state->eof_reached = false;
3430                         state->markpos_offset = 0;
3431                         state->markpos_eof = false;
3432                         break;
3433                 case TSS_SORTEDONTAPE:
3434                         LogicalTapeRewind(state->tapeset,
3435                                                           state->result_tape,
3436                                                           false);
3437                         state->eof_reached = false;
3438                         state->markpos_block = 0L;
3439                         state->markpos_offset = 0;
3440                         state->markpos_eof = false;
3441                         break;
3442                 default:
3443                         elog(ERROR, "invalid tuplesort state");
3444                         break;
3445         }
3446
3447         MemoryContextSwitchTo(oldcontext);
3448 }
3449
3450 /*
3451  * tuplesort_markpos    - saves current position in the merged sort file
3452  */
3453 void
3454 tuplesort_markpos(Tuplesortstate *state)
3455 {
3456         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3457
3458         Assert(state->randomAccess);
3459
3460         switch (state->status)
3461         {
3462                 case TSS_SORTEDINMEM:
3463                         state->markpos_offset = state->current;
3464                         state->markpos_eof = state->eof_reached;
3465                         break;
3466                 case TSS_SORTEDONTAPE:
3467                         LogicalTapeTell(state->tapeset,
3468                                                         state->result_tape,
3469                                                         &state->markpos_block,
3470                                                         &state->markpos_offset);
3471                         state->markpos_eof = state->eof_reached;
3472                         break;
3473                 default:
3474                         elog(ERROR, "invalid tuplesort state");
3475                         break;
3476         }
3477
3478         MemoryContextSwitchTo(oldcontext);
3479 }
3480
3481 /*
3482  * tuplesort_restorepos - restores current position in merged sort file to
3483  *                                                last saved position
3484  */
3485 void
3486 tuplesort_restorepos(Tuplesortstate *state)
3487 {
3488         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3489
3490         Assert(state->randomAccess);
3491
3492         switch (state->status)
3493         {
3494                 case TSS_SORTEDINMEM:
3495                         state->current = state->markpos_offset;
3496                         state->eof_reached = state->markpos_eof;
3497                         break;
3498                 case TSS_SORTEDONTAPE:
3499                         if (!LogicalTapeSeek(state->tapeset,
3500                                                                  state->result_tape,
3501                                                                  state->markpos_block,
3502                                                                  state->markpos_offset))
3503                                 elog(ERROR, "tuplesort_restorepos failed");
3504                         state->eof_reached = state->markpos_eof;
3505                         break;
3506                 default:
3507                         elog(ERROR, "invalid tuplesort state");
3508                         break;
3509         }
3510
3511         MemoryContextSwitchTo(oldcontext);
3512 }
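
/*
 * Illustrative only: the mark/restore/rescan entry points above are only
 * legal for sorts created with randomAccess = true.  A caller that needs
 * to re-read part of the sorted output (mergejoin-style rescanning is the
 * classic case) follows roughly this protocol (sketch; tuple fetching and
 * error handling omitted):
 *
 *		state = tuplesort_begin_heap(..., workMem, true);
 *		... feed input with tuplesort_puttupleslot(state, slot) ...
 *		tuplesort_performsort(state);
 *		... fetch some tuples ...
 *		tuplesort_markpos(state);
 *		... fetch more tuples ...
 *		tuplesort_restorepos(state);	back at the marked position
 *		tuplesort_rescan(state);		or: rewind to the very beginning
 *		tuplesort_end(state);
 */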
3513
3514 /*
3515  * tuplesort_get_stats - extract summary statistics
3516  *
3517  * This can be called after tuplesort_performsort() finishes to obtain
3518  * printable summary information about how the sort was performed.
3519  * spaceUsed is measured in kilobytes.
3520  */
3521 void
3522 tuplesort_get_stats(Tuplesortstate *state,
3523                                         const char **sortMethod,
3524                                         const char **spaceType,
3525                                         long *spaceUsed)
3526 {
3527         /*
3528          * Note: it might seem we should provide both memory and disk usage for a
3529          * disk-based sort.  However, the current code doesn't track memory space
3530          * accurately once we have begun to return tuples to the caller (since we
3531          * don't account for the pfree's that the caller is expected to do), so we
3532          * rely on availMem in a disk sort.  This does not seem worth the overhead
3533          * to fix.  Is it worth creating an API for the memory context code to
3534          * tell us how much is actually used in sortcontext?
3535          */
3536         if (state->tapeset)
3537         {
3538                 *spaceType = "Disk";
3539                 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
3540         }
3541         else
3542         {
3543                 *spaceType = "Memory";
3544                 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
3545         }
3546
3547         switch (state->status)
3548         {
3549                 case TSS_SORTEDINMEM:
3550                         if (state->boundUsed)
3551                                 *sortMethod = "top-N heapsort";
3552                         else
3553                                 *sortMethod = "quicksort";
3554                         break;
3555                 case TSS_SORTEDONTAPE:
3556                         *sortMethod = "external sort";
3557                         break;
3558                 case TSS_FINALMERGE:
3559                         *sortMethod = "external merge";
3560                         break;
3561                 default:
3562                         *sortMethod = "still in progress";
3563                         break;
3564         }
3565 }
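
/*
 * Illustrative only: a caller such as EXPLAIN ANALYZE's sort
 * instrumentation can consume these statistics along these lines
 * (sketch; the real consumer lives in the executor/explain code):
 *
 *		const char *sortMethod;
 *		const char *spaceType;
 *		long		spaceUsed;
 *
 *		tuplesort_get_stats(state, &sortMethod, &spaceType, &spaceUsed);
 *		elog(LOG, "Sort Method: %s  %s: %ldkB",
 *			 sortMethod, spaceType, spaceUsed);
 */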
3566
3567
3568 /*
3569  * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
3570  *
3571  * Compare two SortTuples.  If checkIndex is true, use the tuple index
3572  * as the front of the sort key; otherwise, compare on the sort key alone.
3573  *
3574  * Note that for checkIndex callers, the heap invariant is never
3575  * maintained beyond the first run, and so there are no COMPARETUP()
3576  * calls needed to distinguish tuples in HEAP_RUN_NEXT.
3577  */
3578
3579 #define HEAPCOMPARE(tup1,tup2) \
3580         (checkIndex && ((tup1)->tupindex != (tup2)->tupindex || \
3581                                         (tup1)->tupindex == HEAP_RUN_NEXT) ? \
3582          ((tup1)->tupindex) - ((tup2)->tupindex) : \
3583          COMPARETUP(state, tup1, tup2))
3584
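/*
 * Illustrative only: HEAPCOMPARE() orders first on run number and only
 * then on the sort key.  A plain-C analogue over integer keys would be
 * (sketch; ignores the HEAP_RUN_NEXT short-circuit, whose only job is to
 * avoid key comparisons for tuples that will be quicksorted later):
 *
 *		static int
 *		run_key_compare(int run1, int key1, int run2, int key2)
 *		{
 *			if (run1 != run2)
 *				return run1 - run2;		lower run number sorts first
 *			return (key1 > key2) - (key1 < key2);
 *		}
 */
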
3585 /*
3586  * Convert the existing unordered array of SortTuples to a bounded heap,
3587  * discarding all but the smallest "state->bound" tuples.
3588  *
3589  * When working with a bounded heap, we want to keep the largest entry
3590  * at the root (array entry zero), instead of the smallest as in the normal
3591  * sort case.  This allows us to discard the largest entry cheaply.
3592  * Therefore, we temporarily reverse the sort direction.
3593  *
3594  * We assume that all entries in a bounded heap will always have tupindex
3595  * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
3596  * the direction of comparison for tupindexes.
3597  */
3598 static void
3599 make_bounded_heap(Tuplesortstate *state)
3600 {
3601         int                     tupcount = state->memtupcount;
3602         int                     i;
3603
3604         Assert(state->status == TSS_INITIAL);
3605         Assert(state->bounded);
3606         Assert(tupcount >= state->bound);
3607
3608         /* Reverse sort direction so largest entry will be at root */
3609         reversedirection(state);
3610
3611         state->memtupcount = 0;         /* make the heap empty */
3612         for (i = 0; i < tupcount; i++)
3613         {
3614                 if (state->memtupcount >= state->bound &&
3615                   COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
3616                 {
3617                         /* New tuple would just get thrown out, so skip it */
3618                         free_sort_tuple(state, &state->memtuples[i]);
3619                         CHECK_FOR_INTERRUPTS();
3620                 }
3621                 else
3622                 {
3623                         /* Insert next tuple into heap */
3624                         /* Must copy source tuple to avoid possible overwrite */
3625                         SortTuple       stup = state->memtuples[i];
3626
3627                         tuplesort_heap_insert(state, &stup, 0, false);
3628
3629                         /* If heap too full, discard largest entry */
3630                         if (state->memtupcount > state->bound)
3631                         {
3632                                 free_sort_tuple(state, &state->memtuples[0]);
3633                                 tuplesort_heap_siftup(state, false);
3634                         }
3635                 }
3636         }
3637
3638         Assert(state->memtupcount == state->bound);
3639         state->status = TSS_BOUNDED;
3640 }
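
/*
 * Illustrative only: the same bounded "top-N" trick, reduced to plain
 * ints, keeps the N smallest values by maintaining a max-heap of size N
 * and testing each candidate against the root.  Sketch, where
 * topn_insert() and topn_replace_root() are assumed max-heap helpers, not
 * functions in this file:
 *
 *		for (i = 0; i < ninput; i++)
 *		{
 *			if (nheap < N)
 *				topn_insert(heap, &nheap, input[i]);
 *			else if (input[i] < heap[0])
 *				topn_replace_root(heap, nheap, input[i]);
 *			else
 *				;						candidate can never be in the top N
 *		}
 *
 * Temporarily reversing the sort direction is what turns tuplesort's
 * usual min-heap comparisons into the max-heap ordering this requires.
 */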
3641
3642 /*
3643  * Convert the bounded heap to a properly-sorted array
3644  */
3645 static void
3646 sort_bounded_heap(Tuplesortstate *state)
3647 {
3648         int                     tupcount = state->memtupcount;
3649
3650         Assert(state->status == TSS_BOUNDED);
3651         Assert(state->bounded);
3652         Assert(tupcount == state->bound);
3653
3654         /*
3655          * We can unheapify in place because each sift-up will remove the largest
3656          * entry, which we can promptly store in the newly freed slot at the end.
3657          * Once we're down to a single-entry heap, we're done.
3658          */
3659         while (state->memtupcount > 1)
3660         {
3661                 SortTuple       stup = state->memtuples[0];
3662
3663                 /* this sifts-up the next-largest entry and decreases memtupcount */
3664                 tuplesort_heap_siftup(state, false);
3665                 state->memtuples[state->memtupcount] = stup;
3666         }
3667         state->memtupcount = tupcount;
3668
3669         /*
3670          * Reverse sort direction back to the original state.  This is not
3671          * actually necessary but seems like a good idea for tidiness.
3672          */
3673         reversedirection(state);
3674
3675         state->status = TSS_SORTEDINMEM;
3676         state->boundUsed = true;
3677 }
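
/*
 * Illustrative only: the in-place unheapify above is the extraction phase
 * of an ordinary heapsort.  For a max-heap of ints it looks like this
 * (sketch; sift_down() is an assumed helper that restores the heap
 * property after the root is replaced):
 *
 *		while (n > 1)
 *		{
 *			int		largest = heap[0];
 *
 *			heap[0] = heap[--n];		move last element into the root
 *			sift_down(heap, n);			re-establish the heap invariant
 *			heap[n] = largest;			park largest in the freed slot
 *		}
 *
 * Because the heap was built with the largest entry at the root, the
 * array ends up ascending in the original (un-reversed) sort direction.
 */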
3678
3679 /*
3680  * Sort all memtuples using specialized qsort() routines.
3681  *
3682  * Quicksort is used for small in-memory sorts.  Quicksort is also generally
3683  * preferred to replacement selection for generating runs during external sort
3684  * operations, although replacement selection is sometimes used for the first
3685  * run.
3686  */
3687 static void
3688 tuplesort_sort_memtuples(Tuplesortstate *state)
3689 {
3690         if (state->memtupcount > 1)
3691         {
3692                 /* Can we use the single-key sort function? */
3693                 if (state->onlyKey != NULL)
3694                         qsort_ssup(state->memtuples, state->memtupcount,
3695                                            state->onlyKey);
3696                 else
3697                         qsort_tuple(state->memtuples,
3698                                                 state->memtupcount,
3699                                                 state->comparetup,
3700                                                 state);
3701         }
3702 }
3703
3704 /*
3705  * Insert a new tuple into an empty or existing heap, maintaining the
3706  * heap invariant.  Caller is responsible for ensuring there's room.
3707  *
3708  * Note: we assume *tuple is a temporary variable that can be scribbled on.
3709  * For some callers, tuple actually points to a memtuples[] entry above the
3710  * end of the heap.  This is safe as long as it's not immediately adjacent
3711  * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
3712  * is, it might get overwritten before being moved into the heap!
3713  */
3714 static void
3715 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
3716                                           int tupleindex, bool checkIndex)
3717 {
3718         SortTuple  *memtuples;
3719         int                     j;
3720
3721         /*
3722          * Save the tupleindex --- see notes above about writing on *tuple. It's a
3723          * historical artifact that tupleindex is passed as a separate argument
3724          * and not in *tuple, but it's notationally convenient so let's leave it
3725          * that way.
3726          */
3727         tuple->tupindex = tupleindex;
3728
3729         memtuples = state->memtuples;
3730         Assert(state->memtupcount < state->memtupsize);
3731         Assert(!checkIndex || tupleindex == RUN_FIRST);
3732
3733         CHECK_FOR_INTERRUPTS();
3734
3735         /*
3736          * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
3737          * using 1-based array indexes, not 0-based.
3738          */
3739         j = state->memtupcount++;
3740         while (j > 0)
3741         {
3742                 int                     i = (j - 1) >> 1;
3743
3744                 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
3745                         break;
3746                 memtuples[j] = memtuples[i];
3747                 j = i;
3748         }
3749         memtuples[j] = *tuple;
3750 }
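
/*
 * Illustrative only: stripped of SortTuples and HEAPCOMPARE(), the
 * sift-up loop above is the textbook binary-heap insertion (sketch, for a
 * min-heap of ints):
 *
 *		static void
 *		int_heap_insert(int *heap, int *n, int value)
 *		{
 *			int		j = (*n)++;
 *
 *			while (j > 0)
 *			{
 *				int		parent = (j - 1) / 2;
 *
 *				if (value >= heap[parent])
 *					break;
 *				heap[j] = heap[parent];		pull the parent down
 *				j = parent;
 *			}
 *			heap[j] = value;
 *		}
 */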
3751
3752 /*
3753  * The tuple at state->memtuples[0] has been removed from the heap.
3754  * Decrement memtupcount, and sift up to maintain the heap invariant.
3755  */
3756 static void
3757 tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
3758 {
3759         SortTuple  *memtuples = state->memtuples;
3760         SortTuple  *tuple;
3761         int                     i,
3762                                 n;
3763
3764         Assert(!checkIndex || state->currentRun == RUN_FIRST);
3765         if (--state->memtupcount <= 0)
3766                 return;
3767
3768         CHECK_FOR_INTERRUPTS();
3769
3770         n = state->memtupcount;
3771         tuple = &memtuples[n];          /* tuple that must be reinserted */
3772         i = 0;                                          /* i is where the "hole" is */
3773         for (;;)
3774         {
3775                 int                     j = 2 * i + 1;
3776
3777                 if (j >= n)
3778                         break;
3779                 if (j + 1 < n &&
3780                         HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
3781                         j++;
3782                 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
3783                         break;
3784                 memtuples[i] = memtuples[j];
3785                 i = j;
3786         }
3787         memtuples[i] = *tuple;
3788 }
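
/*
 * Illustrative only: despite the historical "siftup" name (which follows
 * Knuth), the loop above is what most texts call the "sift down" step of
 * a binary heap.  For a min-heap of ints it reads (sketch):
 *
 *		static void
 *		int_heap_delete_root(int *heap, int *n)
 *		{
 *			int		value;
 *			int		i = 0;					the "hole" starts at the root
 *
 *			if (--(*n) <= 0)
 *				return;						heap is now empty
 *			value = heap[*n];				last element must be re-inserted
 *			for (;;)
 *			{
 *				int		j = 2 * i + 1;
 *
 *				if (j >= *n)
 *					break;
 *				if (j + 1 < *n && heap[j + 1] < heap[j])
 *					j++;					descend toward the smaller child
 *				if (value <= heap[j])
 *					break;
 *				heap[i] = heap[j];			move the child up into the hole
 *				i = j;
 *			}
 *			heap[i] = value;
 *		}
 */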
3789
3790 /*
3791  * Function to reverse the sort direction from its current state
3792  *
3793  * It is not safe to call this when performing hash tuplesorts
3794  */
3795 static void
3796 reversedirection(Tuplesortstate *state)
3797 {
3798         SortSupport sortKey = state->sortKeys;
3799         int                     nkey;
3800
3801         for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
3802         {
3803                 sortKey->ssup_reverse = !sortKey->ssup_reverse;
3804                 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
3805         }
3806 }
3807
3808
3809 /*
3810  * Tape interface routines
3811  */
3812
3813 static unsigned int
3814 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
3815 {
3816         unsigned int len;
3817
3818         if (LogicalTapeRead(state->tapeset, tapenum,
3819                                                 &len, sizeof(len)) != sizeof(len))
3820                 elog(ERROR, "unexpected end of tape");
3821         if (len == 0 && !eofOK)
3822                 elog(ERROR, "unexpected end of data");
3823         return len;
3824 }
3825
3826 static void
3827 markrunend(Tuplesortstate *state, int tapenum)
3828 {
3829         unsigned int len = 0;
3830
3831         LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
3832 }
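
/*
 * Illustrative only: together with the WRITETUP() routines further down,
 * getlen() and markrunend() give each run on tape the framing
 *
 *		[length word][tuple body][trailing length word if randomAccess] ...
 *		[zero length word]
 *
 * A zero length word can never belong to a real tuple, so it serves as
 * the end-of-run marker.  Reading a whole run therefore looks roughly
 * like this (sketch, in the style of the merge code elsewhere in this
 * file):
 *
 *		for (;;)
 *		{
 *			unsigned int len = getlen(state, tapenum, true);
 *
 *			if (len == 0)
 *				break;					end of this run
 *			READTUP(state, &stup, tapenum, len);
 *		}
 */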
3833
3834 /*
3835  * Get memory for tuple from within READTUP() routine.  Allocate
3836  * memory and account for that, or consume from tape's batch
3837  * allocation.
3838  *
3839  * Memory returned here in the final on-the-fly merge case is recycled
3840  * from tape's batch allocation.  Otherwise, callers must pfree() or
3841  * reset tuple child memory context, and account for that with a
3842  * FREEMEM().  Currently, this only ever needs to happen in WRITETUP()
3843  * routines.
3844  */
3845 static void *
3846 readtup_alloc(Tuplesortstate *state, int tapenum, Size tuplen)
3847 {
3848         if (state->batchUsed)
3849         {
3850                 /*
3851                  * No USEMEM() call, because during final on-the-fly merge
3852                  * accounting is based on tape-private state. ("Overflow"
3853                  * allocations are detected as an indication that a new round
3854                  * of preloading is required. Preloading marks existing
3855                  * contents of tape's batch buffer for reuse.)
3856                  */
3857                 return mergebatchalloc(state, tapenum, tuplen);
3858         }
3859         else
3860         {
3861                 char       *ret;
3862
3863                 /* Batch allocation yet to be performed */
3864                 ret = MemoryContextAlloc(state->tuplecontext, tuplen);
3865                 USEMEM(state, GetMemoryChunkSpace(ret));
3866                 return ret;
3867         }
3868 }
3869
3870
3871 /*
3872  * Routines specialized for HeapTuple (actually MinimalTuple) case
3873  */
3874
3875 static int
3876 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3877 {
3878         SortSupport sortKey = state->sortKeys;
3879         HeapTupleData ltup;
3880         HeapTupleData rtup;
3881         TupleDesc       tupDesc;
3882         int                     nkey;
3883         int32           compare;
3884         AttrNumber      attno;
3885         Datum           datum1,
3886                                 datum2;
3887         bool            isnull1,
3888                                 isnull2;
3889
3890
3891         /* Compare the leading sort key */
3892         compare = ApplySortComparator(a->datum1, a->isnull1,
3893                                                                   b->datum1, b->isnull1,
3894                                                                   sortKey);
3895         if (compare != 0)
3896                 return compare;
3897
3898         /* Compare additional sort keys */
3899         ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3900         ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
3901         rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3902         rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
3903         tupDesc = state->tupDesc;
3904
3905         if (sortKey->abbrev_converter)
3906         {
3907                 attno = sortKey->ssup_attno;
3908
3909                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3910                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3911
3912                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3913                                                                                                 datum2, isnull2,
3914                                                                                                 sortKey);
3915                 if (compare != 0)
3916                         return compare;
3917         }
3918
3919         sortKey++;
3920         for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
3921         {
3922                 attno = sortKey->ssup_attno;
3923
3924                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3925                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3926
3927                 compare = ApplySortComparator(datum1, isnull1,
3928                                                                           datum2, isnull2,
3929                                                                           sortKey);
3930                 if (compare != 0)
3931                         return compare;
3932         }
3933
3934         return 0;
3935 }
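
/*
 * Illustrative only: the structure above is the general abbreviated-keys
 * protocol shared by the comparetup routines that support abbreviation.
 * datum1 caches a cheap, order-preserving abbreviation of the leading
 * key, so in outline (sketch; full1/full2 stand for the untruncated
 * leading-key values):
 *
 *		compare = ApplySortComparator(a->datum1, a->isnull1,
 *									  b->datum1, b->isnull1, sortKey);
 *		if (compare != 0)
 *			return compare;				abbreviated values already decide
 *		if (sortKey->abbrev_converter)
 *		{
 *			compare = ApplySortAbbrevFullComparator(full1, isnull1,
 *													full2, isnull2, sortKey);
 *			if (compare != 0)
 *				return compare;			tie broken by authoritative values
 *		}
 *		... then compare any remaining sort keys ...
 */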
3936
3937 static void
3938 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
3939 {
3940         /*
3941          * We expect the passed "tup" to be a TupleTableSlot, and form a
3942          * MinimalTuple using the exported interface for that.
3943          */
3944         TupleTableSlot *slot = (TupleTableSlot *) tup;
3945         Datum           original;
3946         MinimalTuple tuple;
3947         HeapTupleData htup;
3948         MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
3949
3950         /* copy the tuple into sort storage */
3951         tuple = ExecCopySlotMinimalTuple(slot);
3952         stup->tuple = (void *) tuple;
3953         USEMEM(state, GetMemoryChunkSpace(tuple));
3954         /* set up first-column key value */
3955         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3956         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3957         original = heap_getattr(&htup,
3958                                                         state->sortKeys[0].ssup_attno,
3959                                                         state->tupDesc,
3960                                                         &stup->isnull1);
3961
3962         MemoryContextSwitchTo(oldcontext);
3963
3964         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3965         {
3966                 /*
3967                  * Store ordinary Datum representation, or NULL value.  If there is a
3968                  * converter it won't expect NULL values, and cost model is not
3969                  * required to account for NULL, so in that case we avoid calling
3970                  * converter and just set datum1 to zeroed representation (to be
3971                  * consistent, and to support cheap inequality tests for NULL
3972                  * abbreviated keys).
3973                  */
3974                 stup->datum1 = original;
3975         }
3976         else if (!consider_abort_common(state))
3977         {
3978                 /* Store abbreviated key representation */
3979                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3980                                                                                                                  state->sortKeys);
3981         }
3982         else
3983         {
3984                 /* Abort abbreviation */
3985                 int                     i;
3986
3987                 stup->datum1 = original;
3988
3989                 /*
3990                  * Set state to be consistent with never trying abbreviation.
3991                  *
3992                  * Alter datum1 representation in already-copied tuples, so as to
3993                  * ensure a consistent representation (current tuple was just
3994                  * handled).  It does not matter if some dumped tuples are already
3995                  * sorted on tape, since serialized tuples lack abbreviated keys
3996                  * (TSS_BUILDRUNS state prevents control reaching here in any
3997                  * case).
3998                  */
3999                 for (i = 0; i < state->memtupcount; i++)
4000                 {
4001                         SortTuple  *mtup = &state->memtuples[i];
4002
4003                         htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
4004                                 MINIMAL_TUPLE_OFFSET;
4005                         htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
4006                                                                                          MINIMAL_TUPLE_OFFSET);
4007
4008                         mtup->datum1 = heap_getattr(&htup,
4009                                                                                 state->sortKeys[0].ssup_attno,
4010                                                                                 state->tupDesc,
4011                                                                                 &mtup->isnull1);
4012                 }
4013         }
4014 }
4015
4016 static void
4017 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
4018 {
4019         MinimalTuple tuple = (MinimalTuple) stup->tuple;
4020
4021         /* the part of the MinimalTuple we'll write: */
4022         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
4023         unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
4024
4025         /* total on-disk footprint: */
4026         unsigned int tuplen = tupbodylen + sizeof(int);
4027
4028         LogicalTapeWrite(state->tapeset, tapenum,
4029                                          (void *) &tuplen, sizeof(tuplen));
4030         LogicalTapeWrite(state->tapeset, tapenum,
4031                                          (void *) tupbody, tupbodylen);
4032         if (state->randomAccess)        /* need trailing length word? */
4033                 LogicalTapeWrite(state->tapeset, tapenum,
4034                                                  (void *) &tuplen, sizeof(tuplen));
4035
4036         FREEMEM(state, GetMemoryChunkSpace(tuple));
4037         heap_free_minimal_tuple(tuple);
4038 }
4039
4040 static void
4041 readtup_heap(Tuplesortstate *state, SortTuple *stup,
4042                          int tapenum, unsigned int len)
4043 {
4044         unsigned int tupbodylen = len - sizeof(int);
4045         unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
4046         MinimalTuple tuple = (MinimalTuple) readtup_alloc(state, tapenum, tuplen);
4047         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
4048         HeapTupleData htup;
4049
4050         /* read in the tuple proper */
4051         tuple->t_len = tuplen;
4052         LogicalTapeReadExact(state->tapeset, tapenum,
4053                                                  tupbody, tupbodylen);
4054         if (state->randomAccess)        /* need trailing length word? */
4055                 LogicalTapeReadExact(state->tapeset, tapenum,
4056                                                          &tuplen, sizeof(tuplen));
4057         stup->tuple = (void *) tuple;
4058         /* set up first-column key value */
4059         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
4060         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
4061         stup->datum1 = heap_getattr(&htup,
4062                                                                 state->sortKeys[0].ssup_attno,
4063                                                                 state->tupDesc,
4064                                                                 &stup->isnull1);
4065 }
4066
4067 /*
4068  * Routines specialized for the CLUSTER case (HeapTuple data, with
4069  * comparisons per a btree index definition)
4070  */
4071
4072 static int
4073 comparetup_cluster(const SortTuple *a, const SortTuple *b,
4074                                    Tuplesortstate *state)
4075 {
4076         SortSupport sortKey = state->sortKeys;
4077         HeapTuple       ltup;
4078         HeapTuple       rtup;
4079         TupleDesc       tupDesc;
4080         int                     nkey;
4081         int32           compare;
4082         Datum           datum1,
4083                                 datum2;
4084         bool            isnull1,
4085                                 isnull2;
4086         AttrNumber      leading = state->indexInfo->ii_KeyAttrNumbers[0];
4087
4088         /* Be prepared to compare additional sort keys */
4089         ltup = (HeapTuple) a->tuple;
4090         rtup = (HeapTuple) b->tuple;
4091         tupDesc = state->tupDesc;
4092
4093         /* Compare the leading sort key, if it's simple */
4094         if (leading != 0)
4095         {
4096                 compare = ApplySortComparator(a->datum1, a->isnull1,
4097                                                                           b->datum1, b->isnull1,
4098                                                                           sortKey);
4099                 if (compare != 0)
4100                         return compare;
4101
4102                 if (sortKey->abbrev_converter)
4103                 {
4104                         datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
4105                         datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
4106
4107                         compare = ApplySortAbbrevFullComparator(datum1, isnull1,
4108                                                                                                         datum2, isnull2,
4109                                                                                                         sortKey);
4110                 }
4111                 if (compare != 0 || state->nKeys == 1)
4112                         return compare;
4113                 /* Compare additional columns the hard way */
4114                 sortKey++;
4115                 nkey = 1;
4116         }
4117         else
4118         {
4119                 /* Must compare all keys the hard way */
4120                 nkey = 0;
4121         }
4122
4123         if (state->indexInfo->ii_Expressions == NULL)
4124         {
4125                 /* If not expression index, just compare the proper heap attrs */
4126
4127                 for (; nkey < state->nKeys; nkey++, sortKey++)
4128                 {
4129                         AttrNumber      attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
4130
4131                         datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
4132                         datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
4133
4134                         compare = ApplySortComparator(datum1, isnull1,
4135                                                                                   datum2, isnull2,
4136                                                                                   sortKey);
4137                         if (compare != 0)
4138                                 return compare;
4139                 }
4140         }
4141         else
4142         {
4143                 /*
4144                  * In the expression index case, compute the whole index tuple and
4145                  * then compare values.  It would perhaps be faster to compute only as
4146                  * many columns as we need to compare, but that would require
4147                  * duplicating all the logic in FormIndexDatum.
4148                  */
4149                 Datum           l_index_values[INDEX_MAX_KEYS];
4150                 bool            l_index_isnull[INDEX_MAX_KEYS];
4151                 Datum           r_index_values[INDEX_MAX_KEYS];
4152                 bool            r_index_isnull[INDEX_MAX_KEYS];
4153                 TupleTableSlot *ecxt_scantuple;
4154
4155                 /* Reset context each time to prevent memory leakage */
4156                 ResetPerTupleExprContext(state->estate);
4157
4158                 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
4159
4160                 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
4161                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
4162                                            l_index_values, l_index_isnull);
4163
4164                 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
4165                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
4166                                            r_index_values, r_index_isnull);
4167
4168                 for (; nkey < state->nKeys; nkey++, sortKey++)
4169                 {
4170                         compare = ApplySortComparator(l_index_values[nkey],
4171                                                                                   l_index_isnull[nkey],
4172                                                                                   r_index_values[nkey],
4173                                                                                   r_index_isnull[nkey],
4174                                                                                   sortKey);
4175                         if (compare != 0)
4176                                 return compare;
4177                 }
4178         }
4179
4180         return 0;
4181 }
4182
4183 static void
4184 copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
4185 {
4186         HeapTuple       tuple = (HeapTuple) tup;
4187         Datum           original;
4188         MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
4189
4190         /* copy the tuple into sort storage */
4191         tuple = heap_copytuple(tuple);
4192         stup->tuple = (void *) tuple;
4193         USEMEM(state, GetMemoryChunkSpace(tuple));
4194
4195         MemoryContextSwitchTo(oldcontext);
4196
4197         /*
4198          * set up first-column key value, and potentially abbreviate, if it's a
4199          * simple column
4200          */
4201         if (state->indexInfo->ii_KeyAttrNumbers[0] == 0)
4202                 return;
4203
4204         original = heap_getattr(tuple,
4205                                                         state->indexInfo->ii_KeyAttrNumbers[0],
4206                                                         state->tupDesc,
4207                                                         &stup->isnull1);
4208
4209         if (!state->sortKeys->abbrev_converter || stup->isnull1)
4210         {
4211                 /*
4212                  * Store ordinary Datum representation, or NULL value.  If there is a
4213                  * converter it won't expect NULL values, and cost model is not
4214                  * required to account for NULL, so in that case we avoid calling
4215                  * converter and just set datum1 to zeroed representation (to be
4216                  * consistent, and to support cheap inequality tests for NULL
4217                  * abbreviated keys).
4218                  */
4219                 stup->datum1 = original;
4220         }
4221         else if (!consider_abort_common(state))
4222         {
4223                 /* Store abbreviated key representation */
4224                 stup->datum1 = state->sortKeys->abbrev_converter(original,
4225                                                                                                                  state->sortKeys);
4226         }
4227         else
4228         {
4229                 /* Abort abbreviation */
4230                 int                     i;
4231
4232                 stup->datum1 = original;
4233
4234                 /*
4235                  * Set state to be consistent with never trying abbreviation.
4236                  *
4237                  * Alter datum1 representation in already-copied tuples, so as to
4238                  * ensure a consistent representation (current tuple was just
4239                  * handled).  It does not matter if some dumped tuples are already
4240                  * sorted on tape, since serialized tuples lack abbreviated keys
4241                  * (TSS_BUILDRUNS state prevents control reaching here in any
4242                  * case).
4243                  */
4244                 for (i = 0; i < state->memtupcount; i++)
4245                 {
4246                         SortTuple  *mtup = &state->memtuples[i];
4247
4248                         tuple = (HeapTuple) mtup->tuple;
4249                         mtup->datum1 = heap_getattr(tuple,
4250                                                                           state->indexInfo->ii_KeyAttrNumbers[0],
4251                                                                                 state->tupDesc,
4252                                                                                 &mtup->isnull1);
4253                 }
4254         }
4255 }
4256
4257 static void
4258 writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
4259 {
4260         HeapTuple       tuple = (HeapTuple) stup->tuple;
4261         unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
4262
4263         /* We need to store t_self, but not other fields of HeapTupleData */
4264         LogicalTapeWrite(state->tapeset, tapenum,
4265                                          &tuplen, sizeof(tuplen));
4266         LogicalTapeWrite(state->tapeset, tapenum,
4267                                          &tuple->t_self, sizeof(ItemPointerData));
4268         LogicalTapeWrite(state->tapeset, tapenum,
4269                                          tuple->t_data, tuple->t_len);
4270         if (state->randomAccess)        /* need trailing length word? */
4271                 LogicalTapeWrite(state->tapeset, tapenum,
4272                                                  &tuplen, sizeof(tuplen));
4273
4274         FREEMEM(state, GetMemoryChunkSpace(tuple));
4275         heap_freetuple(tuple);
4276 }
4277
4278 static void
4279 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
4280                                 int tapenum, unsigned int tuplen)
4281 {
4282         unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
4283         HeapTuple       tuple = (HeapTuple) readtup_alloc(state,
4284                                                                                                   tapenum,
4285                                                                                                   t_len + HEAPTUPLESIZE);
4286
4287         /* Reconstruct the HeapTupleData header */
4288         tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
4289         tuple->t_len = t_len;
4290         LogicalTapeReadExact(state->tapeset, tapenum,
4291                                                  &tuple->t_self, sizeof(ItemPointerData));
4292         /* We don't currently bother to reconstruct t_tableOid */
4293         tuple->t_tableOid = InvalidOid;
4294         /* Read in the tuple body */
4295         LogicalTapeReadExact(state->tapeset, tapenum,
4296                                                  tuple->t_data, tuple->t_len);
4297         if (state->randomAccess)        /* need trailing length word? */
4298                 LogicalTapeReadExact(state->tapeset, tapenum,
4299                                                          &tuplen, sizeof(tuplen));
4300         stup->tuple = (void *) tuple;
4301         /* set up first-column key value, if it's a simple column */
4302         if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
4303                 stup->datum1 = heap_getattr(tuple,
4304                                                                         state->indexInfo->ii_KeyAttrNumbers[0],
4305                                                                         state->tupDesc,
4306                                                                         &stup->isnull1);
4307 }
4308
4309
4310 /*
4311  * Routines specialized for IndexTuple case
4312  *
4313  * The btree and hash cases require separate comparison functions, but the
4314  * IndexTuple representation is the same so the copy/write/read support
4315  * functions can be shared.
4316  */
4317
4318 static int
4319 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
4320                                            Tuplesortstate *state)
4321 {
4322         /*
4323          * This is similar to comparetup_heap(), but expects index tuples.  There
4324          * is also special handling for enforcing uniqueness, and special
4325          * treatment for equal keys at the end.
4326          */
4327         SortSupport sortKey = state->sortKeys;
4328         IndexTuple      tuple1;
4329         IndexTuple      tuple2;
4330         int                     keysz;
4331         TupleDesc       tupDes;
4332         bool            equal_hasnull = false;
4333         int                     nkey;
4334         int32           compare;
4335         Datum           datum1,
4336                                 datum2;
4337         bool            isnull1,
4338                                 isnull2;
4339
4340
4341         /* Compare the leading sort key */
4342         compare = ApplySortComparator(a->datum1, a->isnull1,
4343                                                                   b->datum1, b->isnull1,
4344                                                                   sortKey);
4345         if (compare != 0)
4346                 return compare;
4347
4348         /* Compare additional sort keys */
4349         tuple1 = (IndexTuple) a->tuple;
4350         tuple2 = (IndexTuple) b->tuple;
4351         keysz = state->nKeys;
4352         tupDes = RelationGetDescr(state->indexRel);
4353
4354         if (sortKey->abbrev_converter)
4355         {
4356                 datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
4357                 datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
4358
4359                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
4360                                                                                                 datum2, isnull2,
4361                                                                                                 sortKey);
4362                 if (compare != 0)
4363                         return compare;
4364         }
4365
4366         /* they are equal, so we only need to examine one null flag */
4367         if (a->isnull1)
4368                 equal_hasnull = true;
4369
4370         sortKey++;
4371         for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
4372         {
4373                 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
4374                 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
4375
4376                 compare = ApplySortComparator(datum1, isnull1,
4377                                                                           datum2, isnull2,
4378                                                                           sortKey);
4379                 if (compare != 0)
4380                         return compare;         /* done when we find unequal attributes */
4381
4382                 /* they are equal, so we only need to examine one null flag */
4383                 if (isnull1)
4384                         equal_hasnull = true;
4385         }
4386
4387         /*
4388          * If btree has asked us to enforce uniqueness, complain if two equal
4389          * tuples are detected (unless there was at least one NULL field).
4390          *
4391          * It is sufficient to make the test here, because if two tuples are equal
4392          * they *must* get compared at some stage of the sort --- otherwise the
4393          * sort algorithm wouldn't have checked whether one must appear before the
4394          * other.
4395          */
4396         if (state->enforceUnique && !equal_hasnull)
4397         {
4398                 Datum           values[INDEX_MAX_KEYS];
4399                 bool            isnull[INDEX_MAX_KEYS];
4400                 char       *key_desc;
4401
4402                 /*
4403                  * Some rather brain-dead implementations of qsort (such as the one in
4404                  * QNX 4) will sometimes call the comparison routine to compare a
4405                  * value to itself, but we always use our own implementation, which
4406                  * does not.
4407                  */
4408                 Assert(tuple1 != tuple2);
4409
4410                 index_deform_tuple(tuple1, tupDes, values, isnull);
4411
4412                 key_desc = BuildIndexValueDescription(state->indexRel, values, isnull);
4413
4414                 ereport(ERROR,
4415                                 (errcode(ERRCODE_UNIQUE_VIOLATION),
4416                                  errmsg("could not create unique index \"%s\"",
4417                                                 RelationGetRelationName(state->indexRel)),
4418                                  key_desc ? errdetail("Key %s is duplicated.", key_desc) :
4419                                  errdetail("Duplicate keys exist."),
4420                                  errtableconstraint(state->heapRel,
4421                                                                  RelationGetRelationName(state->indexRel))));
4422         }
4423
4424         /*
4425          * If key values are equal, we sort on ItemPointer.  This does not affect
4426          * validity of the finished index, but it may be useful to have index
4427          * scans in physical order.
4428          */
4429         {
4430                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
4431                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
4432
4433                 if (blk1 != blk2)
4434                         return (blk1 < blk2) ? -1 : 1;
4435         }
4436         {
4437                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
4438                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
4439
4440                 if (pos1 != pos2)
4441                         return (pos1 < pos2) ? -1 : 1;
4442         }
4443
4444         return 0;
4445 }
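
/*
 * Illustrative only: the final tie-break above is simply a lexicographic
 * comparison of the heap TID as the pair (block number, offset number),
 * which is why equal-keyed index entries come out in physical heap order.
 * As a standalone comparator it would read (sketch; PostgreSQL's own
 * ItemPointerCompare() implements the same ordering):
 *
 *		static int
 *		tid_compare(ItemPointer p1, ItemPointer p2)
 *		{
 *			BlockNumber blk1 = ItemPointerGetBlockNumber(p1);
 *			BlockNumber blk2 = ItemPointerGetBlockNumber(p2);
 *			OffsetNumber pos1 = ItemPointerGetOffsetNumber(p1);
 *			OffsetNumber pos2 = ItemPointerGetOffsetNumber(p2);
 *
 *			if (blk1 != blk2)
 *				return (blk1 < blk2) ? -1 : 1;
 *			if (pos1 != pos2)
 *				return (pos1 < pos2) ? -1 : 1;
 *			return 0;
 *		}
 */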
4446
4447 static int
4448 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
4449                                           Tuplesortstate *state)
4450 {
4451         uint32          hash1;
4452         uint32          hash2;
4453         IndexTuple      tuple1;
4454         IndexTuple      tuple2;
4455
4456         /*
4457          * Fetch hash keys and mask off bits we don't want to sort by. We know
4458          * that the first column of the index tuple is the hash key.
4459          */
4460         Assert(!a->isnull1);
4461         hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
4462         Assert(!b->isnull1);
4463         hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
4464
4465         if (hash1 > hash2)
4466                 return 1;
4467         else if (hash1 < hash2)
4468                 return -1;
4469
4470         /*
4471          * If hash values are equal, we sort on ItemPointer.  This does not affect
4472          * validity of the finished index, but it may be useful to have index
4473          * scans in physical order.
4474          */
4475         tuple1 = (IndexTuple) a->tuple;
4476         tuple2 = (IndexTuple) b->tuple;
4477
4478         {
4479                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
4480                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
4481
4482                 if (blk1 != blk2)
4483                         return (blk1 < blk2) ? -1 : 1;
4484         }
4485         {
4486                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
4487                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
4488
4489                 if (pos1 != pos2)
4490                         return (pos1 < pos2) ? -1 : 1;
4491         }
4492
4493         return 0;
4494 }
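
/*
 * Illustrative only: masking the hash value restricts the comparison to
 * the bits that determine which bucket a tuple will land in, so the sort
 * effectively groups tuples by destination bucket.  For an index built
 * with 2^N buckets the mask would be 2^N - 1 (sketch):
 *
 *		uint32	bucket1 = hashvalue1 & state->hash_mask;
 *		uint32	bucket2 = hashvalue2 & state->hash_mask;
 *
 * Two tuples compare equal here whenever they belong to the same bucket,
 * regardless of their remaining hash bits; the TID tie-break then keeps
 * same-bucket entries in physical heap order.
 */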
4495
4496 static void
4497 copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
4498 {
4499         IndexTuple      tuple = (IndexTuple) tup;
4500         unsigned int tuplen = IndexTupleSize(tuple);
4501         IndexTuple      newtuple;
4502         Datum           original;
4503
4504         /* copy the tuple into sort storage */
4505         newtuple = (IndexTuple) MemoryContextAlloc(state->tuplecontext, tuplen);
4506         memcpy(newtuple, tuple, tuplen);
4507         USEMEM(state, GetMemoryChunkSpace(newtuple));
4508         stup->tuple = (void *) newtuple;
4509         /* set up first-column key value */
4510         original = index_getattr(newtuple,
4511                                                          1,
4512                                                          RelationGetDescr(state->indexRel),
4513                                                          &stup->isnull1);
4514
4515         if (!state->sortKeys->abbrev_converter || stup->isnull1)
4516         {
4517                 /*
4518                  * Store ordinary Datum representation, or NULL value.  If there is a
4519                  * converter it won't expect NULL values, and cost model is not
4520                  * required to account for NULL, so in that case we avoid calling
4521                  * converter and just set datum1 to zeroed representation (to be
4522                  * consistent, and to support cheap inequality tests for NULL
4523                  * abbreviated keys).
4524                  */
4525                 stup->datum1 = original;
4526         }
4527         else if (!consider_abort_common(state))
4528         {
4529                 /* Store abbreviated key representation */
4530                 stup->datum1 = state->sortKeys->abbrev_converter(original,
4531                                                                                                                  state->sortKeys);
4532         }
4533         else
4534         {
4535                 /* Abort abbreviation */
4536                 int                     i;
4537
4538                 stup->datum1 = original;
4539
4540                 /*
4541                  * Set state to be consistent with never trying abbreviation.
4542                  *
4543                  * Alter datum1 representation in already-copied tuples, so as to
4544                  * ensure a consistent representation (current tuple was just
4545                  * handled).  It does not matter if some dumped tuples are already
4546                  * sorted on tape, since serialized tuples lack abbreviated keys
4547                  * (TSS_BUILDRUNS state prevents control reaching here in any
4548                  * case).
4549                  */
4550                 for (i = 0; i < state->memtupcount; i++)
4551                 {
4552                         SortTuple  *mtup = &state->memtuples[i];
4553
4554                         tuple = (IndexTuple) mtup->tuple;
4555                         mtup->datum1 = index_getattr(tuple,
4556                                                                                  1,
4557                                                                                  RelationGetDescr(state->indexRel),
4558                                                                                  &mtup->isnull1);
4559                 }
4560         }
4561 }
4562
4563 static void
4564 writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
4565 {
4566         IndexTuple      tuple = (IndexTuple) stup->tuple;
4567         unsigned int tuplen;
4568
4569         tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
4570         LogicalTapeWrite(state->tapeset, tapenum,
4571                                          (void *) &tuplen, sizeof(tuplen));
4572         LogicalTapeWrite(state->tapeset, tapenum,
4573                                          (void *) tuple, IndexTupleSize(tuple));
4574         if (state->randomAccess)        /* need trailing length word? */
4575                 LogicalTapeWrite(state->tapeset, tapenum,
4576                                                  (void *) &tuplen, sizeof(tuplen));
4577
4578         FREEMEM(state, GetMemoryChunkSpace(tuple));
4579         pfree(tuple);
4580 }
4581
4582 static void
4583 readtup_index(Tuplesortstate *state, SortTuple *stup,
4584                           int tapenum, unsigned int len)
4585 {
4586         unsigned int tuplen = len - sizeof(unsigned int);
4587         IndexTuple      tuple = (IndexTuple) readtup_alloc(state, tapenum, tuplen);
4588
4589         LogicalTapeReadExact(state->tapeset, tapenum,
4590                                                  tuple, tuplen);
4591         if (state->randomAccess)        /* need trailing length word? */
4592                 LogicalTapeReadExact(state->tapeset, tapenum,
4593                                                          &tuplen, sizeof(tuplen));
4594         stup->tuple = (void *) tuple;
4595         /* set up first-column key value */
4596         stup->datum1 = index_getattr(tuple,
4597                                                                  1,
4598                                                                  RelationGetDescr(state->indexRel),
4599                                                                  &stup->isnull1);
4600 }
4601
4602 /*
4603  * Routines specialized for DatumTuple case
4604  */
4605
4606 static int
4607 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
4608 {
4609         int                     compare;
4610
4611         compare = ApplySortComparator(a->datum1, a->isnull1,
4612                                                                   b->datum1, b->isnull1,
4613                                                                   state->sortKeys);
4614         if (compare != 0)
4615                 return compare;
4616
4617         /* if we have abbreviations, then "tuple" has the original value */
4618
4619         if (state->sortKeys->abbrev_converter)
4620                 compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
4621                                                                            PointerGetDatum(b->tuple), b->isnull1,
4622                                                                                                 state->sortKeys);
4623
4624         return compare;
4625 }
4626
4627 static void
4628 copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
4629 {
4630         /* Not currently needed */
4631         elog(ERROR, "copytup_datum() should not be called");
4632 }
4633
4634 static void
4635 writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
4636 {
4637         void       *waddr;
4638         unsigned int tuplen;
4639         unsigned int writtenlen;
4640
4641         if (stup->isnull1)
4642         {
4643                 waddr = NULL;
4644                 tuplen = 0;
4645         }
4646         else if (!state->tuples)
4647         {
4648                 waddr = &stup->datum1;
4649                 tuplen = sizeof(Datum);
4650         }
4651         else
4652         {
4653                 waddr = stup->tuple;
4654                 tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, state->datumTypeLen);
4655                 Assert(tuplen != 0);
4656         }
4657
4658         writtenlen = tuplen + sizeof(unsigned int);
4659
4660         LogicalTapeWrite(state->tapeset, tapenum,
4661                                          (void *) &writtenlen, sizeof(writtenlen));
4662         LogicalTapeWrite(state->tapeset, tapenum,
4663                                          waddr, tuplen);
4664         if (state->randomAccess)        /* need trailing length word? */
4665                 LogicalTapeWrite(state->tapeset, tapenum,
4666                                                  (void *) &writtenlen, sizeof(writtenlen));
4667
4668         if (stup->tuple)
4669         {
4670                 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
4671                 pfree(stup->tuple);
4672         }
4673 }
4674
4675 static void
4676 readtup_datum(Tuplesortstate *state, SortTuple *stup,
4677                           int tapenum, unsigned int len)
4678 {
4679         unsigned int tuplen = len - sizeof(unsigned int);
4680
4681         if (tuplen == 0)
4682         {
4683                 /* it's NULL */
4684                 stup->datum1 = (Datum) 0;
4685                 stup->isnull1 = true;
4686                 stup->tuple = NULL;
4687         }
4688         else if (!state->tuples)
4689         {
4690                 Assert(tuplen == sizeof(Datum));
4691                 LogicalTapeReadExact(state->tapeset, tapenum,
4692                                                          &stup->datum1, tuplen);
4693                 stup->isnull1 = false;
4694                 stup->tuple = NULL;
4695         }
4696         else
4697         {
4698                 void       *raddr = readtup_alloc(state, tapenum, tuplen);
4699
4700                 LogicalTapeReadExact(state->tapeset, tapenum,
4701                                                          raddr, tuplen);
4702                 stup->datum1 = PointerGetDatum(raddr);
4703                 stup->isnull1 = false;
4704                 stup->tuple = raddr;
4705         }
4706
4707         if (state->randomAccess)        /* need trailing length word? */
4708                 LogicalTapeReadExact(state->tapeset, tapenum,
4709                                                          &tuplen, sizeof(tuplen));
4710 }
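
/*
 * Illustrative only: together with writetup_datum(), the on-tape encoding
 * of a datum-sort entry has three shapes (sketch):
 *
 *		- a zero-length body means the datum is NULL;
 *		- for a pass-by-value type (!state->tuples), the body is the Datum
 *		  itself, sizeof(Datum) bytes long;
 *		- for a pass-by-reference type, the body is the pointed-to value
 *		  written verbatim, datumGetSize() bytes long.
 *
 * A zero-length body is reserved for NULL and never used for a real
 * value; writetup_datum() asserts tuplen != 0 in the by-reference case to
 * preserve that invariant.
 */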
4711
4712 /*
4713  * Convenience routine to free a tuple previously loaded into sort memory
4714  */
4715 static void
4716 free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
4717 {
4718         FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
4719         pfree(stup->tuple);
4720 }