1 /*-------------------------------------------------------------------------
2  *
3  * tuplesort.c
4  *        Generalized tuple sorting routines.
5  *
6  * This module handles sorting of heap tuples, index tuples, or single
7  * Datums (and could easily support other kinds of sortable objects,
8  * if necessary).  It works efficiently for both small and large amounts
9  * of data.  Small amounts are sorted in-memory using qsort().  Large
10  * amounts are sorted using temporary files and a standard external sort
11  * algorithm.
12  *
13  * See Knuth, volume 3, for more than you want to know about the external
14  * sorting algorithm.  We divide the input into sorted runs using replacement
15  * selection, in the form of a priority tree implemented as a heap
16  * (essentially his Algorithm 5.2.3H), then merge the runs using polyphase
17  * merge, Knuth's Algorithm 5.4.2D.  The logical "tapes" used by Algorithm D
18  * are implemented by logtape.c, which avoids space wastage by recycling
19  * disk space as soon as each block is read from its "tape".
20  *
21  * We do not form the initial runs using Knuth's recommended replacement
22  * selection data structure (Algorithm 5.4.1R), because it uses a fixed
23  * number of records in memory at all times.  Since we are dealing with
24  * tuples that may vary considerably in size, we want to be able to vary
25  * the number of records kept in memory to ensure full utilization of the
26  * allowed sort memory space.  So, we keep the tuples in a variable-size
27  * heap, with the next record to go out at the top of the heap.  Like
28  * Algorithm 5.4.1R, each record is stored with the run number that it
29  * must go into, and we use (run number, key) as the ordering key for the
30  * heap.  When the run number at the top of the heap changes, we know that
31  * no more records of the prior run are left in the heap.
32  *
33  * The approximate amount of memory allowed for any one sort operation
34  * is specified in kilobytes by the caller (most pass work_mem).  Initially,
35  * we absorb tuples and simply store them in an unsorted array as long as
36  * we haven't exceeded workMem.  If we reach the end of the input without
37  * exceeding workMem, we sort the array using qsort() and subsequently return
38  * tuples just by scanning the tuple array sequentially.  If we do exceed
39  * workMem, we construct a heap using Algorithm H and begin to emit tuples
40  * into sorted runs in temporary tapes, emitting just enough tuples at each
41  * step to get back within the workMem limit.  Whenever the run number at
42  * the top of the heap changes, we begin a new run with a new output tape
43  * (selected per Algorithm D).  After the end of the input is reached,
44  * we dump out remaining tuples in memory into a final run (or two),
45  * then merge the runs using Algorithm D.
46  *
47  * When merging runs, we use a heap containing just the frontmost tuple from
48  * each source run; we repeatedly output the smallest tuple and insert the
49  * next tuple from its source tape (if any).  When the heap empties, the merge
50  * is complete.  The basic merge algorithm thus needs very little memory ---
51  * only M tuples for an M-way merge, and M is constrained to a small number.
52  * However, we can still make good use of our full workMem allocation by
53  * pre-reading additional tuples from each source tape.  Without prereading,
54  * our access pattern to the temporary file would be very erratic; on average
55  * we'd read one block from each of M source tapes during the same time that
56  * we're writing M blocks to the output tape, so there is no sequentiality of
57  * access at all, defeating the read-ahead methods used by most Unix kernels.
58  * Worse, the output tape gets written into a very random sequence of blocks
59  * of the temp file, ensuring that things will be even worse when it comes
60  * time to read that tape.  A straightforward merge pass thus ends up doing a
61  * lot of waiting for disk seeks.  We can improve matters by prereading from
62  * each source tape sequentially, loading about workMem/M bytes from each tape
63  * in turn.  Then we run the merge algorithm, writing but not reading until
64  * one of the preloaded tuple series runs out.  Then we switch back to preread
65  * mode, fill memory again, and repeat.  This approach helps to localize both
66  * read and write accesses.
67  *
68  * When the caller requests random access to the sort result, we form
69  * the final sorted run on a logical tape which is then "frozen", so
70  * that we can access it randomly.  When the caller does not need random
71  * access, we return from tuplesort_performsort() as soon as we are down
72  * to one run per logical tape.  The final merge is then performed
73  * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
74  * saves one cycle of writing all the data out to disk and reading it in.
75  *
76  * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
77  * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
78  * to Knuth's figure 70 (section 5.4.2).  However, Knuth is assuming that
79  * tape drives are expensive beasts, and in particular that there will always
80  * be many more runs than tape drives.  In our implementation a "tape drive"
81  * doesn't cost much more than a few Kb of memory buffers, so we can afford
82  * to have lots of them.  In particular, if we can have as many tape drives
83  * as sorted runs, we can eliminate repeated I/O entirely.  In the current
84  * code we determine the number of tapes M on the basis of workMem: we want
85  * workMem/M to be large enough that we read a fair amount of data each time
86  * we preread from a tape, so as to maintain the locality of access described
87  * above.  Nonetheless, with large workMem we can have many tapes.
88  *
89  *
90  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
91  * Portions Copyright (c) 1994, Regents of the University of California
92  *
93  * IDENTIFICATION
94  *        src/backend/utils/sort/tuplesort.c
95  *
96  *-------------------------------------------------------------------------
97  */
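
/*
 * Illustrative sketch of the (run number, key) ordering described above.
 * The heap routines later in this file compare two SortTuples with
 * essentially this logic (when run numbers are being checked):
 *
 *		if (a->tupindex != b->tupindex)
 *			return a->tupindex - b->tupindex;	run number decides first,
 *		return COMPARETUP(state, a, b);			then the sort key
 *
 * A tuple that arrives with a key smaller than the last tuple already
 * written to the current run cannot join that run; it is tagged with the
 * next run number, which holds it below every current-run tuple in the
 * heap until the current run is complete.
 */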
98
99 #include "postgres.h"
100
101 #include <limits.h>
102
103 #include "access/htup_details.h"
104 #include "access/nbtree.h"
105 #include "catalog/index.h"
106 #include "commands/tablespace.h"
107 #include "executor/executor.h"
108 #include "miscadmin.h"
109 #include "pg_trace.h"
110 #include "utils/datum.h"
111 #include "utils/logtape.h"
112 #include "utils/lsyscache.h"
113 #include "utils/memutils.h"
114 #include "utils/pg_rusage.h"
115 #include "utils/rel.h"
116 #include "utils/sortsupport.h"
117 #include "utils/tuplesort.h"
118
119
120 /* sort-type codes for sort__start probes */
121 #define HEAP_SORT               0
122 #define INDEX_SORT              1
123 #define DATUM_SORT              2
124 #define CLUSTER_SORT    3
125
126 /* GUC variables */
127 #ifdef TRACE_SORT
128 bool            trace_sort = false;
129 #endif
130
131 #ifdef DEBUG_BOUNDED_SORT
132 bool            optimize_bounded_sort = true;
133 #endif
134
135
136 /*
137  * The objects we actually sort are SortTuple structs.  These contain
138  * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
139  * which is a separate palloc chunk --- we assume it is just one chunk and
140  * can be freed by a simple pfree().  SortTuples also contain the tuple's
141  * first key column in Datum/nullflag format, and an index integer.
142  *
143  * Storing the first key column lets us save heap_getattr or index_getattr
144  * calls during tuple comparisons.  We could extract and save all the key
145  * columns not just the first, but this would increase code complexity and
146  * overhead, and wouldn't actually save any comparison cycles in the common
147  * case where the first key determines the comparison result.  Note that
148  * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
149  *
150  * There is one special case: when the sort support infrastructure provides an
151  * "abbreviated key" representation, where the key is (typically) a pass by
152  * value proxy for a pass by reference type.  In this case, the abbreviated key
153  * is stored in datum1 in place of the actual first key column.
154  *
155  * When sorting single Datums, the data value is represented directly by
156  * datum1/isnull1 for pass by value types (or null values).  If the datatype is
157  * pass-by-reference and isnull1 is false, then "tuple" points to a separately
158  * palloc'd data value, otherwise "tuple" is NULL.  The value of datum1 is then
159  * either the same pointer as "tuple", or is an abbreviated key value as
160  * described above.  Accordingly, "tuple" is always used in preference to
161  * datum1 as the authoritative value for pass-by-reference cases.
162  *
163  * While building initial runs, tupindex holds the tuple's run number.  During
164  * merge passes, we re-use it to hold the input tape number that each tuple in
165  * the heap was read from, or to hold the index of the next tuple pre-read
166  * from the same tape in the case of pre-read entries.  tupindex goes unused
167  * if the sort occurs entirely in memory.
168  */
169 typedef struct
170 {
171         void       *tuple;                      /* the tuple proper */
172         Datum           datum1;                 /* value of first key column */
173         bool            isnull1;                /* is first key column NULL? */
174         int                     tupindex;               /* see notes above */
175 } SortTuple;
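
/*
 * Rough sketch of how datum1 saves work during comparisons, per the notes
 * above.  The comparetup_* routines defined later in this file begin with
 * approximately this pattern:
 *
 *		compare = ApplySortComparator(a->datum1, a->isnull1,
 *									  b->datum1, b->isnull1,
 *									  state->sortKeys);
 *		if (compare != 0)
 *			return compare;
 *
 * Only on a tie (including an abbreviated-key tie, which must be rechecked
 * against the full, authoritative values) do we deform the tuple to fetch
 * and compare the remaining key columns.
 */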
176
177
178 /*
179  * Possible states of a Tuplesort object.  These denote the states that
180  * persist between calls of Tuplesort routines.
181  */
182 typedef enum
183 {
184         TSS_INITIAL,                            /* Loading tuples; still within memory limit */
185         TSS_BOUNDED,                            /* Loading tuples into bounded-size heap */
186         TSS_BUILDRUNS,                          /* Loading tuples; writing to tape */
187         TSS_SORTEDINMEM,                        /* Sort completed entirely in memory */
188         TSS_SORTEDONTAPE,                       /* Sort completed, final run is on tape */
189         TSS_FINALMERGE                          /* Performing final merge on-the-fly */
190 } TupSortStatus;
191
192 /*
193  * Parameters for calculation of number of tapes to use --- see inittapes()
194  * and tuplesort_merge_order().
195  *
196  * In this calculation we assume that each tape will cost us about 3 blocks
197  * worth of buffer space (which is an underestimate for very large data
198  * volumes, but it's probably close enough --- see logtape.c).
199  *
200  * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
201  * tape during a preread cycle (see discussion at top of file).
202  */
203 #define MINORDER                6               /* minimum merge order */
204 #define TAPE_BUFFER_OVERHEAD            (BLCKSZ * 3)
205 #define MERGE_BUFFER_SIZE                       (BLCKSZ * 32)
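
/*
 * These constants combine in tuplesort_merge_order() (defined later in the
 * file, beyond this excerpt); the calculation is approximately:
 *
 *		mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
 *				 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
 *		mOrder = Max(mOrder, MINORDER);
 *
 * i.e. one buffer overhead is reserved for the output tape, and each input
 * tape then costs its own buffer overhead plus a MERGE_BUFFER_SIZE preread
 * workspace.
 */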
206
207 typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
208                                                                                                 Tuplesortstate *state);
209
210 /*
211  * Private state of a Tuplesort operation.
212  */
213 struct Tuplesortstate
214 {
215         TupSortStatus status;           /* enumerated value as shown above */
216         int                     nKeys;                  /* number of columns in sort key */
217         bool            randomAccess;   /* did caller request random access? */
218         bool            bounded;                /* did caller specify a maximum number of
219                                                                  * tuples to return? */
220         bool            boundUsed;              /* true if we made use of a bounded heap */
221         int                     bound;                  /* if bounded, the maximum number of tuples */
222         int64           availMem;               /* remaining memory available, in bytes */
223         int64           allowedMem;             /* total memory allowed, in bytes */
224         int                     maxTapes;               /* number of tapes (Knuth's T) */
225         int                     tapeRange;              /* maxTapes-1 (Knuth's P) */
226         MemoryContext sortcontext;      /* memory context holding all sort data */
227         LogicalTapeSet *tapeset;        /* logtape.c object for tapes in a temp file */
228
229         /*
230          * These function pointers decouple the routines that must know what kind
231          * of tuple we are sorting from the routines that don't need to know it.
232          * They are set up by the tuplesort_begin_xxx routines.
233          *
234          * Function to compare two tuples; result is per qsort() convention, ie:
235          * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
236          * qsort_arg_comparator.
237          */
238         SortTupleComparator comparetup;
239
240         /*
241          * Function to copy a supplied input tuple into palloc'd space and set up
242          * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
243          * state->availMem must be decreased by the amount of space used for the
244          * tuple copy (note the SortTuple struct itself is not counted).
245          */
246         void            (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
247
248         /*
249          * Function to write a stored tuple onto tape.  The representation of the
250          * tuple on tape need not be the same as it is in memory; requirements on
251          * the tape representation are given below.  After writing the tuple,
252          * pfree() the out-of-line data (not the SortTuple struct!), and increase
253          * state->availMem by the amount of memory space thereby released.
254          */
255         void            (*writetup) (Tuplesortstate *state, int tapenum,
256                                                                                  SortTuple *stup);
257
258         /*
259          * Function to read a stored tuple from tape back into memory. 'len' is
260          * the already-read length of the stored tuple.  Create a palloc'd copy,
261          * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
262          * decrease state->availMem by the amount of memory space consumed.
263          */
264         void            (*readtup) (Tuplesortstate *state, SortTuple *stup,
265                                                                                 int tapenum, unsigned int len);
266
267         /*
268          * This array holds the tuples now in sort memory.  If we are in state
269          * INITIAL, the tuples are in no particular order; if we are in state
270          * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
271          * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
272          * H.  (Note that memtupcount only counts the tuples that are part of the
273          * heap --- during merge passes, memtuples[] entries beyond tapeRange are
274          * never in the heap and are used to hold pre-read tuples.)  In state
275          * SORTEDONTAPE, the array is not used.
276          */
277         SortTuple  *memtuples;          /* array of SortTuple structs */
278         int                     memtupcount;    /* number of tuples currently present */
279         int                     memtupsize;             /* allocated length of memtuples array */
280         bool            growmemtuples;  /* memtuples' growth still underway? */
281
282         /*
283          * While building initial runs, this is the current output run number
284          * (starting at 0).  Afterwards, it is the number of initial runs we made.
285          */
286         int                     currentRun;
287
288         /*
289          * Unless otherwise noted, all pointer variables below are pointers to
290          * arrays of length maxTapes, holding per-tape data.
291          */
292
293         /*
294          * These variables are only used during merge passes.  mergeactive[i] is
295          * true if we are reading an input run from (actual) tape number i and
296          * have not yet exhausted that run.  mergenext[i] is the memtuples index
297          * of the next pre-read tuple (next to be loaded into the heap) for tape
298          * i, or 0 if we are out of pre-read tuples.  mergelast[i] similarly
299          * points to the last pre-read tuple from each tape.  mergeavailslots[i]
300          * is the number of unused memtuples[] slots reserved for tape i, and
301          * mergeavailmem[i] is the amount of unused space allocated for tape i.
302          * mergefreelist and mergefirstfree keep track of unused locations in the
303          * memtuples[] array.  The memtuples[].tupindex fields link together
304          * pre-read tuples for each tape as well as recycled locations in
305          * mergefreelist. It is OK to use 0 as a null link in these lists, because
306          * memtuples[0] is part of the merge heap and is never a pre-read tuple.
307          */
308         bool       *mergeactive;        /* active input run source? */
309         int                *mergenext;          /* first preread tuple for each source */
310         int                *mergelast;          /* last preread tuple for each source */
311         int                *mergeavailslots;    /* slots left for prereading each tape */
312         int64      *mergeavailmem;      /* availMem for prereading each tape */
313         int                     mergefreelist;  /* head of freelist of recycled slots */
314         int                     mergefirstfree; /* first slot never used in this merge */
315
316         /*
317          * Variables for Algorithm D.  Note that destTape is a "logical" tape
318          * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
319          * "logical" and "actual" tape numbers straight!
320          */
321         int                     Level;                  /* Knuth's l */
322         int                     destTape;               /* current output tape (Knuth's j, less 1) */
323         int                *tp_fib;                     /* Target Fibonacci run counts (A[]) */
324         int                *tp_runs;            /* # of real runs on each tape */
325         int                *tp_dummy;           /* # of dummy runs for each tape (D[]) */
326         int                *tp_tapenum;         /* Actual tape numbers (TAPE[]) */
327         int                     activeTapes;    /* # of active input tapes in merge pass */
328
329         /*
330          * These variables are used after completion of sorting to keep track of
331          * the next tuple to return.  (In the tape case, the tape's current read
332          * position is also critical state.)
333          */
334         int                     result_tape;    /* actual tape number of finished output */
335         int                     current;                /* array index (only used if SORTEDINMEM) */
336         bool            eof_reached;    /* reached EOF (needed for cursors) */
337
338         /* markpos_xxx holds marked position for mark and restore */
339         long            markpos_block;  /* tape block# (only used if SORTEDONTAPE) */
340         int                     markpos_offset; /* saved "current", or offset in tape block */
341         bool            markpos_eof;    /* saved "eof_reached" */
342
343         /*
344          * The sortKeys variable is used by every case other than the hash index
345          * case; it is set by tuplesort_begin_xxx.  tupDesc is only used by the
346          * MinimalTuple and CLUSTER routines, though.
347          */
348         TupleDesc       tupDesc;
349         SortSupport sortKeys;           /* array of length nKeys */
350
351         /*
352          * This variable is shared by the single-key MinimalTuple case and the
353          * Datum case (which both use qsort_ssup()).  Otherwise it's NULL.
354          */
355         SortSupport onlyKey;
356
357         /*
358          * Additional state for managing "abbreviated key" sortsupport routines
359          * (which currently may be used by all cases except the hash index case).
360          * Tracks the intervals at which the optimization's effectiveness is
361          * tested.
362          */
363         int64           abbrevNext;             /* Tuple # at which to next check
364                                                                  * applicability */
365
366         /*
367          * These variables are specific to the CLUSTER case; they are set by
368          * tuplesort_begin_cluster.
369          */
370         IndexInfo  *indexInfo;          /* info about index being used for reference */
371         EState     *estate;                     /* for evaluating index expressions */
372
373         /*
374          * These variables are specific to the IndexTuple case; they are set by
375          * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
376          */
377         Relation        heapRel;                /* table the index is being built on */
378         Relation        indexRel;               /* index being built */
379
380         /* These are specific to the index_btree subcase: */
381         bool            enforceUnique;  /* complain if we find duplicate tuples */
382
383         /* These are specific to the index_hash subcase: */
384         uint32          hash_mask;              /* mask for sortable part of hash code */
385
386         /*
387          * These variables are specific to the Datum case; they are set by
388          * tuplesort_begin_datum and used only by the DatumTuple routines.
389          */
390         Oid                     datumType;
391         /* we need typelen and byval in order to know how to copy the Datums. */
392         int                     datumTypeLen;
393         bool            datumTypeByVal;
394
395         /*
396          * Resource snapshot for time of sort start.
397          */
398 #ifdef TRACE_SORT
399         PGRUsage        ru_start;
400 #endif
401 };
402
403 #define COMPARETUP(state,a,b)   ((*(state)->comparetup) (a, b, state))
404 #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
405 #define WRITETUP(state,tape,stup)       ((*(state)->writetup) (state, tape, stup))
406 #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
407 #define LACKMEM(state)          ((state)->availMem < 0)
408 #define USEMEM(state,amt)       ((state)->availMem -= (amt))
409 #define FREEMEM(state,amt)      ((state)->availMem += (amt))
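
/*
 * Typical accounting pattern for these macros (a sketch following the
 * copytup/writetup conventions above): charge the space a tuple copy
 * actually occupies, and credit it back when the copy is written out and
 * freed.
 *
 *		tuple = heap_copy_minimal_tuple(mtup);
 *		USEMEM(state, GetMemoryChunkSpace(tuple));
 *		...
 *		FREEMEM(state, GetMemoryChunkSpace(tuple));
 *		heap_free_minimal_tuple(tuple);
 *
 * LACKMEM(state) then reports whether the workMem budget is exhausted and
 * tuples must start spilling to tape.
 */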
410
411 /*
412  * NOTES about on-tape representation of tuples:
413  *
414  * We require the first "unsigned int" of a stored tuple to be the total size
415  * on-tape of the tuple, including itself (so it is never zero; an all-zero
416  * unsigned int is used to delimit runs).  The remainder of the stored tuple
417  * may or may not match the in-memory representation of the tuple ---
418  * any conversion needed is the job of the writetup and readtup routines.
419  *
420  * If state->randomAccess is true, then the stored representation of the
421  * tuple must be followed by another "unsigned int" that is a copy of the
422  * length --- so the total tape space used is actually sizeof(unsigned int)
423  * more than the stored length value.  This allows read-backwards.  When
424  * randomAccess is not true, the write/read routines may omit the extra
425  * length word.
426  *
427  * writetup is expected to write both length words as well as the tuple
428  * data.  When readtup is called, the tape is positioned just after the
429  * front length word; readtup must read the tuple data and advance past
430  * the back length word (if present).
431  *
432  * The write/read routines can make use of the tuple description data
433  * stored in the Tuplesortstate record, if needed.  They are also expected
434  * to adjust state->availMem by the amount of memory space (not tape space!)
435  * released or consumed.  There is no error return from either writetup
436  * or readtup; they should ereport() on failure.
437  *
438  *
439  * NOTES about memory consumption calculations:
440  *
441  * We count space allocated for tuples against the workMem limit, plus
442  * the space used by the variable-size memtuples array.  Fixed-size space
443  * is not counted; it's small enough to not be interesting.
444  *
445  * Note that we count actual space used (as shown by GetMemoryChunkSpace)
446  * rather than the originally-requested size.  This is important since
447  * palloc can add substantial overhead.  It's not a complete answer since
448  * we won't count any wasted space in palloc allocation blocks, but it's
449  * a lot better than what we were doing before 7.3.
450  */
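
/*
 * Sketch of the resulting on-tape layout for one stored tuple in the
 * randomAccess case (the writetup_* routines later in this file produce
 * this shape):
 *
 *		unsigned int len;	counts this word plus the tuple body
 *		char body[len - sizeof(unsigned int)];
 *		unsigned int len;	trailing copy; NOT counted in len itself
 *
 * Without randomAccess the trailing copy may be omitted, and each stored
 * tuple then occupies exactly len bytes of tape.
 */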
451
452 /* When using this macro, beware of double evaluation of len */
453 #define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
454         do { \
455                 if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
456                         elog(ERROR, "unexpected end of data"); \
457         } while(0)
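
/*
 * Example of the double-evaluation hazard warned about above: "len" is
 * expanded twice, so a call such as
 *
 *		LogicalTapeReadExact(tapeset, tapenum, &buf, nextlen++);
 *
 * would increment nextlen twice and compare against the wrong size.
 * Callers should pass only simple variables or constants for len.
 */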
458
459
460 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
461 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
462 static bool consider_abort_common(Tuplesortstate *state);
463 static void inittapes(Tuplesortstate *state);
464 static void selectnewtape(Tuplesortstate *state);
465 static void mergeruns(Tuplesortstate *state);
466 static void mergeonerun(Tuplesortstate *state);
467 static void beginmerge(Tuplesortstate *state);
468 static void mergepreread(Tuplesortstate *state);
469 static void mergeprereadone(Tuplesortstate *state, int srcTape);
470 static void dumptuples(Tuplesortstate *state, bool alltuples);
471 static void make_bounded_heap(Tuplesortstate *state);
472 static void sort_bounded_heap(Tuplesortstate *state);
473 static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
474                                           int tupleindex, bool checkIndex);
475 static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
476 static void reversedirection(Tuplesortstate *state);
477 static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
478 static void markrunend(Tuplesortstate *state, int tapenum);
479 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
480                                 Tuplesortstate *state);
481 static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
482 static void writetup_heap(Tuplesortstate *state, int tapenum,
483                           SortTuple *stup);
484 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
485                          int tapenum, unsigned int len);
486 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
487                                    Tuplesortstate *state);
488 static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
489 static void writetup_cluster(Tuplesortstate *state, int tapenum,
490                                  SortTuple *stup);
491 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
492                                 int tapenum, unsigned int len);
493 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
494                                            Tuplesortstate *state);
495 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
496                                           Tuplesortstate *state);
497 static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
498 static void writetup_index(Tuplesortstate *state, int tapenum,
499                            SortTuple *stup);
500 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
501                           int tapenum, unsigned int len);
502 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
503                                  Tuplesortstate *state);
504 static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
505 static void writetup_datum(Tuplesortstate *state, int tapenum,
506                            SortTuple *stup);
507 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
508                           int tapenum, unsigned int len);
509 static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
510
511 /*
512  * Special versions of qsort just for SortTuple objects.  qsort_tuple() sorts
513  * any variant of SortTuples, using the appropriate comparetup function.
514  * qsort_ssup() is specialized for the case where the comparetup function
515  * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts
516  * and Datum sorts.
517  */
518 #include "qsort_tuple.c"
519
520
521 /*
522  *              tuplesort_begin_xxx
523  *
524  * Initialize for a tuple sort operation.
525  *
526  * After calling tuplesort_begin, the caller should call tuplesort_putXXX
527  * zero or more times, then call tuplesort_performsort when all the tuples
528  * have been supplied.  After performsort, retrieve the tuples in sorted
529  * order by calling tuplesort_getXXX until it returns false/NULL.  (If random
530  * access was requested, rescan, markpos, and restorepos can also be called.)
531  * Call tuplesort_end to terminate the operation and release memory/disk space.
532  *
533  * Each variant of tuplesort_begin has a workMem parameter specifying the
534  * maximum number of kilobytes of RAM to use before spilling data to disk.
535  * (The normal value of this parameter is work_mem, but some callers use
536  * other values.)  Each variant also has a randomAccess parameter specifying
537  * whether the caller needs non-sequential access to the sort result.
538  */
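
/*
 * A hypothetical caller following this protocol (sketch only --- the
 * fetch_next_slot() and process() helpers are stand-ins, not real APIs):
 *
 *		Tuplesortstate *sortstate;
 *		TupleTableSlot *slot;
 *
 *		sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums,
 *										 sortOperators, sortCollations,
 *										 nullsFirstFlags, work_mem, false);
 *		while ((slot = fetch_next_slot()) != NULL)
 *			tuplesort_puttupleslot(sortstate, slot);
 *		tuplesort_performsort(sortstate);
 *		while (tuplesort_gettupleslot(sortstate, true, resultslot))
 *			process(resultslot);
 *		tuplesort_end(sortstate);
 */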
539
540 static Tuplesortstate *
541 tuplesort_begin_common(int workMem, bool randomAccess)
542 {
543         Tuplesortstate *state;
544         MemoryContext sortcontext;
545         MemoryContext oldcontext;
546
547         /*
548          * Create a working memory context for this sort operation. All data
549          * needed by the sort will live inside this context.
550          */
551         sortcontext = AllocSetContextCreate(CurrentMemoryContext,
552                                                                                 "TupleSort",
553                                                                                 ALLOCSET_DEFAULT_MINSIZE,
554                                                                                 ALLOCSET_DEFAULT_INITSIZE,
555                                                                                 ALLOCSET_DEFAULT_MAXSIZE);
556
557         /*
558          * Make the Tuplesortstate within the per-sort context.  This way, we
559          * don't need a separate pfree() operation for it at shutdown.
560          */
561         oldcontext = MemoryContextSwitchTo(sortcontext);
562
563         state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
564
565 #ifdef TRACE_SORT
566         if (trace_sort)
567                 pg_rusage_init(&state->ru_start);
568 #endif
569
570         state->status = TSS_INITIAL;
571         state->randomAccess = randomAccess;
572         state->bounded = false;
573         state->boundUsed = false;
574         state->allowedMem = workMem * (int64) 1024;
575         state->availMem = state->allowedMem;
576         state->sortcontext = sortcontext;
577         state->tapeset = NULL;
578
579         state->memtupcount = 0;
580
581         /*
582          * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
583          * see comments in grow_memtuples().
584          */
585         state->memtupsize = Max(1024,
586                                                 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1);
587
588         state->growmemtuples = true;
589         state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
590
591         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
592
593         /* workMem must be large enough for the minimal memtuples array */
594         if (LACKMEM(state))
595                 elog(ERROR, "insufficient memory allowed for sort");
596
597         state->currentRun = 0;
598
599         /*
600          * maxTapes, tapeRange, and Algorithm D variables will be initialized by
601          * inittapes(), if needed
602          */
603
604         state->result_tape = -1;        /* flag that result tape has not been formed */
605
606         MemoryContextSwitchTo(oldcontext);
607
608         return state;
609 }
610
611 Tuplesortstate *
612 tuplesort_begin_heap(TupleDesc tupDesc,
613                                          int nkeys, AttrNumber *attNums,
614                                          Oid *sortOperators, Oid *sortCollations,
615                                          bool *nullsFirstFlags,
616                                          int workMem, bool randomAccess)
617 {
618         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
619         MemoryContext oldcontext;
620         int                     i;
621
622         oldcontext = MemoryContextSwitchTo(state->sortcontext);
623
624         AssertArg(nkeys > 0);
625
626 #ifdef TRACE_SORT
627         if (trace_sort)
628                 elog(LOG,
629                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
630                          nkeys, workMem, randomAccess ? 't' : 'f');
631 #endif
632
633         state->nKeys = nkeys;
634
635         TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
636                                                                 false,  /* no unique check */
637                                                                 nkeys,
638                                                                 workMem,
639                                                                 randomAccess);
640
641         state->comparetup = comparetup_heap;
642         state->copytup = copytup_heap;
643         state->writetup = writetup_heap;
644         state->readtup = readtup_heap;
645
646         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
647         state->abbrevNext = 10;
648
649         /* Prepare SortSupport data for each column */
650         state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
651
652         for (i = 0; i < nkeys; i++)
653         {
654                 SortSupport sortKey = state->sortKeys + i;
655
656                 AssertArg(attNums[i] != 0);
657                 AssertArg(sortOperators[i] != 0);
658
659                 sortKey->ssup_cxt = CurrentMemoryContext;
660                 sortKey->ssup_collation = sortCollations[i];
661                 sortKey->ssup_nulls_first = nullsFirstFlags[i];
662                 sortKey->ssup_attno = attNums[i];
663                 /* Convey if abbreviation optimization is applicable in principle */
664                 sortKey->abbreviate = (i == 0);
665
666                 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
667         }
668
669         /*
670          * The "onlyKey" optimization cannot be used with abbreviated keys, since
671          * tie-breaker comparisons may be required.  Typically, the optimization
672          * is only of value to pass-by-value types anyway, whereas abbreviated
673          * keys are typically only of value to pass-by-reference types.
674          */
675         if (nkeys == 1 && !state->sortKeys->abbrev_converter)
676                 state->onlyKey = state->sortKeys;
677
678         MemoryContextSwitchTo(oldcontext);
679
680         return state;
681 }
682
683 Tuplesortstate *
684 tuplesort_begin_cluster(TupleDesc tupDesc,
685                                                 Relation indexRel,
686                                                 int workMem, bool randomAccess)
687 {
688         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
689         ScanKey         indexScanKey;
690         MemoryContext oldcontext;
691         int                     i;
692
693         Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
694
695         oldcontext = MemoryContextSwitchTo(state->sortcontext);
696
697 #ifdef TRACE_SORT
698         if (trace_sort)
699                 elog(LOG,
700                          "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
701                          RelationGetNumberOfAttributes(indexRel),
702                          workMem, randomAccess ? 't' : 'f');
703 #endif
704
705         state->nKeys = RelationGetNumberOfAttributes(indexRel);
706
707         TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
708                                                                 false,  /* no unique check */
709                                                                 state->nKeys,
710                                                                 workMem,
711                                                                 randomAccess);
712
713         state->comparetup = comparetup_cluster;
714         state->copytup = copytup_cluster;
715         state->writetup = writetup_cluster;
716         state->readtup = readtup_cluster;
717         state->abbrevNext = 10;
718
719         state->indexInfo = BuildIndexInfo(indexRel);
720
721         state->tupDesc = tupDesc;       /* assume we need not copy tupDesc */
722
723         indexScanKey = _bt_mkscankey_nodata(indexRel);
724
725         if (state->indexInfo->ii_Expressions != NULL)
726         {
727                 TupleTableSlot *slot;
728                 ExprContext *econtext;
729
730                 /*
731                  * We will need to use FormIndexDatum to evaluate the index
732                  * expressions.  To do that, we need an EState, as well as a
733                  * TupleTableSlot to put the table tuples into.  The econtext's
734                  * scantuple has to point to that slot, too.
735                  */
736                 state->estate = CreateExecutorState();
737                 slot = MakeSingleTupleTableSlot(tupDesc);
738                 econtext = GetPerTupleExprContext(state->estate);
739                 econtext->ecxt_scantuple = slot;
740         }
741
742         /* Prepare SortSupport data for each column */
743         state->sortKeys = (SortSupport) palloc0(state->nKeys *
744                                                                                         sizeof(SortSupportData));
745
746         for (i = 0; i < state->nKeys; i++)
747         {
748                 SortSupport sortKey = state->sortKeys + i;
749                 ScanKey         scanKey = indexScanKey + i;
750                 int16           strategy;
751
752                 sortKey->ssup_cxt = CurrentMemoryContext;
753                 sortKey->ssup_collation = scanKey->sk_collation;
754                 sortKey->ssup_nulls_first =
755                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
756                 sortKey->ssup_attno = scanKey->sk_attno;
757                 /* Convey if abbreviation optimization is applicable in principle */
758                 sortKey->abbreviate = (i == 0);
759
760                 AssertState(sortKey->ssup_attno != 0);
761
762                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
763                         BTGreaterStrategyNumber : BTLessStrategyNumber;
764
765                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
766         }
767
768         _bt_freeskey(indexScanKey);
769
770         MemoryContextSwitchTo(oldcontext);
771
772         return state;
773 }
774
775 Tuplesortstate *
776 tuplesort_begin_index_btree(Relation heapRel,
777                                                         Relation indexRel,
778                                                         bool enforceUnique,
779                                                         int workMem, bool randomAccess)
780 {
781         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
782         ScanKey         indexScanKey;
783         MemoryContext oldcontext;
784         int                     i;
785
786         oldcontext = MemoryContextSwitchTo(state->sortcontext);
787
788 #ifdef TRACE_SORT
789         if (trace_sort)
790                 elog(LOG,
791                          "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
792                          enforceUnique ? 't' : 'f',
793                          workMem, randomAccess ? 't' : 'f');
794 #endif
795
796         state->nKeys = RelationGetNumberOfAttributes(indexRel);
797
798         TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
799                                                                 enforceUnique,
800                                                                 state->nKeys,
801                                                                 workMem,
802                                                                 randomAccess);
803
804         state->comparetup = comparetup_index_btree;
805         state->copytup = copytup_index;
806         state->writetup = writetup_index;
807         state->readtup = readtup_index;
808         state->abbrevNext = 10;
809
810         state->heapRel = heapRel;
811         state->indexRel = indexRel;
812         state->enforceUnique = enforceUnique;
813
814         indexScanKey = _bt_mkscankey_nodata(indexRel);
816
817         /* Prepare SortSupport data for each column */
818         state->sortKeys = (SortSupport) palloc0(state->nKeys *
819                                                                                         sizeof(SortSupportData));
820
821         for (i = 0; i < state->nKeys; i++)
822         {
823                 SortSupport sortKey = state->sortKeys + i;
824                 ScanKey         scanKey = indexScanKey + i;
825                 int16           strategy;
826
827                 sortKey->ssup_cxt = CurrentMemoryContext;
828                 sortKey->ssup_collation = scanKey->sk_collation;
829                 sortKey->ssup_nulls_first =
830                         (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
831                 sortKey->ssup_attno = scanKey->sk_attno;
832                 /* Convey if abbreviation optimization is applicable in principle */
833                 sortKey->abbreviate = (i == 0);
834
835                 AssertState(sortKey->ssup_attno != 0);
836
837                 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
838                         BTGreaterStrategyNumber : BTLessStrategyNumber;
839
840                 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
841         }
842
843         _bt_freeskey(indexScanKey);
844
845         MemoryContextSwitchTo(oldcontext);
846
847         return state;
848 }
849
850 Tuplesortstate *
851 tuplesort_begin_index_hash(Relation heapRel,
852                                                    Relation indexRel,
853                                                    uint32 hash_mask,
854                                                    int workMem, bool randomAccess)
855 {
856         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
857         MemoryContext oldcontext;
858
859         oldcontext = MemoryContextSwitchTo(state->sortcontext);
860
861 #ifdef TRACE_SORT
862         if (trace_sort)
863                 elog(LOG,
864                 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
865                          hash_mask,
866                          workMem, randomAccess ? 't' : 'f');
867 #endif
868
869         state->nKeys = 1;                       /* Only one sort column, the hash code */
870
871         state->comparetup = comparetup_index_hash;
872         state->copytup = copytup_index;
873         state->writetup = writetup_index;
874         state->readtup = readtup_index;
875
876         state->heapRel = heapRel;
877         state->indexRel = indexRel;
878
879         state->hash_mask = hash_mask;
880
881         MemoryContextSwitchTo(oldcontext);
882
883         return state;
884 }
885
886 Tuplesortstate *
887 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
888                                           bool nullsFirstFlag,
889                                           int workMem, bool randomAccess)
890 {
891         Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
892         MemoryContext oldcontext;
893         int16           typlen;
894         bool            typbyval;
895
896         oldcontext = MemoryContextSwitchTo(state->sortcontext);
897
898 #ifdef TRACE_SORT
899         if (trace_sort)
900                 elog(LOG,
901                          "begin datum sort: workMem = %d, randomAccess = %c",
902                          workMem, randomAccess ? 't' : 'f');
903 #endif
904
905         state->nKeys = 1;                       /* always a one-column sort */
906
907         TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
908                                                                 false,  /* no unique check */
909                                                                 1,
910                                                                 workMem,
911                                                                 randomAccess);
912
913         state->comparetup = comparetup_datum;
914         state->copytup = copytup_datum;
915         state->writetup = writetup_datum;
916         state->readtup = readtup_datum;
917         state->abbrevNext = 10;
918
919         state->datumType = datumType;
920
921         /* lookup necessary attributes of the datum type */
922         get_typlenbyval(datumType, &typlen, &typbyval);
923         state->datumTypeLen = typlen;
924         state->datumTypeByVal = typbyval;
925
926         /* Prepare SortSupport data */
927         state->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
928
929         state->sortKeys->ssup_cxt = CurrentMemoryContext;
930         state->sortKeys->ssup_collation = sortCollation;
931         state->sortKeys->ssup_nulls_first = nullsFirstFlag;
932
933         /* abbreviation is possible here only for by-reference types */
934         state->sortKeys->abbreviate = !typbyval;
935
936         PrepareSortSupportFromOrderingOp(sortOperator, state->sortKeys);
937
938         /*
939          * The "onlyKey" optimization cannot be used with abbreviated keys, since
940          * tie-breaker comparisons may be required.  Typically, the optimization
941          * is only of value to pass-by-value types anyway, whereas abbreviated
942          * keys are typically only of value to pass-by-reference types.
943          */
944         if (!state->sortKeys->abbrev_converter)
945                 state->onlyKey = state->sortKeys;
946
947         MemoryContextSwitchTo(oldcontext);
948
949         return state;
950 }
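
/*
 * Hypothetical Datum-sort usage (sketch): sorting bare int8 values in
 * ascending order.  int8 is pass-by-value, so abbreviation does not apply
 * and the qsort_ssup() fast path is available:
 *
 *		sortstate = tuplesort_begin_datum(INT8OID, Int8LessOperator,
 *										  InvalidOid, false,
 *										  work_mem, false);
 *		tuplesort_putdatum(sortstate, Int64GetDatum(value), false);
 */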
951
952 /*
953  * tuplesort_set_bound
954  *
955  *      Advise tuplesort that at most the first N result tuples are required.
956  *
957  * Must be called before inserting any tuples.  (Actually, we could allow it
958  * as long as the sort hasn't spilled to disk, but there seems no need for
959  * delayed calls at the moment.)
960  *
961  * This is a hint only. The tuplesort may still return more tuples than
962  * requested.
963  */
964 void
965 tuplesort_set_bound(Tuplesortstate *state, int64 bound)
966 {
967         /* Assert we're called before loading any tuples */
968         Assert(state->status == TSS_INITIAL);
969         Assert(state->memtupcount == 0);
970         Assert(!state->bounded);
971
972 #ifdef DEBUG_BOUNDED_SORT
973         /* Honor GUC setting that disables the feature (for easy testing) */
974         if (!optimize_bounded_sort)
975                 return;
976 #endif
977
978         /* We want to be able to compute bound * 2, so limit the setting */
979         if (bound > (int64) (INT_MAX / 2))
980                 return;
981
982         state->bounded = true;
983         state->bound = (int) bound;
984
985         /*
986          * Bounded sorts are not an effective target for abbreviated key
987          * optimization.  Disable by setting state to be consistent with no
988          * abbreviation support.
989          */
990         state->sortKeys->abbrev_converter = NULL;
991         if (state->sortKeys->abbrev_full_comparator)
992                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
993
994         /* Not strictly necessary, but be tidy */
995         state->sortKeys->abbrev_abort = NULL;
996         state->sortKeys->abbrev_full_comparator = NULL;
997 }
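
/*
 * Sketch of bounded use, as in a query shaped like ORDER BY ... LIMIT n
 * (hypothetical caller code):
 *
 *		sortstate = tuplesort_begin_heap(...);
 *		tuplesort_set_bound(sortstate, n);		before any puttuple calls
 *		... insert all tuples, performsort, fetch at most n results ...
 *
 * Because the bound is only a hint, the caller must still stop fetching
 * after n tuples rather than expect the sort to truncate its output.
 */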
998
999 /*
1000  * tuplesort_end
1001  *
1002  *      Release resources and clean up.
1003  *
1004  * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
1005  * pointing to garbage.  Be careful not to attempt to use or free such
1006  * pointers afterwards!
1007  */
1008 void
1009 tuplesort_end(Tuplesortstate *state)
1010 {
1011         /* context swap probably not needed, but let's be safe */
1012         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1013
1014 #ifdef TRACE_SORT
1015         long            spaceUsed;
1016
1017         if (state->tapeset)
1018                 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
1019         else
1020                 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
1021 #endif
1022
1023         /*
1024          * Delete temporary "tape" files, if any.
1025          *
1026          * Note: want to include this in reported total cost of sort, hence need
1027          * for two #ifdef TRACE_SORT sections.
1028          */
1029         if (state->tapeset)
1030                 LogicalTapeSetClose(state->tapeset);
1031
1032 #ifdef TRACE_SORT
1033         if (trace_sort)
1034         {
1035                 if (state->tapeset)
1036                         elog(LOG, "external sort ended, %ld disk blocks used: %s",
1037                                  spaceUsed, pg_rusage_show(&state->ru_start));
1038                 else
1039                         elog(LOG, "internal sort ended, %ld KB used: %s",
1040                                  spaceUsed, pg_rusage_show(&state->ru_start));
1041         }
1042
1043         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
1044 #else
1045
1046         /*
1047          * If you disabled TRACE_SORT, you can still probe sort__done, but you
1048          * ain't getting space-used stats.
1049          */
1050         TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
1051 #endif
1052
1053         /* Free any execution state created for CLUSTER case */
1054         if (state->estate != NULL)
1055         {
1056                 ExprContext *econtext = GetPerTupleExprContext(state->estate);
1057
1058                 ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
1059                 FreeExecutorState(state->estate);
1060         }
1061
1062         MemoryContextSwitchTo(oldcontext);
1063
1064         /*
1065          * Free the per-sort memory context, thereby releasing all working memory,
1066          * including the Tuplesortstate struct itself.
1067          */
1068         MemoryContextDelete(state->sortcontext);
1069 }
1070
1071 /*
1072  * Grow the memtuples[] array, if possible within our memory constraint.  We
1073  * must not exceed INT_MAX tuples in memory or the caller-provided memory
1074  * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
1075  *
1076  * Normally, at each increment we double the size of the array.  When doing
1077  * that would exceed a limit, we attempt one last, smaller increase (and then
1078  * clear the growmemtuples flag so we don't try any more).  That allows us to
1079  * use memory as fully as permitted; sticking to the pure doubling rule could
1080  * result in almost half going unused.  Because availMem moves around with
1081  * tuple addition/removal, we need some rule to prevent making repeated small
1082  * increases in memtupsize, which would just be useless thrashing.  The
1083  * growmemtuples flag accomplishes that and also prevents useless
1084  * recalculations in this function.
1085  */
1086 static bool
1087 grow_memtuples(Tuplesortstate *state)
1088 {
1089         int                     newmemtupsize;
1090         int                     memtupsize = state->memtupsize;
1091         int64           memNowUsed = state->allowedMem - state->availMem;
1092
1093         /* Forget it if we've already maxed out memtuples, per comment above */
1094         if (!state->growmemtuples)
1095                 return false;
1096
1097         /* Select new value of memtupsize */
1098         if (memNowUsed <= state->availMem)
1099         {
1100                 /*
1101                  * We've used no more than half of allowedMem; double our usage,
1102                  * clamping at INT_MAX tuples.
1103                  */
1104                 if (memtupsize < INT_MAX / 2)
1105                         newmemtupsize = memtupsize * 2;
1106                 else
1107                 {
1108                         newmemtupsize = INT_MAX;
1109                         state->growmemtuples = false;
1110                 }
1111         }
1112         else
1113         {
1114                 /*
1115                  * This will be the last increment of memtupsize.  Abandon doubling
1116                  * strategy and instead increase as much as we safely can.
1117                  *
1118                  * To stay within allowedMem, we can't increase memtupsize by more
1119                  * than availMem / sizeof(SortTuple) elements.  In practice, we want
1120                  * to increase it by considerably less, because we need to leave some
1121                  * space for the tuples to which the new array slots will refer.  We
1122                  * assume the new tuples will be about the same size as the tuples
1123                  * we've already seen, and thus we can extrapolate from the space
1124                  * consumption so far to estimate an appropriate new size for the
1125                  * memtuples array.  The optimal value might be higher or lower than
1126                  * this estimate, but it's hard to know that in advance.  We again
1127                  * clamp at INT_MAX tuples.
1128                  *
1129                  * This calculation is safe against enlarging the array so much that
1130                  * LACKMEM becomes true, because the memory currently used includes
1131                  * the present array; thus, there would be enough allowedMem for the
1132                  * new array elements even if no other memory were currently used.
1133                  *
1134                  * We do the arithmetic in float8, because otherwise the product of
1135                  * memtupsize and allowedMem could overflow.  Any inaccuracy in the
1136                  * result should be insignificant; but even if we computed a
1137                  * completely insane result, the checks below will prevent anything
1138                  * really bad from happening.
1139                  */
1140                 double          grow_ratio;
1141
1142                 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1143                 if (memtupsize * grow_ratio < INT_MAX)
1144                         newmemtupsize = (int) (memtupsize * grow_ratio);
1145                 else
1146                         newmemtupsize = INT_MAX;
1147
1148                 /* We won't make any further enlargement attempts */
1149                 state->growmemtuples = false;
1150         }
1151
1152         /* Must enlarge array by at least one element, else report failure */
1153         if (newmemtupsize <= memtupsize)
1154                 goto noalloc;
1155
1156         /*
1157          * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
1158          * to ensure our request won't be rejected.  Note that we can easily
1159          * exhaust address space before facing this outcome.  (This is presently
1160          * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1161          * don't rely on that at this distance.)
1162          */
1163         if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1164         {
1165                 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1166                 state->growmemtuples = false;   /* can't grow any more */
1167         }
1168
1169         /*
1170          * We need to be sure that we do not cause LACKMEM to become true, else
1171          * the space management algorithm will go nuts.  The code above should
1172          * never generate a dangerous request, but to be safe, check explicitly
1173          * that the array growth fits within availMem.  (We could still cause
1174          * LACKMEM if the memory chunk overhead associated with the memtuples
1175          * array were to increase.  That shouldn't happen because we chose the
1176          * initial array size large enough to ensure that palloc will be treating
1177          * both old and new arrays as separate chunks.  But we'll check LACKMEM
1178          * explicitly below just in case.)
1179          */
1180         if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1181                 goto noalloc;
1182
1183         /* OK, do it */
1184         FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1185         state->memtupsize = newmemtupsize;
1186         state->memtuples = (SortTuple *)
1187                 repalloc_huge(state->memtuples,
1188                                           state->memtupsize * sizeof(SortTuple));
1189         USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1190         if (LACKMEM(state))
1191                 elog(ERROR, "unexpected out-of-memory situation in tuplesort");
1192         return true;
1193
1194 noalloc:
1195         /* If for any reason we didn't realloc, shut off future attempts */
1196         state->growmemtuples = false;
1197         return false;
1198 }
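
/*
 * A minimal model of the growth policy above, stripped of allocation and
 * overflow/safety checks (hypothetical helper, not used anywhere; assumes
 * INT_MAX from <limits.h>): double while no more than half the memory
 * budget is in use, then make one final enlargement extrapolated from the
 * space consumed so far.
 */
static int
growth_policy_sketch(int memtupsize, double memNowUsed, double allowedMem)
{
	if (memNowUsed <= allowedMem - memNowUsed)
	{
		/* within half the budget: just double, clamping at INT_MAX */
		return (memtupsize < INT_MAX / 2) ? memtupsize * 2 : INT_MAX;
	}
	else
	{
		/* final enlargement: scale by the budget/usage ratio */
		double		grow_ratio = allowedMem / memNowUsed;

		if (memtupsize * grow_ratio < INT_MAX)
			return (int) (memtupsize * grow_ratio);
		return INT_MAX;
	}
}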
1199
1200 /*
1201  * Accept one tuple while collecting input data for sort.
1202  *
1203  * Note that the input data is always copied; the caller need not save it.
1204  */
1205 void
1206 tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
1207 {
1208         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1209         SortTuple       stup;
1210
1211         /*
1212          * Copy the given tuple into memory we control, and decrease availMem.
1213          * Then call the common code.
1214          */
1215         COPYTUP(state, &stup, (void *) slot);
1216
1217         puttuple_common(state, &stup);
1218
1219         MemoryContextSwitchTo(oldcontext);
1220 }
1221
1222 /*
1223  * Accept one tuple while collecting input data for sort.
1224  *
1225  * Note that the input data is always copied; the caller need not save it.
1226  */
1227 void
1228 tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
1229 {
1230         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1231         SortTuple       stup;
1232
1233         /*
1234          * Copy the given tuple into memory we control, and decrease availMem.
1235          * Then call the common code.
1236          */
1237         COPYTUP(state, &stup, (void *) tup);
1238
1239         puttuple_common(state, &stup);
1240
1241         MemoryContextSwitchTo(oldcontext);
1242 }
1243
1244 /*
1245  * Collect one index tuple while collecting input data for sort, building
1246  * it from caller-supplied values.
1247  */
1248 void
1249 tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
1250                                                           ItemPointer self, Datum *values,
1251                                                           bool *isnull)
1252 {
1253         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1254         SortTuple       stup;
1255         Datum           original;
1256         IndexTuple      tuple;
1257
1258         stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull);
1259         tuple = ((IndexTuple) stup.tuple);
1260         tuple->t_tid = *self;
1261         USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1262         /* set up first-column key value */
1263         original = index_getattr(tuple,
1264                                                          1,
1265                                                          RelationGetDescr(state->indexRel),
1266                                                          &stup.isnull1);
1267
1268         if (!state->sortKeys || !state->sortKeys->abbrev_converter || stup.isnull1)
1269         {
1270                 /*
1271                  * Store ordinary Datum representation, or NULL value.  If there is
1272                  * a converter, it won't expect NULL values, and the cost model need
1273                  * not account for NULLs; so in that case we skip the converter and
1274                  * just store the ordinary representation in datum1 (keeping the
1275                  * representation consistent across all tuples).
1276                  */
1277                 stup.datum1 = original;
1278         }
1279         else if (!consider_abort_common(state))
1280         {
1281                 /* Store abbreviated key representation */
1282                 stup.datum1 = state->sortKeys->abbrev_converter(original,
1283                                                                                                                 state->sortKeys);
1284         }
1285         else
1286         {
1287                 /* Abort abbreviation */
1288                 int                     i;
1289
1290                 stup.datum1 = original;
1291
1292                 /*
1293                  * Set state to be consistent with never trying abbreviation.
1294                  *
1295                  * Alter datum1 representation in already-copied tuples, so as to
1296                  * ensure a consistent representation (current tuple was just
1297                  * handled).  It does not matter if some dumped tuples are already
1298                  * sorted on tape, since serialized tuples lack abbreviated keys
1299                  * (TSS_BUILDRUNS state prevents control reaching here in any
1300                  * case).
1301                  */
1302                 for (i = 0; i < state->memtupcount; i++)
1303                 {
1304                         SortTuple  *mtup = &state->memtuples[i];
1305
1306                         tuple = mtup->tuple;
1307                         mtup->datum1 = index_getattr(tuple,
1308                                                                                  1,
1309                                                                                  RelationGetDescr(state->indexRel),
1310                                                                                  &mtup->isnull1);
1311                 }
1312         }
1313
1314         puttuple_common(state, &stup);
1315
1316         MemoryContextSwitchTo(oldcontext);
1317 }
1318
1319 /*
1320  * Accept one Datum while collecting input data for sort.
1321  *
1322  * If the Datum is pass-by-ref type, the value will be copied.
1323  */
1324 void
1325 tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
1326 {
1327         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1328         SortTuple       stup;
1329
1330         /*
1331          * Pass-by-value types or null values are just stored directly in
1332          * stup.datum1 (stup.tuple is not used, and is set to NULL).
1333          *
1334          * Non-null pass-by-reference values need to be copied into memory we
1335          * control, and possibly abbreviated. The copied value is pointed to by
1336          * stup.tuple and is treated as the canonical copy (e.g. to return via
1337          * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
1338          * abbreviated value if abbreviation is happening, otherwise it's
1339          * identical to stup.tuple.
1340          */
1341
1342         if (isNull || state->datumTypeByVal)
1343         {
1344                 stup.datum1 = val;
1345                 stup.isnull1 = isNull;
1346                 stup.tuple = NULL;              /* no separate storage */
1347         }
1348         else
1349         {
1350                 Datum           original = datumCopy(val, false, state->datumTypeLen);
1351
1352                 stup.isnull1 = false;
1353                 stup.tuple = DatumGetPointer(original);
1354                 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1355
1356                 if (!state->sortKeys->abbrev_converter)
1357                 {
1358                         stup.datum1 = original;
1359                 }
1360                 else if (!consider_abort_common(state))
1361                 {
1362                         /* Store abbreviated key representation */
1363                         stup.datum1 = state->sortKeys->abbrev_converter(original,
1364                                                                                                                         state->sortKeys);
1365                 }
1366                 else
1367                 {
1368                         /* Abort abbreviation */
1369                         int                     i;
1370
1371                         stup.datum1 = original;
1372
1373                         /*
1374                          * Set state to be consistent with never trying abbreviation.
1375                          *
1376                          * Alter datum1 representation in already-copied tuples, so as to
1377                          * ensure a consistent representation (current tuple was just
1378                          * handled).  It does not matter if some dumped tuples are
1379                          * already sorted on tape, since serialized tuples lack
1380                          * abbreviated keys (TSS_BUILDRUNS state prevents control
1381                          * reaching here in any case).
1382                          */
1383                         for (i = 0; i < state->memtupcount; i++)
1384                         {
1385                                 SortTuple  *mtup = &state->memtuples[i];
1386
1387                                 mtup->datum1 = PointerGetDatum(mtup->tuple);
1388                         }
1389                 }
1390         }
1391
1392         puttuple_common(state, &stup);
1393
1394         MemoryContextSwitchTo(oldcontext);
1395 }
1396
1397 /*
1398  * Shared code for tuple and datum cases.
1399  */
1400 static void
1401 puttuple_common(Tuplesortstate *state, SortTuple *tuple)
1402 {
1403         switch (state->status)
1404         {
1405                 case TSS_INITIAL:
1406
1407                         /*
1408                          * Save the tuple into the unsorted array.  First, grow the array
1409                          * as needed.  Note that we try to grow the array when there is
1410                          * still one free slot remaining --- if we fail, there'll still be
1411                          * room to store the incoming tuple, and then we'll switch to
1412                          * tape-based operation.
1413                          */
1414                         if (state->memtupcount >= state->memtupsize - 1)
1415                         {
1416                                 (void) grow_memtuples(state);
1417                                 Assert(state->memtupcount < state->memtupsize);
1418                         }
1419                         state->memtuples[state->memtupcount++] = *tuple;
1420
1421                         /*
1422                          * Check if it's time to switch over to a bounded heapsort. We do
1423                          * so if the input tuple count exceeds twice the desired tuple
1424                          * count (this is a heuristic for where heapsort becomes cheaper
1425                          * than a quicksort), or if we've just filled workMem and have
1426                          * enough tuples to meet the bound.
1427                          *
1428                          * Note that once we enter TSS_BOUNDED state we will always try to
1429                          * complete the sort that way.  In the worst case, if later input
1430                          * tuples are larger than earlier ones, this might cause us to
1431                          * exceed workMem significantly.
1432                          */
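                        /*
                         * For example, with bound = 100 (say, LIMIT 100) we switch as
                         * the 201st tuple arrives, or as soon as the 101st has arrived
                         * if workMem fills first.
                         */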
1433                         if (state->bounded &&
1434                                 (state->memtupcount > state->bound * 2 ||
1435                                  (state->memtupcount > state->bound && LACKMEM(state))))
1436                         {
1437 #ifdef TRACE_SORT
1438                                 if (trace_sort)
1439                                         elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1440                                                  state->memtupcount,
1441                                                  pg_rusage_show(&state->ru_start));
1442 #endif
1443                                 make_bounded_heap(state);
1444                                 return;
1445                         }
1446
1447                         /*
1448                          * Done if we still fit in available memory and have array slots.
1449                          */
1450                         if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1451                                 return;
1452
1453                         /*
1454                          * Nope; time to switch to tape-based operation.
1455                          */
1456                         inittapes(state);
1457
1458                         /*
1459                          * Dump tuples until we are back under the limit.
1460                          */
1461                         dumptuples(state, false);
1462                         break;
1463
1464                 case TSS_BOUNDED:
1465
1466                         /*
1467                          * We don't want to grow the array here, so check whether the new
1468                          * tuple can be discarded before putting it in.  This should be a
1469                          * good speed optimization, too, since when there are many more
1470                          * input tuples than the bound, most input tuples can be discarded
1471                          * with just this one comparison.  Note that because we currently
1472                          * have the sort direction reversed, we must check for <= not >=.
1473                          */
1474                         if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1475                         {
1476                                 /* new tuple <= top of the heap, so we can discard it */
1477                                 free_sort_tuple(state, tuple);
1478                                 CHECK_FOR_INTERRUPTS();
1479                         }
1480                         else
1481                         {
1482                                 /* discard top of heap, sift up, insert new tuple */
1483                                 free_sort_tuple(state, &state->memtuples[0]);
1484                                 tuplesort_heap_siftup(state, false);
1485                                 tuplesort_heap_insert(state, tuple, 0, false);
1486                         }
1487                         break;
1488
1489                 case TSS_BUILDRUNS:
1490
1491                         /*
1492                          * Insert the tuple into the heap, with run number currentRun if
1493                          * it can go into the current run, else run number currentRun+1.
1494                          * The tuple can go into the current run if it is >= the first
1495                          * not-yet-output tuple.  (Actually, it could go into the current
1496                          * run if it is >= the most recently output tuple ... but that
1497                          * would require keeping around the tuple we last output, and it's
1498                          * simplest to let writetup free each tuple as soon as it's
1499                          * written.)
1500                          *
1501                          * Note there will always be at least one tuple in the heap at
1502                          * this point; see dumptuples.
1503                          */
1504                         Assert(state->memtupcount > 0);
1505                         if (COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1506                                 tuplesort_heap_insert(state, tuple, state->currentRun, true);
1507                         else
1508                                 tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);
1509
1510                         /*
1511                          * If we are over the memory limit, dump tuples till we're under.
1512                          */
1513                         dumptuples(state, false);
1514                         break;
1515
1516                 default:
1517                         elog(ERROR, "invalid tuplesort state");
1518                         break;
1519         }
1520 }
1521
1522 static bool
1523 consider_abort_common(Tuplesortstate *state)
1524 {
1525         Assert(state->sortKeys[0].abbrev_converter != NULL);
1526         Assert(state->sortKeys[0].abbrev_abort != NULL);
1527         Assert(state->sortKeys[0].abbrev_full_comparator != NULL);
1528
1529         /*
1530          * Check effectiveness of abbreviation optimization.  Consider aborting
1531          * when still within memory limit.
1532          */
1533         if (state->status == TSS_INITIAL &&
1534                 state->memtupcount >= state->abbrevNext)
1535         {
1536                 state->abbrevNext *= 2;
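                /*
                 * Since abbrevNext doubles each time, the abort check fires at
                 * geometrically spaced tuple counts (10, 20, 40, 80, ... assuming
                 * the conventional initial abbrevNext of 10), i.e. only O(log N)
                 * times over the whole input.
                 */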
1537
1538                 /*
1539                  * Check opclass-supplied abbreviation abort routine.  It may indicate
1540                  * that abbreviation should not proceed.
1541                  */
1542                 if (!state->sortKeys->abbrev_abort(state->memtupcount,
1543                                                                                    state->sortKeys))
1544                         return false;
1545
1546                 /*
1547                  * Finally, restore authoritative comparator, and indicate that
1548                  * abbreviation is not in play by setting abbrev_converter to NULL
1549                  */
1550                 state->sortKeys[0].comparator = state->sortKeys[0].abbrev_full_comparator;
1551                 state->sortKeys[0].abbrev_converter = NULL;
1552                 /* Not strictly necessary, but be tidy */
1553                 state->sortKeys[0].abbrev_abort = NULL;
1554                 state->sortKeys[0].abbrev_full_comparator = NULL;
1555
1556                 /* Give up - expect original pass-by-value representation */
1557                 return true;
1558         }
1559
1560         return false;
1561 }
1562
1563 /*
1564  * All tuples have been provided; finish the sort.
1565  */
1566 void
1567 tuplesort_performsort(Tuplesortstate *state)
1568 {
1569         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1570
1571 #ifdef TRACE_SORT
1572         if (trace_sort)
1573                 elog(LOG, "performsort starting: %s",
1574                          pg_rusage_show(&state->ru_start));
1575 #endif
1576
1577         switch (state->status)
1578         {
1579                 case TSS_INITIAL:
1580
1581                         /*
1582                          * We were able to accumulate all the tuples within the allowed
1583                          * amount of memory.  Just qsort 'em and we're done.
1584                          */
1585                         if (state->memtupcount > 1)
1586                         {
1587                                 /* Can we use the single-key sort function? */
1588                                 if (state->onlyKey != NULL)
1589                                         qsort_ssup(state->memtuples, state->memtupcount,
1590                                                            state->onlyKey);
1591                                 else
1592                                         qsort_tuple(state->memtuples,
1593                                                                 state->memtupcount,
1594                                                                 state->comparetup,
1595                                                                 state);
1596                         }
1597                         state->current = 0;
1598                         state->eof_reached = false;
1599                         state->markpos_offset = 0;
1600                         state->markpos_eof = false;
1601                         state->status = TSS_SORTEDINMEM;
1602                         break;
1603
1604                 case TSS_BOUNDED:
1605
1606                         /*
1607                          * We were able to accumulate all the tuples required for output
1608                          * in memory, using a heap to eliminate excess tuples.  Now we
1609                          * have to transform the heap to a properly-sorted array.
1610                          */
1611                         sort_bounded_heap(state);
1612                         state->current = 0;
1613                         state->eof_reached = false;
1614                         state->markpos_offset = 0;
1615                         state->markpos_eof = false;
1616                         state->status = TSS_SORTEDINMEM;
1617                         break;
1618
1619                 case TSS_BUILDRUNS:
1620
1621                         /*
1622                          * Finish tape-based sort.  First, flush all tuples remaining in
1623                          * memory out to tape; then merge until we have a single remaining
1624                          * run (or, if !randomAccess, one run per tape). Note that
1625                          * mergeruns sets the correct state->status.
1626                          */
1627                         dumptuples(state, true);
1628                         mergeruns(state);
1629                         state->eof_reached = false;
1630                         state->markpos_block = 0L;
1631                         state->markpos_offset = 0;
1632                         state->markpos_eof = false;
1633                         break;
1634
1635                 default:
1636                         elog(ERROR, "invalid tuplesort state");
1637                         break;
1638         }
1639
1640 #ifdef TRACE_SORT
1641         if (trace_sort)
1642         {
1643                 if (state->status == TSS_FINALMERGE)
1644                         elog(LOG, "performsort done (except %d-way final merge): %s",
1645                                  state->activeTapes,
1646                                  pg_rusage_show(&state->ru_start));
1647                 else
1648                         elog(LOG, "performsort done: %s",
1649                                  pg_rusage_show(&state->ru_start));
1650         }
1651 #endif
1652
1653         MemoryContextSwitchTo(oldcontext);
1654 }
1655
1656 /*
1657  * Internal routine to fetch the next tuple in either forward or back
1658  * direction into *stup.  Returns FALSE if no more tuples.
1659  * If *should_free is set, the caller must pfree stup.tuple when done with it.
1660  */
1661 static bool
1662 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
1663                                                   SortTuple *stup, bool *should_free)
1664 {
1665         unsigned int tuplen;
1666
1667         switch (state->status)
1668         {
1669                 case TSS_SORTEDINMEM:
1670                         Assert(forward || state->randomAccess);
1671                         *should_free = false;
1672                         if (forward)
1673                         {
1674                                 if (state->current < state->memtupcount)
1675                                 {
1676                                         *stup = state->memtuples[state->current++];
1677                                         return true;
1678                                 }
1679                                 state->eof_reached = true;
1680
1681                                 /*
1682                                  * Complain if caller tries to retrieve more tuples than
1683                                  * originally asked for in a bounded sort.  Tuples past the
1684                                  * bound were discarded, so returning EOF here would be wrong.
1685                                  */
1686                                 if (state->bounded && state->current >= state->bound)
1687                                         elog(ERROR, "retrieved too many tuples in a bounded sort");
1688
1689                                 return false;
1690                         }
1691                         else
1692                         {
1693                                 if (state->current <= 0)
1694                                         return false;
1695
1696                                 /*
1697                                  * If all tuples have been fetched already, return the last
1698                                  * tuple; otherwise return the tuple before the last returned.
1699                                  */
1700                                 if (state->eof_reached)
1701                                         state->eof_reached = false;
1702                                 else
1703                                 {
1704                                         state->current--;       /* last returned tuple */
1705                                         if (state->current <= 0)
1706                                                 return false;
1707                                 }
1708                                 *stup = state->memtuples[state->current - 1];
1709                                 return true;
1710                         }
1711                         break;
1712
1713                 case TSS_SORTEDONTAPE:
1714                         Assert(forward || state->randomAccess);
1715                         *should_free = true;
1716                         if (forward)
1717                         {
1718                                 if (state->eof_reached)
1719                                         return false;
1720                                 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1721                                 {
1722                                         READTUP(state, stup, state->result_tape, tuplen);
1723                                         return true;
1724                                 }
1725                                 else
1726                                 {
1727                                         state->eof_reached = true;
1728                                         return false;
1729                                 }
1730                         }
1731
1732                         /*
1733                          * Backward.
1734                          *
1735                          * If all tuples have been fetched already, return the last tuple;
1736                          * otherwise return the tuple before the last one returned.
1737                          */
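                        /*
                         * Sketch of the on-tape record layout this code walks (in the
                         * randomAccess case each tuple is written with both a leading
                         * and a trailing copy of its length word, precisely so that we
                         * can back up like this):
                         *
                         *      [len1][tuple1 body][len1] [len2][tuple2 body][len2] ... [0]
                         *
                         * where the final zero length word marks the end of the run.
                         */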
1738                         if (state->eof_reached)
1739                         {
1740                                 /*
1741                                  * Seek position is pointing just past the zero tuplen at the
1742                                  * end of file; back up to fetch last tuple's ending length
1743                                  * word.  If seek fails we must have a completely empty file.
1744                                  */
1745                                 if (!LogicalTapeBackspace(state->tapeset,
1746                                                                                   state->result_tape,
1747                                                                                   2 * sizeof(unsigned int)))
1748                                         return false;
1749                                 state->eof_reached = false;
1750                         }
1751                         else
1752                         {
1753                                 /*
1754                                  * Back up and fetch previously-returned tuple's ending length
1755                                  * word.  If seek fails, assume we are at start of file.
1756                                  */
1757                                 if (!LogicalTapeBackspace(state->tapeset,
1758                                                                                   state->result_tape,
1759                                                                                   sizeof(unsigned int)))
1760                                         return false;
1761                                 tuplen = getlen(state, state->result_tape, false);
1762
1763                                 /*
1764                                  * Back up to get ending length word of tuple before it.
1765                                  */
1766                                 if (!LogicalTapeBackspace(state->tapeset,
1767                                                                                   state->result_tape,
1768                                                                                   tuplen + 2 * sizeof(unsigned int)))
1769                                 {
1770                                         /*
1771                                          * If that fails, presumably the prev tuple is the first
1772                                          * in the file.  Back up so that it becomes next to read
1773                                          * in forward direction (not obviously right, but that is
1774                                          * what in-memory case does).
1775                                          */
1776                                         if (!LogicalTapeBackspace(state->tapeset,
1777                                                                                           state->result_tape,
1778                                                                                           tuplen + sizeof(unsigned int)))
1779                                                 elog(ERROR, "bogus tuple length in backward scan");
1780                                         return false;
1781                                 }
1782                         }
1783
1784                         tuplen = getlen(state, state->result_tape, false);
1785
1786                         /*
1787                          * Now we have the length of the prior tuple, back up and read it.
1788                          * Note: READTUP expects we are positioned after the initial
1789                          * length word of the tuple, so back up to that point.
1790                          */
1791                         if (!LogicalTapeBackspace(state->tapeset,
1792                                                                           state->result_tape,
1793                                                                           tuplen))
1794                                 elog(ERROR, "bogus tuple length in backward scan");
1795                         READTUP(state, stup, state->result_tape, tuplen);
1796                         return true;
1797
1798                 case TSS_FINALMERGE:
1799                         Assert(forward);
1800                         *should_free = true;
1801
1802                         /*
1803                          * This code should match the inner loop of mergeonerun().
1804                          */
1805                         if (state->memtupcount > 0)
1806                         {
1807                                 int                     srcTape = state->memtuples[0].tupindex;
1808                                 Size            tuplen;
1809                                 int                     tupIndex;
1810                                 SortTuple  *newtup;
1811
1812                                 *stup = state->memtuples[0];
1813                                 /* returned tuple is no longer counted in our memory space */
1814                                 if (stup->tuple)
1815                                 {
1816                                         tuplen = GetMemoryChunkSpace(stup->tuple);
1817                                         state->availMem += tuplen;
1818                                         state->mergeavailmem[srcTape] += tuplen;
1819                                 }
1820                                 tuplesort_heap_siftup(state, false);
1821                                 if ((tupIndex = state->mergenext[srcTape]) == 0)
1822                                 {
1823                                         /*
1824                                          * out of preloaded data on this tape, try to read more
1825                                          *
1826                                          * Unlike mergeonerun(), we only preload from the single
1827                                          * tape that's run dry.  See mergepreread() comments.
1828                                          */
1829                                         mergeprereadone(state, srcTape);
1830
1831                                         /*
1832                                          * if still no data, we've reached end of run on this tape
1833                                          */
1834                                         if ((tupIndex = state->mergenext[srcTape]) == 0)
1835                                                 return true;
1836                                 }
1837                                 /* pull next preread tuple from list, insert in heap */
1838                                 newtup = &state->memtuples[tupIndex];
1839                                 state->mergenext[srcTape] = newtup->tupindex;
1840                                 if (state->mergenext[srcTape] == 0)
1841                                         state->mergelast[srcTape] = 0;
1842                                 tuplesort_heap_insert(state, newtup, srcTape, false);
1843                                 /* put the now-unused memtuples entry on the freelist */
1844                                 newtup->tupindex = state->mergefreelist;
1845                                 state->mergefreelist = tupIndex;
1846                                 state->mergeavailslots[srcTape]++;
1847                                 return true;
1848                         }
1849                         return false;
1850
1851                 default:
1852                         elog(ERROR, "invalid tuplesort state");
1853                         return false;           /* keep compiler quiet */
1854         }
1855 }
1856
1857 /*
1858  * Fetch the next tuple in either forward or back direction.
1859  * If successful, put tuple in slot and return TRUE; else, clear the slot
1860  * and return FALSE.
1861  */
1862 bool
1863 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
1864                                            TupleTableSlot *slot)
1865 {
1866         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1867         SortTuple       stup;
1868         bool            should_free;
1869
1870         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
1871                 stup.tuple = NULL;
1872
1873         MemoryContextSwitchTo(oldcontext);
1874
1875         if (stup.tuple)
1876         {
1877                 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
1878                 return true;
1879         }
1880         else
1881         {
1882                 ExecClearTuple(slot);
1883                 return false;
1884         }
1885 }
1886
1887 /*
1888  * Fetch the next tuple in either forward or back direction.
1889  * Returns NULL if no more tuples.  If *should_free is set, the
1890  * caller must pfree the returned tuple when done with it.
1891  */
1892 HeapTuple
1893 tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
1894 {
1895         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1896         SortTuple       stup;
1897
1898         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
1899                 stup.tuple = NULL;
1900
1901         MemoryContextSwitchTo(oldcontext);
1902
1903         return stup.tuple;
1904 }
1905
1906 /*
1907  * Fetch the next index tuple in either forward or back direction.
1908  * Returns NULL if no more tuples.  If *should_free is set, the
1909  * caller must pfree the returned tuple when done with it.
1910  */
1911 IndexTuple
1912 tuplesort_getindextuple(Tuplesortstate *state, bool forward,
1913                                                 bool *should_free)
1914 {
1915         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1916         SortTuple       stup;
1917
1918         if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
1919                 stup.tuple = NULL;
1920
1921         MemoryContextSwitchTo(oldcontext);
1922
1923         return (IndexTuple) stup.tuple;
1924 }
1925
1926 /*
1927  * Fetch the next Datum in either forward or back direction.
1928  * Returns FALSE if no more datums.
1929  *
1930  * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
1931  * and is now owned by the caller.
1932  */
1933 bool
1934 tuplesort_getdatum(Tuplesortstate *state, bool forward,
1935                                    Datum *val, bool *isNull)
1936 {
1937         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1938         SortTuple       stup;
1939         bool            should_free;
1940
1941         if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
1942         {
1943                 MemoryContextSwitchTo(oldcontext);
1944                 return false;
1945         }
1946
1947         if (stup.isnull1 || state->datumTypeByVal)
1948         {
1949                 *val = stup.datum1;
1950                 *isNull = stup.isnull1;
1951         }
1952         else
1953         {
1954                 /* use stup.tuple because stup.datum1 may be an abbreviation */
1955
1956                 if (should_free)
1957                         *val = PointerGetDatum(stup.tuple);
1958                 else
1959                         *val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen);
1960                 *isNull = false;
1961         }
1962
1963         MemoryContextSwitchTo(oldcontext);
1964
1965         return true;
1966 }
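
/*
 * Hypothetical caller sketch (illustrative only, not used anywhere): a
 * minimal use of the Datum-sort API above.  Assumes the
 * tuplesort_begin_datum() and tuplesort_end() entry points declared in
 * tuplesort.h, plus INT4OID and Int4LessOperator from the catalog headers,
 * which this file does not necessarily include.
 */
static void
datum_sort_usage_sketch(void)
{
	Tuplesortstate *ts;
	Datum		val;
	bool		isNull;
	int			i;

	ts = tuplesort_begin_datum(INT4OID, Int4LessOperator, InvalidOid,
							   false, work_mem, false);

	for (i = 100; i > 0; i--)
		tuplesort_putdatum(ts, Int32GetDatum(i), false);

	tuplesort_performsort(ts);

	while (tuplesort_getdatum(ts, true, &val, &isNull))
		Assert(!isNull);		/* consume values in ascending order */

	tuplesort_end(ts);
}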
1967
1968 /*
1969  * Advance over N tuples in either forward or back direction,
1970  * without returning any data.  N==0 is a no-op.
1971  * Returns TRUE if successful, FALSE if ran out of tuples.
1972  */
1973 bool
1974 tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
1975 {
1976         MemoryContext oldcontext;
1977
1978         /*
1979          * We don't actually support backwards skip yet, because no callers need
1980          * it.  The API is designed to allow for that later, though.
1981          */
1982         Assert(forward);
1983         Assert(ntuples >= 0);
1984
1985         switch (state->status)
1986         {
1987                 case TSS_SORTEDINMEM:
1988                         if (state->memtupcount - state->current >= ntuples)
1989                         {
1990                                 state->current += ntuples;
1991                                 return true;
1992                         }
1993                         state->current = state->memtupcount;
1994                         state->eof_reached = true;
1995
1996                         /*
1997                          * Complain if caller tries to retrieve more tuples than
1998                          * originally asked for in a bounded sort.  Tuples past the
1999                          * bound were discarded, so returning EOF here would be wrong.
2000                          */
2001                         if (state->bounded && state->current >= state->bound)
2002                                 elog(ERROR, "retrieved too many tuples in a bounded sort");
2003
2004                         return false;
2005
2006                 case TSS_SORTEDONTAPE:
2007                 case TSS_FINALMERGE:
2008
2009                         /*
2010                          * We could probably optimize these cases better, but for now it's
2011                          * not worth the trouble.
2012                          */
2013                         oldcontext = MemoryContextSwitchTo(state->sortcontext);
2014                         while (ntuples-- > 0)
2015                         {
2016                                 SortTuple       stup;
2017                                 bool            should_free;
2018
2019                                 if (!tuplesort_gettuple_common(state, forward,
2020                                                                                            &stup, &should_free))
2021                                 {
2022                                         MemoryContextSwitchTo(oldcontext);
2023                                         return false;
2024                                 }
2025                                 if (should_free && stup.tuple)
2026                                         pfree(stup.tuple);
2027                                 CHECK_FOR_INTERRUPTS();
2028                         }
2029                         MemoryContextSwitchTo(oldcontext);
2030                         return true;
2031
2032                 default:
2033                         elog(ERROR, "invalid tuplesort state");
2034                         return false;           /* keep compiler quiet */
2035         }
2036 }
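
/*
 * Hypothetical caller sketch (illustrative only, not used anywhere):
 * discard the first "offset" tuples and then read the remainder, e.g. to
 * implement an OFFSET-like fetch.  Assumes a slot obtained elsewhere,
 * e.g. via MakeSingleTupleTableSlot().
 */
static void
skip_then_fetch_sketch(Tuplesortstate *ts, TupleTableSlot *slot, int64 offset)
{
	if (!tuplesort_skiptuples(ts, offset, true))
		return;					/* fewer than "offset" tuples in total */

	while (tuplesort_gettupleslot(ts, true, slot))
	{
		/* process one sorted tuple from the slot here */
	}
}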
2037
2038 /*
2039  * tuplesort_merge_order - report merge order we'll use for given memory
2040  * (note: "merge order" just means the number of input tapes in the merge).
2041  *
2042  * This is exported for use by the planner.  allowedMem is in bytes.
2043  */
2044 int
2045 tuplesort_merge_order(int64 allowedMem)
2046 {
2047         int                     mOrder;
2048
2049         /*
2050          * We need one tape for each merge input, plus another one for the output,
2051          * and each of these tapes needs buffer space.  In addition we want
2052          * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
2053          * count).
2054          *
2055          * Note: you might be thinking we need to account for the memtuples[]
2056          * array in this calculation, but we effectively treat that as part of the
2057          * MERGE_BUFFER_SIZE workspace.
2058          */
2059         mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
2060                 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
2061
2062         /* Even in minimum memory, use at least a MINORDER merge */
2063         mOrder = Max(mOrder, MINORDER);
2064
2065         return mOrder;
2066 }
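
/*
 * Worked example, assuming the values defined earlier in this file are
 * TAPE_BUFFER_OVERHEAD = 3 * BLCKSZ, MERGE_BUFFER_SIZE = 32 * BLCKSZ,
 * MINORDER = 6, and BLCKSZ = 8192: with allowedMem = 4MB,
 *
 *              mOrder = (4194304 - 24576) / (262144 + 24576) = 14
 *
 * so a 4MB sort can merge up to fourteen input tapes per pass.  Below
 * roughly 1.7MB the quotient falls under MINORDER and is clamped to 6.
 */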
2067
2068 /*
2069  * inittapes - initialize for tape sorting.
2070  *
2071  * This is called only if we have found we don't have room to sort in memory.
2072  */
2073 static void
2074 inittapes(Tuplesortstate *state)
2075 {
2076         int                     maxTapes,
2077                                 ntuples,
2078                                 j;
2079         int64           tapeSpace;
2080
2081         /* Compute number of tapes to use: merge order plus 1 */
2082         maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
2083
2084         /*
2085          * We must have at least 2*maxTapes slots in the memtuples[] array, else
2086          * we'd not have room for merge heap plus preread.  It seems unlikely that
2087          * this case would ever occur, but be safe.
2088          */
2089         maxTapes = Min(maxTapes, state->memtupsize / 2);
2090
2091         state->maxTapes = maxTapes;
2092         state->tapeRange = maxTapes - 1;
2093
2094 #ifdef TRACE_SORT
2095         if (trace_sort)
2096                 elog(LOG, "switching to external sort with %d tapes: %s",
2097                          maxTapes, pg_rusage_show(&state->ru_start));
2098 #endif
2099
2100         /*
2101          * Decrease availMem to reflect the space needed for tape buffers; but
2102          * don't decrease it to the point that we have no room for tuples. (That
2103          * case is only likely to occur if sorting pass-by-value Datums; in all
2104          * other scenarios the memtuples[] array is unlikely to occupy more than
2105          * half of allowedMem.  In the pass-by-value case it's not important to
2106          * account for tuple space, so we don't care if LACKMEM becomes
2107          * inaccurate.)
2108          */
2109         tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
2110
2111         if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
2112                 USEMEM(state, tapeSpace);
2113
2114         /*
2115          * Make sure that the temp file(s) underlying the tape set are created in
2116          * suitable temp tablespaces.
2117          */
2118         PrepareTempTablespaces();
2119
2120         /*
2121          * Create the tape set and allocate the per-tape data arrays.
2122          */
2123         state->tapeset = LogicalTapeSetCreate(maxTapes);
2124
2125         state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
2126         state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
2127         state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
2128         state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
2129         state->mergeavailmem = (int64 *) palloc0(maxTapes * sizeof(int64));
2130         state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
2131         state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
2132         state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
2133         state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
2134
2135         /*
2136          * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
2137          * marked as belonging to run number zero.
2138          *
2139          * NOTE: we pass false for checkIndex since there's no point in comparing
2140          * indexes in this step, even though we do intend the indexes to be part
2141          * of the sort key...
2142          */
2143         ntuples = state->memtupcount;
2144         state->memtupcount = 0;         /* make the heap empty */
2145         for (j = 0; j < ntuples; j++)
2146         {
2147                 /* Must copy source tuple to avoid possible overwrite */
2148                 SortTuple       stup = state->memtuples[j];
2149
2150                 tuplesort_heap_insert(state, &stup, 0, false);
2151         }
2152         Assert(state->memtupcount == ntuples);
2153
2154         state->currentRun = 0;
2155
2156         /*
2157          * Initialize variables of Algorithm D (step D1).
2158          */
2159         for (j = 0; j < maxTapes; j++)
2160         {
2161                 state->tp_fib[j] = 1;
2162                 state->tp_runs[j] = 0;
2163                 state->tp_dummy[j] = 1;
2164                 state->tp_tapenum[j] = j;
2165         }
2166         state->tp_fib[state->tapeRange] = 0;
2167         state->tp_dummy[state->tapeRange] = 0;
2168
2169         state->Level = 1;
2170         state->destTape = 0;
2171
2172         state->status = TSS_BUILDRUNS;
2173 }
2174
2175 /*
2176  * selectnewtape -- select new tape for new initial run.
2177  *
2178  * This is called after finishing a run when we know another run
2179  * must be started.  This implements steps D3, D4 of Algorithm D.
2180  */
2181 static void
2182 selectnewtape(Tuplesortstate *state)
2183 {
2184         int                     j;
2185         int                     a;
2186
2187         /* Step D3: advance j (destTape) */
2188         if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
2189         {
2190                 state->destTape++;
2191                 return;
2192         }
2193         if (state->tp_dummy[state->destTape] != 0)
2194         {
2195                 state->destTape = 0;
2196                 return;
2197         }
2198
2199         /* Step D4: increase level */
2200         state->Level++;
2201         a = state->tp_fib[0];
2202         for (j = 0; j < state->tapeRange; j++)
2203         {
2204                 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
2205                 state->tp_fib[j] = a + state->tp_fib[j + 1];
2206         }
2207         state->destTape = 0;
2208 }
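
/*
 * Worked example: with maxTapes = 4 (three input tapes), step D4 above
 * generates the generalized-Fibonacci target distributions
 *
 *              level 1: tp_fib = (1,1,1)       total  3 runs
 *              level 2: tp_fib = (2,2,1)       total  5 runs
 *              level 3: tp_fib = (4,3,2)       total  9 runs
 *              level 4: tp_fib = (7,6,4)       total 17 runs
 *
 * while tp_dummy[] records how far short of the current target each
 * input tape still is.
 */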
2209
2210 /*
2211  * mergeruns -- merge all the completed initial runs.
2212  *
2213  * This implements steps D5, D6 of Algorithm D.  All input data has
2214  * already been written to initial runs on tape (see dumptuples).
2215  */
2216 static void
2217 mergeruns(Tuplesortstate *state)
2218 {
2219         int                     tapenum,
2220                                 svTape,
2221                                 svRuns,
2222                                 svDummy;
2223
2224         Assert(state->status == TSS_BUILDRUNS);
2225         Assert(state->memtupcount == 0);
2226
2227         /*
2228          * If we produced only one initial run (quite likely if the total data
2229          * volume is between 1X and 2X workMem), we can just use that tape as the
2230          * finished output, rather than doing a useless merge.  (This obvious
2231          * optimization is not in Knuth's algorithm.)
2232          */
2233         if (state->currentRun == 1)
2234         {
2235                 state->result_tape = state->tp_tapenum[state->destTape];
2236                 /* must freeze and rewind the finished output tape */
2237                 LogicalTapeFreeze(state->tapeset, state->result_tape);
2238                 state->status = TSS_SORTEDONTAPE;
2239                 return;
2240         }
2241
2242         if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
2243         {
2244                 /*
2245                  * If there are multiple runs to be merged, when we go to read back
2246                  * tuples from disk, abbreviated keys will not have been stored, and
2247                  * we don't care to regenerate them.  Disable abbreviation from this
2248                  * point on.
2249                  */
2250                 state->sortKeys->abbrev_converter = NULL;
2251                 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
2252
2253                 /* Not strictly necessary, but be tidy */
2254                 state->sortKeys->abbrev_abort = NULL;
2255                 state->sortKeys->abbrev_full_comparator = NULL;
2256         }
2257
2258         /* End of step D2: rewind all output tapes to prepare for merging */
2259         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2260                 LogicalTapeRewind(state->tapeset, tapenum, false);
2261
2262         for (;;)
2263         {
2264                 /*
2265                  * At this point we know that tape[T] is empty.  If there's just one
2266                  * (real or dummy) run left on each input tape, then only one merge
2267                  * pass remains.  If we don't have to produce a materialized sorted
2268                  * tape, we can stop at this point and do the final merge on-the-fly.
2269                  */
2270                 if (!state->randomAccess)
2271                 {
2272                         bool            allOneRun = true;
2273
2274                         Assert(state->tp_runs[state->tapeRange] == 0);
2275                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2276                         {
2277                                 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
2278                                 {
2279                                         allOneRun = false;
2280                                         break;
2281                                 }
2282                         }
2283                         if (allOneRun)
2284                         {
2285                                 /* Tell logtape.c we won't be writing anymore */
2286                                 LogicalTapeSetForgetFreeSpace(state->tapeset);
2287                                 /* Initialize for the final merge pass */
2288                                 beginmerge(state);
2289                                 state->status = TSS_FINALMERGE;
2290                                 return;
2291                         }
2292                 }
2293
2294                 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
2295                 while (state->tp_runs[state->tapeRange - 1] ||
2296                            state->tp_dummy[state->tapeRange - 1])
2297                 {
2298                         bool            allDummy = true;
2299
2300                         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2301                         {
2302                                 if (state->tp_dummy[tapenum] == 0)
2303                                 {
2304                                         allDummy = false;
2305                                         break;
2306                                 }
2307                         }
2308
2309                         if (allDummy)
2310                         {
2311                                 state->tp_dummy[state->tapeRange]++;
2312                                 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2313                                         state->tp_dummy[tapenum]--;
2314                         }
2315                         else
2316                                 mergeonerun(state);
2317                 }
2318
2319                 /* Step D6: decrease level */
2320                 if (--state->Level == 0)
2321                         break;
2322                 /* rewind output tape T to use as new input */
2323                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange],
2324                                                   false);
2325                 /* rewind used-up input tape P, and prepare it for write pass */
2326                 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1],
2327                                                   true);
2328                 state->tp_runs[state->tapeRange - 1] = 0;
2329
2330                 /*
2331                  * reassign tape units per step D6; note we no longer care about A[]
2332                  */
2333                 svTape = state->tp_tapenum[state->tapeRange];
2334                 svDummy = state->tp_dummy[state->tapeRange];
2335                 svRuns = state->tp_runs[state->tapeRange];
2336                 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
2337                 {
2338                         state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
2339                         state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
2340                         state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
2341                 }
2342                 state->tp_tapenum[0] = svTape;
2343                 state->tp_dummy[0] = svDummy;
2344                 state->tp_runs[0] = svRuns;
2345         }
2346
2347         /*
2348          * Done.  Knuth says that the result is on TAPE[1], but since we exited
2349          * the loop without performing the last iteration of step D6, we have not
2350          * rearranged the tape unit assignment, and therefore the result is on
2351          * TAPE[T].  We need to do it this way so that we can freeze the final
2352          * output tape while rewinding it.  The last iteration of step D6 would be
2353          * a waste of cycles anyway...
2354          */
2355         state->result_tape = state->tp_tapenum[state->tapeRange];
2356         LogicalTapeFreeze(state->tapeset, state->result_tape);
2357         state->status = TSS_SORTEDONTAPE;
2358 }
2359
2360 /*
2361  * Merge one run from each input tape, except ones with dummy runs.
2362  *
2363  * This is the inner loop of Algorithm D step D5.  We know that the
2364  * output tape is TAPE[T].
2365  */
2366 static void
2367 mergeonerun(Tuplesortstate *state)
2368 {
2369         int                     destTape = state->tp_tapenum[state->tapeRange];
2370         int                     srcTape;
2371         int                     tupIndex;
2372         SortTuple  *tup;
2373         int64           priorAvail,
2374                                 spaceFreed;
2375
2376         /*
2377          * Start the merge by loading one tuple from each active source tape into
2378          * the heap.  We can also decrease the input run/dummy run counts.
2379          */
2380         beginmerge(state);
2381
2382         /*
2383          * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2384          * out, and replacing it with next tuple from same tape (if there is
2385          * another one).
2386          */
2387         while (state->memtupcount > 0)
2388         {
2389                 /* write the tuple to destTape */
2390                 priorAvail = state->availMem;
2391                 srcTape = state->memtuples[0].tupindex;
2392                 WRITETUP(state, destTape, &state->memtuples[0]);
2393                 /* writetup adjusted total free space, now fix per-tape space */
2394                 spaceFreed = state->availMem - priorAvail;
2395                 state->mergeavailmem[srcTape] += spaceFreed;
2396                 /* compact the heap */
2397                 tuplesort_heap_siftup(state, false);
2398                 if ((tupIndex = state->mergenext[srcTape]) == 0)
2399                 {
2400                         /* out of preloaded data on this tape, try to read more */
2401                         mergepreread(state);
2402                         /* if still no data, we've reached end of run on this tape */
2403                         if ((tupIndex = state->mergenext[srcTape]) == 0)
2404                                 continue;
2405                 }
2406                 /* pull next preread tuple from list, insert in heap */
2407                 tup = &state->memtuples[tupIndex];
2408                 state->mergenext[srcTape] = tup->tupindex;
2409                 if (state->mergenext[srcTape] == 0)
2410                         state->mergelast[srcTape] = 0;
2411                 tuplesort_heap_insert(state, tup, srcTape, false);
2412                 /* put the now-unused memtuples entry on the freelist */
2413                 tup->tupindex = state->mergefreelist;
2414                 state->mergefreelist = tupIndex;
2415                 state->mergeavailslots[srcTape]++;
2416         }
2417
2418         /*
2419          * When the heap empties, we're done.  Write an end-of-run marker on the
2420          * output tape, and increment its count of real runs.
2421          */
2422         markrunend(state, destTape);
2423         state->tp_runs[state->tapeRange]++;
2424
2425 #ifdef TRACE_SORT
2426         if (trace_sort)
2427                 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
2428                          pg_rusage_show(&state->ru_start));
2429 #endif
2430 }
2431
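/*
 * A note on the space accounting in mergeonerun() above: WRITETUP frees the
 * written tuple and credits availMem globally, so the delta between
 * priorAvail and availMem is the number of bytes the source tape just gave
 * back.  For example (figures invented), if the written tuple occupied 72
 * bytes of palloc'd space, spaceFreed is 72 and mergeavailmem[srcTape]
 * grows by 72, letting that tape preread roughly one more tuple of the
 * same size later on.
 */
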
2432 /*
2433  * beginmerge - initialize for a merge pass
2434  *
2435  * We decrease the counts of real and dummy runs for each tape, and mark
2436  * which tapes contain active input runs in mergeactive[].  Then, load
2437  * as many tuples as we can from each active input tape, and finally
2438  * fill the merge heap with the first tuple from each active tape.
2439  */
2440 static void
2441 beginmerge(Tuplesortstate *state)
2442 {
2443         int                     activeTapes;
2444         int                     tapenum;
2445         int                     srcTape;
2446         int                     slotsPerTape;
2447         int64           spacePerTape;
2448
2449         /* Heap should be empty here */
2450         Assert(state->memtupcount == 0);
2451
2452         /* Adjust run counts and mark the active tapes */
2453         memset(state->mergeactive, 0,
2454                    state->maxTapes * sizeof(*state->mergeactive));
2455         activeTapes = 0;
2456         for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2457         {
2458                 if (state->tp_dummy[tapenum] > 0)
2459                         state->tp_dummy[tapenum]--;
2460                 else
2461                 {
2462                         Assert(state->tp_runs[tapenum] > 0);
2463                         state->tp_runs[tapenum]--;
2464                         srcTape = state->tp_tapenum[tapenum];
2465                         state->mergeactive[srcTape] = true;
2466                         activeTapes++;
2467                 }
2468         }
2469         state->activeTapes = activeTapes;
2470
2471         /* Clear merge-pass state variables */
2472         memset(state->mergenext, 0,
2473                    state->maxTapes * sizeof(*state->mergenext));
2474         memset(state->mergelast, 0,
2475                    state->maxTapes * sizeof(*state->mergelast));
2476         state->mergefreelist = 0;       /* nothing in the freelist */
2477         state->mergefirstfree = activeTapes;            /* 1st slot avail for preread */
2478
2479         /*
2480          * Initialize space allocation to let each active input tape have an equal
2481          * share of preread space.
2482          */
2483         Assert(activeTapes > 0);
2484         slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
2485         Assert(slotsPerTape > 0);
2486         spacePerTape = state->availMem / activeTapes;
2487         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2488         {
2489                 if (state->mergeactive[srcTape])
2490                 {
2491                         state->mergeavailslots[srcTape] = slotsPerTape;
2492                         state->mergeavailmem[srcTape] = spacePerTape;
2493                 }
2494         }
2495
2496         /*
2497          * Preread as many tuples as possible (and at least one) from each active
2498          * tape.
2499          */
2500         mergepreread(state);
2501
2502         /* Load the merge heap with the first tuple from each input tape */
2503         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2504         {
2505                 int                     tupIndex = state->mergenext[srcTape];
2506                 SortTuple  *tup;
2507
2508                 if (tupIndex)
2509                 {
2510                         tup = &state->memtuples[tupIndex];
2511                         state->mergenext[srcTape] = tup->tupindex;
2512                         if (state->mergenext[srcTape] == 0)
2513                                 state->mergelast[srcTape] = 0;
2514                         tuplesort_heap_insert(state, tup, srcTape, false);
2515                         /* put the now-unused memtuples entry on the freelist */
2516                         tup->tupindex = state->mergefreelist;
2517                         state->mergefreelist = tupIndex;
2518                         state->mergeavailslots[srcTape]++;
2519                 }
2520         }
2521 }
2522
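/*
 * Worked example of the preread budgeting in beginmerge() (numbers
 * invented): with memtupsize = 1000 and activeTapes = 4, the first 4
 * memtuples[] slots are reserved for the merge heap, mergefirstfree = 4,
 * and slotsPerTape = (1000 - 4) / 4 = 249; each active tape likewise gets
 * availMem / 4 bytes of preread space.  Inactive tapes get nothing, since
 * they contribute no run to this merge.
 */
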
2523 /*
2524  * mergepreread - load tuples from merge input tapes
2525  *
2526  * This routine exists to improve sequentiality of reads during a merge pass,
2527  * as explained in the header comments of this file.  Load tuples from each
2528  * active source tape until the tape's run is exhausted or it has used up
2529  * its fair share of available memory.  In any case, we guarantee that there
2530  * is at least one preread tuple available from each unexhausted input tape.
2531  *
2532  * We invoke this routine at the start of a merge pass for initial load,
2533  * and then whenever any tape's preread data runs out.  Note that we load
2534  * as much data as possible from all tapes, not just the one that ran out.
2535  * This is because logtape.c works best with a usage pattern that alternates
2536  * between reading a lot of data and writing a lot of data, so whenever we
2537  * are forced to read, we should fill working memory completely.
2538  *
2539  * In FINALMERGE state, we *don't* use this routine, but instead just preread
2540  * from the single tape that ran dry.  There's no read/write alternation in
2541  * that state and so no point in scanning through all the tapes to fix one.
2542  * (Moreover, there may be quite a lot of inactive tapes in that state, since
2543  * we might have had many fewer runs than tapes.  In a regular tape-to-tape
2544  * merge we can expect most of the tapes to be active.)
2545  */
2546 static void
2547 mergepreread(Tuplesortstate *state)
2548 {
2549         int                     srcTape;
2550
2551         for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2552                 mergeprereadone(state, srcTape);
2553 }
2554
2555 /*
2556  * mergeprereadone - load tuples from one merge input tape
2557  *
2558  * Read tuples from the specified tape until it has used up its free memory
2559  * or array slots; but ensure that we have at least one tuple, if any are
2560  * to be had.
2561  */
2562 static void
2563 mergeprereadone(Tuplesortstate *state, int srcTape)
2564 {
2565         unsigned int tuplen;
2566         SortTuple       stup;
2567         int                     tupIndex;
2568         int64           priorAvail,
2569                                 spaceUsed;
2570
2571         if (!state->mergeactive[srcTape])
2572                 return;                                 /* tape's run is already exhausted */
2573         priorAvail = state->availMem;
2574         state->availMem = state->mergeavailmem[srcTape];
2575         while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) ||
2576                    state->mergenext[srcTape] == 0)
2577         {
2578                 /* read next tuple, if any */
2579                 if ((tuplen = getlen(state, srcTape, true)) == 0)
2580                 {
2581                         state->mergeactive[srcTape] = false;
2582                         break;
2583                 }
2584                 READTUP(state, &stup, srcTape, tuplen);
2585                 /* find a free slot in memtuples[] for it */
2586                 tupIndex = state->mergefreelist;
2587                 if (tupIndex)
2588                         state->mergefreelist = state->memtuples[tupIndex].tupindex;
2589                 else
2590                 {
2591                         tupIndex = state->mergefirstfree++;
2592                         Assert(tupIndex < state->memtupsize);
2593                 }
2594                 state->mergeavailslots[srcTape]--;
2595                 /* store tuple, append to list for its tape */
2596                 stup.tupindex = 0;
2597                 state->memtuples[tupIndex] = stup;
2598                 if (state->mergelast[srcTape])
2599                         state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
2600                 else
2601                         state->mergenext[srcTape] = tupIndex;
2602                 state->mergelast[srcTape] = tupIndex;
2603         }
2604         /* update per-tape and global availmem counts */
2605         spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
2606         state->mergeavailmem[srcTape] = state->availMem;
2607         state->availMem = priorAvail - spaceUsed;
2608 }
2609
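/*
 * Sketch of the list structure mergeprereadone() maintains (slot indexes
 * are invented): the preread tuples for each tape form a singly-linked
 * list threaded through memtuples[].tupindex, e.g.
 *
 *              mergenext[t] = 7, memtuples[7].tupindex = 12,
 *              memtuples[12].tupindex = 0, mergelast[t] = 12
 *
 * mergelast[] lets us append in O(1), and mergefreelist chains recycled
 * slots the same way.  Index 0 can serve as the list terminator because
 * slot 0 always belongs to the merge heap area, never to a preread list.
 */
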
2610 /*
2611  * dumptuples - remove tuples from heap and write to tape
2612  *
2613  * This is used during initial-run building, but not during merging.
2614  *
2615  * When alltuples = false, dump only enough tuples to get under the
2616  * availMem limit (and leave at least one tuple in the heap in any case,
2617  * since puttuple assumes it always has a tuple to compare to).  We also
2618  * insist there be at least one free slot in the memtuples[] array.
2619  *
2620  * When alltuples = true, dump everything currently in memory.
2621  * (This case is only used at end of input data.)
2622  *
2623  * If we empty the heap, close out the current run and return (this should
2624  * only happen at end of input data).  If we see that the tuple run number
2625  * at the top of the heap has changed, start a new run.
2626  */
2627 static void
2628 dumptuples(Tuplesortstate *state, bool alltuples)
2629 {
2630         while (alltuples ||
2631                    (LACKMEM(state) && state->memtupcount > 1) ||
2632                    state->memtupcount >= state->memtupsize)
2633         {
2634                 /*
2635                  * Dump the heap's frontmost entry, and sift up to remove it from the
2636                  * heap.
2637                  */
2638                 Assert(state->memtupcount > 0);
2639                 WRITETUP(state, state->tp_tapenum[state->destTape],
2640                                  &state->memtuples[0]);
2641                 tuplesort_heap_siftup(state, true);
2642
2643                 /*
2644                  * If the heap is empty *or* top run number has changed, we've
2645                  * finished the current run.
2646                  */
2647                 if (state->memtupcount == 0 ||
2648                         state->currentRun != state->memtuples[0].tupindex)
2649                 {
2650                         markrunend(state, state->tp_tapenum[state->destTape]);
2651                         state->currentRun++;
2652                         state->tp_runs[state->destTape]++;
2653                         state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
2654
2655 #ifdef TRACE_SORT
2656                         if (trace_sort)
2657                                 elog(LOG, "finished writing%s run %d to tape %d: %s",
2658                                          (state->memtupcount == 0) ? " final" : "",
2659                                          state->currentRun, state->destTape,
2660                                          pg_rusage_show(&state->ru_start));
2661 #endif
2662
2663                         /*
2664                          * Done if heap is empty, else prepare for new run.
2665                          */
2666                         if (state->memtupcount == 0)
2667                                 break;
2668                         Assert(state->currentRun == state->memtuples[0].tupindex);
2669                         selectnewtape(state);
2670                 }
2671         }
2672 }
2673
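/*
 * Example of the run-boundary logic in dumptuples() (values invented):
 * while currentRun = 1, every tuple in the heap is tagged with run 1 or
 * run 2, and run-1 tuples sort first per HEAPCOMPARE.  The moment the
 * heap's top tuple carries run number 2, all run-1 tuples have been
 * written, so we mark the end of run 1 on the current tape, advance
 * currentRun to 2, and let selectnewtape() pick the destination for run 2.
 */
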
2674 /*
2675  * tuplesort_rescan             - rewind and replay the scan
2676  */
2677 void
2678 tuplesort_rescan(Tuplesortstate *state)
2679 {
2680         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2681
2682         Assert(state->randomAccess);
2683
2684         switch (state->status)
2685         {
2686                 case TSS_SORTEDINMEM:
2687                         state->current = 0;
2688                         state->eof_reached = false;
2689                         state->markpos_offset = 0;
2690                         state->markpos_eof = false;
2691                         break;
2692                 case TSS_SORTEDONTAPE:
2693                         LogicalTapeRewind(state->tapeset,
2694                                                           state->result_tape,
2695                                                           false);
2696                         state->eof_reached = false;
2697                         state->markpos_block = 0L;
2698                         state->markpos_offset = 0;
2699                         state->markpos_eof = false;
2700                         break;
2701                 default:
2702                         elog(ERROR, "invalid tuplesort state");
2703                         break;
2704         }
2705
2706         MemoryContextSwitchTo(oldcontext);
2707 }
2708
2709 /*
2710  * tuplesort_markpos    - saves current position in the merged sort file
2711  */
2712 void
2713 tuplesort_markpos(Tuplesortstate *state)
2714 {
2715         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2716
2717         Assert(state->randomAccess);
2718
2719         switch (state->status)
2720         {
2721                 case TSS_SORTEDINMEM:
2722                         state->markpos_offset = state->current;
2723                         state->markpos_eof = state->eof_reached;
2724                         break;
2725                 case TSS_SORTEDONTAPE:
2726                         LogicalTapeTell(state->tapeset,
2727                                                         state->result_tape,
2728                                                         &state->markpos_block,
2729                                                         &state->markpos_offset);
2730                         state->markpos_eof = state->eof_reached;
2731                         break;
2732                 default:
2733                         elog(ERROR, "invalid tuplesort state");
2734                         break;
2735         }
2736
2737         MemoryContextSwitchTo(oldcontext);
2738 }
2739
2740 /*
2741  * tuplesort_restorepos - restores current position in merged sort file to
2742  *                                                last saved position
2743  */
2744 void
2745 tuplesort_restorepos(Tuplesortstate *state)
2746 {
2747         MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2748
2749         Assert(state->randomAccess);
2750
2751         switch (state->status)
2752         {
2753                 case TSS_SORTEDINMEM:
2754                         state->current = state->markpos_offset;
2755                         state->eof_reached = state->markpos_eof;
2756                         break;
2757                 case TSS_SORTEDONTAPE:
2758                         if (!LogicalTapeSeek(state->tapeset,
2759                                                                  state->result_tape,
2760                                                                  state->markpos_block,
2761                                                                  state->markpos_offset))
2762                                 elog(ERROR, "tuplesort_restorepos failed");
2763                         state->eof_reached = state->markpos_eof;
2764                         break;
2765                 default:
2766                         elog(ERROR, "invalid tuplesort state");
2767                         break;
2768         }
2769
2770         MemoryContextSwitchTo(oldcontext);
2771 }
2772
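/*
 * Typical use of the mark/restore pair above (a sketch of the expected
 * call pattern, not a complete caller): the executor's merge join rescans
 * duplicate inner tuples by saving a position, reading ahead, and jumping
 * back, roughly
 *
 *              tuplesort_markpos(state);
 *              while (tuplesort_gettupleslot(state, true, slot)) ...
 *              tuplesort_restorepos(state);
 *
 * via ExecSortMarkPos() and ExecSortRestrPos() in nodeSort.c.
 */
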
2773 /*
2774  * tuplesort_get_stats - extract summary statistics
2775  *
2776  * This can be called after tuplesort_performsort() finishes to obtain
2777  * printable summary information about how the sort was performed.
2778  * spaceUsed is measured in kilobytes.
2779  */
2780 void
2781 tuplesort_get_stats(Tuplesortstate *state,
2782                                         const char **sortMethod,
2783                                         const char **spaceType,
2784                                         long *spaceUsed)
2785 {
2786         /*
2787          * Note: it might seem we should provide both memory and disk usage for a
2788          * disk-based sort.  However, the current code doesn't track memory space
2789          * accurately once we have begun to return tuples to the caller (since we
2790          * don't account for pfree's the caller is expected to do), so we cannot
2791          * rely on availMem in a disk sort.  This does not seem worth the overhead
2792          * to fix.  Is it worth creating an API for the memory context code to
2793          * tell us how much is actually used in sortcontext?
2794          */
2795         if (state->tapeset)
2796         {
2797                 *spaceType = "Disk";
2798                 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
2799         }
2800         else
2801         {
2802                 *spaceType = "Memory";
2803                 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
2804         }
2805
2806         switch (state->status)
2807         {
2808                 case TSS_SORTEDINMEM:
2809                         if (state->boundUsed)
2810                                 *sortMethod = "top-N heapsort";
2811                         else
2812                                 *sortMethod = "quicksort";
2813                         break;
2814                 case TSS_SORTEDONTAPE:
2815                         *sortMethod = "external sort";
2816                         break;
2817                 case TSS_FINALMERGE:
2818                         *sortMethod = "external merge";
2819                         break;
2820                 default:
2821                         *sortMethod = "still in progress";
2822                         break;
2823         }
2824 }
2825
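/*
 * The strings returned above feed EXPLAIN ANALYZE's sort instrumentation,
 * which prints lines roughly of the form
 *
 *              Sort Method: quicksort  Memory: 25kB
 *              Sort Method: external merge  Disk: 2072kB
 *
 * (the exact figures depend, of course, on the data and on work_mem).
 */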
2826
2827 /*
2828  * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
2829  *
2830  * Compare two SortTuples.  If checkIndex is true, use the tuple index
2831  * as the front of the sort key; otherwise, ignore the tuple index.
2832  */
2833
2834 #define HEAPCOMPARE(tup1,tup2) \
2835         (checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \
2836          ((tup1)->tupindex) - ((tup2)->tupindex) : \
2837          COMPARETUP(state, tup1, tup2))
2838
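/*
 * Example: with checkIndex true, a tuple tagged with run number 1 sorts
 * before any tuple tagged with run number 2 no matter what the keys say,
 * which is what keeps runs from mixing during run building.  During
 * merging, tupindex instead holds the source tape number, and the heap is
 * built with checkIndex false, so only the keys matter there.
 */
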
2839 /*
2840  * Convert the existing unordered array of SortTuples to a bounded heap,
2841  * discarding all but the smallest "state->bound" tuples.
2842  *
2843  * When working with a bounded heap, we want to keep the largest entry
2844  * at the root (array entry zero), instead of the smallest as in the normal
2845  * sort case.  This allows us to discard the largest entry cheaply.
2846  * Therefore, we temporarily reverse the sort direction.
2847  *
2848  * We assume that all entries in a bounded heap will always have tupindex
2849  * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
2850  * the direction of comparison for tupindexes.
2851  */
2852 static void
2853 make_bounded_heap(Tuplesortstate *state)
2854 {
2855         int                     tupcount = state->memtupcount;
2856         int                     i;
2857
2858         Assert(state->status == TSS_INITIAL);
2859         Assert(state->bounded);
2860         Assert(tupcount >= state->bound);
2861
2862         /* Reverse sort direction so largest entry will be at root */
2863         reversedirection(state);
2864
2865         state->memtupcount = 0;         /* make the heap empty */
2866         for (i = 0; i < tupcount; i++)
2867         {
2868                 if (state->memtupcount >= state->bound &&
2869                   COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
2870                 {
2871                         /* New tuple would just get thrown out, so skip it */
2872                         free_sort_tuple(state, &state->memtuples[i]);
2873                         CHECK_FOR_INTERRUPTS();
2874                 }
2875                 else
2876                 {
2877                         /* Insert next tuple into heap */
2878                         /* Must copy source tuple to avoid possible overwrite */
2879                         SortTuple       stup = state->memtuples[i];
2880
2881                         tuplesort_heap_insert(state, &stup, 0, false);
2882
2883                         /* If heap too full, discard largest entry */
2884                         if (state->memtupcount > state->bound)
2885                         {
2886                                 free_sort_tuple(state, &state->memtuples[0]);
2887                                 tuplesort_heap_siftup(state, false);
2888                         }
2889                 }
2890         }
2891
2892         Assert(state->memtupcount == state->bound);
2893         state->status = TSS_BOUNDED;
2894 }
2895
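/*
 * Worked example of make_bounded_heap() with bound = 3 and (invented)
 * input keys 5, 2, 8, 1, 9: after inserting 5, 2 and 8 the heap is full
 * with 8 at the root (largest first, because the direction is reversed).
 * Key 1 beats the root, so it is inserted and 8 is discarded; key 9 loses
 * to the new root 5 and is thrown away immediately.  The surviving bound
 * tuples are {1, 2, 5}, exactly the three smallest.
 */
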
2896 /*
2897  * Convert the bounded heap to a properly-sorted array
2898  */
2899 static void
2900 sort_bounded_heap(Tuplesortstate *state)
2901 {
2902         int                     tupcount = state->memtupcount;
2903
2904         Assert(state->status == TSS_BOUNDED);
2905         Assert(state->bounded);
2906         Assert(tupcount == state->bound);
2907
2908         /*
2909          * We can unheapify in place because each sift-up will remove the largest
2910          * entry, which we can promptly store in the newly freed slot at the end.
2911          * Once we're down to a single-entry heap, we're done.
2912          */
2913         while (state->memtupcount > 1)
2914         {
2915                 SortTuple       stup = state->memtuples[0];
2916
2917                 /* this sifts-up the next-largest entry and decreases memtupcount */
2918                 tuplesort_heap_siftup(state, false);
2919                 state->memtuples[state->memtupcount] = stup;
2920         }
2921         state->memtupcount = tupcount;
2922
2923         /*
2924          * Reverse sort direction back to the original state.  This is not
2925          * actually necessary but seems like a good idea for tidiness.
2926          */
2927         reversedirection(state);
2928
2929         state->status = TSS_SORTEDINMEM;
2930         state->boundUsed = true;
2931 }
2932
2933 /*
2934  * Insert a new tuple into an empty or existing heap, maintaining the
2935  * heap invariant.  Caller is responsible for ensuring there's room.
2936  *
2937  * Note: we assume *tuple is a temporary variable that can be scribbled on.
2938  * For some callers, tuple actually points to a memtuples[] entry above the
2939  * end of the heap.  This is safe as long as it's not immediately adjacent
2940  * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
2941  * is, it might get overwritten before being moved into the heap!
2942  */
2943 static void
2944 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
2945                                           int tupleindex, bool checkIndex)
2946 {
2947         SortTuple  *memtuples;
2948         int                     j;
2949
2950         /*
2951          * Save the tupleindex --- see notes above about writing on *tuple. It's a
2952          * historical artifact that tupleindex is passed as a separate argument
2953          * and not in *tuple, but it's notationally convenient so let's leave it
2954          * that way.
2955          */
2956         tuple->tupindex = tupleindex;
2957
2958         memtuples = state->memtuples;
2959         Assert(state->memtupcount < state->memtupsize);
2960
2961         CHECK_FOR_INTERRUPTS();
2962
2963         /*
2964          * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
2965          * using 1-based array indexes, not 0-based.
2966          */
2967         j = state->memtupcount++;
2968         while (j > 0)
2969         {
2970                 int                     i = (j - 1) >> 1;
2971
2972                 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
2973                         break;
2974                 memtuples[j] = memtuples[i];
2975                 j = i;
2976         }
2977         memtuples[j] = *tuple;
2978 }
2979
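/*
 * Trace of tuplesort_heap_insert() on the (invented) min-heap [1, 5, 4]
 * with new key 3 and checkIndex false: the hole starts at j = 3, whose
 * parent is memtuples[1] = 5; since 3 < 5, the 5 moves down to slot 3 and
 * the hole rises to j = 1.  The next parent is memtuples[0] = 1, and
 * 3 >= 1 stops the loop, so 3 lands in slot 1, giving [1, 3, 4, 5].
 */
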
2980 /*
2981  * The tuple at state->memtuples[0] has been removed from the heap.
2982  * Decrement memtupcount, and sift up to maintain the heap invariant.
2983  */
2984 static void
2985 tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
2986 {
2987         SortTuple  *memtuples = state->memtuples;
2988         SortTuple  *tuple;
2989         int                     i,
2990                                 n;
2991
2992         if (--state->memtupcount <= 0)
2993                 return;
2994
2995         CHECK_FOR_INTERRUPTS();
2996
2997         n = state->memtupcount;
2998         tuple = &memtuples[n];          /* tuple that must be reinserted */
2999         i = 0;                                          /* i is where the "hole" is */
3000         for (;;)
3001         {
3002                 int                     j = 2 * i + 1;
3003
3004                 if (j >= n)
3005                         break;
3006                 if (j + 1 < n &&
3007                         HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
3008                         j++;
3009                 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
3010                         break;
3011                 memtuples[i] = memtuples[j];
3012                 i = j;
3013         }
3014         memtuples[i] = *tuple;
3015 }
3016
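/*
 * Trace of tuplesort_heap_siftup() on the (invented) heap [1, 3, 4, 5]
 * after its root 1 has been consumed: memtupcount drops to 3 and the
 * displaced entry 5 must be reinserted.  The hole at slot 0 looks at
 * children 3 and 4, picks the smaller (3), and since 5 > 3 the 3 moves up
 * and the hole descends to slot 1, which has no children left; 5 lands
 * there, giving the valid heap [3, 5, 4].
 */
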
3017 /*
3018  * Function to reverse the sort direction from its current state
3019  *
3020  * It is not safe to call this when performing hash tuplesorts (no sortKeys).
3021  */
3022 static void
3023 reversedirection(Tuplesortstate *state)
3024 {
3025         SortSupport sortKey = state->sortKeys;
3026         int                     nkey;
3027
3028         for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
3029         {
3030                 sortKey->ssup_reverse = !sortKey->ssup_reverse;
3031                 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
3032         }
3033 }
3034
3035
3036 /*
3037  * Tape interface routines
3038  */
3039
3040 static unsigned int
3041 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
3042 {
3043         unsigned int len;
3044
3045         if (LogicalTapeRead(state->tapeset, tapenum,
3046                                                 &len, sizeof(len)) != sizeof(len))
3047                 elog(ERROR, "unexpected end of tape");
3048         if (len == 0 && !eofOK)
3049                 elog(ERROR, "unexpected end of data");
3050         return len;
3051 }
3052
3053 static void
3054 markrunend(Tuplesortstate *state, int tapenum)
3055 {
3056         unsigned int len = 0;
3057
3058         LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
3059 }
3060
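/*
 * Resulting on-tape layout of one run, as produced by the writetup
 * routines below together with markrunend() (a schematic, not literal
 * bytes):
 *
 *              [len1][tuple1 body][len1?] [len2][tuple2 body][len2?] ... [0]
 *
 * Each length word counts itself plus the body; the bracketed trailing
 * copy is written only when randomAccess is set, so that a backward scan
 * can find the start of the preceding tuple.  A zero length word marks the
 * end of the run, which getlen() reports when eofOK allows it.
 */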
3061
3062 /*
3063  * Routines specialized for HeapTuple (actually MinimalTuple) case
3064  */
3065
3066 static int
3067 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3068 {
3069         SortSupport sortKey = state->sortKeys;
3070         HeapTupleData ltup;
3071         HeapTupleData rtup;
3072         TupleDesc       tupDesc;
3073         int                     nkey;
3074         int32           compare;
3075         AttrNumber      attno;
3076         Datum           datum1,
3077                                 datum2;
3078         bool            isnull1,
3079                                 isnull2;
3080
3081
3082         /* Compare the leading sort key */
3083         compare = ApplySortComparator(a->datum1, a->isnull1,
3084                                                                   b->datum1, b->isnull1,
3085                                                                   sortKey);
3086         if (compare != 0)
3087                 return compare;
3088
3089         /* Compare additional sort keys */
3090         ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3091         ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
3092         rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3093         rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
3094         tupDesc = state->tupDesc;
3095
3096         if (sortKey->abbrev_converter)
3097         {
3098                 attno = sortKey->ssup_attno;
3099
3100                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3101                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3102
3103                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3104                                                                                                 datum2, isnull2,
3105                                                                                                 sortKey);
3106                 if (compare != 0)
3107                         return compare;
3108         }
3109
3110         sortKey++;
3111         for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
3112         {
3113                 attno = sortKey->ssup_attno;
3114
3115                 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3116                 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3117
3118                 compare = ApplySortComparator(datum1, isnull1,
3119                                                                           datum2, isnull2,
3120                                                                           sortKey);
3121                 if (compare != 0)
3122                         return compare;
3123         }
3124
3125         return 0;
3126 }
3127
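/*
 * Note on the abbreviated-key path above: when an abbrev_converter is in
 * use, datum1 holds abbreviated keys, so a compare of zero there is only a
 * tie on the abbreviation.  Two long strings sharing a prefix, for
 * example, may abbreviate identically yet differ, which is why we must
 * re-compare with ApplySortAbbrevFullComparator() on the authoritative
 * attribute values before moving on to the remaining sort keys.
 */
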
3128 static void
3129 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
3130 {
3131         /*
3132          * We expect the passed "tup" to be a TupleTableSlot, and form a
3133          * MinimalTuple using the exported interface for that.
3134          */
3135         TupleTableSlot *slot = (TupleTableSlot *) tup;
3136         Datum           original;
3137         MinimalTuple tuple;
3138         HeapTupleData htup;
3139
3140         /* copy the tuple into sort storage */
3141         tuple = ExecCopySlotMinimalTuple(slot);
3142         stup->tuple = (void *) tuple;
3143         USEMEM(state, GetMemoryChunkSpace(tuple));
3144         /* set up first-column key value */
3145         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3146         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3147         original = heap_getattr(&htup,
3148                                                         state->sortKeys[0].ssup_attno,
3149                                                         state->tupDesc,
3150                                                         &stup->isnull1);
3151
3152         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3153         {
3154                 /*
3155                  * Store ordinary Datum representation, or NULL value.  If there is a
3156                  * converter, it won't expect NULL values, and the cost model is not
3157                  * required to account for NULLs, so in that case we avoid calling the
3158                  * converter and just set datum1 to the "void" representation (to be
3159                  * consistent).
3160                  */
3161                 stup->datum1 = original;
3162         }
3163         else if (!consider_abort_common(state))
3164         {
3165                 /* Store abbreviated key representation */
3166                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3167                                                                                                                  state->sortKeys);
3168         }
3169         else
3170         {
3171                 /* Abort abbreviation */
3172                 int                     i;
3173
3174                 stup->datum1 = original;
3175
3176                 /*
3177                  * Set state to be consistent with never trying abbreviation.
3178                  *
3179                  * Alter datum1 representation in already-copied tuples, so as to
3180                  * ensure a consistent representation (current tuple was just
3181                  * handled).  It does not matter if some dumped tuples are already
3182                  * sorted on tape, since serialized tuples lack abbreviated keys
3183                  * (TSS_BUILDRUNS state prevents control reaching here in any
3184                  * case).
3185                  */
3186                 for (i = 0; i < state->memtupcount; i++)
3187                 {
3188                         SortTuple  *mtup = &state->memtuples[i];
3189
3190                         htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
3191                                 MINIMAL_TUPLE_OFFSET;
3192                         htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
3193                                                                                          MINIMAL_TUPLE_OFFSET);
3194
3195                         mtup->datum1 = heap_getattr(&htup,
3196                                                                                 state->sortKeys[0].ssup_attno,
3197                                                                                 state->tupDesc,
3198                                                                                 &mtup->isnull1);
3199                 }
3200         }
3201 }
3202
3203 static void
3204 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
3205 {
3206         MinimalTuple tuple = (MinimalTuple) stup->tuple;
3207
3208         /* the part of the MinimalTuple we'll write: */
3209         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3210         unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
3211
3212         /* total on-disk footprint: */
3213         unsigned int tuplen = tupbodylen + sizeof(int);
3214
3215         LogicalTapeWrite(state->tapeset, tapenum,
3216                                          (void *) &tuplen, sizeof(tuplen));
3217         LogicalTapeWrite(state->tapeset, tapenum,
3218                                          (void *) tupbody, tupbodylen);
3219         if (state->randomAccess)        /* need trailing length word? */
3220                 LogicalTapeWrite(state->tapeset, tapenum,
3221                                                  (void *) &tuplen, sizeof(tuplen));
3222
3223         FREEMEM(state, GetMemoryChunkSpace(tuple));
3224         heap_free_minimal_tuple(tuple);
3225 }
3226
3227 static void
3228 readtup_heap(Tuplesortstate *state, SortTuple *stup,
3229                          int tapenum, unsigned int len)
3230 {
3231         unsigned int tupbodylen = len - sizeof(int);
3232         unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
3233         MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
3234         char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3235         HeapTupleData htup;
3236
3237         USEMEM(state, GetMemoryChunkSpace(tuple));
3238         /* read in the tuple proper */
3239         tuple->t_len = tuplen;
3240         LogicalTapeReadExact(state->tapeset, tapenum,
3241                                                  tupbody, tupbodylen);
3242         if (state->randomAccess)        /* need trailing length word? */
3243                 LogicalTapeReadExact(state->tapeset, tapenum,
3244                                                          &tuplen, sizeof(tuplen));
3245         stup->tuple = (void *) tuple;
3246         /* set up first-column key value */
3247         htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3248         htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3249         stup->datum1 = heap_getattr(&htup,
3250                                                                 state->sortKeys[0].ssup_attno,
3251                                                                 state->tupDesc,
3252                                                                 &stup->isnull1);
3253 }
3254
3255 /*
3256  * Routines specialized for the CLUSTER case (HeapTuple data, with
3257  * comparisons per a btree index definition)
3258  */
3259
3260 static int
3261 comparetup_cluster(const SortTuple *a, const SortTuple *b,
3262                                    Tuplesortstate *state)
3263 {
3264         SortSupport sortKey = state->sortKeys;
3265         HeapTuple       ltup;
3266         HeapTuple       rtup;
3267         TupleDesc       tupDesc;
3268         int                     nkey;
3269         int32           compare;
3270         Datum           datum1,
3271                                 datum2;
3272         bool            isnull1,
3273                                 isnull2;
3274         AttrNumber      leading = state->indexInfo->ii_KeyAttrNumbers[0];
3275
3276         /* Be prepared to compare additional sort keys */
3277         ltup = (HeapTuple) a->tuple;
3278         rtup = (HeapTuple) b->tuple;
3279         tupDesc = state->tupDesc;
3280
3281         /* Compare the leading sort key, if it's simple */
3282         if (leading != 0)
3283         {
3284                 compare = ApplySortComparator(a->datum1, a->isnull1,
3285                                                                           b->datum1, b->isnull1,
3286                                                                           sortKey);
3287                 if (compare != 0)
3288                         return compare;
3289
3290                 if (sortKey->abbrev_converter)
3291                 {
3292                         datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
3293                         datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
3294
3295                         compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3296                                                                                                         datum2, isnull2,
3297                                                                                                         sortKey);
3298                 }
3299                 if (compare != 0 || state->nKeys == 1)
3300                         return compare;
3301                 /* Compare additional columns the hard way */
3302                 sortKey++;
3303                 nkey = 1;
3304         }
3305         else
3306         {
3307                 /* Must compare all keys the hard way */
3308                 nkey = 0;
3309         }
3310
3311         if (state->indexInfo->ii_Expressions == NULL)
3312         {
3313                 /* If not expression index, just compare the proper heap attrs */
3314
3315                 for (; nkey < state->nKeys; nkey++, sortKey++)
3316                 {
3317                         AttrNumber      attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
3318
3319                         datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
3320                         datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
3321
3322                         compare = ApplySortComparator(datum1, isnull1,
3323                                                                                   datum2, isnull2,
3324                                                                                   sortKey);
3325                         if (compare != 0)
3326                                 return compare;
3327                 }
3328         }
3329         else
3330         {
3331                 /*
3332                  * In the expression index case, compute the whole index tuple and
3333                  * then compare values.  It would perhaps be faster to compute only as
3334                  * many columns as we need to compare, but that would require
3335                  * duplicating all the logic in FormIndexDatum.
3336                  */
3337                 Datum           l_index_values[INDEX_MAX_KEYS];
3338                 bool            l_index_isnull[INDEX_MAX_KEYS];
3339                 Datum           r_index_values[INDEX_MAX_KEYS];
3340                 bool            r_index_isnull[INDEX_MAX_KEYS];
3341                 TupleTableSlot *ecxt_scantuple;
3342
3343                 /* Reset context each time to prevent memory leakage */
3344                 ResetPerTupleExprContext(state->estate);
3345
3346                 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
3347
3348                 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
3349                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3350                                            l_index_values, l_index_isnull);
3351
3352                 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
3353                 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3354                                            r_index_values, r_index_isnull);
3355
3356                 for (; nkey < state->nKeys; nkey++, sortKey++)
3357                 {
3358                         compare = ApplySortComparator(l_index_values[nkey],
3359                                                                                   l_index_isnull[nkey],
3360                                                                                   r_index_values[nkey],
3361                                                                                   r_index_isnull[nkey],
3362                                                                                   sortKey);
3363                         if (compare != 0)
3364                                 return compare;
3365                 }
3366         }
3367
3368         return 0;
3369 }
3370
3371 static void
3372 copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
3373 {
3374         HeapTuple       tuple = (HeapTuple) tup;
3375         Datum           original;
3376
3377         /* copy the tuple into sort storage */
3378         tuple = heap_copytuple(tuple);
3379         stup->tuple = (void *) tuple;
3380         USEMEM(state, GetMemoryChunkSpace(tuple));
3381
3382         /*
3383          * set up first-column key value, and potentially abbreviate, if it's a
3384          * simple column
3385          */
3386         if (state->indexInfo->ii_KeyAttrNumbers[0] == 0)
3387                 return;
3388
3389         original = heap_getattr(tuple,
3390                                                         state->indexInfo->ii_KeyAttrNumbers[0],
3391                                                         state->tupDesc,
3392                                                         &stup->isnull1);
3393
3394         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3395         {
3396                 /*
3397                  * Store ordinary Datum representation, or NULL value.  If there is a
3398                  * converter, it won't expect NULL values, and the cost model is not
3399                  * required to account for NULLs, so in that case we avoid calling the
3400                  * converter and just set datum1 to the "void" representation (to be
3401                  * consistent).
3402                  */
3403                 stup->datum1 = original;
3404         }
3405         else if (!consider_abort_common(state))
3406         {
3407                 /* Store abbreviated key representation */
3408                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3409                                                                                                                  state->sortKeys);
3410         }
3411         else
3412         {
3413                 /* Abort abbreviation */
3414                 int                     i;
3415
3416                 stup->datum1 = original;
3417
3418                 /*
3419                  * Set state to be consistent with never trying abbreviation.
3420                  *
3421                  * Alter datum1 representation in already-copied tuples, so as to
3422                  * ensure a consistent representation (current tuple was just
3423                  * handled).  It does not matter if some dumped tuples are already
3424                  * sorted on tape, since serialized tuples lack abbreviated keys
3425                  * (TSS_BUILDRUNS state prevents control reaching here in any
3426                  * case).
3427                  */
3428                 for (i = 0; i < state->memtupcount; i++)
3429                 {
3430                         SortTuple  *mtup = &state->memtuples[i];
3431
3432                         tuple = (HeapTuple) mtup->tuple;
3433                         mtup->datum1 = heap_getattr(tuple,
3434                                                                           state->indexInfo->ii_KeyAttrNumbers[0],
3435                                                                                 state->tupDesc,
3436                                                                                 &mtup->isnull1);
3437                 }
3438         }
3439 }
3440
3441 static void
3442 writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
3443 {
3444         HeapTuple       tuple = (HeapTuple) stup->tuple;
3445         unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
3446
3447         /* We need to store t_self, but not other fields of HeapTupleData */
3448         LogicalTapeWrite(state->tapeset, tapenum,
3449                                          &tuplen, sizeof(tuplen));
3450         LogicalTapeWrite(state->tapeset, tapenum,
3451                                          &tuple->t_self, sizeof(ItemPointerData));
3452         LogicalTapeWrite(state->tapeset, tapenum,
3453                                          tuple->t_data, tuple->t_len);
3454         if (state->randomAccess)        /* need trailing length word? */
3455                 LogicalTapeWrite(state->tapeset, tapenum,
3456                                                  &tuplen, sizeof(tuplen));
3457
3458         FREEMEM(state, GetMemoryChunkSpace(tuple));
3459         heap_freetuple(tuple);
3460 }
3461
3462 static void
3463 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
3464                                 int tapenum, unsigned int tuplen)
3465 {
3466         unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
3467         HeapTuple       tuple = (HeapTuple) palloc(t_len + HEAPTUPLESIZE);
3468
3469         USEMEM(state, GetMemoryChunkSpace(tuple));
3470         /* Reconstruct the HeapTupleData header */
3471         tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
3472         tuple->t_len = t_len;
3473         LogicalTapeReadExact(state->tapeset, tapenum,
3474                                                  &tuple->t_self, sizeof(ItemPointerData));
3475         /* We don't currently bother to reconstruct t_tableOid */
3476         tuple->t_tableOid = InvalidOid;
3477         /* Read in the tuple body */
3478         LogicalTapeReadExact(state->tapeset, tapenum,
3479                                                  tuple->t_data, tuple->t_len);
3480         if (state->randomAccess)        /* need trailing length word? */
3481                 LogicalTapeReadExact(state->tapeset, tapenum,
3482                                                          &tuplen, sizeof(tuplen));
3483         stup->tuple = (void *) tuple;
3484         /* set up first-column key value, if it's a simple column */
3485         if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
3486                 stup->datum1 = heap_getattr(tuple,
3487                                                                         state->indexInfo->ii_KeyAttrNumbers[0],
3488                                                                         state->tupDesc,
3489                                                                         &stup->isnull1);
3490 }
3491
3492
3493 /*
3494  * Routines specialized for IndexTuple case
3495  *
3496  * The btree and hash cases require separate comparison functions, but the
3497  * IndexTuple representation is the same so the copy/write/read support
3498  * functions can be shared.
3499  */
3500
3501 static int
3502 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
3503                                            Tuplesortstate *state)
3504 {
3505         /*
3506          * This is similar to comparetup_heap(), but expects index tuples.  There
3507          * is also special handling for enforcing uniqueness, and special
3508          * treatment for equal keys at the end.
3509          */
3510         SortSupport sortKey = state->sortKeys;
3511         IndexTuple      tuple1;
3512         IndexTuple      tuple2;
3513         int                     keysz;
3514         TupleDesc       tupDes;
3515         bool            equal_hasnull = false;
3516         int                     nkey;
3517         int32           compare;
3518         Datum           datum1,
3519                                 datum2;
3520         bool            isnull1,
3521                                 isnull2;
3522
3523
3524         /* Compare the leading sort key */
3525         compare = ApplySortComparator(a->datum1, a->isnull1,
3526                                                                   b->datum1, b->isnull1,
3527                                                                   sortKey);
3528         if (compare != 0)
3529                 return compare;
3530
3531         /* Compare additional sort keys */
3532         tuple1 = (IndexTuple) a->tuple;
3533         tuple2 = (IndexTuple) b->tuple;
3534         keysz = state->nKeys;
3535         tupDes = RelationGetDescr(state->indexRel);
3536
3537         if (sortKey->abbrev_converter)
3538         {
3539                 datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
3540                 datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
3541
3542                 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3543                                                                                                 datum2, isnull2,
3544                                                                                                 sortKey);
3545                 if (compare != 0)
3546                         return compare;
3547         }
3548
3549         /* they are equal, so we only need to examine one null flag */
3550         if (a->isnull1)
3551                 equal_hasnull = true;
3552
3553         sortKey++;
3554         for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
3555         {
3556                 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
3557                 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
3558
3559                 compare = ApplySortComparator(datum1, isnull1,
3560                                                                           datum2, isnull2,
3561                                                                           sortKey);
3562                 if (compare != 0)
3563                         return compare;         /* done when we find unequal attributes */
3564
3565                 /* they are equal, so we only need to examine one null flag */
3566                 if (isnull1)
3567                         equal_hasnull = true;
3568         }
3569
3570         /*
3571          * If btree has asked us to enforce uniqueness, complain if two equal
3572          * tuples are detected (unless there was at least one NULL field).
3573          *
3574          * It is sufficient to make the test here, because if two tuples are equal
3575          * they *must* get compared at some stage of the sort --- otherwise the
3576          * sort algorithm wouldn't have checked whether one must appear before the
3577          * other.
3578          */
3579         if (state->enforceUnique && !equal_hasnull)
3580         {
3581                 Datum           values[INDEX_MAX_KEYS];
3582                 bool            isnull[INDEX_MAX_KEYS];
3583                 char       *key_desc;
3584
3585                 /*
3586                  * Some rather brain-dead implementations of qsort (such as the one in
3587                  * QNX 4) will sometimes call the comparison routine to compare a
3588                  * value to itself, but we always use our own implementation, which
3589                  * does not.
3590                  */
3591                 Assert(tuple1 != tuple2);
3592
3593                 index_deform_tuple(tuple1, tupDes, values, isnull);
3594
3595                 key_desc = BuildIndexValueDescription(state->indexRel, values, isnull);
3596
3597                 ereport(ERROR,
3598                                 (errcode(ERRCODE_UNIQUE_VIOLATION),
3599                                  errmsg("could not create unique index \"%s\"",
3600                                                 RelationGetRelationName(state->indexRel)),
3601                                  key_desc ? errdetail("Key %s is duplicated.", key_desc) :
3602                                  errdetail("Duplicate keys exist."),
3603                                  errtableconstraint(state->heapRel,
3604                                                                  RelationGetRelationName(state->indexRel))));
3605         }
3606
3607         /*
3608          * If key values are equal, we sort on ItemPointer.  This does not affect
3609          * validity of the finished index, but it may be useful to have index
3610          * scans in physical order.
3611          */
3612         {
3613                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3614                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3615
3616                 if (blk1 != blk2)
3617                         return (blk1 < blk2) ? -1 : 1;
3618         }
3619         {
3620                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3621                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3622
3623                 if (pos1 != pos2)
3624                         return (pos1 < pos2) ? -1 : 1;
3625         }
3626
3627         return 0;
3628 }
3629
3630 static int
3631 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
3632                                           Tuplesortstate *state)
3633 {
3634         uint32          hash1;
3635         uint32          hash2;
3636         IndexTuple      tuple1;
3637         IndexTuple      tuple2;
3638
3639         /*
3640          * Fetch hash keys and mask off bits we don't want to sort by. We know
3641          * that the first column of the index tuple is the hash key.
3642          */
3643         Assert(!a->isnull1);
3644         hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
3645         Assert(!b->isnull1);
3646         hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
3647
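        /*
         * Worked example (values illustrative only): with hash_mask = 3, raw
         * hash codes 0x0000000A and 0x00000006 both mask down to 2
         * (1010 & 0011 == 0010, and 0110 & 0011 == 0010), so they compare as
         * equal here and fall through to the ItemPointer tiebreak below.
         */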
3648         if (hash1 > hash2)
3649                 return 1;
3650         else if (hash1 < hash2)
3651                 return -1;
3652
3653         /*
3654          * If hash values are equal, we sort on ItemPointer.  This does not affect
3655          * validity of the finished index, but it may be useful to have index
3656          * scans in physical order.
3657          */
3658         tuple1 = (IndexTuple) a->tuple;
3659         tuple2 = (IndexTuple) b->tuple;
3660
3661         {
3662                 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
3663                 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
3664
3665                 if (blk1 != blk2)
3666                         return (blk1 < blk2) ? -1 : 1;
3667         }
3668         {
3669                 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
3670                 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
3671
3672                 if (pos1 != pos2)
3673                         return (pos1 < pos2) ? -1 : 1;
3674         }
3675
3676         return 0;
3677 }
3678
3679 static void
3680 copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
3681 {
3682         IndexTuple      tuple = (IndexTuple) tup;
3683         unsigned int tuplen = IndexTupleSize(tuple);
3684         IndexTuple      newtuple;
3685         Datum           original;
3686
3687         /* copy the tuple into sort storage */
3688         newtuple = (IndexTuple) palloc(tuplen);
3689         memcpy(newtuple, tuple, tuplen);
3690         USEMEM(state, GetMemoryChunkSpace(newtuple));
3691         stup->tuple = (void *) newtuple;
3692         /* set up first-column key value */
3693         original = index_getattr(newtuple,
3694                                                          1,
3695                                                          RelationGetDescr(state->indexRel),
3696                                                          &stup->isnull1);
3697
3698         if (!state->sortKeys->abbrev_converter || stup->isnull1)
3699         {
3700                 /*
3701                  * Store the ordinary Datum representation, or the NULL value.  If
3702                  * there is a converter it won't expect NULL values, and the cost
3703                  * model is not required to account for NULLs, so in that case we
3704                  * avoid calling the converter and just set datum1 to the "void"
3705                  * representation (to be consistent).
3706                  */
3707                 stup->datum1 = original;
3708         }
3709         else if (!consider_abort_common(state))
3710         {
3711                 /* Store abbreviated key representation */
3712                 stup->datum1 = state->sortKeys->abbrev_converter(original,
3713                                                                                                                  state->sortKeys);
3714         }
3715         else
3716         {
3717                 /* Abort abbreviation */
3718                 int                     i;
3719
3720                 stup->datum1 = original;
3721
3722                 /*
3723                  * Set state to be consistent with never trying abbreviation.
3724                  *
3725                  * Alter datum1 representation in already-copied tuples, so as to
3726                  * ensure a consistent representation (current tuple was just
3727                  * handled).  It does not matter if some dumped tuples are already
3728                  * sorted on tape, since serialized tuples lack abbreviated keys
3729                  * (TSS_BUILDRUNS state prevents control reaching here in any
3730                  * case).
3731                  */
3732                 for (i = 0; i < state->memtupcount; i++)
3733                 {
3734                         SortTuple  *mtup = &state->memtuples[i];
3735
3736                         tuple = (IndexTuple) mtup->tuple;
3737                         mtup->datum1 = index_getattr(tuple,
3738                                                                                  1,
3739                                                                                  RelationGetDescr(state->indexRel),
3740                                                                                  &mtup->isnull1);
3741                 }
3742         }
3743 }
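
/*
 * Illustrative note on the abort path above: once consider_abort_common()
 * decides abbreviation is not paying for itself, every buffered tuple's
 * datum1 reverts to the ordinary first-key Datum, exactly as if
 * abbrev_converter had been NULL from the start.  A concrete (made-up)
 * case: text keys 'aardvark01' ... 'aardvark99' all abbreviate to the same
 * 8-byte prefix, so every comparison would fall through to the full
 * comparator anyway, and the abbreviation work is pure overhead.
 */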
3744
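/*
 * On-tape format written below (a sketch inferred from this code, not a
 * separate spec):
 *
 *     [unsigned int tuplen][IndexTuple body][unsigned int tuplen]
 *
 * where tuplen counts the tuple body plus one length word, and the
 * trailing copy is written only when randomAccess requires the tape to be
 * read backwards.  readtup_index() below recovers the body size as
 * len - sizeof(unsigned int).
 */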
3745 static void
3746 writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
3747 {
3748         IndexTuple      tuple = (IndexTuple) stup->tuple;
3749         unsigned int tuplen;
3750
3751         tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
3752         LogicalTapeWrite(state->tapeset, tapenum,
3753                                          (void *) &tuplen, sizeof(tuplen));
3754         LogicalTapeWrite(state->tapeset, tapenum,
3755                                          (void *) tuple, IndexTupleSize(tuple));
3756         if (state->randomAccess)        /* need trailing length word? */
3757                 LogicalTapeWrite(state->tapeset, tapenum,
3758                                                  (void *) &tuplen, sizeof(tuplen));
3759
3760         FREEMEM(state, GetMemoryChunkSpace(tuple));
3761         pfree(tuple);
3762 }
3763
3764 static void
3765 readtup_index(Tuplesortstate *state, SortTuple *stup,
3766                           int tapenum, unsigned int len)
3767 {
3768         unsigned int tuplen = len - sizeof(unsigned int);
3769         IndexTuple      tuple = (IndexTuple) palloc(tuplen);
3770
3771         USEMEM(state, GetMemoryChunkSpace(tuple));
3772         LogicalTapeReadExact(state->tapeset, tapenum,
3773                                                  tuple, tuplen);
3774         if (state->randomAccess)        /* need trailing length word? */
3775                 LogicalTapeReadExact(state->tapeset, tapenum,
3776                                                          &tuplen, sizeof(tuplen));
3777         stup->tuple = (void *) tuple;
3778         /* set up first-column key value (tape tuples never carry abbreviated keys) */
3779         stup->datum1 = index_getattr(tuple,
3780                                                                  1,
3781                                                                  RelationGetDescr(state->indexRel),
3782                                                                  &stup->isnull1);
3783 }
3784
3785 /*
3786  * Routines specialized for DatumTuple case
3787  */
3788
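/*
 * Caller-side sketch (illustrative only; the calls are the public
 * tuplesort API, but the surrounding code is hypothetical) showing how the
 * datum-case routines below get exercised:
 *
 *     Tuplesortstate *state;
 *     Datum           val;
 *     bool            isnull;
 *
 *     state = tuplesort_begin_datum(INT4OID, Int4LessOperator, InvalidOid,
 *                                   false, work_mem, false);
 *     tuplesort_putdatum(state, Int32GetDatum(42), false);
 *     tuplesort_performsort(state);
 *     while (tuplesort_getdatum(state, true, &val, &isnull))
 *         elog(DEBUG1, "got %d", DatumGetInt32(val));
 *     tuplesort_end(state);
 */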
3789 static int
3790 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3791 {
3792         int                     compare;
3793
3794         compare = ApplySortComparator(a->datum1, a->isnull1,
3795                                                                   b->datum1, b->isnull1,
3796                                                                   state->sortKeys);
3797         if (compare != 0)
3798                 return compare;
3799
3800         /* if we have abbreviations, then "tuple" has the original value */
3801
3802         if (state->sortKeys->abbrev_converter)
3803                 compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
3804                                                                                                  PointerGetDatum(b->tuple), b->isnull1,
3805                                                                                                  state->sortKeys);
3806
3807         return compare;
3808 }
3809
3810 static void
3811 copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
3812 {
3813         /* Not currently needed */
3814         elog(ERROR, "copytup_datum() should not be called");
3815 }
3816
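/*
 * On-tape format for the datum case, as the write/read pair below
 * implements it (a sketch inferred from this code): the payload after the
 * length word is empty for a NULL, a raw Datum for a by-value type, or
 * the datum's contents (size per datumGetSize()) for a by-reference type:
 *
 *     NULL:         [writtenlen][]
 *     by-value:     [writtenlen][Datum]
 *     by-reference: [writtenlen][datum contents]
 *
 * writtenlen = payload size + sizeof(unsigned int); readtup_datum()
 * detects the NULL case by a zero payload length.
 */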
3817 static void
3818 writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
3819 {
3820         void       *waddr;
3821         unsigned int tuplen;
3822         unsigned int writtenlen;
3823
3824         if (stup->isnull1)
3825         {
3826                 waddr = NULL;
3827                 tuplen = 0;
3828         }
3829         else if (state->datumTypeByVal)
3830         {
3831                 waddr = &stup->datum1;
3832                 tuplen = sizeof(Datum);
3833         }
3834         else
3835         {
3836                 waddr = stup->tuple;
3837                 tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, state->datumTypeLen);
3838                 Assert(tuplen != 0);
3839         }
3840
3841         writtenlen = tuplen + sizeof(unsigned int);
3842
3843         LogicalTapeWrite(state->tapeset, tapenum,
3844                                          (void *) &writtenlen, sizeof(writtenlen));
3845         LogicalTapeWrite(state->tapeset, tapenum,
3846                                          waddr, tuplen);
3847         if (state->randomAccess)        /* need trailing length word? */
3848                 LogicalTapeWrite(state->tapeset, tapenum,
3849                                                  (void *) &writtenlen, sizeof(writtenlen));
3850
3851         if (stup->tuple)
3852         {
3853                 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3854                 pfree(stup->tuple);
3855         }
3856 }
3857
3858 static void
3859 readtup_datum(Tuplesortstate *state, SortTuple *stup,
3860                           int tapenum, unsigned int len)
3861 {
3862         unsigned int tuplen = len - sizeof(unsigned int);
3863
3864         if (tuplen == 0)
3865         {
3866                 /* it's NULL */
3867                 stup->datum1 = (Datum) 0;
3868                 stup->isnull1 = true;
3869                 stup->tuple = NULL;
3870         }
3871         else if (state->datumTypeByVal)
3872         {
3873                 Assert(tuplen == sizeof(Datum));
3874                 LogicalTapeReadExact(state->tapeset, tapenum,
3875                                                          &stup->datum1, tuplen);
3876                 stup->isnull1 = false;
3877                 stup->tuple = NULL;
3878         }
3879         else
3880         {
3881                 void       *raddr = palloc(tuplen);
3882
3883                 LogicalTapeReadExact(state->tapeset, tapenum,
3884                                                          raddr, tuplen);
3885                 stup->datum1 = PointerGetDatum(raddr);
3886                 stup->isnull1 = false;
3887                 stup->tuple = raddr;
3888                 USEMEM(state, GetMemoryChunkSpace(raddr));
3889         }
3890
3891         if (state->randomAccess)        /* need trailing length word? */
3892                 LogicalTapeReadExact(state->tapeset, tapenum,
3893                                                          &tuplen, sizeof(tuplen));
3894 }
3895
3896 /*
3897  * Convenience routine to free a tuple previously loaded into sort memory
3898  */
3899 static void
3900 free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
3901 {
3902         FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
3903         pfree(stup->tuple);
3904 }