1 /*-------------------------------------------------------------------------
4 * Generalized tuple sorting routines.
6 * This module handles sorting of heap tuples, index tuples, or single
7 * Datums (and could easily support other kinds of sortable objects,
8 * if necessary). It works efficiently for both small and large amounts
9 * of data. Small amounts are sorted in-memory using qsort(). Large
10 * amounts are sorted using temporary files and a standard external sort
13 * See Knuth, volume 3, for more than you want to know about the external
14 * sorting algorithm. Historically, we divided the input into sorted runs
15 * using replacement selection, in the form of a priority tree implemented
16 * as a heap (essentially his Algorithm 5.2.3H -- although that strategy is
17 * often avoided altogether), but that can now only happen first the first
18 * run. We merge the runs using polyphase merge, Knuth's Algorithm
19 * 5.4.2D. The logical "tapes" used by Algorithm D are implemented by
20 * logtape.c, which avoids space wastage by recycling disk space as soon
21 * as each block is read from its "tape".
23 * We never form the initial runs using Knuth's recommended replacement
24 * selection data structure (Algorithm 5.4.1R), because it uses a fixed
25 * number of records in memory at all times. Since we are dealing with
26 * tuples that may vary considerably in size, we want to be able to vary
27 * the number of records kept in memory to ensure full utilization of the
28 * allowed sort memory space. So, we keep the tuples in a variable-size
29 * heap, with the next record to go out at the top of the heap. Like
30 * Algorithm 5.4.1R, each record is stored with the run number that it
31 * must go into, and we use (run number, key) as the ordering key for the
32 * heap. When the run number at the top of the heap changes, we know that
33 * no more records of the prior run are left in the heap. Note that there
34 * are in practice only ever two distinct run numbers, due to the greatly
35 * reduced use of replacement selection in PostgreSQL 9.6.
37 * In PostgreSQL 9.6, a heap (based on Knuth's Algorithm H, with some small
38 * customizations) is only used with the aim of producing just one run,
39 * thereby avoiding all merging. Only the first run can use replacement
40 * selection, which is why there are now only two possible valid run
41 * numbers, and why heapification is customized to not distinguish between
42 * tuples in the second run (those will be quicksorted). We generally
43 * prefer a simple hybrid sort-merge strategy, where runs are sorted in much
44 * the same way as the entire input of an internal sort is sorted (using
45 * qsort()). The replacement_sort_tuples GUC controls the limited remaining
46 * use of replacement selection for the first run.
48 * There are several reasons to favor a hybrid sort-merge strategy.
49 * Maintaining a priority tree/heap has poor CPU cache characteristics.
50 * Furthermore, the growth in main memory sizes has greatly diminished the
51 * value of having runs that are larger than available memory, even in the
52 * case where there is partially sorted input and runs can be made far
53 * larger by using a heap. In most cases, a single-pass merge step is all
54 * that is required even when runs are no larger than available memory.
55 * Avoiding multiple merge passes was traditionally considered to be the
56 * major advantage of using replacement selection.
58 * The approximate amount of memory allowed for any one sort operation
59 * is specified in kilobytes by the caller (most pass work_mem). Initially,
60 * we absorb tuples and simply store them in an unsorted array as long as
61 * we haven't exceeded workMem. If we reach the end of the input without
62 * exceeding workMem, we sort the array using qsort() and subsequently return
63 * tuples just by scanning the tuple array sequentially. If we do exceed
64 * workMem, we begin to emit tuples into sorted runs in temporary tapes.
65 * When tuples are dumped in batch after quicksorting, we begin a new run
66 * with a new output tape (selected per Algorithm D). After the end of the
67 * input is reached, we dump out remaining tuples in memory into a final run
68 * (or two, when replacement selection is still used), then merge the runs
71 * When merging runs, we use a heap containing just the frontmost tuple from
72 * each source run; we repeatedly output the smallest tuple and insert the
73 * next tuple from its source tape (if any). When the heap empties, the merge
74 * is complete. The basic merge algorithm thus needs very little memory ---
75 * only M tuples for an M-way merge, and M is constrained to a small number.
76 * However, we can still make good use of our full workMem allocation by
77 * pre-reading additional tuples from each source tape. Without prereading,
78 * our access pattern to the temporary file would be very erratic; on average
79 * we'd read one block from each of M source tapes during the same time that
80 * we're writing M blocks to the output tape, so there is no sequentiality of
81 * access at all, defeating the read-ahead methods used by most Unix kernels.
82 * Worse, the output tape gets written into a very random sequence of blocks
83 * of the temp file, ensuring that things will be even worse when it comes
84 * time to read that tape. A straightforward merge pass thus ends up doing a
85 * lot of waiting for disk seeks. We can improve matters by prereading from
86 * each source tape sequentially, loading about workMem/M bytes from each tape
87 * in turn. Then we run the merge algorithm, writing but not reading until
88 * one of the preloaded tuple series runs out. Then we switch back to preread
89 * mode, fill memory again, and repeat. This approach helps to localize both
90 * read and write accesses.
92 * When the caller requests random access to the sort result, we form
93 * the final sorted run on a logical tape which is then "frozen", so
94 * that we can access it randomly. When the caller does not need random
95 * access, we return from tuplesort_performsort() as soon as we are down
96 * to one run per logical tape. The final merge is then performed
97 * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
98 * saves one cycle of writing all the data out to disk and reading it in.
100 * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
101 * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
102 * to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
103 * tape drives are expensive beasts, and in particular that there will always
104 * be many more runs than tape drives. In our implementation a "tape drive"
105 * doesn't cost much more than a few Kb of memory buffers, so we can afford
106 * to have lots of them. In particular, if we can have as many tape drives
107 * as sorted runs, we can eliminate any repeated I/O at all. In the current
108 * code we determine the number of tapes M on the basis of workMem: we want
109 * workMem/M to be large enough that we read a fair amount of data each time
110 * we preread from a tape, so as to maintain the locality of access described
111 * above. Nonetheless, with large workMem we can have many tapes.
114 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
115 * Portions Copyright (c) 1994, Regents of the University of California
118 * src/backend/utils/sort/tuplesort.c
120 *-------------------------------------------------------------------------
123 #include "postgres.h"
127 #include "access/htup_details.h"
128 #include "access/nbtree.h"
129 #include "catalog/index.h"
130 #include "catalog/pg_am.h"
131 #include "commands/tablespace.h"
132 #include "executor/executor.h"
133 #include "miscadmin.h"
134 #include "pg_trace.h"
135 #include "utils/datum.h"
136 #include "utils/logtape.h"
137 #include "utils/lsyscache.h"
138 #include "utils/memutils.h"
139 #include "utils/pg_rusage.h"
140 #include "utils/rel.h"
141 #include "utils/sortsupport.h"
142 #include "utils/tuplesort.h"
145 /* sort-type codes for sort__start probes */
149 #define CLUSTER_SORT 3
153 bool trace_sort = false;
156 #ifdef DEBUG_BOUNDED_SORT
157 bool optimize_bounded_sort = true;
162 * The objects we actually sort are SortTuple structs. These contain
163 * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
164 * which is a separate palloc chunk --- we assume it is just one chunk and
165 * can be freed by a simple pfree() (except during final on-the-fly merge,
166 * when memory is used in batch). SortTuples also contain the tuple's
167 * first key column in Datum/nullflag format, and an index integer.
169 * Storing the first key column lets us save heap_getattr or index_getattr
170 * calls during tuple comparisons. We could extract and save all the key
171 * columns not just the first, but this would increase code complexity and
172 * overhead, and wouldn't actually save any comparison cycles in the common
173 * case where the first key determines the comparison result. Note that
174 * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
176 * There is one special case: when the sort support infrastructure provides an
177 * "abbreviated key" representation, where the key is (typically) a pass by
178 * value proxy for a pass by reference type. In this case, the abbreviated key
179 * is stored in datum1 in place of the actual first key column.
181 * When sorting single Datums, the data value is represented directly by
182 * datum1/isnull1 for pass by value types (or null values). If the datatype is
183 * pass-by-reference and isnull1 is false, then "tuple" points to a separately
184 * palloc'd data value, otherwise "tuple" is NULL. The value of datum1 is then
185 * either the same pointer as "tuple", or is an abbreviated key value as
186 * described above. Accordingly, "tuple" is always used in preference to
187 * datum1 as the authoritative value for pass-by-reference cases.
189 * While building initial runs, tupindex holds the tuple's run number.
190 * Historically, the run number could meaningfully distinguish many runs, but
191 * it now only distinguishes RUN_FIRST and HEAP_RUN_NEXT, since replacement
192 * selection is always abandoned after the first run; no other run number
193 * should be represented here. During merge passes, we re-use it to hold the
194 * input tape number that each tuple in the heap was read from, or to hold the
195 * index of the next tuple pre-read from the same tape in the case of pre-read
196 * entries. tupindex goes unused if the sort occurs entirely in memory.
200 void *tuple; /* the tuple itself */
201 Datum datum1; /* value of first key column */
202 bool isnull1; /* is first key column NULL? */
203 int tupindex; /* see notes above */
208 * Possible states of a Tuplesort object. These denote the states that
209 * persist between calls of Tuplesort routines.
213 TSS_INITIAL, /* Loading tuples; still within memory limit */
214 TSS_BOUNDED, /* Loading tuples into bounded-size heap */
215 TSS_BUILDRUNS, /* Loading tuples; writing to tape */
216 TSS_SORTEDINMEM, /* Sort completed entirely in memory */
217 TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */
218 TSS_FINALMERGE /* Performing final merge on-the-fly */
222 * Parameters for calculation of number of tapes to use --- see inittapes()
223 * and tuplesort_merge_order().
225 * In this calculation we assume that each tape will cost us about 3 blocks
226 * worth of buffer space (which is an underestimate for very large data
227 * volumes, but it's probably close enough --- see logtape.c).
229 * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
230 * tape during a preread cycle (see discussion at top of file).
232 #define MINORDER 6 /* minimum merge order */
233 #define TAPE_BUFFER_OVERHEAD (BLCKSZ * 3)
234 #define MERGE_BUFFER_SIZE (BLCKSZ * 32)
237 * Run numbers, used during external sort operations.
239 * HEAP_RUN_NEXT is only used for SortTuple.tupindex, never state.currentRun.
242 #define HEAP_RUN_NEXT INT_MAX
245 typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
246 Tuplesortstate *state);
249 * Private state of a Tuplesort operation.
251 struct Tuplesortstate
253 TupSortStatus status; /* enumerated value as shown above */
254 int nKeys; /* number of columns in sort key */
255 bool randomAccess; /* did caller request random access? */
256 bool bounded; /* did caller specify a maximum number of
257 * tuples to return? */
258 bool boundUsed; /* true if we made use of a bounded heap */
259 int bound; /* if bounded, the maximum number of tuples */
260 bool tuples; /* Can SortTuple.tuple ever be set? */
261 int64 availMem; /* remaining memory available, in bytes */
262 int64 allowedMem; /* total memory allowed, in bytes */
263 int maxTapes; /* number of tapes (Knuth's T) */
264 int tapeRange; /* maxTapes-1 (Knuth's P) */
265 MemoryContext sortcontext; /* memory context holding most sort data */
266 MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
267 LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
270 * These function pointers decouple the routines that must know what kind
271 * of tuple we are sorting from the routines that don't need to know it.
272 * They are set up by the tuplesort_begin_xxx routines.
274 * Function to compare two tuples; result is per qsort() convention, ie:
275 * <0, 0, >0 according as a<b, a=b, a>b. The API must match
276 * qsort_arg_comparator.
278 SortTupleComparator comparetup;
281 * Function to copy a supplied input tuple into palloc'd space and set up
282 * its SortTuple representation (ie, set tuple/datum1/isnull1). Also,
283 * state->availMem must be decreased by the amount of space used for the
284 * tuple copy (note the SortTuple struct itself is not counted).
286 void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
289 * Function to write a stored tuple onto tape. The representation of the
290 * tuple on tape need not be the same as it is in memory; requirements on
291 * the tape representation are given below. After writing the tuple,
292 * pfree() the out-of-line data (not the SortTuple struct!), and increase
293 * state->availMem by the amount of memory space thereby released.
295 void (*writetup) (Tuplesortstate *state, int tapenum,
299 * Function to read a stored tuple from tape back into memory. 'len' is
300 * the already-read length of the stored tuple. Create a palloc'd copy,
301 * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
302 * decrease state->availMem by the amount of memory space consumed.
304 void (*readtup) (Tuplesortstate *state, SortTuple *stup,
305 int tapenum, unsigned int len);
308 * This array holds the tuples now in sort memory. If we are in state
309 * INITIAL, the tuples are in no particular order; if we are in state
310 * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
311 * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
312 * H. (Note that memtupcount only counts the tuples that are part of the
313 * heap --- during merge passes, memtuples[] entries beyond tapeRange are
314 * never in the heap and are used to hold pre-read tuples.) In state
315 * SORTEDONTAPE, the array is not used.
317 SortTuple *memtuples; /* array of SortTuple structs */
318 int memtupcount; /* number of tuples currently present */
319 int memtupsize; /* allocated length of memtuples array */
320 bool growmemtuples; /* memtuples' growth still underway? */
323 * Memory for tuples is sometimes allocated in batch, rather than
324 * incrementally. This implies that incremental memory accounting has been
325 * abandoned. Currently, this only happens for the final on-the-fly merge
326 * step. Large batch allocations can store tuples (e.g. IndexTuples)
327 * without palloc() fragmentation and other overhead.
332 * While building initial runs, this indicates if the replacement
333 * selection strategy is in use. When it isn't, then a simple hybrid
334 * sort-merge strategy is in use instead (runs are quicksorted).
339 * While building initial runs, this is the current output run number
340 * (starting at RUN_FIRST). Afterwards, it is the number of initial
346 * Unless otherwise noted, all pointer variables below are pointers to
347 * arrays of length maxTapes, holding per-tape data.
351 * These variables are only used during merge passes. mergeactive[i] is
352 * true if we are reading an input run from (actual) tape number i and
353 * have not yet exhausted that run. mergenext[i] is the memtuples index
354 * of the next pre-read tuple (next to be loaded into the heap) for tape
355 * i, or 0 if we are out of pre-read tuples. mergelast[i] similarly
356 * points to the last pre-read tuple from each tape. mergeavailslots[i]
357 * is the number of unused memtuples[] slots reserved for tape i, and
358 * mergeavailmem[i] is the amount of unused space allocated for tape i.
359 * mergefreelist and mergefirstfree keep track of unused locations in the
360 * memtuples[] array. The memtuples[].tupindex fields link together
361 * pre-read tuples for each tape as well as recycled locations in
362 * mergefreelist. It is OK to use 0 as a null link in these lists, because
363 * memtuples[0] is part of the merge heap and is never a pre-read tuple.
365 bool *mergeactive; /* active input run source? */
366 int *mergenext; /* first preread tuple for each source */
367 int *mergelast; /* last preread tuple for each source */
368 int *mergeavailslots; /* slots left for prereading each tape */
369 int64 *mergeavailmem; /* availMem for prereading each tape */
370 int mergefreelist; /* head of freelist of recycled slots */
371 int mergefirstfree; /* first slot never used in this merge */
374 * Per-tape batch state, when final on-the-fly merge consumes memory from
375 * just a few large allocations.
377 * Aside from the general benefits of performing fewer individual retail
378 * palloc() calls, this also helps make merging more cache efficient, since
379 * each tape's tuples must naturally be accessed sequentially (in sorted
382 int64 spacePerTape; /* Space (memory) for tuples (not slots) */
383 char **mergetuples; /* Each tape's memory allocation */
384 char **mergecurrent; /* Current offset into each tape's memory */
385 char **mergetail; /* Last item's start point for each tape */
386 char **mergeoverflow; /* Retail palloc() "overflow" for each tape */
389 * Variables for Algorithm D. Note that destTape is a "logical" tape
390 * number, ie, an index into the tp_xxx[] arrays. Be careful to keep
391 * "logical" and "actual" tape numbers straight!
393 int Level; /* Knuth's l */
394 int destTape; /* current output tape (Knuth's j, less 1) */
395 int *tp_fib; /* Target Fibonacci run counts (A[]) */
396 int *tp_runs; /* # of real runs on each tape */
397 int *tp_dummy; /* # of dummy runs for each tape (D[]) */
398 int *tp_tapenum; /* Actual tape numbers (TAPE[]) */
399 int activeTapes; /* # of active input tapes in merge pass */
402 * These variables are used after completion of sorting to keep track of
403 * the next tuple to return. (In the tape case, the tape's current read
404 * position is also critical state.)
406 int result_tape; /* actual tape number of finished output */
407 int current; /* array index (only used if SORTEDINMEM) */
408 bool eof_reached; /* reached EOF (needed for cursors) */
410 /* markpos_xxx holds marked position for mark and restore */
411 long markpos_block; /* tape block# (only used if SORTEDONTAPE) */
412 int markpos_offset; /* saved "current", or offset in tape block */
413 bool markpos_eof; /* saved "eof_reached" */
416 * The sortKeys variable is used by every case other than the hash index
417 * case; it is set by tuplesort_begin_xxx. tupDesc is only used by the
418 * MinimalTuple and CLUSTER routines, though.
421 SortSupport sortKeys; /* array of length nKeys */
424 * This variable is shared by the single-key MinimalTuple case and the
425 * Datum case (which both use qsort_ssup()). Otherwise it's NULL.
430 * Additional state for managing "abbreviated key" sortsupport routines
431 * (which currently may be used by all cases except the hash index case).
432 * Tracks the intervals at which the optimization's effectiveness is
435 int64 abbrevNext; /* Tuple # at which to next check
439 * These variables are specific to the CLUSTER case; they are set by
440 * tuplesort_begin_cluster.
442 IndexInfo *indexInfo; /* info about index being used for reference */
443 EState *estate; /* for evaluating index expressions */
446 * These variables are specific to the IndexTuple case; they are set by
447 * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
449 Relation heapRel; /* table the index is being built on */
450 Relation indexRel; /* index being built */
452 /* These are specific to the index_btree subcase: */
453 bool enforceUnique; /* complain if we find duplicate tuples */
455 /* These are specific to the index_hash subcase: */
456 uint32 hash_mask; /* mask for sortable part of hash code */
459 * These variables are specific to the Datum case; they are set by
460 * tuplesort_begin_datum and used only by the DatumTuple routines.
463 /* we need typelen in order to know how to copy the Datums. */
467 * Resource snapshot for time of sort start.
474 #define COMPARETUP(state,a,b) ((*(state)->comparetup) (a, b, state))
475 #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
476 #define WRITETUP(state,tape,stup) ((*(state)->writetup) (state, tape, stup))
477 #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
478 #define LACKMEM(state) ((state)->availMem < 0 && !(state)->batchUsed)
479 #define USEMEM(state,amt) ((state)->availMem -= (amt))
480 #define FREEMEM(state,amt) ((state)->availMem += (amt))
483 * NOTES about on-tape representation of tuples:
485 * We require the first "unsigned int" of a stored tuple to be the total size
486 * on-tape of the tuple, including itself (so it is never zero; an all-zero
487 * unsigned int is used to delimit runs). The remainder of the stored tuple
488 * may or may not match the in-memory representation of the tuple ---
489 * any conversion needed is the job of the writetup and readtup routines.
491 * If state->randomAccess is true, then the stored representation of the
492 * tuple must be followed by another "unsigned int" that is a copy of the
493 * length --- so the total tape space used is actually sizeof(unsigned int)
494 * more than the stored length value. This allows read-backwards. When
495 * randomAccess is not true, the write/read routines may omit the extra
498 * writetup is expected to write both length words as well as the tuple
499 * data. When readtup is called, the tape is positioned just after the
500 * front length word; readtup must read the tuple data and advance past
501 * the back length word (if present).
503 * The write/read routines can make use of the tuple description data
504 * stored in the Tuplesortstate record, if needed. They are also expected
505 * to adjust state->availMem by the amount of memory space (not tape space!)
506 * released or consumed. There is no error return from either writetup
507 * or readtup; they should ereport() on failure.
510 * NOTES about memory consumption calculations:
512 * We count space allocated for tuples against the workMem limit, plus
513 * the space used by the variable-size memtuples array. Fixed-size space
514 * is not counted; it's small enough to not be interesting.
516 * Note that we count actual space used (as shown by GetMemoryChunkSpace)
517 * rather than the originally-requested size. This is important since
518 * palloc can add substantial overhead. It's not a complete answer since
519 * we won't count any wasted space in palloc allocation blocks, but it's
520 * a lot better than what we were doing before 7.3. As of 9.6, a
521 * separate memory context is used for caller passed tuples. Resetting
522 * it at certain key increments significantly ameliorates fragmentation.
523 * Note that this places a responsibility on readtup and copytup routines
524 * to use the right memory context for these tuples (and to not use the
525 * reset context for anything whose lifetime needs to span multiple
526 * external sort runs).
529 /* When using this macro, beware of double evaluation of len */
530 #define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
532 if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
533 elog(ERROR, "unexpected end of data"); \
537 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
538 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
539 static bool consider_abort_common(Tuplesortstate *state);
540 static bool useselection(Tuplesortstate *state);
541 static void inittapes(Tuplesortstate *state);
542 static void selectnewtape(Tuplesortstate *state);
543 static void mergeruns(Tuplesortstate *state);
544 static void mergeonerun(Tuplesortstate *state);
545 static void beginmerge(Tuplesortstate *state, bool finalMerge);
546 static void batchmemtuples(Tuplesortstate *state);
547 static void mergebatch(Tuplesortstate *state, int64 spacePerTape);
548 static void mergebatchone(Tuplesortstate *state, int srcTape,
549 SortTuple *stup, bool *should_free);
550 static void mergebatchfreetape(Tuplesortstate *state, int srcTape,
551 SortTuple *rtup, bool *should_free);
552 static void *mergebatchalloc(Tuplesortstate *state, int tapenum, Size tuplen);
553 static void mergepreread(Tuplesortstate *state);
554 static void mergeprereadone(Tuplesortstate *state, int srcTape);
555 static void dumptuples(Tuplesortstate *state, bool alltuples);
556 static void dumpbatch(Tuplesortstate *state, bool alltuples);
557 static void make_bounded_heap(Tuplesortstate *state);
558 static void sort_bounded_heap(Tuplesortstate *state);
559 static void tuplesort_sort_memtuples(Tuplesortstate *state);
560 static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
561 int tupleindex, bool checkIndex);
562 static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
563 static void reversedirection(Tuplesortstate *state);
564 static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
565 static void markrunend(Tuplesortstate *state, int tapenum);
566 static void *readtup_alloc(Tuplesortstate *state, int tapenum, Size tuplen);
567 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
568 Tuplesortstate *state);
569 static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
570 static void writetup_heap(Tuplesortstate *state, int tapenum,
572 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
573 int tapenum, unsigned int len);
574 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
575 Tuplesortstate *state);
576 static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
577 static void writetup_cluster(Tuplesortstate *state, int tapenum,
579 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
580 int tapenum, unsigned int len);
581 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
582 Tuplesortstate *state);
583 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
584 Tuplesortstate *state);
585 static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
586 static void writetup_index(Tuplesortstate *state, int tapenum,
588 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
589 int tapenum, unsigned int len);
590 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
591 Tuplesortstate *state);
592 static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
593 static void writetup_datum(Tuplesortstate *state, int tapenum,
595 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
596 int tapenum, unsigned int len);
597 static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
600 * Special versions of qsort just for SortTuple objects. qsort_tuple() sorts
601 * any variant of SortTuples, using the appropriate comparetup function.
602 * qsort_ssup() is specialized for the case where the comparetup function
603 * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts
606 #include "qsort_tuple.c"
610 * tuplesort_begin_xxx
612 * Initialize for a tuple sort operation.
614 * After calling tuplesort_begin, the caller should call tuplesort_putXXX
615 * zero or more times, then call tuplesort_performsort when all the tuples
616 * have been supplied. After performsort, retrieve the tuples in sorted
617 * order by calling tuplesort_getXXX until it returns false/NULL. (If random
618 * access was requested, rescan, markpos, and restorepos can also be called.)
619 * Call tuplesort_end to terminate the operation and release memory/disk space.
621 * Each variant of tuplesort_begin has a workMem parameter specifying the
622 * maximum number of kilobytes of RAM to use before spilling data to disk.
623 * (The normal value of this parameter is work_mem, but some callers use
624 * other values.) Each variant also has a randomAccess parameter specifying
625 * whether the caller needs non-sequential access to the sort result.
628 static Tuplesortstate *
629 tuplesort_begin_common(int workMem, bool randomAccess)
631 Tuplesortstate *state;
632 MemoryContext sortcontext;
633 MemoryContext tuplecontext;
634 MemoryContext oldcontext;
637 * Create a working memory context for this sort operation. All data
638 * needed by the sort will live inside this context.
640 sortcontext = AllocSetContextCreate(CurrentMemoryContext,
642 ALLOCSET_DEFAULT_MINSIZE,
643 ALLOCSET_DEFAULT_INITSIZE,
644 ALLOCSET_DEFAULT_MAXSIZE);
647 * Caller tuple (e.g. IndexTuple) memory context.
649 * A dedicated child content used exclusively for caller passed tuples
650 * eases memory management. Resetting at key points reduces fragmentation.
651 * Note that the memtuples array of SortTuples is allocated in the parent
652 * context, not this context, because there is no need to free memtuples
655 tuplecontext = AllocSetContextCreate(sortcontext,
657 ALLOCSET_DEFAULT_MINSIZE,
658 ALLOCSET_DEFAULT_INITSIZE,
659 ALLOCSET_DEFAULT_MAXSIZE);
662 * Make the Tuplesortstate within the per-sort context. This way, we
663 * don't need a separate pfree() operation for it at shutdown.
665 oldcontext = MemoryContextSwitchTo(sortcontext);
667 state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
671 pg_rusage_init(&state->ru_start);
674 state->status = TSS_INITIAL;
675 state->randomAccess = randomAccess;
676 state->bounded = false;
677 state->tuples = true;
678 state->boundUsed = false;
679 state->allowedMem = workMem * (int64) 1024;
680 state->availMem = state->allowedMem;
681 state->sortcontext = sortcontext;
682 state->tuplecontext = tuplecontext;
683 state->tapeset = NULL;
685 state->memtupcount = 0;
688 * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
689 * see comments in grow_memtuples().
691 state->memtupsize = Max(1024,
692 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1);
694 state->growmemtuples = true;
695 state->batchUsed = false;
696 state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
698 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
700 /* workMem must be large enough for the minimal memtuples array */
702 elog(ERROR, "insufficient memory allowed for sort");
704 state->currentRun = RUN_FIRST;
707 * maxTapes, tapeRange, and Algorithm D variables will be initialized by
708 * inittapes(), if needed
711 state->result_tape = -1; /* flag that result tape has not been formed */
713 MemoryContextSwitchTo(oldcontext);
719 tuplesort_begin_heap(TupleDesc tupDesc,
720 int nkeys, AttrNumber *attNums,
721 Oid *sortOperators, Oid *sortCollations,
722 bool *nullsFirstFlags,
723 int workMem, bool randomAccess)
725 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
726 MemoryContext oldcontext;
729 oldcontext = MemoryContextSwitchTo(state->sortcontext);
731 AssertArg(nkeys > 0);
736 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
737 nkeys, workMem, randomAccess ? 't' : 'f');
740 state->nKeys = nkeys;
742 TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
743 false, /* no unique check */
748 state->comparetup = comparetup_heap;
749 state->copytup = copytup_heap;
750 state->writetup = writetup_heap;
751 state->readtup = readtup_heap;
753 state->tupDesc = tupDesc; /* assume we need not copy tupDesc */
754 state->abbrevNext = 10;
756 /* Prepare SortSupport data for each column */
757 state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
759 for (i = 0; i < nkeys; i++)
761 SortSupport sortKey = state->sortKeys + i;
763 AssertArg(attNums[i] != 0);
764 AssertArg(sortOperators[i] != 0);
766 sortKey->ssup_cxt = CurrentMemoryContext;
767 sortKey->ssup_collation = sortCollations[i];
768 sortKey->ssup_nulls_first = nullsFirstFlags[i];
769 sortKey->ssup_attno = attNums[i];
770 /* Convey if abbreviation optimization is applicable in principle */
771 sortKey->abbreviate = (i == 0);
773 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
777 * The "onlyKey" optimization cannot be used with abbreviated keys, since
778 * tie-breaker comparisons may be required. Typically, the optimization
779 * is only of value to pass-by-value types anyway, whereas abbreviated
780 * keys are typically only of value to pass-by-reference types.
782 if (nkeys == 1 && !state->sortKeys->abbrev_converter)
783 state->onlyKey = state->sortKeys;
785 MemoryContextSwitchTo(oldcontext);
791 tuplesort_begin_cluster(TupleDesc tupDesc,
793 int workMem, bool randomAccess)
795 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
796 ScanKey indexScanKey;
797 MemoryContext oldcontext;
800 Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
802 oldcontext = MemoryContextSwitchTo(state->sortcontext);
807 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
808 RelationGetNumberOfAttributes(indexRel),
809 workMem, randomAccess ? 't' : 'f');
812 state->nKeys = RelationGetNumberOfAttributes(indexRel);
814 TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
815 false, /* no unique check */
820 state->comparetup = comparetup_cluster;
821 state->copytup = copytup_cluster;
822 state->writetup = writetup_cluster;
823 state->readtup = readtup_cluster;
824 state->abbrevNext = 10;
826 state->indexInfo = BuildIndexInfo(indexRel);
828 state->tupDesc = tupDesc; /* assume we need not copy tupDesc */
830 indexScanKey = _bt_mkscankey_nodata(indexRel);
832 if (state->indexInfo->ii_Expressions != NULL)
834 TupleTableSlot *slot;
835 ExprContext *econtext;
838 * We will need to use FormIndexDatum to evaluate the index
839 * expressions. To do that, we need an EState, as well as a
840 * TupleTableSlot to put the table tuples into. The econtext's
841 * scantuple has to point to that slot, too.
843 state->estate = CreateExecutorState();
844 slot = MakeSingleTupleTableSlot(tupDesc);
845 econtext = GetPerTupleExprContext(state->estate);
846 econtext->ecxt_scantuple = slot;
849 /* Prepare SortSupport data for each column */
850 state->sortKeys = (SortSupport) palloc0(state->nKeys *
851 sizeof(SortSupportData));
853 for (i = 0; i < state->nKeys; i++)
855 SortSupport sortKey = state->sortKeys + i;
856 ScanKey scanKey = indexScanKey + i;
859 sortKey->ssup_cxt = CurrentMemoryContext;
860 sortKey->ssup_collation = scanKey->sk_collation;
861 sortKey->ssup_nulls_first =
862 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
863 sortKey->ssup_attno = scanKey->sk_attno;
864 /* Convey if abbreviation optimization is applicable in principle */
865 sortKey->abbreviate = (i == 0);
867 AssertState(sortKey->ssup_attno != 0);
869 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
870 BTGreaterStrategyNumber : BTLessStrategyNumber;
872 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
875 _bt_freeskey(indexScanKey);
877 MemoryContextSwitchTo(oldcontext);
883 tuplesort_begin_index_btree(Relation heapRel,
886 int workMem, bool randomAccess)
888 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
889 ScanKey indexScanKey;
890 MemoryContext oldcontext;
893 oldcontext = MemoryContextSwitchTo(state->sortcontext);
898 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
899 enforceUnique ? 't' : 'f',
900 workMem, randomAccess ? 't' : 'f');
903 state->nKeys = RelationGetNumberOfAttributes(indexRel);
905 TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
911 state->comparetup = comparetup_index_btree;
912 state->copytup = copytup_index;
913 state->writetup = writetup_index;
914 state->readtup = readtup_index;
915 state->abbrevNext = 10;
917 state->heapRel = heapRel;
918 state->indexRel = indexRel;
919 state->enforceUnique = enforceUnique;
921 indexScanKey = _bt_mkscankey_nodata(indexRel);
922 state->nKeys = RelationGetNumberOfAttributes(indexRel);
924 /* Prepare SortSupport data for each column */
925 state->sortKeys = (SortSupport) palloc0(state->nKeys *
926 sizeof(SortSupportData));
928 for (i = 0; i < state->nKeys; i++)
930 SortSupport sortKey = state->sortKeys + i;
931 ScanKey scanKey = indexScanKey + i;
934 sortKey->ssup_cxt = CurrentMemoryContext;
935 sortKey->ssup_collation = scanKey->sk_collation;
936 sortKey->ssup_nulls_first =
937 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
938 sortKey->ssup_attno = scanKey->sk_attno;
939 /* Convey if abbreviation optimization is applicable in principle */
940 sortKey->abbreviate = (i == 0);
942 AssertState(sortKey->ssup_attno != 0);
944 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
945 BTGreaterStrategyNumber : BTLessStrategyNumber;
947 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
950 _bt_freeskey(indexScanKey);
952 MemoryContextSwitchTo(oldcontext);
958 tuplesort_begin_index_hash(Relation heapRel,
961 int workMem, bool randomAccess)
963 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
964 MemoryContext oldcontext;
966 oldcontext = MemoryContextSwitchTo(state->sortcontext);
971 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
973 workMem, randomAccess ? 't' : 'f');
976 state->nKeys = 1; /* Only one sort column, the hash code */
978 state->comparetup = comparetup_index_hash;
979 state->copytup = copytup_index;
980 state->writetup = writetup_index;
981 state->readtup = readtup_index;
983 state->heapRel = heapRel;
984 state->indexRel = indexRel;
986 state->hash_mask = hash_mask;
988 MemoryContextSwitchTo(oldcontext);
994 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
996 int workMem, bool randomAccess)
998 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
999 MemoryContext oldcontext;
1003 oldcontext = MemoryContextSwitchTo(state->sortcontext);
1008 "begin datum sort: workMem = %d, randomAccess = %c",
1009 workMem, randomAccess ? 't' : 'f');
1012 state->nKeys = 1; /* always a one-column sort */
1014 TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
1015 false, /* no unique check */
1020 state->comparetup = comparetup_datum;
1021 state->copytup = copytup_datum;
1022 state->writetup = writetup_datum;
1023 state->readtup = readtup_datum;
1024 state->abbrevNext = 10;
1026 state->datumType = datumType;
1028 /* lookup necessary attributes of the datum type */
1029 get_typlenbyval(datumType, &typlen, &typbyval);
1030 state->datumTypeLen = typlen;
1031 state->tuples = !typbyval;
1033 /* Prepare SortSupport data */
1034 state->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
1036 state->sortKeys->ssup_cxt = CurrentMemoryContext;
1037 state->sortKeys->ssup_collation = sortCollation;
1038 state->sortKeys->ssup_nulls_first = nullsFirstFlag;
1041 * Abbreviation is possible here only for by-reference types. In theory,
1042 * a pass-by-value datatype could have an abbreviated form that is cheaper
1043 * to compare. In a tuple sort, we could support that, because we can
1044 * always extract the original datum from the tuple is needed. Here, we
1045 * can't, because a datum sort only stores a single copy of the datum;
1046 * the "tuple" field of each sortTuple is NULL.
1048 state->sortKeys->abbreviate = !typbyval;
1050 PrepareSortSupportFromOrderingOp(sortOperator, state->sortKeys);
1053 * The "onlyKey" optimization cannot be used with abbreviated keys, since
1054 * tie-breaker comparisons may be required. Typically, the optimization
1055 * is only of value to pass-by-value types anyway, whereas abbreviated
1056 * keys are typically only of value to pass-by-reference types.
1058 if (!state->sortKeys->abbrev_converter)
1059 state->onlyKey = state->sortKeys;
1061 MemoryContextSwitchTo(oldcontext);
1067 * tuplesort_set_bound
1069 * Advise tuplesort that at most the first N result tuples are required.
1071 * Must be called before inserting any tuples. (Actually, we could allow it
1072 * as long as the sort hasn't spilled to disk, but there seems no need for
1073 * delayed calls at the moment.)
1075 * This is a hint only. The tuplesort may still return more tuples than
1079 tuplesort_set_bound(Tuplesortstate *state, int64 bound)
1081 /* Assert we're called before loading any tuples */
1082 Assert(state->status == TSS_INITIAL);
1083 Assert(state->memtupcount == 0);
1084 Assert(!state->bounded);
1086 #ifdef DEBUG_BOUNDED_SORT
1087 /* Honor GUC setting that disables the feature (for easy testing) */
1088 if (!optimize_bounded_sort)
1092 /* We want to be able to compute bound * 2, so limit the setting */
1093 if (bound > (int64) (INT_MAX / 2))
1096 state->bounded = true;
1097 state->bound = (int) bound;
1100 * Bounded sorts are not an effective target for abbreviated key
1101 * optimization. Disable by setting state to be consistent with no
1102 * abbreviation support.
1104 state->sortKeys->abbrev_converter = NULL;
1105 if (state->sortKeys->abbrev_full_comparator)
1106 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
1108 /* Not strictly necessary, but be tidy */
1109 state->sortKeys->abbrev_abort = NULL;
1110 state->sortKeys->abbrev_full_comparator = NULL;
1116 * Release resources and clean up.
1118 * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
1119 * pointing to garbage. Be careful not to attempt to use or free such
1120 * pointers afterwards!
1123 tuplesort_end(Tuplesortstate *state)
1125 /* context swap probably not needed, but let's be safe */
1126 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1132 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
1134 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
1138 * Delete temporary "tape" files, if any.
1140 * Note: want to include this in reported total cost of sort, hence need
1141 * for two #ifdef TRACE_SORT sections.
1144 LogicalTapeSetClose(state->tapeset);
1150 elog(LOG, "external sort ended, %ld disk blocks used: %s",
1151 spaceUsed, pg_rusage_show(&state->ru_start));
1153 elog(LOG, "internal sort ended, %ld KB used: %s",
1154 spaceUsed, pg_rusage_show(&state->ru_start));
1157 TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
1161 * If you disabled TRACE_SORT, you can still probe sort__done, but you
1162 * ain't getting space-used stats.
1164 TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
1167 /* Free any execution state created for CLUSTER case */
1168 if (state->estate != NULL)
1170 ExprContext *econtext = GetPerTupleExprContext(state->estate);
1172 ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
1173 FreeExecutorState(state->estate);
1176 MemoryContextSwitchTo(oldcontext);
1179 * Free the per-sort memory context, thereby releasing all working memory,
1180 * including the Tuplesortstate struct itself.
1182 MemoryContextDelete(state->sortcontext);
1186 * Grow the memtuples[] array, if possible within our memory constraint. We
1187 * must not exceed INT_MAX tuples in memory or the caller-provided memory
1188 * limit. Return TRUE if we were able to enlarge the array, FALSE if not.
1190 * Normally, at each increment we double the size of the array. When doing
1191 * that would exceed a limit, we attempt one last, smaller increase (and then
1192 * clear the growmemtuples flag so we don't try any more). That allows us to
1193 * use memory as fully as permitted; sticking to the pure doubling rule could
1194 * result in almost half going unused. Because availMem moves around with
1195 * tuple addition/removal, we need some rule to prevent making repeated small
1196 * increases in memtupsize, which would just be useless thrashing. The
1197 * growmemtuples flag accomplishes that and also prevents useless
1198 * recalculations in this function.
1201 grow_memtuples(Tuplesortstate *state)
1204 int memtupsize = state->memtupsize;
1205 int64 memNowUsed = state->allowedMem - state->availMem;
1207 /* Forget it if we've already maxed out memtuples, per comment above */
1208 if (!state->growmemtuples)
1211 /* Select new value of memtupsize */
1212 if (memNowUsed <= state->availMem)
1215 * We've used no more than half of allowedMem; double our usage,
1216 * clamping at INT_MAX tuples.
1218 if (memtupsize < INT_MAX / 2)
1219 newmemtupsize = memtupsize * 2;
1222 newmemtupsize = INT_MAX;
1223 state->growmemtuples = false;
1229 * This will be the last increment of memtupsize. Abandon doubling
1230 * strategy and instead increase as much as we safely can.
1232 * To stay within allowedMem, we can't increase memtupsize by more
1233 * than availMem / sizeof(SortTuple) elements. In practice, we want
1234 * to increase it by considerably less, because we need to leave some
1235 * space for the tuples to which the new array slots will refer. We
1236 * assume the new tuples will be about the same size as the tuples
1237 * we've already seen, and thus we can extrapolate from the space
1238 * consumption so far to estimate an appropriate new size for the
1239 * memtuples array. The optimal value might be higher or lower than
1240 * this estimate, but it's hard to know that in advance. We again
1241 * clamp at INT_MAX tuples.
1243 * This calculation is safe against enlarging the array so much that
1244 * LACKMEM becomes true, because the memory currently used includes
1245 * the present array; thus, there would be enough allowedMem for the
1246 * new array elements even if no other memory were currently used.
1248 * We do the arithmetic in float8, because otherwise the product of
1249 * memtupsize and allowedMem could overflow. Any inaccuracy in the
1250 * result should be insignificant; but even if we computed a
1251 * completely insane result, the checks below will prevent anything
1252 * really bad from happening.
1256 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1257 if (memtupsize * grow_ratio < INT_MAX)
1258 newmemtupsize = (int) (memtupsize * grow_ratio);
1260 newmemtupsize = INT_MAX;
1262 /* We won't make any further enlargement attempts */
1263 state->growmemtuples = false;
1266 /* Must enlarge array by at least one element, else report failure */
1267 if (newmemtupsize <= memtupsize)
1271 * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize. Clamp
1272 * to ensure our request won't be rejected. Note that we can easily
1273 * exhaust address space before facing this outcome. (This is presently
1274 * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1275 * don't rely on that at this distance.)
1277 if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1279 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1280 state->growmemtuples = false; /* can't grow any more */
1284 * We need to be sure that we do not cause LACKMEM to become true, else
1285 * the space management algorithm will go nuts. The code above should
1286 * never generate a dangerous request, but to be safe, check explicitly
1287 * that the array growth fits within availMem. (We could still cause
1288 * LACKMEM if the memory chunk overhead associated with the memtuples
1289 * array were to increase. That shouldn't happen because we chose the
1290 * initial array size large enough to ensure that palloc will be treating
1291 * both old and new arrays as separate chunks. But we'll check LACKMEM
1292 * explicitly below just in case.)
1294 if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1298 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1299 state->memtupsize = newmemtupsize;
1300 state->memtuples = (SortTuple *)
1301 repalloc_huge(state->memtuples,
1302 state->memtupsize * sizeof(SortTuple));
1303 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1305 elog(ERROR, "unexpected out-of-memory situation in tuplesort");
1309 /* If for any reason we didn't realloc, shut off future attempts */
1310 state->growmemtuples = false;
1315 * Accept one tuple while collecting input data for sort.
1317 * Note that the input data is always copied; the caller need not save it.
1320 tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
1322 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1326 * Copy the given tuple into memory we control, and decrease availMem.
1327 * Then call the common code.
1329 COPYTUP(state, &stup, (void *) slot);
1331 puttuple_common(state, &stup);
1333 MemoryContextSwitchTo(oldcontext);
1337 * Accept one tuple while collecting input data for sort.
1339 * Note that the input data is always copied; the caller need not save it.
1342 tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
1344 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1348 * Copy the given tuple into memory we control, and decrease availMem.
1349 * Then call the common code.
1351 COPYTUP(state, &stup, (void *) tup);
1353 puttuple_common(state, &stup);
1355 MemoryContextSwitchTo(oldcontext);
1359 * Collect one index tuple while collecting input data for sort, building
1360 * it from caller-supplied values.
1363 tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
1364 ItemPointer self, Datum *values,
1367 MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
1372 stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull);
1373 tuple = ((IndexTuple) stup.tuple);
1374 tuple->t_tid = *self;
1375 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1376 /* set up first-column key value */
1377 original = index_getattr(tuple,
1379 RelationGetDescr(state->indexRel),
1382 MemoryContextSwitchTo(state->sortcontext);
1384 if (!state->sortKeys || !state->sortKeys->abbrev_converter || stup.isnull1)
1387 * Store ordinary Datum representation, or NULL value. If there is a
1388 * converter it won't expect NULL values, and cost model is not
1389 * required to account for NULL, so in that case we avoid calling
1390 * converter and just set datum1 to zeroed representation (to be
1391 * consistent, and to support cheap inequality tests for NULL
1392 * abbreviated keys).
1394 stup.datum1 = original;
1396 else if (!consider_abort_common(state))
1398 /* Store abbreviated key representation */
1399 stup.datum1 = state->sortKeys->abbrev_converter(original,
1404 /* Abort abbreviation */
1407 stup.datum1 = original;
1410 * Set state to be consistent with never trying abbreviation.
1412 * Alter datum1 representation in already-copied tuples, so as to
1413 * ensure a consistent representation (current tuple was just
1414 * handled). It does not matter if some dumped tuples are already
1415 * sorted on tape, since serialized tuples lack abbreviated keys
1416 * (TSS_BUILDRUNS state prevents control reaching here in any
1419 for (i = 0; i < state->memtupcount; i++)
1421 SortTuple *mtup = &state->memtuples[i];
1423 tuple = mtup->tuple;
1424 mtup->datum1 = index_getattr(tuple,
1426 RelationGetDescr(state->indexRel),
1431 puttuple_common(state, &stup);
1433 MemoryContextSwitchTo(oldcontext);
1437 * Accept one Datum while collecting input data for sort.
1439 * If the Datum is pass-by-ref type, the value will be copied.
1442 tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
1444 MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
1448 * Pass-by-value types or null values are just stored directly in
1449 * stup.datum1 (and stup.tuple is not used and set to NULL).
1451 * Non-null pass-by-reference values need to be copied into memory we
1452 * control, and possibly abbreviated. The copied value is pointed to by
1453 * stup.tuple and is treated as the canonical copy (e.g. to return via
1454 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
1455 * abbreviated value if abbreviation is happening, otherwise it's
1456 * identical to stup.tuple.
1459 if (isNull || !state->tuples)
1462 * Set datum1 to zeroed representation for NULLs (to be consistent, and
1463 * to support cheap inequality tests for NULL abbreviated keys).
1465 stup.datum1 = !isNull ? val : (Datum) 0;
1466 stup.isnull1 = isNull;
1467 stup.tuple = NULL; /* no separate storage */
1468 MemoryContextSwitchTo(state->sortcontext);
1472 Datum original = datumCopy(val, false, state->datumTypeLen);
1474 stup.isnull1 = false;
1475 stup.tuple = DatumGetPointer(original);
1476 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1477 MemoryContextSwitchTo(state->sortcontext);
1479 if (!state->sortKeys->abbrev_converter)
1481 stup.datum1 = original;
1483 else if (!consider_abort_common(state))
1485 /* Store abbreviated key representation */
1486 stup.datum1 = state->sortKeys->abbrev_converter(original,
1491 /* Abort abbreviation */
1494 stup.datum1 = original;
1497 * Set state to be consistent with never trying abbreviation.
1499 * Alter datum1 representation in already-copied tuples, so as to
1500 * ensure a consistent representation (current tuple was just
1501 * handled). It does not matter if some dumped tuples are
1502 * already sorted on tape, since serialized tuples lack
1503 * abbreviated keys (TSS_BUILDRUNS state prevents control
1504 * reaching here in any case).
1506 for (i = 0; i < state->memtupcount; i++)
1508 SortTuple *mtup = &state->memtuples[i];
1510 mtup->datum1 = PointerGetDatum(mtup->tuple);
1515 puttuple_common(state, &stup);
1517 MemoryContextSwitchTo(oldcontext);
1521 * Shared code for tuple and datum cases.
1524 puttuple_common(Tuplesortstate *state, SortTuple *tuple)
1526 switch (state->status)
1531 * Save the tuple into the unsorted array. First, grow the array
1532 * as needed. Note that we try to grow the array when there is
1533 * still one free slot remaining --- if we fail, there'll still be
1534 * room to store the incoming tuple, and then we'll switch to
1535 * tape-based operation.
1537 if (state->memtupcount >= state->memtupsize - 1)
1539 (void) grow_memtuples(state);
1540 Assert(state->memtupcount < state->memtupsize);
1542 state->memtuples[state->memtupcount++] = *tuple;
1545 * Check if it's time to switch over to a bounded heapsort. We do
1546 * so if the input tuple count exceeds twice the desired tuple
1547 * count (this is a heuristic for where heapsort becomes cheaper
1548 * than a quicksort), or if we've just filled workMem and have
1549 * enough tuples to meet the bound.
1551 * Note that once we enter TSS_BOUNDED state we will always try to
1552 * complete the sort that way. In the worst case, if later input
1553 * tuples are larger than earlier ones, this might cause us to
1554 * exceed workMem significantly.
1556 if (state->bounded &&
1557 (state->memtupcount > state->bound * 2 ||
1558 (state->memtupcount > state->bound && LACKMEM(state))))
1562 elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1564 pg_rusage_show(&state->ru_start));
1566 make_bounded_heap(state);
1571 * Done if we still fit in available memory and have array slots.
1573 if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1577 * Nope; time to switch to tape-based operation.
1582 * Dump tuples until we are back under the limit.
1584 dumptuples(state, false);
1590 * We don't want to grow the array here, so check whether the new
1591 * tuple can be discarded before putting it in. This should be a
1592 * good speed optimization, too, since when there are many more
1593 * input tuples than the bound, most input tuples can be discarded
1594 * with just this one comparison. Note that because we currently
1595 * have the sort direction reversed, we must check for <= not >=.
1597 if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1599 /* new tuple <= top of the heap, so we can discard it */
1600 free_sort_tuple(state, tuple);
1601 CHECK_FOR_INTERRUPTS();
1605 /* discard top of heap, sift up, insert new tuple */
1606 free_sort_tuple(state, &state->memtuples[0]);
1607 tuplesort_heap_siftup(state, false);
1608 tuplesort_heap_insert(state, tuple, 0, false);
1615 * Insert the tuple into the heap, with run number currentRun if
1616 * it can go into the current run, else HEAP_RUN_NEXT. The tuple
1617 * can go into the current run if it is >= the first
1618 * not-yet-output tuple. (Actually, it could go into the current
1619 * run if it is >= the most recently output tuple ... but that
1620 * would require keeping around the tuple we last output, and it's
1621 * simplest to let writetup free each tuple as soon as it's
1624 * Note that this only applies when:
1626 * - currentRun is RUN_FIRST
1628 * - Replacement selection is in use (typically it is never used).
1630 * When these two conditions are not both true, all tuples are
1631 * appended indifferently, much like the TSS_INITIAL case.
1633 * There should always be room to store the incoming tuple.
1635 Assert(!state->replaceActive || state->memtupcount > 0);
1636 if (state->replaceActive &&
1637 COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1639 Assert(state->currentRun == RUN_FIRST);
1642 * Insert tuple into first, fully heapified run.
1644 * Unlike classic replacement selection, which this module was
1645 * previously based on, only RUN_FIRST tuples are fully
1646 * heapified. Any second/next run tuples are appended
1647 * indifferently. While HEAP_RUN_NEXT tuples may be sifted
1648 * out of the way of first run tuples, COMPARETUP() will never
1649 * be called for the run's tuples during sifting (only our
1650 * initial COMPARETUP() call is required for the tuple, to
1651 * determine that the tuple does not belong in RUN_FIRST).
1653 tuplesort_heap_insert(state, tuple, state->currentRun, true);
1658 * Tuple was determined to not belong to heapified RUN_FIRST,
1659 * or replacement selection not in play. Append the tuple to
1660 * memtuples indifferently.
1662 * dumptuples() does not trust that the next run's tuples are
1663 * heapified. Anything past the first run will always be
1664 * quicksorted even when replacement selection is initially
1665 * used. (When it's never used, every tuple still takes this
1668 tuple->tupindex = HEAP_RUN_NEXT;
1669 state->memtuples[state->memtupcount++] = *tuple;
1673 * If we are over the memory limit, dump tuples till we're under.
1675 dumptuples(state, false);
1679 elog(ERROR, "invalid tuplesort state");
1685 consider_abort_common(Tuplesortstate *state)
1687 Assert(state->sortKeys[0].abbrev_converter != NULL);
1688 Assert(state->sortKeys[0].abbrev_abort != NULL);
1689 Assert(state->sortKeys[0].abbrev_full_comparator != NULL);
1692 * Check effectiveness of abbreviation optimization. Consider aborting
1693 * when still within memory limit.
1695 if (state->status == TSS_INITIAL &&
1696 state->memtupcount >= state->abbrevNext)
1698 state->abbrevNext *= 2;
1701 * Check opclass-supplied abbreviation abort routine. It may indicate
1702 * that abbreviation should not proceed.
1704 if (!state->sortKeys->abbrev_abort(state->memtupcount,
1709 * Finally, restore authoritative comparator, and indicate that
1710 * abbreviation is not in play by setting abbrev_converter to NULL
1712 state->sortKeys[0].comparator = state->sortKeys[0].abbrev_full_comparator;
1713 state->sortKeys[0].abbrev_converter = NULL;
1714 /* Not strictly necessary, but be tidy */
1715 state->sortKeys[0].abbrev_abort = NULL;
1716 state->sortKeys[0].abbrev_full_comparator = NULL;
1718 /* Give up - expect original pass-by-value representation */
1726 * All tuples have been provided; finish the sort.
1729 tuplesort_performsort(Tuplesortstate *state)
1731 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1735 elog(LOG, "performsort starting: %s",
1736 pg_rusage_show(&state->ru_start));
1739 switch (state->status)
1744 * We were able to accumulate all the tuples within the allowed
1745 * amount of memory. Just qsort 'em and we're done.
1747 tuplesort_sort_memtuples(state);
1749 state->eof_reached = false;
1750 state->markpos_offset = 0;
1751 state->markpos_eof = false;
1752 state->status = TSS_SORTEDINMEM;
1758 * We were able to accumulate all the tuples required for output
1759 * in memory, using a heap to eliminate excess tuples. Now we
1760 * have to transform the heap to a properly-sorted array.
1762 sort_bounded_heap(state);
1764 state->eof_reached = false;
1765 state->markpos_offset = 0;
1766 state->markpos_eof = false;
1767 state->status = TSS_SORTEDINMEM;
1773 * Finish tape-based sort. First, flush all tuples remaining in
1774 * memory out to tape; then merge until we have a single remaining
1775 * run (or, if !randomAccess, one run per tape). Note that
1776 * mergeruns sets the correct state->status.
1778 dumptuples(state, true);
1780 state->eof_reached = false;
1781 state->markpos_block = 0L;
1782 state->markpos_offset = 0;
1783 state->markpos_eof = false;
1787 elog(ERROR, "invalid tuplesort state");
1794 if (state->status == TSS_FINALMERGE)
1795 elog(LOG, "performsort done (except %d-way final merge): %s",
1797 pg_rusage_show(&state->ru_start));
1799 elog(LOG, "performsort done: %s",
1800 pg_rusage_show(&state->ru_start));
1804 MemoryContextSwitchTo(oldcontext);
1808 * Internal routine to fetch the next tuple in either forward or back
1809 * direction into *stup. Returns FALSE if no more tuples.
1810 * If *should_free is set, the caller must pfree stup.tuple when done with it.
1811 * Otherwise, caller should not use tuple following next call here.
1814 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
1815 SortTuple *stup, bool *should_free)
1817 unsigned int tuplen;
1819 switch (state->status)
1821 case TSS_SORTEDINMEM:
1822 Assert(forward || state->randomAccess);
1823 Assert(!state->batchUsed);
1824 *should_free = false;
1827 if (state->current < state->memtupcount)
1829 *stup = state->memtuples[state->current++];
1832 state->eof_reached = true;
1835 * Complain if caller tries to retrieve more tuples than
1836 * originally asked for in a bounded sort. This is because
1837 * returning EOF here might be the wrong thing.
1839 if (state->bounded && state->current >= state->bound)
1840 elog(ERROR, "retrieved too many tuples in a bounded sort");
1846 if (state->current <= 0)
1850 * if all tuples are fetched already then we return last
1851 * tuple, else - tuple before last returned.
1853 if (state->eof_reached)
1854 state->eof_reached = false;
1857 state->current--; /* last returned tuple */
1858 if (state->current <= 0)
1861 *stup = state->memtuples[state->current - 1];
1866 case TSS_SORTEDONTAPE:
1867 Assert(forward || state->randomAccess);
1868 Assert(!state->batchUsed);
1869 *should_free = true;
1872 if (state->eof_reached)
1874 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1876 READTUP(state, stup, state->result_tape, tuplen);
1881 state->eof_reached = true;
1889 * if all tuples are fetched already then we return last tuple,
1890 * else - tuple before last returned.
1892 if (state->eof_reached)
1895 * Seek position is pointing just past the zero tuplen at the
1896 * end of file; back up to fetch last tuple's ending length
1897 * word. If seek fails we must have a completely empty file.
1899 if (!LogicalTapeBackspace(state->tapeset,
1901 2 * sizeof(unsigned int)))
1903 state->eof_reached = false;
1908 * Back up and fetch previously-returned tuple's ending length
1909 * word. If seek fails, assume we are at start of file.
1911 if (!LogicalTapeBackspace(state->tapeset,
1913 sizeof(unsigned int)))
1915 tuplen = getlen(state, state->result_tape, false);
1918 * Back up to get ending length word of tuple before it.
1920 if (!LogicalTapeBackspace(state->tapeset,
1922 tuplen + 2 * sizeof(unsigned int)))
1925 * If that fails, presumably the prev tuple is the first
1926 * in the file. Back up so that it becomes next to read
1927 * in forward direction (not obviously right, but that is
1928 * what in-memory case does).
1930 if (!LogicalTapeBackspace(state->tapeset,
1932 tuplen + sizeof(unsigned int)))
1933 elog(ERROR, "bogus tuple length in backward scan");
1938 tuplen = getlen(state, state->result_tape, false);
1941 * Now we have the length of the prior tuple, back up and read it.
1942 * Note: READTUP expects we are positioned after the initial
1943 * length word of the tuple, so back up to that point.
1945 if (!LogicalTapeBackspace(state->tapeset,
1948 elog(ERROR, "bogus tuple length in backward scan");
1949 READTUP(state, stup, state->result_tape, tuplen);
1952 case TSS_FINALMERGE:
1954 Assert(state->batchUsed || !state->tuples);
1955 /* For now, assume tuple is stored in tape's batch memory */
1956 *should_free = false;
1959 * This code should match the inner loop of mergeonerun().
1961 if (state->memtupcount > 0)
1963 int srcTape = state->memtuples[0].tupindex;
1968 * Returned tuple is still counted in our memory space most
1969 * of the time. See mergebatchone() for discussion of why
1970 * caller may occasionally be required to free returned
1971 * tuple, and how preread memory is managed with regard to
1972 * edge cases more generally.
1974 *stup = state->memtuples[0];
1975 tuplesort_heap_siftup(state, false);
1976 if ((tupIndex = state->mergenext[srcTape]) == 0)
1979 * out of preloaded data on this tape, try to read more
1981 * Unlike mergeonerun(), we only preload from the single
1982 * tape that's run dry, though not before preparing its
1983 * batch memory for a new round of sequential consumption.
1984 * See mergepreread() comments.
1986 if (state->batchUsed)
1987 mergebatchone(state, srcTape, stup, should_free);
1989 mergeprereadone(state, srcTape);
1992 * if still no data, we've reached end of run on this tape
1994 if ((tupIndex = state->mergenext[srcTape]) == 0)
1996 /* Free tape's buffer, avoiding dangling pointer */
1997 if (state->batchUsed)
1998 mergebatchfreetape(state, srcTape, stup, should_free);
2002 /* pull next preread tuple from list, insert in heap */
2003 newtup = &state->memtuples[tupIndex];
2004 state->mergenext[srcTape] = newtup->tupindex;
2005 if (state->mergenext[srcTape] == 0)
2006 state->mergelast[srcTape] = 0;
2007 tuplesort_heap_insert(state, newtup, srcTape, false);
2008 /* put the now-unused memtuples entry on the freelist */
2009 newtup->tupindex = state->mergefreelist;
2010 state->mergefreelist = tupIndex;
2011 state->mergeavailslots[srcTape]++;
2017 elog(ERROR, "invalid tuplesort state");
2018 return false; /* keep compiler quiet */
2023 * Fetch the next tuple in either forward or back direction.
2024 * If successful, put tuple in slot and return TRUE; else, clear the slot
2027 * Caller may optionally be passed back abbreviated value (on TRUE return
2028 * value) when abbreviation was used, which can be used to cheaply avoid
2029 * equality checks that might otherwise be required. Caller can safely make a
2030 * determination of "non-equal tuple" based on simple binary inequality. A
2031 * NULL value in leading attribute will set abbreviated value to zeroed
2032 * representation, which caller may rely on in abbreviated inequality check.
2035 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
2036 TupleTableSlot *slot, Datum *abbrev)
2038 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2042 if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
2045 MemoryContextSwitchTo(oldcontext);
2049 /* Record abbreviated key for caller */
2050 if (state->sortKeys->abbrev_converter && abbrev)
2051 *abbrev = stup.datum1;
2053 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
2058 ExecClearTuple(slot);
2064 * Fetch the next tuple in either forward or back direction.
2065 * Returns NULL if no more tuples. If *should_free is set, the
2066 * caller must pfree the returned tuple when done with it.
2067 * If it is not set, caller should not use tuple following next
2071 tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
2073 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2076 if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
2079 MemoryContextSwitchTo(oldcontext);
2085 * Fetch the next index tuple in either forward or back direction.
2086 * Returns NULL if no more tuples. If *should_free is set, the
2087 * caller must pfree the returned tuple when done with it.
2088 * If it is not set, caller should not use tuple following next
2092 tuplesort_getindextuple(Tuplesortstate *state, bool forward,
2095 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2098 if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
2101 MemoryContextSwitchTo(oldcontext);
2103 return (IndexTuple) stup.tuple;
2107 * Fetch the next Datum in either forward or back direction.
2108 * Returns FALSE if no more datums.
2110 * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
2111 * and is now owned by the caller.
2113 * Caller may optionally be passed back abbreviated value (on TRUE return
2114 * value) when abbreviation was used, which can be used to cheaply avoid
2115 * equality checks that might otherwise be required. Caller can safely make a
2116 * determination of "non-equal tuple" based on simple binary inequality. A
2117 * NULL value will have a zeroed abbreviated value representation, which caller
2118 * may rely on in abbreviated inequality check.
2121 tuplesort_getdatum(Tuplesortstate *state, bool forward,
2122 Datum *val, bool *isNull, Datum *abbrev)
2124 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2128 if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
2130 MemoryContextSwitchTo(oldcontext);
2134 /* Record abbreviated key for caller */
2135 if (state->sortKeys->abbrev_converter && abbrev)
2136 *abbrev = stup.datum1;
2138 if (stup.isnull1 || !state->tuples)
2141 *isNull = stup.isnull1;
2145 /* use stup.tuple because stup.datum1 may be an abbreviation */
2148 *val = PointerGetDatum(stup.tuple);
2150 *val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen);
2154 MemoryContextSwitchTo(oldcontext);
2160 * Advance over N tuples in either forward or back direction,
2161 * without returning any data. N==0 is a no-op.
2162 * Returns TRUE if successful, FALSE if ran out of tuples.
2165 tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
2167 MemoryContext oldcontext;
2170 * We don't actually support backwards skip yet, because no callers need
2171 * it. The API is designed to allow for that later, though.
2174 Assert(ntuples >= 0);
2176 switch (state->status)
2178 case TSS_SORTEDINMEM:
2179 if (state->memtupcount - state->current >= ntuples)
2181 state->current += ntuples;
2184 state->current = state->memtupcount;
2185 state->eof_reached = true;
2188 * Complain if caller tries to retrieve more tuples than
2189 * originally asked for in a bounded sort. This is because
2190 * returning EOF here might be the wrong thing.
2192 if (state->bounded && state->current >= state->bound)
2193 elog(ERROR, "retrieved too many tuples in a bounded sort");
2197 case TSS_SORTEDONTAPE:
2198 case TSS_FINALMERGE:
2201 * We could probably optimize these cases better, but for now it's
2202 * not worth the trouble.
2204 oldcontext = MemoryContextSwitchTo(state->sortcontext);
2205 while (ntuples-- > 0)
2210 if (!tuplesort_gettuple_common(state, forward,
2211 &stup, &should_free))
2213 MemoryContextSwitchTo(oldcontext);
2216 if (should_free && stup.tuple)
2218 CHECK_FOR_INTERRUPTS();
2220 MemoryContextSwitchTo(oldcontext);
2224 elog(ERROR, "invalid tuplesort state");
2225 return false; /* keep compiler quiet */
2230 * tuplesort_merge_order - report merge order we'll use for given memory
2231 * (note: "merge order" just means the number of input tapes in the merge).
2233 * This is exported for use by the planner. allowedMem is in bytes.
2236 tuplesort_merge_order(int64 allowedMem)
2241 * We need one tape for each merge input, plus another one for the output,
2242 * and each of these tapes needs buffer space. In addition we want
2243 * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
2246 * Note: you might be thinking we need to account for the memtuples[]
2247 * array in this calculation, but we effectively treat that as part of the
2248 * MERGE_BUFFER_SIZE workspace.
2250 mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
2251 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
2253 /* Even in minimum memory, use at least a MINORDER merge */
2254 mOrder = Max(mOrder, MINORDER);
2260 * useselection - determine algorithm to use to sort first run.
2262 * It can sometimes be useful to use the replacement selection algorithm if it
2263 * results in one large run, and there is little available workMem. See
2264 * remarks on RUN_SECOND optimization within dumptuples().
2267 useselection(Tuplesortstate *state)
2270 * memtupsize might be noticeably higher than memtupcount here in atypical
2271 * cases. It seems slightly preferable to not allow recent outliers to
2272 * impact this determination. Note that caller's trace_sort output reports
2273 * memtupcount instead.
2275 if (state->memtupsize <= replacement_sort_tuples)
2282 * inittapes - initialize for tape sorting.
2284 * This is called only if we have found we don't have room to sort in memory.
2287 inittapes(Tuplesortstate *state)
2293 /* Compute number of tapes to use: merge order plus 1 */
2294 maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
2297 * We must have at least 2*maxTapes slots in the memtuples[] array, else
2298 * we'd not have room for merge heap plus preread. It seems unlikely that
2299 * this case would ever occur, but be safe.
2301 maxTapes = Min(maxTapes, state->memtupsize / 2);
2303 state->maxTapes = maxTapes;
2304 state->tapeRange = maxTapes - 1;
2308 elog(LOG, "switching to external sort with %d tapes: %s",
2309 maxTapes, pg_rusage_show(&state->ru_start));
2313 * Decrease availMem to reflect the space needed for tape buffers; but
2314 * don't decrease it to the point that we have no room for tuples. (That
2315 * case is only likely to occur if sorting pass-by-value Datums; in all
2316 * other scenarios the memtuples[] array is unlikely to occupy more than
2317 * half of allowedMem. In the pass-by-value case it's not important to
2318 * account for tuple space, so we don't care if LACKMEM becomes
2321 tapeSpace = (int64) maxTapes *TAPE_BUFFER_OVERHEAD;
2323 if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
2324 USEMEM(state, tapeSpace);
2327 * Make sure that the temp file(s) underlying the tape set are created in
2328 * suitable temp tablespaces.
2330 PrepareTempTablespaces();
2333 * Create the tape set and allocate the per-tape data arrays.
2335 state->tapeset = LogicalTapeSetCreate(maxTapes);
2337 state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
2338 state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
2339 state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
2340 state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
2341 state->mergeavailmem = (int64 *) palloc0(maxTapes * sizeof(int64));
2342 state->mergetuples = (char **) palloc0(maxTapes * sizeof(char *));
2343 state->mergecurrent = (char **) palloc0(maxTapes * sizeof(char *));
2344 state->mergetail = (char **) palloc0(maxTapes * sizeof(char *));
2345 state->mergeoverflow = (char **) palloc0(maxTapes * sizeof(char *));
2346 state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
2347 state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
2348 state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
2349 state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
2352 * Give replacement selection a try based on user setting. There will
2353 * be a switch to a simple hybrid sort-merge strategy after the first
2354 * run (iff we could not output one long run).
2356 state->replaceActive = useselection(state);
2358 if (state->replaceActive)
2361 * Convert the unsorted contents of memtuples[] into a heap. Each
2362 * tuple is marked as belonging to run number zero.
2364 * NOTE: we pass false for checkIndex since there's no point in
2365 * comparing indexes in this step, even though we do intend the
2366 * indexes to be part of the sort key...
2368 int ntuples = state->memtupcount;
2372 elog(LOG, "replacement selection will sort %d first run tuples",
2373 state->memtupcount);
2375 state->memtupcount = 0; /* make the heap empty */
2377 for (j = 0; j < ntuples; j++)
2379 /* Must copy source tuple to avoid possible overwrite */
2380 SortTuple stup = state->memtuples[j];
2382 tuplesort_heap_insert(state, &stup, 0, false);
2384 Assert(state->memtupcount == ntuples);
2387 state->currentRun = RUN_FIRST;
2390 * Initialize variables of Algorithm D (step D1).
2392 for (j = 0; j < maxTapes; j++)
2394 state->tp_fib[j] = 1;
2395 state->tp_runs[j] = 0;
2396 state->tp_dummy[j] = 1;
2397 state->tp_tapenum[j] = j;
2399 state->tp_fib[state->tapeRange] = 0;
2400 state->tp_dummy[state->tapeRange] = 0;
2403 state->destTape = 0;
2405 state->status = TSS_BUILDRUNS;
2409 * selectnewtape -- select new tape for new initial run.
2411 * This is called after finishing a run when we know another run
2412 * must be started. This implements steps D3, D4 of Algorithm D.
2415 selectnewtape(Tuplesortstate *state)
2420 /* Step D3: advance j (destTape) */
2421 if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
2426 if (state->tp_dummy[state->destTape] != 0)
2428 state->destTape = 0;
2432 /* Step D4: increase level */
2434 a = state->tp_fib[0];
2435 for (j = 0; j < state->tapeRange; j++)
2437 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
2438 state->tp_fib[j] = a + state->tp_fib[j + 1];
2440 state->destTape = 0;
2444 * mergeruns -- merge all the completed initial runs.
2446 * This implements steps D5, D6 of Algorithm D. All input data has
2447 * already been written to initial runs on tape (see dumptuples).
2450 mergeruns(Tuplesortstate *state)
2457 Assert(state->status == TSS_BUILDRUNS);
2458 Assert(state->memtupcount == 0);
2460 if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
2463 * If there are multiple runs to be merged, when we go to read back
2464 * tuples from disk, abbreviated keys will not have been stored, and
2465 * we don't care to regenerate them. Disable abbreviation from this
2468 state->sortKeys->abbrev_converter = NULL;
2469 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
2471 /* Not strictly necessary, but be tidy */
2472 state->sortKeys->abbrev_abort = NULL;
2473 state->sortKeys->abbrev_full_comparator = NULL;
2477 * If we produced only one initial run (quite likely if the total data
2478 * volume is between 1X and 2X workMem when replacement selection is used,
2479 * but something we particular count on when input is presorted), we can
2480 * just use that tape as the finished output, rather than doing a useless
2481 * merge. (This obvious optimization is not in Knuth's algorithm.)
2483 if (state->currentRun == RUN_SECOND)
2485 state->result_tape = state->tp_tapenum[state->destTape];
2486 /* must freeze and rewind the finished output tape */
2487 LogicalTapeFreeze(state->tapeset, state->result_tape);
2488 state->status = TSS_SORTEDONTAPE;
2492 /* End of step D2: rewind all output tapes to prepare for merging */
2493 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2494 LogicalTapeRewind(state->tapeset, tapenum, false);
2499 * At this point we know that tape[T] is empty. If there's just one
2500 * (real or dummy) run left on each input tape, then only one merge
2501 * pass remains. If we don't have to produce a materialized sorted
2502 * tape, we can stop at this point and do the final merge on-the-fly.
2504 if (!state->randomAccess)
2506 bool allOneRun = true;
2508 Assert(state->tp_runs[state->tapeRange] == 0);
2509 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2511 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
2519 /* Tell logtape.c we won't be writing anymore */
2520 LogicalTapeSetForgetFreeSpace(state->tapeset);
2521 /* Initialize for the final merge pass */
2522 beginmerge(state, state->tuples);
2523 state->status = TSS_FINALMERGE;
2528 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
2529 while (state->tp_runs[state->tapeRange - 1] ||
2530 state->tp_dummy[state->tapeRange - 1])
2532 bool allDummy = true;
2534 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2536 if (state->tp_dummy[tapenum] == 0)
2545 state->tp_dummy[state->tapeRange]++;
2546 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2547 state->tp_dummy[tapenum]--;
2553 /* Step D6: decrease level */
2554 if (--state->Level == 0)
2556 /* rewind output tape T to use as new input */
2557 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange],
2559 /* rewind used-up input tape P, and prepare it for write pass */
2560 LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1],
2562 state->tp_runs[state->tapeRange - 1] = 0;
2565 * reassign tape units per step D6; note we no longer care about A[]
2567 svTape = state->tp_tapenum[state->tapeRange];
2568 svDummy = state->tp_dummy[state->tapeRange];
2569 svRuns = state->tp_runs[state->tapeRange];
2570 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
2572 state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
2573 state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
2574 state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
2576 state->tp_tapenum[0] = svTape;
2577 state->tp_dummy[0] = svDummy;
2578 state->tp_runs[0] = svRuns;
2582 * Done. Knuth says that the result is on TAPE[1], but since we exited
2583 * the loop without performing the last iteration of step D6, we have not
2584 * rearranged the tape unit assignment, and therefore the result is on
2585 * TAPE[T]. We need to do it this way so that we can freeze the final
2586 * output tape while rewinding it. The last iteration of step D6 would be
2587 * a waste of cycles anyway...
2589 state->result_tape = state->tp_tapenum[state->tapeRange];
2590 LogicalTapeFreeze(state->tapeset, state->result_tape);
2591 state->status = TSS_SORTEDONTAPE;
2595 * Merge one run from each input tape, except ones with dummy runs.
2597 * This is the inner loop of Algorithm D step D5. We know that the
2598 * output tape is TAPE[T].
2601 mergeonerun(Tuplesortstate *state)
2603 int destTape = state->tp_tapenum[state->tapeRange];
2611 * Start the merge by loading one tuple from each active source tape into
2612 * the heap. We can also decrease the input run/dummy run counts.
2614 beginmerge(state, false);
2617 * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2618 * out, and replacing it with next tuple from same tape (if there is
2621 while (state->memtupcount > 0)
2623 /* write the tuple to destTape */
2624 priorAvail = state->availMem;
2625 srcTape = state->memtuples[0].tupindex;
2626 WRITETUP(state, destTape, &state->memtuples[0]);
2627 /* writetup adjusted total free space, now fix per-tape space */
2628 spaceFreed = state->availMem - priorAvail;
2629 state->mergeavailmem[srcTape] += spaceFreed;
2630 /* compact the heap */
2631 tuplesort_heap_siftup(state, false);
2632 if ((tupIndex = state->mergenext[srcTape]) == 0)
2634 /* out of preloaded data on this tape, try to read more */
2635 mergepreread(state);
2636 /* if still no data, we've reached end of run on this tape */
2637 if ((tupIndex = state->mergenext[srcTape]) == 0)
2640 /* pull next preread tuple from list, insert in heap */
2641 tup = &state->memtuples[tupIndex];
2642 state->mergenext[srcTape] = tup->tupindex;
2643 if (state->mergenext[srcTape] == 0)
2644 state->mergelast[srcTape] = 0;
2645 tuplesort_heap_insert(state, tup, srcTape, false);
2646 /* put the now-unused memtuples entry on the freelist */
2647 tup->tupindex = state->mergefreelist;
2648 state->mergefreelist = tupIndex;
2649 state->mergeavailslots[srcTape]++;
2653 * Reset tuple memory. We've freed all of the tuples that we previously
2654 * allocated, but AllocSetFree will have put those chunks of memory on
2655 * particular free lists, bucketed by size class. Thus, although all of
2656 * that memory is free, it is effectively fragmented. Resetting the
2657 * context gets us out from under that problem.
2659 MemoryContextReset(state->tuplecontext);
2662 * When the heap empties, we're done. Write an end-of-run marker on the
2663 * output tape, and increment its count of real runs.
2665 markrunend(state, destTape);
2666 state->tp_runs[state->tapeRange]++;
2670 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
2671 pg_rusage_show(&state->ru_start));
2676 * beginmerge - initialize for a merge pass
2678 * We decrease the counts of real and dummy runs for each tape, and mark
2679 * which tapes contain active input runs in mergeactive[]. Then, load
2680 * as many tuples as we can from each active input tape, and finally
2681 * fill the merge heap with the first tuple from each active tape.
2683 * finalMergeBatch indicates if this is the beginning of a final on-the-fly
2684 * merge where a batched allocation of tuple memory is required.
2687 beginmerge(Tuplesortstate *state, bool finalMergeBatch)
2695 /* Heap should be empty here */
2696 Assert(state->memtupcount == 0);
2698 /* Adjust run counts and mark the active tapes */
2699 memset(state->mergeactive, 0,
2700 state->maxTapes * sizeof(*state->mergeactive));
2702 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2704 if (state->tp_dummy[tapenum] > 0)
2705 state->tp_dummy[tapenum]--;
2708 Assert(state->tp_runs[tapenum] > 0);
2709 state->tp_runs[tapenum]--;
2710 srcTape = state->tp_tapenum[tapenum];
2711 state->mergeactive[srcTape] = true;
2715 state->activeTapes = activeTapes;
2717 /* Clear merge-pass state variables */
2718 memset(state->mergenext, 0,
2719 state->maxTapes * sizeof(*state->mergenext));
2720 memset(state->mergelast, 0,
2721 state->maxTapes * sizeof(*state->mergelast));
2722 state->mergefreelist = 0; /* nothing in the freelist */
2723 state->mergefirstfree = activeTapes; /* 1st slot avail for preread */
2725 if (finalMergeBatch)
2727 /* Free outright buffers for tape never actually allocated */
2728 FREEMEM(state, (state->maxTapes - activeTapes) * TAPE_BUFFER_OVERHEAD);
2731 * Grow memtuples one last time, since the palloc() overhead no longer
2732 * incurred can make a big difference
2734 batchmemtuples(state);
2738 * Initialize space allocation to let each active input tape have an equal
2739 * share of preread space.
2741 Assert(activeTapes > 0);
2742 slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes;
2743 Assert(slotsPerTape > 0);
2744 spacePerTape = MAXALIGN_DOWN(state->availMem / activeTapes);
2745 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2747 if (state->mergeactive[srcTape])
2749 state->mergeavailslots[srcTape] = slotsPerTape;
2750 state->mergeavailmem[srcTape] = spacePerTape;
2755 * Preallocate tuple batch memory for each tape. This is the memory used
2756 * for tuples themselves (not SortTuples), so it's never used by
2757 * pass-by-value datum sorts. Memory allocation is performed here at most
2758 * once per sort, just in advance of the final on-the-fly merge step.
2760 if (finalMergeBatch)
2761 mergebatch(state, spacePerTape);
2764 * Preread as many tuples as possible (and at least one) from each active
2767 mergepreread(state);
2769 /* Load the merge heap with the first tuple from each input tape */
2770 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2772 int tupIndex = state->mergenext[srcTape];
2777 tup = &state->memtuples[tupIndex];
2778 state->mergenext[srcTape] = tup->tupindex;
2779 if (state->mergenext[srcTape] == 0)
2780 state->mergelast[srcTape] = 0;
2781 tuplesort_heap_insert(state, tup, srcTape, false);
2782 /* put the now-unused memtuples entry on the freelist */
2783 tup->tupindex = state->mergefreelist;
2784 state->mergefreelist = tupIndex;
2785 state->mergeavailslots[srcTape]++;
2788 if (trace_sort && finalMergeBatch)
2790 int64 perTapeKB = (spacePerTape + 1023) / 1024;
2795 * Report how effective batchmemtuples() was in balancing
2796 * the number of slots against the need for memory for the
2797 * underlying tuples (e.g. IndexTuples). The big preread of
2798 * all tapes when switching to FINALMERGE state should be
2799 * fairly representative of memory utilization during the
2800 * final merge step, and in any case is the only point at
2801 * which all tapes are guaranteed to have depleted either
2802 * their batch memory allowance or slot allowance. Ideally,
2803 * both will be completely depleted for every tape by now.
2805 usedSpaceKB = (state->mergecurrent[srcTape] -
2806 state->mergetuples[srcTape] + 1023) / 1024;
2807 usedSlots = slotsPerTape - state->mergeavailslots[srcTape];
2809 elog(LOG, "tape %d initially used " INT64_FORMAT " KB of "
2810 INT64_FORMAT " KB batch (%2.3f) and %d out of %d slots "
2812 usedSpaceKB, perTapeKB,
2813 (double) usedSpaceKB / (double) perTapeKB,
2814 usedSlots, slotsPerTape,
2815 (double) usedSlots / (double) slotsPerTape);
2823 * batchmemtuples - grow memtuples without palloc overhead
2825 * When called, availMem should be approximately the amount of memory we'd
2826 * require to allocate memtupsize - memtupcount tuples (not SortTuples/slots)
2827 * that were allocated with palloc() overhead, and in doing so use up all
2828 * allocated slots. However, though slots and tuple memory is in balance
2829 * following the last grow_memtuples() call, that's predicated on the observed
2830 * average tuple size for the "final" grow_memtuples() call, which includes
2831 * palloc overhead. During the final merge pass, where we will arrange to
2832 * squeeze out the palloc overhead, we might need more slots in the memtuples
2835 * To make that happen, arrange for the amount of remaining memory to be
2836 * exactly equal to the palloc overhead multiplied by the current size of
2837 * the memtuples array, force the grow_memtuples flag back to true (it's
2838 * probably but not necessarily false on entry to this routine), and then
2839 * call grow_memtuples. This simulates loading enough tuples to fill the
2840 * whole memtuples array and then having some space left over because of the
2841 * elided palloc overhead. We expect that grow_memtuples() will conclude that
2842 * it can't double the size of the memtuples array but that it can increase
2843 * it by some percentage; but if it does decide to double it, that just means
2844 * that we've never managed to use many slots in the memtuples array, in which
2845 * case doubling it shouldn't hurt anything anyway.
2848 batchmemtuples(Tuplesortstate *state)
2851 int64 availMemLessRefund;
2852 int memtupsize = state->memtupsize;
2854 /* For simplicity, assume no memtuples are actually currently counted */
2855 Assert(state->memtupcount == 0);
2858 * Refund STANDARDCHUNKHEADERSIZE per tuple.
2860 * This sometimes fails to make memory use perfectly balanced, but it
2861 * should never make the situation worse. Note that Assert-enabled builds
2862 * get a larger refund, due to a varying STANDARDCHUNKHEADERSIZE.
2864 refund = memtupsize * STANDARDCHUNKHEADERSIZE;
2865 availMemLessRefund = state->availMem - refund;
2868 * To establish balanced memory use after refunding palloc overhead,
2869 * temporarily have our accounting indicate that we've allocated all
2870 * memory we're allowed to less that refund, and call grow_memtuples()
2871 * to have it increase the number of slots.
2873 state->growmemtuples = true;
2874 USEMEM(state, availMemLessRefund);
2875 (void) grow_memtuples(state);
2876 /* Should not matter, but be tidy */
2877 FREEMEM(state, availMemLessRefund);
2878 state->growmemtuples = false;
2883 Size OldKb = (memtupsize * sizeof(SortTuple) + 1023) / 1024;
2884 Size NewKb = (state->memtupsize * sizeof(SortTuple) + 1023) / 1024;
2886 elog(LOG, "grew memtuples %1.2fx from %d (%zu KB) to %d (%zu KB) for final merge",
2887 (double) NewKb / (double) OldKb,
2889 state->memtupsize, NewKb);
2895 * mergebatch - initialize tuple memory in batch
2897 * This allows sequential access to sorted tuples buffered in memory from
2898 * tapes/runs on disk during a final on-the-fly merge step. Note that the
2899 * memory is not used for SortTuples, but for the underlying tuples (e.g.
2902 * Note that when batch memory is used, there is a simple division of space
2903 * into large buffers (one per active tape). The conventional incremental
2904 * memory accounting (calling USEMEM() and FREEMEM()) is abandoned. Instead,
2905 * when each tape's memory budget is exceeded, a retail palloc() "overflow" is
2906 * performed, which is then immediately detected in a way that is analogous to
2907 * LACKMEM(). This keeps each tape's use of memory fair, which is always a
2911 mergebatch(Tuplesortstate *state, int64 spacePerTape)
2915 Assert(state->activeTapes > 0);
2916 Assert(state->tuples);
2919 * For the purposes of tuplesort's memory accounting, the batch allocation
2920 * is special, and regular memory accounting through USEMEM() calls is
2921 * abandoned (see mergeprereadone()).
2923 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2927 if (!state->mergeactive[srcTape])
2930 /* Allocate buffer for each active tape */
2931 mergetuples = MemoryContextAllocHuge(state->tuplecontext,
2934 /* Initialize state for tape */
2935 state->mergetuples[srcTape] = mergetuples;
2936 state->mergecurrent[srcTape] = mergetuples;
2937 state->mergetail[srcTape] = mergetuples;
2938 state->mergeoverflow[srcTape] = NULL;
2941 state->batchUsed = true;
2942 state->spacePerTape = spacePerTape;
2946 * mergebatchone - prepare batch memory for one merge input tape
2948 * This is called following the exhaustion of preread tuples for one input
2949 * tape. All that actually occurs is that the state for the source tape is
2950 * reset to indicate that all memory may be reused.
2952 * This routine must deal with fixing up the tuple that is about to be returned
2953 * to the client, due to "overflow" allocations.
2956 mergebatchone(Tuplesortstate *state, int srcTape, SortTuple *rtup,
2959 Assert(state->batchUsed);
2962 * Tuple about to be returned to caller ("stup") is final preread tuple
2963 * from tape, just removed from the top of the heap. Special steps around
2964 * memory management must be performed for that tuple, to make sure it
2965 * isn't overwritten early.
2967 if (!state->mergeoverflow[srcTape])
2972 * Mark tuple buffer range for reuse, but be careful to move final,
2973 * tail tuple to start of space for next run so that it's available
2974 * to caller when stup is returned, and remains available at least
2975 * until the next tuple is requested.
2977 tupLen = state->mergecurrent[srcTape] - state->mergetail[srcTape];
2978 state->mergecurrent[srcTape] = state->mergetuples[srcTape];
2979 memmove(state->mergecurrent[srcTape], state->mergetail[srcTape],
2982 /* Make SortTuple at top of the merge heap point to new tuple */
2983 rtup->tuple = (void *) state->mergecurrent[srcTape];
2985 state->mergetail[srcTape] = state->mergecurrent[srcTape];
2986 state->mergecurrent[srcTape] += tupLen;
2991 * Handle an "overflow" retail palloc.
2993 * This is needed when we run out of tuple memory for the tape.
2995 state->mergecurrent[srcTape] = state->mergetuples[srcTape];
2996 state->mergetail[srcTape] = state->mergetuples[srcTape];
3000 Assert(rtup->tuple == (void *) state->mergeoverflow[srcTape]);
3001 /* Caller should free palloc'd tuple */
3002 *should_free = true;
3004 state->mergeoverflow[srcTape] = NULL;
3009 * mergebatchfreetape - handle final clean-up for batch memory once tape is
3010 * about to become exhausted
3012 * All tuples are returned from tape, but a single final tuple, *rtup, is to be
3013 * passed back to caller. Free tape's batch allocation buffer while ensuring
3014 * that the final tuple is managed appropriately.
3017 mergebatchfreetape(Tuplesortstate *state, int srcTape, SortTuple *rtup,
3020 Assert(state->batchUsed);
3021 Assert(state->status == TSS_FINALMERGE);
3024 * Tuple may or may not already be an overflow allocation from
3027 if (!*should_free && rtup->tuple)
3030 * Final tuple still in tape's batch allocation.
3032 * Return palloc()'d copy to caller, and have it freed in a similar
3033 * manner to overflow allocation. Otherwise, we'd free batch memory
3034 * and pass back a pointer to garbage. Note that we deliberately
3035 * allocate this in the parent tuplesort context, to be on the safe
3039 void *oldTuple = rtup->tuple;
3041 tuplen = state->mergecurrent[srcTape] - state->mergetail[srcTape];
3042 rtup->tuple = MemoryContextAlloc(state->sortcontext, tuplen);
3043 memcpy(rtup->tuple, oldTuple, tuplen);
3044 *should_free = true;
3047 /* Free spacePerTape-sized buffer */
3048 pfree(state->mergetuples[srcTape]);
3052 * mergebatchalloc - allocate memory for one tuple using a batch memory
3053 * "logical allocation".
3055 * This is used for the final on-the-fly merge phase only. READTUP() routines
3056 * receive memory from here in place of palloc() and USEMEM() calls.
3058 * Tuple tapenum is passed, ensuring each tape's tuples are stored in sorted,
3059 * contiguous order (while allowing safe reuse of memory made available to
3060 * each tape). This maximizes locality of access as tuples are returned by
3063 * Caller must not subsequently attempt to free memory returned here. In
3064 * general, only mergebatch* functions know about how memory returned from
3065 * here should be freed, and this function's caller must ensure that batch
3066 * memory management code will definitely have the opportunity to do the right
3067 * thing during the final on-the-fly merge.
3070 mergebatchalloc(Tuplesortstate *state, int tapenum, Size tuplen)
3072 Size reserve_tuplen = MAXALIGN(tuplen);
3075 /* Should overflow at most once before mergebatchone() call: */
3076 Assert(state->mergeoverflow[tapenum] == NULL);
3077 Assert(state->batchUsed);
3079 /* It should be possible to use precisely spacePerTape memory at once */
3080 if (state->mergecurrent[tapenum] + reserve_tuplen <=
3081 state->mergetuples[tapenum] + state->spacePerTape)
3084 * Usual case -- caller is returned pointer into its tape's buffer, and
3085 * an offset from that point is recorded as where tape has consumed up
3086 * to for current round of preloading.
3088 ret = state->mergetail[tapenum] = state->mergecurrent[tapenum];
3089 state->mergecurrent[tapenum] += reserve_tuplen;
3094 * Allocate memory, and record as tape's overflow allocation. This
3095 * will be detected quickly, in a similar fashion to a LACKMEM()
3096 * condition, and should not happen again before a new round of
3097 * preloading for caller's tape. Note that we deliberately allocate
3098 * this in the parent tuplesort context, to be on the safe side.
3100 * Sometimes, this does not happen because merging runs out of slots
3101 * before running out of memory.
3103 ret = state->mergeoverflow[tapenum] =
3104 MemoryContextAlloc(state->sortcontext, tuplen);
3111 * mergepreread - load tuples from merge input tapes
3113 * This routine exists to improve sequentiality of reads during a merge pass,
3114 * as explained in the header comments of this file. Load tuples from each
3115 * active source tape until the tape's run is exhausted or it has used up
3116 * its fair share of available memory. In any case, we guarantee that there
3117 * is at least one preread tuple available from each unexhausted input tape.
3119 * We invoke this routine at the start of a merge pass for initial load,
3120 * and then whenever any tape's preread data runs out. Note that we load
3121 * as much data as possible from all tapes, not just the one that ran out.
3122 * This is because logtape.c works best with a usage pattern that alternates
3123 * between reading a lot of data and writing a lot of data, so whenever we
3124 * are forced to read, we should fill working memory completely.
3126 * In FINALMERGE state, we *don't* use this routine, but instead just preread
3127 * from the single tape that ran dry. There's no read/write alternation in
3128 * that state and so no point in scanning through all the tapes to fix one.
3129 * (Moreover, there may be quite a lot of inactive tapes in that state, since
3130 * we might have had many fewer runs than tapes. In a regular tape-to-tape
3131 * merge we can expect most of the tapes to be active. Plus, only
3132 * FINALMERGE state has to consider memory management for a batch
3136 mergepreread(Tuplesortstate *state)
3140 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
3141 mergeprereadone(state, srcTape);
3145 * mergeprereadone - load tuples from one merge input tape
3147 * Read tuples from the specified tape until it has used up its free memory
3148 * or array slots; but ensure that we have at least one tuple, if any are
3152 mergeprereadone(Tuplesortstate *state, int srcTape)
3154 unsigned int tuplen;
3160 if (!state->mergeactive[srcTape])
3161 return; /* tape's run is already exhausted */
3164 * Manage per-tape availMem. Only actually matters when batch memory not
3167 priorAvail = state->availMem;
3168 state->availMem = state->mergeavailmem[srcTape];
3171 * When batch memory is used if final on-the-fly merge, only mergeoverflow
3172 * test is relevant; otherwise, only LACKMEM() test is relevant.
3174 while ((state->mergeavailslots[srcTape] > 0 &&
3175 state->mergeoverflow[srcTape] == NULL && !LACKMEM(state)) ||
3176 state->mergenext[srcTape] == 0)
3178 /* read next tuple, if any */
3179 if ((tuplen = getlen(state, srcTape, true)) == 0)
3181 state->mergeactive[srcTape] = false;
3184 READTUP(state, &stup, srcTape, tuplen);
3185 /* find a free slot in memtuples[] for it */
3186 tupIndex = state->mergefreelist;
3188 state->mergefreelist = state->memtuples[tupIndex].tupindex;
3191 tupIndex = state->mergefirstfree++;
3192 Assert(tupIndex < state->memtupsize);
3194 state->mergeavailslots[srcTape]--;
3195 /* store tuple, append to list for its tape */
3197 state->memtuples[tupIndex] = stup;
3198 if (state->mergelast[srcTape])
3199 state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
3201 state->mergenext[srcTape] = tupIndex;
3202 state->mergelast[srcTape] = tupIndex;
3204 /* update per-tape and global availmem counts */
3205 spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
3206 state->mergeavailmem[srcTape] = state->availMem;
3207 state->availMem = priorAvail - spaceUsed;
3211 * dumptuples - remove tuples from memtuples and write to tape
3213 * This is used during initial-run building, but not during merging.
3215 * When alltuples = false and replacement selection is still active, dump
3216 * only enough tuples to get under the availMem limit (and leave at least
3217 * one tuple in memtuples, since puttuple will then assume it is a heap that
3218 * has a tuple to compare to). We always insist there be at least one free
3219 * slot in the memtuples[] array.
3221 * When alltuples = true, dump everything currently in memory. (This
3222 * case is only used at end of input data, although in practice only the
3223 * first run could fail to dump all tuples when we LACKMEM(), and only
3224 * when replacement selection is active.)
3226 * If, when replacement selection is active, we see that the tuple run
3227 * number at the top of the heap has changed, start a new run. This must be
3228 * the first run, because replacement selection is always abandoned for all
3232 dumptuples(Tuplesortstate *state, bool alltuples)
3235 (LACKMEM(state) && state->memtupcount > 1) ||
3236 state->memtupcount >= state->memtupsize)
3238 if (state->replaceActive)
3241 * Still holding out for a case favorable to replacement selection.
3242 * Still incrementally spilling using heap.
3244 * Dump the heap's frontmost entry, and sift up to remove it from
3247 Assert(state->memtupcount > 0);
3248 WRITETUP(state, state->tp_tapenum[state->destTape],
3249 &state->memtuples[0]);
3250 tuplesort_heap_siftup(state, true);
3255 * Once committed to quicksorting runs, never incrementally
3258 dumpbatch(state, alltuples);
3263 * If top run number has changed, we've finished the current run
3264 * (this can only be the first run), and will no longer spill
3267 if (state->memtupcount == 0 ||
3268 state->memtuples[0].tupindex == HEAP_RUN_NEXT)
3270 markrunend(state, state->tp_tapenum[state->destTape]);
3271 Assert(state->currentRun == RUN_FIRST);
3272 state->currentRun++;
3273 state->tp_runs[state->destTape]++;
3274 state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
3278 elog(LOG, "finished incrementally writing %s run %d to tape %d: %s",
3279 (state->memtupcount == 0) ? "only" : "first",
3280 state->currentRun, state->destTape,
3281 pg_rusage_show(&state->ru_start));
3284 * Done if heap is empty, which is possible when there is only one
3287 Assert(state->currentRun == RUN_SECOND);
3288 if (state->memtupcount == 0)
3291 * Replacement selection best case; no final merge required,
3292 * because there was only one initial run (second run has no
3293 * tuples). See RUN_SECOND case in mergeruns().
3299 * Abandon replacement selection for second run (as well as any
3302 state->replaceActive = false;
3305 * First tuple of next run should not be heapified, and so will
3306 * bear placeholder run number. In practice this must actually be
3307 * the second run, which just became the currentRun, so we're
3308 * clear to quicksort and dump the tuples in batch next time
3309 * memtuples becomes full.
3311 Assert(state->memtuples[0].tupindex == HEAP_RUN_NEXT);
3312 selectnewtape(state);
3318 * dumpbatch - sort and dump all memtuples, forming one run on tape
3320 * Second or subsequent runs are never heapified by this module (although
3321 * heapification still respects run number differences between the first and
3322 * second runs), and a heap (replacement selection priority queue) is often
3323 * avoided in the first place.
3326 dumpbatch(Tuplesortstate *state, bool alltuples)
3332 * Final call might require no sorting, in rare cases where we just so
3333 * happen to have previously LACKMEM()'d at the point where exactly all
3334 * remaining tuples are loaded into memory, just before input was
3337 * In general, short final runs are quite possible. Rather than
3338 * allowing a special case where there was a superfluous
3339 * selectnewtape() call (i.e. a call with no subsequent run actually
3340 * written to destTape), we prefer to write out a 0 tuple run.
3342 * mergepreread()/mergeprereadone() are prepared for 0 tuple runs, and
3343 * will reliably mark the tape inactive for the merge when called from
3344 * beginmerge(). This case is therefore similar to the case where
3345 * mergeonerun() finds a dummy run for the tape, and so doesn't need to
3346 * merge a run from the tape (or conceptually "merges" the dummy run,
3347 * if you prefer). According to Knuth, Algorithm D "isn't strictly
3348 * optimal" in its method of distribution and dummy run assignment;
3349 * this edge case seems very unlikely to make that appreciably worse.
3351 Assert(state->status == TSS_BUILDRUNS);
3354 * It seems unlikely that this limit will ever be exceeded, but take no
3357 if (state->currentRun == INT_MAX)
3359 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
3360 errmsg("cannot have more than %d runs for an external sort",
3363 state->currentRun++;
3367 elog(LOG, "starting quicksort of run %d: %s",
3368 state->currentRun, pg_rusage_show(&state->ru_start));
3372 * Sort all tuples accumulated within the allowed amount of memory for this
3373 * run using quicksort
3375 tuplesort_sort_memtuples(state);
3379 elog(LOG, "finished quicksort of run %d: %s",
3380 state->currentRun, pg_rusage_show(&state->ru_start));
3383 memtupwrite = state->memtupcount;
3384 for (i = 0; i < memtupwrite; i++)
3386 WRITETUP(state, state->tp_tapenum[state->destTape],
3387 &state->memtuples[i]);
3388 state->memtupcount--;
3392 * Reset tuple memory. We've freed all of the tuples that we previously
3393 * allocated. It's important to avoid fragmentation when there is a stark
3394 * change in allocation patterns due to the use of batch memory.
3395 * Fragmentation due to AllocSetFree's bucketing by size class might be
3396 * particularly bad if this step wasn't taken.
3398 MemoryContextReset(state->tuplecontext);
3400 markrunend(state, state->tp_tapenum[state->destTape]);
3401 state->tp_runs[state->destTape]++;
3402 state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
3406 elog(LOG, "finished writing run %d to tape %d: %s",
3407 state->currentRun, state->destTape,
3408 pg_rusage_show(&state->ru_start));
3412 selectnewtape(state);
3416 * tuplesort_rescan - rewind and replay the scan
3419 tuplesort_rescan(Tuplesortstate *state)
3421 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3423 Assert(state->randomAccess);
3425 switch (state->status)
3427 case TSS_SORTEDINMEM:
3429 state->eof_reached = false;
3430 state->markpos_offset = 0;
3431 state->markpos_eof = false;
3433 case TSS_SORTEDONTAPE:
3434 LogicalTapeRewind(state->tapeset,
3437 state->eof_reached = false;
3438 state->markpos_block = 0L;
3439 state->markpos_offset = 0;
3440 state->markpos_eof = false;
3443 elog(ERROR, "invalid tuplesort state");
3447 MemoryContextSwitchTo(oldcontext);
3451 * tuplesort_markpos - saves current position in the merged sort file
3454 tuplesort_markpos(Tuplesortstate *state)
3456 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3458 Assert(state->randomAccess);
3460 switch (state->status)
3462 case TSS_SORTEDINMEM:
3463 state->markpos_offset = state->current;
3464 state->markpos_eof = state->eof_reached;
3466 case TSS_SORTEDONTAPE:
3467 LogicalTapeTell(state->tapeset,
3469 &state->markpos_block,
3470 &state->markpos_offset);
3471 state->markpos_eof = state->eof_reached;
3474 elog(ERROR, "invalid tuplesort state");
3478 MemoryContextSwitchTo(oldcontext);
3482 * tuplesort_restorepos - restores current position in merged sort file to
3483 * last saved position
3486 tuplesort_restorepos(Tuplesortstate *state)
3488 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3490 Assert(state->randomAccess);
3492 switch (state->status)
3494 case TSS_SORTEDINMEM:
3495 state->current = state->markpos_offset;
3496 state->eof_reached = state->markpos_eof;
3498 case TSS_SORTEDONTAPE:
3499 if (!LogicalTapeSeek(state->tapeset,
3501 state->markpos_block,
3502 state->markpos_offset))
3503 elog(ERROR, "tuplesort_restorepos failed");
3504 state->eof_reached = state->markpos_eof;
3507 elog(ERROR, "invalid tuplesort state");
3511 MemoryContextSwitchTo(oldcontext);
3515 * tuplesort_get_stats - extract summary statistics
3517 * This can be called after tuplesort_performsort() finishes to obtain
3518 * printable summary information about how the sort was performed.
3519 * spaceUsed is measured in kilobytes.
3522 tuplesort_get_stats(Tuplesortstate *state,
3523 const char **sortMethod,
3524 const char **spaceType,
3528 * Note: it might seem we should provide both memory and disk usage for a
3529 * disk-based sort. However, the current code doesn't track memory space
3530 * accurately once we have begun to return tuples to the caller (since we
3531 * don't account for pfree's the caller is expected to do), so we cannot
3532 * rely on availMem in a disk sort. This does not seem worth the overhead
3533 * to fix. Is it worth creating an API for the memory context code to
3534 * tell us how much is actually used in sortcontext?
3538 *spaceType = "Disk";
3539 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
3543 *spaceType = "Memory";
3544 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
3547 switch (state->status)
3549 case TSS_SORTEDINMEM:
3550 if (state->boundUsed)
3551 *sortMethod = "top-N heapsort";
3553 *sortMethod = "quicksort";
3555 case TSS_SORTEDONTAPE:
3556 *sortMethod = "external sort";
3558 case TSS_FINALMERGE:
3559 *sortMethod = "external merge";
3562 *sortMethod = "still in progress";
3569 * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
3571 * Compare two SortTuples. If checkIndex is true, use the tuple index
3572 * as the front of the sort key; otherwise, no.
3574 * Note that for checkIndex callers, the heap invariant is never
3575 * maintained beyond the first run, and so there are no COMPARETUP()
3576 * calls needed to distinguish tuples in HEAP_RUN_NEXT.
3579 #define HEAPCOMPARE(tup1,tup2) \
3580 (checkIndex && ((tup1)->tupindex != (tup2)->tupindex || \
3581 (tup1)->tupindex == HEAP_RUN_NEXT) ? \
3582 ((tup1)->tupindex) - ((tup2)->tupindex) : \
3583 COMPARETUP(state, tup1, tup2))
3586 * Convert the existing unordered array of SortTuples to a bounded heap,
3587 * discarding all but the smallest "state->bound" tuples.
3589 * When working with a bounded heap, we want to keep the largest entry
3590 * at the root (array entry zero), instead of the smallest as in the normal
3591 * sort case. This allows us to discard the largest entry cheaply.
3592 * Therefore, we temporarily reverse the sort direction.
3594 * We assume that all entries in a bounded heap will always have tupindex
3595 * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
3596 * the direction of comparison for tupindexes.
3599 make_bounded_heap(Tuplesortstate *state)
3601 int tupcount = state->memtupcount;
3604 Assert(state->status == TSS_INITIAL);
3605 Assert(state->bounded);
3606 Assert(tupcount >= state->bound);
3608 /* Reverse sort direction so largest entry will be at root */
3609 reversedirection(state);
3611 state->memtupcount = 0; /* make the heap empty */
3612 for (i = 0; i < tupcount; i++)
3614 if (state->memtupcount >= state->bound &&
3615 COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
3617 /* New tuple would just get thrown out, so skip it */
3618 free_sort_tuple(state, &state->memtuples[i]);
3619 CHECK_FOR_INTERRUPTS();
3623 /* Insert next tuple into heap */
3624 /* Must copy source tuple to avoid possible overwrite */
3625 SortTuple stup = state->memtuples[i];
3627 tuplesort_heap_insert(state, &stup, 0, false);
3629 /* If heap too full, discard largest entry */
3630 if (state->memtupcount > state->bound)
3632 free_sort_tuple(state, &state->memtuples[0]);
3633 tuplesort_heap_siftup(state, false);
3638 Assert(state->memtupcount == state->bound);
3639 state->status = TSS_BOUNDED;
3643 * Convert the bounded heap to a properly-sorted array
3646 sort_bounded_heap(Tuplesortstate *state)
3648 int tupcount = state->memtupcount;
3650 Assert(state->status == TSS_BOUNDED);
3651 Assert(state->bounded);
3652 Assert(tupcount == state->bound);
3655 * We can unheapify in place because each sift-up will remove the largest
3656 * entry, which we can promptly store in the newly freed slot at the end.
3657 * Once we're down to a single-entry heap, we're done.
3659 while (state->memtupcount > 1)
3661 SortTuple stup = state->memtuples[0];
3663 /* this sifts-up the next-largest entry and decreases memtupcount */
3664 tuplesort_heap_siftup(state, false);
3665 state->memtuples[state->memtupcount] = stup;
3667 state->memtupcount = tupcount;
3670 * Reverse sort direction back to the original state. This is not
3671 * actually necessary but seems like a good idea for tidiness.
3673 reversedirection(state);
3675 state->status = TSS_SORTEDINMEM;
3676 state->boundUsed = true;
3680 * Sort all memtuples using specialized qsort() routines.
3682 * Quicksort is used for small in-memory sorts. Quicksort is also generally
3683 * preferred to replacement selection for generating runs during external sort
3684 * operations, although replacement selection is sometimes used for the first
3688 tuplesort_sort_memtuples(Tuplesortstate *state)
3690 if (state->memtupcount > 1)
3692 /* Can we use the single-key sort function? */
3693 if (state->onlyKey != NULL)
3694 qsort_ssup(state->memtuples, state->memtupcount,
3697 qsort_tuple(state->memtuples,
3705 * Insert a new tuple into an empty or existing heap, maintaining the
3706 * heap invariant. Caller is responsible for ensuring there's room.
3708 * Note: we assume *tuple is a temporary variable that can be scribbled on.
3709 * For some callers, tuple actually points to a memtuples[] entry above the
3710 * end of the heap. This is safe as long as it's not immediately adjacent
3711 * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
3712 * is, it might get overwritten before being moved into the heap!
3715 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
3716 int tupleindex, bool checkIndex)
3718 SortTuple *memtuples;
3722 * Save the tupleindex --- see notes above about writing on *tuple. It's a
3723 * historical artifact that tupleindex is passed as a separate argument
3724 * and not in *tuple, but it's notationally convenient so let's leave it
3727 tuple->tupindex = tupleindex;
3729 memtuples = state->memtuples;
3730 Assert(state->memtupcount < state->memtupsize);
3731 Assert(!checkIndex || tupleindex == RUN_FIRST);
3733 CHECK_FOR_INTERRUPTS();
3736 * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
3737 * using 1-based array indexes, not 0-based.
3739 j = state->memtupcount++;
3742 int i = (j - 1) >> 1;
3744 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
3746 memtuples[j] = memtuples[i];
3749 memtuples[j] = *tuple;
3753 * The tuple at state->memtuples[0] has been removed from the heap.
3754 * Decrement memtupcount, and sift up to maintain the heap invariant.
3757 tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
3759 SortTuple *memtuples = state->memtuples;
3764 Assert(!checkIndex || state->currentRun == RUN_FIRST);
3765 if (--state->memtupcount <= 0)
3768 CHECK_FOR_INTERRUPTS();
3770 n = state->memtupcount;
3771 tuple = &memtuples[n]; /* tuple that must be reinserted */
3772 i = 0; /* i is where the "hole" is */
3780 HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
3782 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
3784 memtuples[i] = memtuples[j];
3787 memtuples[i] = *tuple;
3791 * Function to reverse the sort direction from its current state
3793 * It is not safe to call this when performing hash tuplesorts
3796 reversedirection(Tuplesortstate *state)
3798 SortSupport sortKey = state->sortKeys;
3801 for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
3803 sortKey->ssup_reverse = !sortKey->ssup_reverse;
3804 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
3810 * Tape interface routines
3814 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
3818 if (LogicalTapeRead(state->tapeset, tapenum,
3819 &len, sizeof(len)) != sizeof(len))
3820 elog(ERROR, "unexpected end of tape");
3821 if (len == 0 && !eofOK)
3822 elog(ERROR, "unexpected end of data");
3827 markrunend(Tuplesortstate *state, int tapenum)
3829 unsigned int len = 0;
3831 LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
3835 * Get memory for tuple from within READTUP() routine. Allocate
3836 * memory and account for that, or consume from tape's batch
3839 * Memory returned here in the final on-the-fly merge case is recycled
3840 * from tape's batch allocation. Otherwise, callers must pfree() or
3841 * reset tuple child memory context, and account for that with a
3842 * FREEMEM(). Currently, this only ever needs to happen in WRITETUP()
3846 readtup_alloc(Tuplesortstate *state, int tapenum, Size tuplen)
3848 if (state->batchUsed)
3851 * No USEMEM() call, because during final on-the-fly merge
3852 * accounting is based on tape-private state. ("Overflow"
3853 * allocations are detected as an indication that a new round
3854 * or preloading is required. Preloading marks existing
3855 * contents of tape's batch buffer for reuse.)
3857 return mergebatchalloc(state, tapenum, tuplen);
3863 /* Batch allocation yet to be performed */
3864 ret = MemoryContextAlloc(state->tuplecontext, tuplen);
3865 USEMEM(state, GetMemoryChunkSpace(ret));
3872 * Routines specialized for HeapTuple (actually MinimalTuple) case
3876 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3878 SortSupport sortKey = state->sortKeys;
3891 /* Compare the leading sort key */
3892 compare = ApplySortComparator(a->datum1, a->isnull1,
3893 b->datum1, b->isnull1,
3898 /* Compare additional sort keys */
3899 ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3900 ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
3901 rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3902 rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
3903 tupDesc = state->tupDesc;
3905 if (sortKey->abbrev_converter)
3907 attno = sortKey->ssup_attno;
3909 datum1 = heap_getattr(<up, attno, tupDesc, &isnull1);
3910 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3912 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3920 for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
3922 attno = sortKey->ssup_attno;
3924 datum1 = heap_getattr(<up, attno, tupDesc, &isnull1);
3925 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3927 compare = ApplySortComparator(datum1, isnull1,
3938 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
3941 * We expect the passed "tup" to be a TupleTableSlot, and form a
3942 * MinimalTuple using the exported interface for that.
3944 TupleTableSlot *slot = (TupleTableSlot *) tup;
3948 MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
3950 /* copy the tuple into sort storage */
3951 tuple = ExecCopySlotMinimalTuple(slot);
3952 stup->tuple = (void *) tuple;
3953 USEMEM(state, GetMemoryChunkSpace(tuple));
3954 /* set up first-column key value */
3955 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3956 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3957 original = heap_getattr(&htup,
3958 state->sortKeys[0].ssup_attno,
3962 MemoryContextSwitchTo(oldcontext);
3964 if (!state->sortKeys->abbrev_converter || stup->isnull1)
3967 * Store ordinary Datum representation, or NULL value. If there is a
3968 * converter it won't expect NULL values, and cost model is not
3969 * required to account for NULL, so in that case we avoid calling
3970 * converter and just set datum1 to zeroed representation (to be
3971 * consistent, and to support cheap inequality tests for NULL
3972 * abbreviated keys).
3974 stup->datum1 = original;
3976 else if (!consider_abort_common(state))
3978 /* Store abbreviated key representation */
3979 stup->datum1 = state->sortKeys->abbrev_converter(original,
3984 /* Abort abbreviation */
3987 stup->datum1 = original;
3990 * Set state to be consistent with never trying abbreviation.
3992 * Alter datum1 representation in already-copied tuples, so as to
3993 * ensure a consistent representation (current tuple was just
3994 * handled). It does not matter if some dumped tuples are already
3995 * sorted on tape, since serialized tuples lack abbreviated keys
3996 * (TSS_BUILDRUNS state prevents control reaching here in any
3999 for (i = 0; i < state->memtupcount; i++)
4001 SortTuple *mtup = &state->memtuples[i];
4003 htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
4004 MINIMAL_TUPLE_OFFSET;
4005 htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
4006 MINIMAL_TUPLE_OFFSET);
4008 mtup->datum1 = heap_getattr(&htup,
4009 state->sortKeys[0].ssup_attno,
4017 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
4019 MinimalTuple tuple = (MinimalTuple) stup->tuple;
4021 /* the part of the MinimalTuple we'll write: */
4022 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
4023 unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
4025 /* total on-disk footprint: */
4026 unsigned int tuplen = tupbodylen + sizeof(int);
4028 LogicalTapeWrite(state->tapeset, tapenum,
4029 (void *) &tuplen, sizeof(tuplen));
4030 LogicalTapeWrite(state->tapeset, tapenum,
4031 (void *) tupbody, tupbodylen);
4032 if (state->randomAccess) /* need trailing length word? */
4033 LogicalTapeWrite(state->tapeset, tapenum,
4034 (void *) &tuplen, sizeof(tuplen));
4036 FREEMEM(state, GetMemoryChunkSpace(tuple));
4037 heap_free_minimal_tuple(tuple);
4041 readtup_heap(Tuplesortstate *state, SortTuple *stup,
4042 int tapenum, unsigned int len)
4044 unsigned int tupbodylen = len - sizeof(int);
4045 unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
4046 MinimalTuple tuple = (MinimalTuple) readtup_alloc(state, tapenum, tuplen);
4047 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
4050 /* read in the tuple proper */
4051 tuple->t_len = tuplen;
4052 LogicalTapeReadExact(state->tapeset, tapenum,
4053 tupbody, tupbodylen);
4054 if (state->randomAccess) /* need trailing length word? */
4055 LogicalTapeReadExact(state->tapeset, tapenum,
4056 &tuplen, sizeof(tuplen));
4057 stup->tuple = (void *) tuple;
4058 /* set up first-column key value */
4059 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
4060 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
4061 stup->datum1 = heap_getattr(&htup,
4062 state->sortKeys[0].ssup_attno,
4068 * Routines specialized for the CLUSTER case (HeapTuple data, with
4069 * comparisons per a btree index definition)
4073 comparetup_cluster(const SortTuple *a, const SortTuple *b,
4074 Tuplesortstate *state)
4076 SortSupport sortKey = state->sortKeys;
4086 AttrNumber leading = state->indexInfo->ii_KeyAttrNumbers[0];
4088 /* Be prepared to compare additional sort keys */
4089 ltup = (HeapTuple) a->tuple;
4090 rtup = (HeapTuple) b->tuple;
4091 tupDesc = state->tupDesc;
4093 /* Compare the leading sort key, if it's simple */
4096 compare = ApplySortComparator(a->datum1, a->isnull1,
4097 b->datum1, b->isnull1,
4102 if (sortKey->abbrev_converter)
4104 datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
4105 datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
4107 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
4111 if (compare != 0 || state->nKeys == 1)
4113 /* Compare additional columns the hard way */
4119 /* Must compare all keys the hard way */
4123 if (state->indexInfo->ii_Expressions == NULL)
4125 /* If not expression index, just compare the proper heap attrs */
4127 for (; nkey < state->nKeys; nkey++, sortKey++)
4129 AttrNumber attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
4131 datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
4132 datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
4134 compare = ApplySortComparator(datum1, isnull1,
4144 * In the expression index case, compute the whole index tuple and
4145 * then compare values. It would perhaps be faster to compute only as
4146 * many columns as we need to compare, but that would require
4147 * duplicating all the logic in FormIndexDatum.
4149 Datum l_index_values[INDEX_MAX_KEYS];
4150 bool l_index_isnull[INDEX_MAX_KEYS];
4151 Datum r_index_values[INDEX_MAX_KEYS];
4152 bool r_index_isnull[INDEX_MAX_KEYS];
4153 TupleTableSlot *ecxt_scantuple;
4155 /* Reset context each time to prevent memory leakage */
4156 ResetPerTupleExprContext(state->estate);
4158 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
4160 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
4161 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
4162 l_index_values, l_index_isnull);
4164 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
4165 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
4166 r_index_values, r_index_isnull);
4168 for (; nkey < state->nKeys; nkey++, sortKey++)
4170 compare = ApplySortComparator(l_index_values[nkey],
4171 l_index_isnull[nkey],
4172 r_index_values[nkey],
4173 r_index_isnull[nkey],
4184 copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
4186 HeapTuple tuple = (HeapTuple) tup;
4188 MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
4190 /* copy the tuple into sort storage */
4191 tuple = heap_copytuple(tuple);
4192 stup->tuple = (void *) tuple;
4193 USEMEM(state, GetMemoryChunkSpace(tuple));
4195 MemoryContextSwitchTo(oldcontext);
4198 * set up first-column key value, and potentially abbreviate, if it's a
4201 if (state->indexInfo->ii_KeyAttrNumbers[0] == 0)
4204 original = heap_getattr(tuple,
4205 state->indexInfo->ii_KeyAttrNumbers[0],
4209 if (!state->sortKeys->abbrev_converter || stup->isnull1)
4212 * Store ordinary Datum representation, or NULL value. If there is a
4213 * converter it won't expect NULL values, and cost model is not
4214 * required to account for NULL, so in that case we avoid calling
4215 * converter and just set datum1 to zeroed representation (to be
4216 * consistent, and to support cheap inequality tests for NULL
4217 * abbreviated keys).
4219 stup->datum1 = original;
4221 else if (!consider_abort_common(state))
4223 /* Store abbreviated key representation */
4224 stup->datum1 = state->sortKeys->abbrev_converter(original,
4229 /* Abort abbreviation */
4232 stup->datum1 = original;
4235 * Set state to be consistent with never trying abbreviation.
4237 * Alter datum1 representation in already-copied tuples, so as to
4238 * ensure a consistent representation (current tuple was just
4239 * handled). It does not matter if some dumped tuples are already
4240 * sorted on tape, since serialized tuples lack abbreviated keys
4241 * (TSS_BUILDRUNS state prevents control reaching here in any
4244 for (i = 0; i < state->memtupcount; i++)
4246 SortTuple *mtup = &state->memtuples[i];
4248 tuple = (HeapTuple) mtup->tuple;
4249 mtup->datum1 = heap_getattr(tuple,
4250 state->indexInfo->ii_KeyAttrNumbers[0],
4258 writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
4260 HeapTuple tuple = (HeapTuple) stup->tuple;
4261 unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
4263 /* We need to store t_self, but not other fields of HeapTupleData */
4264 LogicalTapeWrite(state->tapeset, tapenum,
4265 &tuplen, sizeof(tuplen));
4266 LogicalTapeWrite(state->tapeset, tapenum,
4267 &tuple->t_self, sizeof(ItemPointerData));
4268 LogicalTapeWrite(state->tapeset, tapenum,
4269 tuple->t_data, tuple->t_len);
4270 if (state->randomAccess) /* need trailing length word? */
4271 LogicalTapeWrite(state->tapeset, tapenum,
4272 &tuplen, sizeof(tuplen));
4274 FREEMEM(state, GetMemoryChunkSpace(tuple));
4275 heap_freetuple(tuple);
4279 readtup_cluster(Tuplesortstate *state, SortTuple *stup,
4280 int tapenum, unsigned int tuplen)
4282 unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
4283 HeapTuple tuple = (HeapTuple) readtup_alloc(state,
4285 t_len + HEAPTUPLESIZE);
4287 /* Reconstruct the HeapTupleData header */
4288 tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
4289 tuple->t_len = t_len;
4290 LogicalTapeReadExact(state->tapeset, tapenum,
4291 &tuple->t_self, sizeof(ItemPointerData));
4292 /* We don't currently bother to reconstruct t_tableOid */
4293 tuple->t_tableOid = InvalidOid;
4294 /* Read in the tuple body */
4295 LogicalTapeReadExact(state->tapeset, tapenum,
4296 tuple->t_data, tuple->t_len);
4297 if (state->randomAccess) /* need trailing length word? */
4298 LogicalTapeReadExact(state->tapeset, tapenum,
4299 &tuplen, sizeof(tuplen));
4300 stup->tuple = (void *) tuple;
4301 /* set up first-column key value, if it's a simple column */
4302 if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
4303 stup->datum1 = heap_getattr(tuple,
4304 state->indexInfo->ii_KeyAttrNumbers[0],
4311 * Routines specialized for IndexTuple case
4313 * The btree and hash cases require separate comparison functions, but the
4314 * IndexTuple representation is the same so the copy/write/read support
4315 * functions can be shared.
4319 comparetup_index_btree(const SortTuple *a, const SortTuple *b,
4320 Tuplesortstate *state)
4323 * This is similar to comparetup_heap(), but expects index tuples. There
4324 * is also special handling for enforcing uniqueness, and special
4325 * treatment for equal keys at the end.
4327 SortSupport sortKey = state->sortKeys;
4332 bool equal_hasnull = false;
4341 /* Compare the leading sort key */
4342 compare = ApplySortComparator(a->datum1, a->isnull1,
4343 b->datum1, b->isnull1,
4348 /* Compare additional sort keys */
4349 tuple1 = (IndexTuple) a->tuple;
4350 tuple2 = (IndexTuple) b->tuple;
4351 keysz = state->nKeys;
4352 tupDes = RelationGetDescr(state->indexRel);
4354 if (sortKey->abbrev_converter)
4356 datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
4357 datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
4359 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
4366 /* they are equal, so we only need to examine one null flag */
4368 equal_hasnull = true;
4371 for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
4373 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
4374 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
4376 compare = ApplySortComparator(datum1, isnull1,
4380 return compare; /* done when we find unequal attributes */
4382 /* they are equal, so we only need to examine one null flag */
4384 equal_hasnull = true;
4388 * If btree has asked us to enforce uniqueness, complain if two equal
4389 * tuples are detected (unless there was at least one NULL field).
4391 * It is sufficient to make the test here, because if two tuples are equal
4392 * they *must* get compared at some stage of the sort --- otherwise the
4393 * sort algorithm wouldn't have checked whether one must appear before the
4396 if (state->enforceUnique && !equal_hasnull)
4398 Datum values[INDEX_MAX_KEYS];
4399 bool isnull[INDEX_MAX_KEYS];
4403 * Some rather brain-dead implementations of qsort (such as the one in
4404 * QNX 4) will sometimes call the comparison routine to compare a
4405 * value to itself, but we always use our own implementation, which
4408 Assert(tuple1 != tuple2);
4410 index_deform_tuple(tuple1, tupDes, values, isnull);
4412 key_desc = BuildIndexValueDescription(state->indexRel, values, isnull);
4415 (errcode(ERRCODE_UNIQUE_VIOLATION),
4416 errmsg("could not create unique index \"%s\"",
4417 RelationGetRelationName(state->indexRel)),
4418 key_desc ? errdetail("Key %s is duplicated.", key_desc) :
4419 errdetail("Duplicate keys exist."),
4420 errtableconstraint(state->heapRel,
4421 RelationGetRelationName(state->indexRel))));
4425 * If key values are equal, we sort on ItemPointer. This does not affect
4426 * validity of the finished index, but it may be useful to have index
4427 * scans in physical order.
4430 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
4431 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
4434 return (blk1 < blk2) ? -1 : 1;
4437 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
4438 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
4441 return (pos1 < pos2) ? -1 : 1;
4448 comparetup_index_hash(const SortTuple *a, const SortTuple *b,
4449 Tuplesortstate *state)
4457 * Fetch hash keys and mask off bits we don't want to sort by. We know
4458 * that the first column of the index tuple is the hash key.
4460 Assert(!a->isnull1);
4461 hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
4462 Assert(!b->isnull1);
4463 hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;
4467 else if (hash1 < hash2)
4471 * If hash values are equal, we sort on ItemPointer. This does not affect
4472 * validity of the finished index, but it may be useful to have index
4473 * scans in physical order.
4475 tuple1 = (IndexTuple) a->tuple;
4476 tuple2 = (IndexTuple) b->tuple;
4479 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
4480 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
4483 return (blk1 < blk2) ? -1 : 1;
4486 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
4487 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
4490 return (pos1 < pos2) ? -1 : 1;
4497 copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
4499 IndexTuple tuple = (IndexTuple) tup;
4500 unsigned int tuplen = IndexTupleSize(tuple);
4501 IndexTuple newtuple;
4504 /* copy the tuple into sort storage */
4505 newtuple = (IndexTuple) MemoryContextAlloc(state->tuplecontext, tuplen);
4506 memcpy(newtuple, tuple, tuplen);
4507 USEMEM(state, GetMemoryChunkSpace(newtuple));
4508 stup->tuple = (void *) newtuple;
4509 /* set up first-column key value */
4510 original = index_getattr(newtuple,
4512 RelationGetDescr(state->indexRel),
4515 if (!state->sortKeys->abbrev_converter || stup->isnull1)
4518 * Store ordinary Datum representation, or NULL value. If there is a
4519 * converter it won't expect NULL values, and cost model is not
4520 * required to account for NULL, so in that case we avoid calling
4521 * converter and just set datum1 to zeroed representation (to be
4522 * consistent, and to support cheap inequality tests for NULL
4523 * abbreviated keys).
4525 stup->datum1 = original;
4527 else if (!consider_abort_common(state))
4529 /* Store abbreviated key representation */
4530 stup->datum1 = state->sortKeys->abbrev_converter(original,
4535 /* Abort abbreviation */
4538 stup->datum1 = original;
4541 * Set state to be consistent with never trying abbreviation.
4543 * Alter datum1 representation in already-copied tuples, so as to
4544 * ensure a consistent representation (current tuple was just
4545 * handled). It does not matter if some dumped tuples are already
4546 * sorted on tape, since serialized tuples lack abbreviated keys
4547 * (TSS_BUILDRUNS state prevents control reaching here in any
4550 for (i = 0; i < state->memtupcount; i++)
4552 SortTuple *mtup = &state->memtuples[i];
4554 tuple = (IndexTuple) mtup->tuple;
4555 mtup->datum1 = index_getattr(tuple,
4557 RelationGetDescr(state->indexRel),
4564 writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
4566 IndexTuple tuple = (IndexTuple) stup->tuple;
4567 unsigned int tuplen;
4569 tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
4570 LogicalTapeWrite(state->tapeset, tapenum,
4571 (void *) &tuplen, sizeof(tuplen));
4572 LogicalTapeWrite(state->tapeset, tapenum,
4573 (void *) tuple, IndexTupleSize(tuple));
4574 if (state->randomAccess) /* need trailing length word? */
4575 LogicalTapeWrite(state->tapeset, tapenum,
4576 (void *) &tuplen, sizeof(tuplen));
4578 FREEMEM(state, GetMemoryChunkSpace(tuple));
4583 readtup_index(Tuplesortstate *state, SortTuple *stup,
4584 int tapenum, unsigned int len)
4586 unsigned int tuplen = len - sizeof(unsigned int);
4587 IndexTuple tuple = (IndexTuple) readtup_alloc(state, tapenum, tuplen);
4589 LogicalTapeReadExact(state->tapeset, tapenum,
4591 if (state->randomAccess) /* need trailing length word? */
4592 LogicalTapeReadExact(state->tapeset, tapenum,
4593 &tuplen, sizeof(tuplen));
4594 stup->tuple = (void *) tuple;
4595 /* set up first-column key value */
4596 stup->datum1 = index_getattr(tuple,
4598 RelationGetDescr(state->indexRel),
4603 * Routines specialized for DatumTuple case
4607 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
4611 compare = ApplySortComparator(a->datum1, a->isnull1,
4612 b->datum1, b->isnull1,
4617 /* if we have abbreviations, then "tuple" has the original value */
4619 if (state->sortKeys->abbrev_converter)
4620 compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
4621 PointerGetDatum(b->tuple), b->isnull1,
4628 copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
4630 /* Not currently needed */
4631 elog(ERROR, "copytup_datum() should not be called");
4635 writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
4638 unsigned int tuplen;
4639 unsigned int writtenlen;
4646 else if (!state->tuples)
4648 waddr = &stup->datum1;
4649 tuplen = sizeof(Datum);
4653 waddr = stup->tuple;
4654 tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, state->datumTypeLen);
4655 Assert(tuplen != 0);
4658 writtenlen = tuplen + sizeof(unsigned int);
4660 LogicalTapeWrite(state->tapeset, tapenum,
4661 (void *) &writtenlen, sizeof(writtenlen));
4662 LogicalTapeWrite(state->tapeset, tapenum,
4664 if (state->randomAccess) /* need trailing length word? */
4665 LogicalTapeWrite(state->tapeset, tapenum,
4666 (void *) &writtenlen, sizeof(writtenlen));
4670 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
4676 readtup_datum(Tuplesortstate *state, SortTuple *stup,
4677 int tapenum, unsigned int len)
4679 unsigned int tuplen = len - sizeof(unsigned int);
4684 stup->datum1 = (Datum) 0;
4685 stup->isnull1 = true;
4688 else if (!state->tuples)
4690 Assert(tuplen == sizeof(Datum));
4691 LogicalTapeReadExact(state->tapeset, tapenum,
4692 &stup->datum1, tuplen);
4693 stup->isnull1 = false;
4698 void *raddr = readtup_alloc(state, tapenum, tuplen);
4700 LogicalTapeReadExact(state->tapeset, tapenum,
4702 stup->datum1 = PointerGetDatum(raddr);
4703 stup->isnull1 = false;
4704 stup->tuple = raddr;
4707 if (state->randomAccess) /* need trailing length word? */
4708 LogicalTapeReadExact(state->tapeset, tapenum,
4709 &tuplen, sizeof(tuplen));
4713 * Convenience routine to free a tuple previously loaded into sort memory
4716 free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
4718 FREEMEM(state, GetMemoryChunkSpace(stup->tuple));