1 /*-------------------------------------------------------------------------
4 * Generalized tuple sorting routines.
6 * This module handles sorting of heap tuples, index tuples, or single
7 * Datums (and could easily support other kinds of sortable objects,
8 * if necessary). It works efficiently for both small and large amounts
9 * of data. Small amounts are sorted in-memory using qsort(). Large
 * amounts are sorted using temporary files and a standard external sort
 * algorithm.
13 * See Knuth, volume 3, for more than you want to know about the external
14 * sorting algorithm. Historically, we divided the input into sorted runs
15 * using replacement selection, in the form of a priority tree implemented
16 * as a heap (essentially his Algorithm 5.2.3H), but now we only do that
17 * for the first run, and only if the run would otherwise end up being very
18 * short. We merge the runs using polyphase merge, Knuth's Algorithm
19 * 5.4.2D. The logical "tapes" used by Algorithm D are implemented by
20 * logtape.c, which avoids space wastage by recycling disk space as soon
21 * as each block is read from its "tape".
23 * We do not use Knuth's recommended data structure (Algorithm 5.4.1R) for
24 * the replacement selection, because it uses a fixed number of records
25 * in memory at all times. Since we are dealing with tuples that may vary
26 * considerably in size, we want to be able to vary the number of records
27 * kept in memory to ensure full utilization of the allowed sort memory
28 * space. So, we keep the tuples in a variable-size heap, with the next
29 * record to go out at the top of the heap. Like Algorithm 5.4.1R, each
30 * record is stored with the run number that it must go into, and we use
31 * (run number, key) as the ordering key for the heap. When the run number
32 * at the top of the heap changes, we know that no more records of the prior
33 * run are left in the heap. Note that there are in practice only ever two
34 * distinct run numbers, because since PostgreSQL 9.6, we only use
35 * replacement selection to form the first run.
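 *
 * As a simplified sketch (a hypothetical helper, not the actual code
 * below), the heap ordering used while building runs compares run numbers
 * before keys:
 *
 *		static int
 *		run_heap_cmp(const SortTuple *a, const SortTuple *b,
 *					 Tuplesortstate *state)
 *		{
 *			if (a->tupindex != b->tupindex)
 *				return (a->tupindex < b->tupindex) ? -1 : 1;
 *			return COMPARETUP(state, a, b);
 *		}
 *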
37 * In PostgreSQL 9.6, a heap (based on Knuth's Algorithm H, with some small
38 * customizations) is only used with the aim of producing just one run,
39 * thereby avoiding all merging. Only the first run can use replacement
40 * selection, which is why there are now only two possible valid run
41 * numbers, and why heapification is customized to not distinguish between
42 * tuples in the second run (those will be quicksorted). We generally
43 * prefer a simple hybrid sort-merge strategy, where runs are sorted in much
44 * the same way as the entire input of an internal sort is sorted (using
45 * qsort()). The replacement_sort_tuples GUC controls the limited remaining
46 * use of replacement selection for the first run.
48 * There are several reasons to favor a hybrid sort-merge strategy.
49 * Maintaining a priority tree/heap has poor CPU cache characteristics.
50 * Furthermore, the growth in main memory sizes has greatly diminished the
51 * value of having runs that are larger than available memory, even in the
52 * case where there is partially sorted input and runs can be made far
53 * larger by using a heap. In most cases, a single-pass merge step is all
54 * that is required even when runs are no larger than available memory.
55 * Avoiding multiple merge passes was traditionally considered to be the
56 * major advantage of using replacement selection.
58 * The approximate amount of memory allowed for any one sort operation
59 * is specified in kilobytes by the caller (most pass work_mem). Initially,
60 * we absorb tuples and simply store them in an unsorted array as long as
61 * we haven't exceeded workMem. If we reach the end of the input without
62 * exceeding workMem, we sort the array using qsort() and subsequently return
63 * tuples just by scanning the tuple array sequentially. If we do exceed
64 * workMem, we begin to emit tuples into sorted runs in temporary tapes.
65 * When tuples are dumped in batch after quicksorting, we begin a new run
66 * with a new output tape (selected per Algorithm D). After the end of the
67 * input is reached, we dump out remaining tuples in memory into a final run
 * (or two, when replacement selection is still used), then merge the runs
 * using Algorithm D.
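 *
 * Schematically, the switch to tape-based operation looks like this (a
 * sketch only; inittapes() happens just once, on first overflow --- see
 * puttuple_common() for the real logic):
 *
 *		if (state->memtupcount < state->memtupsize && !LACKMEM(state))
 *			state->memtuples[state->memtupcount++] = stup;
 *		else
 *		{
 *			inittapes(state);
 *			dumptuples(state, false);
 *		}
 *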
71 * When merging runs, we use a heap containing just the frontmost tuple from
72 * each source run; we repeatedly output the smallest tuple and replace it
73 * with the next tuple from its source tape (if any). When the heap empties,
74 * the merge is complete. The basic merge algorithm thus needs very little
75 * memory --- only M tuples for an M-way merge, and M is constrained to a
76 * small number. However, we can still make good use of our full workMem
77 * allocation by pre-reading additional blocks from each source tape. Without
78 * prereading, our access pattern to the temporary file would be very erratic;
79 * on average we'd read one block from each of M source tapes during the same
80 * time that we're writing M blocks to the output tape, so there is no
81 * sequentiality of access at all, defeating the read-ahead methods used by
82 * most Unix kernels. Worse, the output tape gets written into a very random
83 * sequence of blocks of the temp file, ensuring that things will be even
84 * worse when it comes time to read that tape. A straightforward merge pass
85 * thus ends up doing a lot of waiting for disk seeks. We can improve matters
86 * by prereading from each source tape sequentially, loading about workMem/M
87 * bytes from each tape in turn, and making the sequential blocks immediately
88 * available for reuse. This approach helps to localize both read and write
 * accesses.  The pre-reading is handled by logtape.c; we just tell it how
 * much memory to use for the buffers.
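 *
 * Schematically, one merge pass is just (a sketch; see mergeonerun() for
 * the real code):
 *
 *		while (state->memtupcount > 0)
 *		{
 *			srcTape = state->memtuples[0].tupindex;
 *			WRITETUP(state, destTape, &state->memtuples[0]);
 *			if (mergereadnext(state, srcTape, &stup))
 *			{
 *				stup.tupindex = srcTape;
 *				tuplesort_heap_replace_top(state, &stup, false);
 *			}
 *			else
 *				tuplesort_heap_delete_top(state, false);
 *		}
 *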
92 * When the caller requests random access to the sort result, we form
93 * the final sorted run on a logical tape which is then "frozen", so
94 * that we can access it randomly. When the caller does not need random
95 * access, we return from tuplesort_performsort() as soon as we are down
96 * to one run per logical tape. The final merge is then performed
97 * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
98 * saves one cycle of writing all the data out to disk and reading it in.
100 * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
101 * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
102 * to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
103 * tape drives are expensive beasts, and in particular that there will always
104 * be many more runs than tape drives. In our implementation a "tape drive"
105 * doesn't cost much more than a few Kb of memory buffers, so we can afford
106 * to have lots of them. In particular, if we can have as many tape drives
107 * as sorted runs, we can eliminate any repeated I/O at all. In the current
108 * code we determine the number of tapes M on the basis of workMem: we want
109 * workMem/M to be large enough that we read a fair amount of data each time
110 * we preread from a tape, so as to maintain the locality of access described
111 * above. Nonetheless, with large workMem we can have many tapes (but not
112 * too many -- see the comments in tuplesort_merge_order).
115 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
116 * Portions Copyright (c) 1994, Regents of the University of California
119 * src/backend/utils/sort/tuplesort.c
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <limits.h>
128 #include "access/htup_details.h"
129 #include "access/nbtree.h"
130 #include "catalog/index.h"
131 #include "catalog/pg_am.h"
132 #include "commands/tablespace.h"
133 #include "executor/executor.h"
134 #include "miscadmin.h"
135 #include "pg_trace.h"
136 #include "utils/datum.h"
137 #include "utils/logtape.h"
138 #include "utils/lsyscache.h"
139 #include "utils/memutils.h"
140 #include "utils/pg_rusage.h"
141 #include "utils/rel.h"
142 #include "utils/sortsupport.h"
143 #include "utils/tuplesort.h"
/* sort-type codes for sort__start probes */
#define HEAP_SORT		0
#define INDEX_SORT		1
#define DATUM_SORT		2
#define CLUSTER_SORT	3
/* GUC variables */
#ifdef TRACE_SORT
bool		trace_sort = false;
#endif
#ifdef DEBUG_BOUNDED_SORT
bool		optimize_bounded_sort = true;
#endif
/*
 * The objects we actually sort are SortTuple structs.  These contain
164 * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
165 * which is a separate palloc chunk --- we assume it is just one chunk and
166 * can be freed by a simple pfree() (except during merge, when we use a
167 * simple slab allocator). SortTuples also contain the tuple's first key
168 * column in Datum/nullflag format, and an index integer.
170 * Storing the first key column lets us save heap_getattr or index_getattr
171 * calls during tuple comparisons. We could extract and save all the key
172 * columns not just the first, but this would increase code complexity and
173 * overhead, and wouldn't actually save any comparison cycles in the common
174 * case where the first key determines the comparison result. Note that
175 * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
177 * There is one special case: when the sort support infrastructure provides an
178 * "abbreviated key" representation, where the key is (typically) a pass by
179 * value proxy for a pass by reference type. In this case, the abbreviated key
180 * is stored in datum1 in place of the actual first key column.
182 * When sorting single Datums, the data value is represented directly by
183 * datum1/isnull1 for pass by value types (or null values). If the datatype is
184 * pass-by-reference and isnull1 is false, then "tuple" points to a separately
185 * palloc'd data value, otherwise "tuple" is NULL. The value of datum1 is then
186 * either the same pointer as "tuple", or is an abbreviated key value as
187 * described above. Accordingly, "tuple" is always used in preference to
188 * datum1 as the authoritative value for pass-by-reference cases.
190 * While building initial runs, tupindex holds the tuple's run number.
191 * Historically, the run number could meaningfully distinguish many runs, but
192 * it now only distinguishes RUN_FIRST and HEAP_RUN_NEXT, since replacement
193 * selection is always abandoned after the first run; no other run number
194 * should be represented here. During merge passes, we re-use it to hold the
195 * input tape number that each tuple in the heap was read from. tupindex goes
 * unused if the sort occurs entirely in memory.
 */
typedef struct
{
	void	   *tuple;			/* the tuple itself */
201 Datum datum1; /* value of first key column */
202 bool isnull1; /* is first key column NULL? */
	int			tupindex;		/* see notes above */
} SortTuple;

/*
 * During merge, we use a pre-allocated set of fixed-size slots to hold
 * tuples, to avoid palloc/pfree overhead.
210 * Merge doesn't require a lot of memory, so we can afford to waste some,
211 * by using gratuitously-sized slots. If a tuple is larger than 1 kB, the
212 * palloc() overhead is not significant anymore.
214 * 'nextfree' is valid when this chunk is in the free list. When in use, the
 * slot holds a tuple.
 */
#define SLAB_SLOT_SIZE 1024

typedef union SlabSlot
{
	union SlabSlot *nextfree;
	char		buffer[SLAB_SLOT_SIZE];
} SlabSlot;
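
/*
 * For illustration, the free list works like a simple stack (a sketch; the
 * real manipulations are in readtup_alloc() and RELEASE_SLAB_SLOT() below).
 * Popping a free slot and pushing it back are just:
 *
 *		buf = state->slabFreeHead;
 *		state->slabFreeHead = buf->nextfree;
 *
 *		buf->nextfree = state->slabFreeHead;
 *		state->slabFreeHead = buf;
 */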
/*
 * Possible states of a Tuplesort object.  These denote the states that
 * persist between calls of Tuplesort routines.
 */
typedef enum
{
	TSS_INITIAL,				/* Loading tuples; still within memory limit */
	TSS_BOUNDED,				/* Loading tuples into bounded-size heap */
	TSS_BUILDRUNS,				/* Loading tuples; writing to tape */
	TSS_SORTEDINMEM,			/* Sort completed entirely in memory */
	TSS_SORTEDONTAPE,			/* Sort completed, final run is on tape */
	TSS_FINALMERGE				/* Performing final merge on-the-fly */
} TupSortStatus;
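
/*
 * For orientation, the state transitions implemented below are (a sketch,
 * derived from the comments above):
 *
 *		TSS_INITIAL -> TSS_SORTEDINMEM                   (all fits in workMem)
 *		TSS_INITIAL -> TSS_BOUNDED -> TSS_SORTEDINMEM    (bounded heapsort)
 *		TSS_INITIAL -> TSS_BUILDRUNS -> TSS_SORTEDONTAPE (randomAccess result)
 *		TSS_INITIAL -> TSS_BUILDRUNS -> TSS_FINALMERGE   (on-the-fly merge)
 */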
/*
 * Parameters for calculation of number of tapes to use --- see inittapes()
241 * and tuplesort_merge_order().
243 * In this calculation we assume that each tape will cost us about 3 blocks
244 * worth of buffer space (which is an underestimate for very large data
245 * volumes, but it's probably close enough --- see logtape.c).
247 * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
 * tape during a preread cycle (see discussion at top of file).
 */
250 #define MINORDER 6 /* minimum merge order */
251 #define MAXORDER 500 /* maximum merge order */
252 #define TAPE_BUFFER_OVERHEAD (BLCKSZ * 3)
253 #define MERGE_BUFFER_SIZE (BLCKSZ * 32)
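
/*
 * For illustration, tuplesort_merge_order() derives the merge order from
 * workMem roughly as follows (a sketch; see that function for details):
 *
 *		mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
 *			(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
 *		mOrder = Max(mOrder, MINORDER);
 *		mOrder = Min(mOrder, MAXORDER);
 *
 * E.g. with BLCKSZ = 8192 and workMem = 4MB, each tape costs about 24 kB of
 * buffer overhead and wants a 256 kB preread buffer, giving a merge order
 * of about 14.
 */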
/*
 * Run numbers, used during external sort operations.
 *
 * HEAP_RUN_NEXT is only used for SortTuple.tupindex, never state.currentRun.
 */
#define RUN_FIRST		0
#define HEAP_RUN_NEXT	INT_MAX
#define RUN_SECOND		1
264 typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
265 Tuplesortstate *state);
/*
 * Private state of a Tuplesort operation.
 */
struct Tuplesortstate
{
272 TupSortStatus status; /* enumerated value as shown above */
273 int nKeys; /* number of columns in sort key */
274 bool randomAccess; /* did caller request random access? */
275 bool bounded; /* did caller specify a maximum number of
276 * tuples to return? */
277 bool boundUsed; /* true if we made use of a bounded heap */
278 int bound; /* if bounded, the maximum number of tuples */
279 bool tuples; /* Can SortTuple.tuple ever be set? */
280 int64 availMem; /* remaining memory available, in bytes */
281 int64 allowedMem; /* total memory allowed, in bytes */
282 int maxTapes; /* number of tapes (Knuth's T) */
283 int tapeRange; /* maxTapes-1 (Knuth's P) */
284 MemoryContext sortcontext; /* memory context holding most sort data */
285 MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
286 LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
	/*
	 * These function pointers decouple the routines that must know what kind
	 * of tuple we are sorting from the routines that don't need to know it.
	 * They are set up by the tuplesort_begin_xxx routines.
	 *
	 * Function to compare two tuples; result is per qsort() convention, ie:
	 * <0, 0, >0 according as a<b, a=b, a>b.  The API must match
	 * qsort_arg_comparator.
	 */
	SortTupleComparator comparetup;
	/*
	 * Function to copy a supplied input tuple into palloc'd space and set up
	 * its SortTuple representation (ie, set tuple/datum1/isnull1).  Also,
	 * state->availMem must be decreased by the amount of space used for the
	 * tuple copy (note the SortTuple struct itself is not counted).
	 */
	void		(*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
	/*
	 * Function to write a stored tuple onto tape.  The representation of the
	 * tuple on tape need not be the same as it is in memory; requirements on
	 * the tape representation are given below.  Unless the slab allocator is
	 * used, after writing the tuple, pfree() the out-of-line data (not the
	 * SortTuple struct!), and increase state->availMem by the amount of
	 * memory space thereby released.
	 */
	void		(*writetup) (Tuplesortstate *state, int tapenum,
							 SortTuple *stup);
	/*
	 * Function to read a stored tuple from tape back into memory. 'len' is
	 * the already-read length of the stored tuple.  The tuple is allocated
	 * from the slab memory arena, or is palloc'd, see readtup_alloc().
	 */
	void		(*readtup) (Tuplesortstate *state, SortTuple *stup,
							int tapenum, unsigned int len);
	/*
	 * This array holds the tuples now in sort memory.  If we are in state
	 * INITIAL, the tuples are in no particular order; if we are in state
	 * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
	 * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
	 * H.  In state SORTEDONTAPE, the array is not used.
	 */
333 SortTuple *memtuples; /* array of SortTuple structs */
334 int memtupcount; /* number of tuples currently present */
335 int memtupsize; /* allocated length of memtuples array */
336 bool growmemtuples; /* memtuples' growth still underway? */
	/*
	 * Memory for tuples is sometimes allocated using a simple slab allocator,
	 * rather than with palloc().  Currently, we switch to slab allocation
	 * when we start merging.  Merging only needs to keep a small, fixed
	 * number of tuples in memory at any time, so we can avoid the
	 * palloc/pfree overhead by recycling a fixed number of fixed-size slots
	 * to hold the tuples.
	 *
	 * For the slab, we use one large allocation, divided into SLAB_SLOT_SIZE
	 * slots.  The allocation is sized to have one slot per tape, plus one
	 * additional slot.  We need that many slots to hold all the tuples kept
	 * in the heap during merge, plus the one we have last returned from the
	 * sort, with tuplesort_gettuple.
	 *
	 * Initially, all the slots are kept in a linked list of free slots.  When
	 * a tuple is read from a tape, it is put into the next available slot, if
	 * it fits.  If the tuple is larger than SLAB_SLOT_SIZE, it is palloc'd
	 * instead.
	 *
	 * When we're done processing a tuple, we return the slot back to the free
	 * list, or pfree() it if it was palloc'd.  We know that a tuple was
	 * allocated from the slab if its pointer value is between
	 * slabMemoryBegin and -End.
	 *
	 * When the slab allocator is used, the USEMEM/LACKMEM mechanism of
	 * tracking memory usage is not used.
	 */
365 bool slabAllocatorUsed;
367 char *slabMemoryBegin; /* beginning of slab memory arena */
368 char *slabMemoryEnd; /* end of slab memory arena */
369 SlabSlot *slabFreeHead; /* head of free list */
371 /* Buffer size to use for reading input tapes, during merge. */
372 size_t read_buffer_size;
	/*
	 * When we return a tuple to the caller in tuplesort_gettuple_XXX, that
	 * came from a tape (that is, in TSS_SORTEDONTAPE or TSS_FINALMERGE
	 * modes), we remember the tuple in 'lastReturnedTuple', so that we can
	 * recycle the memory on next gettuple call.
	 */
	void	   *lastReturnedTuple;
	/*
	 * While building initial runs, this indicates if the replacement
	 * selection strategy is in use.  When it isn't, then a simple hybrid
	 * sort-merge strategy is in use instead (runs are quicksorted).
	 */
	bool		replaceActive;

	/*
	 * While building initial runs, this is the current output run number
	 * (starting at RUN_FIRST).  Afterwards, it is the number of initial runs
	 * we made.
	 */
	int			currentRun;
	/*
	 * Unless otherwise noted, all pointer variables below are pointers to
	 * arrays of length maxTapes, holding per-tape data.
	 */
	/*
	 * This variable is only used during merge passes.  mergeactive[i] is true
	 * if we are reading an input run from (actual) tape number i and have not
	 * yet exhausted that run.
	 */
	bool	   *mergeactive;	/* active input run source? */
	/*
	 * Variables for Algorithm D.  Note that destTape is a "logical" tape
	 * number, ie, an index into the tp_xxx[] arrays.  Be careful to keep
	 * "logical" and "actual" tape numbers straight!
	 */
413 int Level; /* Knuth's l */
414 int destTape; /* current output tape (Knuth's j, less 1) */
415 int *tp_fib; /* Target Fibonacci run counts (A[]) */
416 int *tp_runs; /* # of real runs on each tape */
417 int *tp_dummy; /* # of dummy runs for each tape (D[]) */
418 int *tp_tapenum; /* Actual tape numbers (TAPE[]) */
419 int activeTapes; /* # of active input tapes in merge pass */
	/*
	 * These variables are used after completion of sorting to keep track of
	 * the next tuple to return.  (In the tape case, the tape's current read
	 * position is also critical state.)
	 */
426 int result_tape; /* actual tape number of finished output */
427 int current; /* array index (only used if SORTEDINMEM) */
428 bool eof_reached; /* reached EOF (needed for cursors) */
430 /* markpos_xxx holds marked position for mark and restore */
431 long markpos_block; /* tape block# (only used if SORTEDONTAPE) */
432 int markpos_offset; /* saved "current", or offset in tape block */
433 bool markpos_eof; /* saved "eof_reached" */
	/*
	 * The sortKeys variable is used by every case other than the hash index
	 * case; it is set by tuplesort_begin_xxx.  tupDesc is only used by the
	 * MinimalTuple and CLUSTER routines, though.
	 */
	TupleDesc	tupDesc;
	SortSupport sortKeys;		/* array of length nKeys */
	/*
	 * This variable is shared by the single-key MinimalTuple case and the
	 * Datum case (which both use qsort_ssup()).  Otherwise it's NULL.
	 */
	SortSupport onlyKey;
	/*
	 * Additional state for managing "abbreviated key" sortsupport routines
	 * (which currently may be used by all cases except the hash index case).
	 * Tracks the intervals at which the optimization's effectiveness is
	 * checked.
	 */
	int64		abbrevNext;		/* Tuple # at which to next check
								 * applicability */
	/*
	 * These variables are specific to the CLUSTER case; they are set by
	 * tuplesort_begin_cluster.
	 */
462 IndexInfo *indexInfo; /* info about index being used for reference */
463 EState *estate; /* for evaluating index expressions */
	/*
	 * These variables are specific to the IndexTuple case; they are set by
	 * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
	 */
469 Relation heapRel; /* table the index is being built on */
470 Relation indexRel; /* index being built */
472 /* These are specific to the index_btree subcase: */
473 bool enforceUnique; /* complain if we find duplicate tuples */
475 /* These are specific to the index_hash subcase: */
476 uint32 hash_mask; /* mask for sortable part of hash code */
	/*
	 * These variables are specific to the Datum case; they are set by
	 * tuplesort_begin_datum and used only by the DatumTuple routines.
	 */
	Oid			datumType;
	/* we need typelen in order to know how to copy the Datums. */
	int			datumTypeLen;
	/*
	 * Resource snapshot for time of sort start.
	 */
#ifdef TRACE_SORT
	PGRUsage	ru_start;
#endif
};
/*
 * Is the given tuple allocated from the slab memory arena?
 */
497 #define IS_SLAB_SLOT(state, tuple) \
498 ((char *) (tuple) >= (state)->slabMemoryBegin && \
499 (char *) (tuple) < (state)->slabMemoryEnd)
/*
 * Return the given tuple to the slab memory free list, or free it
 * if it was palloc'd.
 */
#define RELEASE_SLAB_SLOT(state, tuple) \
	do { \
		SlabSlot *buf = (SlabSlot *) tuple; \
		\
		if (IS_SLAB_SLOT((state), buf)) \
		{ \
			buf->nextfree = (state)->slabFreeHead; \
			(state)->slabFreeHead = buf; \
		} \
		else \
			pfree(buf); \
	} while(0)
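
/*
 * Typical usage, as a sketch (this mirrors tuplesort_gettuple_common()):
 * recycle the previously-returned tuple before handing back the next one
 * during a merge.
 *
 *		if (state->lastReturnedTuple)
 *		{
 *			RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
 *			state->lastReturnedTuple = NULL;
 *		}
 */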
517 #define COMPARETUP(state,a,b) ((*(state)->comparetup) (a, b, state))
518 #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
519 #define WRITETUP(state,tape,stup) ((*(state)->writetup) (state, tape, stup))
520 #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
521 #define LACKMEM(state) ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
522 #define USEMEM(state,amt) ((state)->availMem -= (amt))
523 #define FREEMEM(state,amt) ((state)->availMem += (amt))
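
/*
 * Example of the accounting discipline these macros imply (a sketch; the
 * copytup and writetup routines below follow this pattern):
 *
 *		tup = (void *) palloc(tuplen);
 *		USEMEM(state, GetMemoryChunkSpace(tup));
 *		...
 *		FREEMEM(state, GetMemoryChunkSpace(tup));
 *		pfree(tup);
 */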
/*
 * NOTES about on-tape representation of tuples:
528 * We require the first "unsigned int" of a stored tuple to be the total size
529 * on-tape of the tuple, including itself (so it is never zero; an all-zero
530 * unsigned int is used to delimit runs). The remainder of the stored tuple
531 * may or may not match the in-memory representation of the tuple ---
532 * any conversion needed is the job of the writetup and readtup routines.
534 * If state->randomAccess is true, then the stored representation of the
535 * tuple must be followed by another "unsigned int" that is a copy of the
536 * length --- so the total tape space used is actually sizeof(unsigned int)
 * more than the stored length value.  This allows read-backwards.  When
 * randomAccess is not true, the write/read routines may omit the extra
 * length word.
541 * writetup is expected to write both length words as well as the tuple
542 * data. When readtup is called, the tape is positioned just after the
543 * front length word; readtup must read the tuple data and advance past
544 * the back length word (if present).
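 *
 * Schematically, a writetup routine therefore emits (a sketch; 'data' and
 * 'datalen' stand in for the routine's locals):
 *
 *		unsigned int tuplen = datalen + sizeof(unsigned int);
 *
 *		LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
 *		LogicalTapeWrite(state->tapeset, tapenum, data, datalen);
 *		if (state->randomAccess)
 *			LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
 *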
546 * The write/read routines can make use of the tuple description data
547 * stored in the Tuplesortstate record, if needed. They are also expected
548 * to adjust state->availMem by the amount of memory space (not tape space!)
549 * released or consumed. There is no error return from either writetup
550 * or readtup; they should ereport() on failure.
553 * NOTES about memory consumption calculations:
555 * We count space allocated for tuples against the workMem limit, plus
556 * the space used by the variable-size memtuples array. Fixed-size space
557 * is not counted; it's small enough to not be interesting.
559 * Note that we count actual space used (as shown by GetMemoryChunkSpace)
560 * rather than the originally-requested size. This is important since
561 * palloc can add substantial overhead. It's not a complete answer since
562 * we won't count any wasted space in palloc allocation blocks, but it's
563 * a lot better than what we were doing before 7.3. As of 9.6, a
564 * separate memory context is used for caller passed tuples. Resetting
565 * it at certain key increments significantly ameliorates fragmentation.
566 * Note that this places a responsibility on readtup and copytup routines
567 * to use the right memory context for these tuples (and to not use the
568 * reset context for anything whose lifetime needs to span multiple
 * external sort runs).
 */
/* When using this macro, beware of double evaluation of len */
#define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
	do { \
		if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
			elog(ERROR, "unexpected end of data"); \
	} while(0)
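
/*
 * Example (a sketch): after getlen() has consumed the leading length word,
 * a readtup routine pulls the tuple body with
 *
 *		LogicalTapeReadExact(state->tapeset, tapenum, tupbody, tupbodylen);
 *
 * where 'tupbody' and 'tupbodylen' are stand-ins for the routine's locals.
 */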
580 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
581 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
582 static bool consider_abort_common(Tuplesortstate *state);
583 static bool useselection(Tuplesortstate *state);
584 static void inittapes(Tuplesortstate *state);
585 static void selectnewtape(Tuplesortstate *state);
586 static void init_slab_allocator(Tuplesortstate *state, int numSlots);
587 static void mergeruns(Tuplesortstate *state);
588 static void mergeonerun(Tuplesortstate *state);
589 static void beginmerge(Tuplesortstate *state);
590 static bool mergereadnext(Tuplesortstate *state, int srcTape, SortTuple *stup);
591 static void dumptuples(Tuplesortstate *state, bool alltuples);
592 static void dumpbatch(Tuplesortstate *state, bool alltuples);
593 static void make_bounded_heap(Tuplesortstate *state);
594 static void sort_bounded_heap(Tuplesortstate *state);
595 static void tuplesort_sort_memtuples(Tuplesortstate *state);
static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
					  bool checkIndex);
static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple,
							bool checkIndex);
static void tuplesort_heap_delete_top(Tuplesortstate *state, bool checkIndex);
601 static void reversedirection(Tuplesortstate *state);
602 static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
603 static void markrunend(Tuplesortstate *state, int tapenum);
604 static void *readtup_alloc(Tuplesortstate *state, Size tuplen);
605 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
606 Tuplesortstate *state);
607 static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_heap(Tuplesortstate *state, int tapenum,
			  SortTuple *stup);
610 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
611 int tapenum, unsigned int len);
612 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
613 Tuplesortstate *state);
614 static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_cluster(Tuplesortstate *state, int tapenum,
				 SortTuple *stup);
617 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
618 int tapenum, unsigned int len);
619 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
620 Tuplesortstate *state);
621 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
622 Tuplesortstate *state);
623 static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_index(Tuplesortstate *state, int tapenum,
			   SortTuple *stup);
626 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
627 int tapenum, unsigned int len);
628 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
629 Tuplesortstate *state);
630 static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_datum(Tuplesortstate *state, int tapenum,
			   SortTuple *stup);
633 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
634 int tapenum, unsigned int len);
635 static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
/*
 * Special versions of qsort just for SortTuple objects.  qsort_tuple() sorts
 * any variant of SortTuples, using the appropriate comparetup function.
 * qsort_ssup() is specialized for the case where the comparetup function
 * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts
 * and Datum sorts.
 */

#include "qsort_tuple.c"
/*
 * tuplesort_begin_xxx
 *
 * Initialize for a tuple sort operation.
652 * After calling tuplesort_begin, the caller should call tuplesort_putXXX
653 * zero or more times, then call tuplesort_performsort when all the tuples
654 * have been supplied. After performsort, retrieve the tuples in sorted
655 * order by calling tuplesort_getXXX until it returns false/NULL. (If random
656 * access was requested, rescan, markpos, and restorepos can also be called.)
657 * Call tuplesort_end to terminate the operation and release memory/disk space.
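 *
 * A typical call sequence is therefore (argument lists elided; a sketch,
 * not a complete example):
 *
 *		Tuplesortstate *sortstate = tuplesort_begin_heap(...);
 *
 *		while (more input tuples)
 *			tuplesort_puttupleslot(sortstate, slot);
 *		tuplesort_performsort(sortstate);
 *		while (tuplesort_gettupleslot(sortstate, ...))
 *			process tuple in slot;
 *		tuplesort_end(sortstate);
 *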
659 * Each variant of tuplesort_begin has a workMem parameter specifying the
660 * maximum number of kilobytes of RAM to use before spilling data to disk.
661 * (The normal value of this parameter is work_mem, but some callers use
662 * other values.) Each variant also has a randomAccess parameter specifying
 * whether the caller needs non-sequential access to the sort result.
 */
666 static Tuplesortstate *
tuplesort_begin_common(int workMem, bool randomAccess)
{
669 Tuplesortstate *state;
670 MemoryContext sortcontext;
671 MemoryContext tuplecontext;
672 MemoryContext oldcontext;
	/*
	 * Create a working memory context for this sort operation.  All data
	 * needed by the sort will live inside this context.
	 */
	sortcontext = AllocSetContextCreate(CurrentMemoryContext,
										"TupleSort main",
										ALLOCSET_DEFAULT_SIZES);
	/*
	 * Caller tuple (e.g. IndexTuple) memory context.
	 *
	 * A dedicated child context used exclusively for caller passed tuples
	 * eases memory management.  Resetting at key points reduces
	 * fragmentation.  Note that the memtuples array of SortTuples is allocated
	 * in the parent context, not this context, because there is no need to
	 * free memtuples early.
	 */
	tuplecontext = AllocSetContextCreate(sortcontext,
										 "Caller tuples",
										 ALLOCSET_DEFAULT_SIZES);
	/*
	 * Make the Tuplesortstate within the per-sort context.  This way, we
	 * don't need a separate pfree() operation for it at shutdown.
	 */
699 oldcontext = MemoryContextSwitchTo(sortcontext);
701 state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
#ifdef TRACE_SORT
	if (trace_sort)
		pg_rusage_init(&state->ru_start);
#endif
708 state->status = TSS_INITIAL;
709 state->randomAccess = randomAccess;
710 state->bounded = false;
711 state->tuples = true;
712 state->boundUsed = false;
713 state->allowedMem = workMem * (int64) 1024;
714 state->availMem = state->allowedMem;
715 state->sortcontext = sortcontext;
716 state->tuplecontext = tuplecontext;
717 state->tapeset = NULL;
719 state->memtupcount = 0;
	/*
	 * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
	 * see comments in grow_memtuples().
	 */
725 state->memtupsize = Max(1024,
726 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1);
728 state->growmemtuples = true;
729 state->slabAllocatorUsed = false;
730 state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
732 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
	/* workMem must be large enough for the minimal memtuples array */
	if (LACKMEM(state))
		elog(ERROR, "insufficient memory allowed for sort");
738 state->currentRun = RUN_FIRST;
	/*
	 * maxTapes, tapeRange, and Algorithm D variables will be initialized by
	 * inittapes(), if needed
	 */
745 state->result_tape = -1; /* flag that result tape has not been formed */
	MemoryContextSwitchTo(oldcontext);

	return state;
}
Tuplesortstate *
tuplesort_begin_heap(TupleDesc tupDesc,
754 int nkeys, AttrNumber *attNums,
755 Oid *sortOperators, Oid *sortCollations,
756 bool *nullsFirstFlags,
					 int workMem, bool randomAccess)
{
759 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
	MemoryContext oldcontext;
	int			i;
763 oldcontext = MemoryContextSwitchTo(state->sortcontext);
765 AssertArg(nkeys > 0);
770 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
771 nkeys, workMem, randomAccess ? 't' : 'f');
774 state->nKeys = nkeys;
776 TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
								false,	/* no unique check */
								nkeys,
								workMem,
								randomAccess);
782 state->comparetup = comparetup_heap;
783 state->copytup = copytup_heap;
784 state->writetup = writetup_heap;
785 state->readtup = readtup_heap;
787 state->tupDesc = tupDesc; /* assume we need not copy tupDesc */
788 state->abbrevNext = 10;
790 /* Prepare SortSupport data for each column */
791 state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
	for (i = 0; i < nkeys; i++)
	{
		SortSupport sortKey = state->sortKeys + i;
797 AssertArg(attNums[i] != 0);
798 AssertArg(sortOperators[i] != 0);
800 sortKey->ssup_cxt = CurrentMemoryContext;
801 sortKey->ssup_collation = sortCollations[i];
802 sortKey->ssup_nulls_first = nullsFirstFlags[i];
803 sortKey->ssup_attno = attNums[i];
804 /* Convey if abbreviation optimization is applicable in principle */
805 sortKey->abbreviate = (i == 0);
		PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
	}
811 * The "onlyKey" optimization cannot be used with abbreviated keys, since
812 * tie-breaker comparisons may be required. Typically, the optimization
813 * is only of value to pass-by-value types anyway, whereas abbreviated
	 * keys are typically only of value to pass-by-reference types.
	 */
816 if (nkeys == 1 && !state->sortKeys->abbrev_converter)
817 state->onlyKey = state->sortKeys;
	MemoryContextSwitchTo(oldcontext);

	return state;
}
Tuplesortstate *
tuplesort_begin_cluster(TupleDesc tupDesc,
						Relation indexRel,
						int workMem, bool randomAccess)
{
829 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
830 ScanKey indexScanKey;
	MemoryContext oldcontext;
	int			i;
834 Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
836 oldcontext = MemoryContextSwitchTo(state->sortcontext);
841 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
842 RelationGetNumberOfAttributes(indexRel),
843 workMem, randomAccess ? 't' : 'f');
846 state->nKeys = RelationGetNumberOfAttributes(indexRel);
848 TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
								false,	/* no unique check */
								state->nKeys,
								workMem,
								randomAccess);
854 state->comparetup = comparetup_cluster;
855 state->copytup = copytup_cluster;
856 state->writetup = writetup_cluster;
857 state->readtup = readtup_cluster;
858 state->abbrevNext = 10;
860 state->indexInfo = BuildIndexInfo(indexRel);
862 state->tupDesc = tupDesc; /* assume we need not copy tupDesc */
864 indexScanKey = _bt_mkscankey_nodata(indexRel);
	if (state->indexInfo->ii_Expressions != NULL)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;

		/*
		 * We will need to use FormIndexDatum to evaluate the index
873 * expressions. To do that, we need an EState, as well as a
874 * TupleTableSlot to put the table tuples into. The econtext's
		 * scantuple has to point to that slot, too.
		 */
877 state->estate = CreateExecutorState();
878 slot = MakeSingleTupleTableSlot(tupDesc);
879 econtext = GetPerTupleExprContext(state->estate);
		econtext->ecxt_scantuple = slot;
	}
883 /* Prepare SortSupport data for each column */
884 state->sortKeys = (SortSupport) palloc0(state->nKeys *
885 sizeof(SortSupportData));
	for (i = 0; i < state->nKeys; i++)
	{
		SortSupport sortKey = state->sortKeys + i;
		ScanKey		scanKey = indexScanKey + i;
		int16		strategy;
893 sortKey->ssup_cxt = CurrentMemoryContext;
894 sortKey->ssup_collation = scanKey->sk_collation;
895 sortKey->ssup_nulls_first =
896 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
897 sortKey->ssup_attno = scanKey->sk_attno;
898 /* Convey if abbreviation optimization is applicable in principle */
899 sortKey->abbreviate = (i == 0);
901 AssertState(sortKey->ssup_attno != 0);
903 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
904 BTGreaterStrategyNumber : BTLessStrategyNumber;
		PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
	}
909 _bt_freeskey(indexScanKey);
	MemoryContextSwitchTo(oldcontext);

	return state;
}
Tuplesortstate *
tuplesort_begin_index_btree(Relation heapRel,
							Relation indexRel,
							bool enforceUnique,
							int workMem, bool randomAccess)
{
922 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
923 ScanKey indexScanKey;
	MemoryContext oldcontext;
	int			i;
927 oldcontext = MemoryContextSwitchTo(state->sortcontext);
932 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
933 enforceUnique ? 't' : 'f',
934 workMem, randomAccess ? 't' : 'f');
937 state->nKeys = RelationGetNumberOfAttributes(indexRel);
	TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
								enforceUnique,
								state->nKeys,
								workMem,
								randomAccess);
945 state->comparetup = comparetup_index_btree;
946 state->copytup = copytup_index;
947 state->writetup = writetup_index;
948 state->readtup = readtup_index;
949 state->abbrevNext = 10;
951 state->heapRel = heapRel;
952 state->indexRel = indexRel;
953 state->enforceUnique = enforceUnique;
955 indexScanKey = _bt_mkscankey_nodata(indexRel);
958 /* Prepare SortSupport data for each column */
959 state->sortKeys = (SortSupport) palloc0(state->nKeys *
960 sizeof(SortSupportData));
	for (i = 0; i < state->nKeys; i++)
	{
		SortSupport sortKey = state->sortKeys + i;
		ScanKey		scanKey = indexScanKey + i;
		int16		strategy;
968 sortKey->ssup_cxt = CurrentMemoryContext;
969 sortKey->ssup_collation = scanKey->sk_collation;
970 sortKey->ssup_nulls_first =
971 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
972 sortKey->ssup_attno = scanKey->sk_attno;
973 /* Convey if abbreviation optimization is applicable in principle */
974 sortKey->abbreviate = (i == 0);
976 AssertState(sortKey->ssup_attno != 0);
978 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
979 BTGreaterStrategyNumber : BTLessStrategyNumber;
		PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
	}
984 _bt_freeskey(indexScanKey);
	MemoryContextSwitchTo(oldcontext);

	return state;
}
Tuplesortstate *
tuplesort_begin_index_hash(Relation heapRel,
						   Relation indexRel,
						   uint32 hash_mask,
						   int workMem, bool randomAccess)
{
997 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
998 MemoryContext oldcontext;
1000 oldcontext = MemoryContextSwitchTo(state->sortcontext);
1005 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
1007 workMem, randomAccess ? 't' : 'f');
1010 state->nKeys = 1; /* Only one sort column, the hash code */
1012 state->comparetup = comparetup_index_hash;
1013 state->copytup = copytup_index;
1014 state->writetup = writetup_index;
1015 state->readtup = readtup_index;
1017 state->heapRel = heapRel;
1018 state->indexRel = indexRel;
1020 state->hash_mask = hash_mask;
	MemoryContextSwitchTo(oldcontext);

	return state;
}
Tuplesortstate *
tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
1029 bool nullsFirstFlag,
					  int workMem, bool randomAccess)
{
1032 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
	MemoryContext oldcontext;
	int16		typlen;
	bool		typbyval;
1037 oldcontext = MemoryContextSwitchTo(state->sortcontext);
1042 "begin datum sort: workMem = %d, randomAccess = %c",
1043 workMem, randomAccess ? 't' : 'f');
1046 state->nKeys = 1; /* always a one-column sort */
1048 TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
								false,	/* no unique check */
								1,
								workMem,
								randomAccess);
1054 state->comparetup = comparetup_datum;
1055 state->copytup = copytup_datum;
1056 state->writetup = writetup_datum;
1057 state->readtup = readtup_datum;
1058 state->abbrevNext = 10;
1060 state->datumType = datumType;
1062 /* lookup necessary attributes of the datum type */
1063 get_typlenbyval(datumType, &typlen, &typbyval);
1064 state->datumTypeLen = typlen;
1065 state->tuples = !typbyval;
1067 /* Prepare SortSupport data */
1068 state->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
1070 state->sortKeys->ssup_cxt = CurrentMemoryContext;
1071 state->sortKeys->ssup_collation = sortCollation;
1072 state->sortKeys->ssup_nulls_first = nullsFirstFlag;
	/*
	 * Abbreviation is possible here only for by-reference types.  In theory,
	 * a pass-by-value datatype could have an abbreviated form that is cheaper
	 * to compare.  In a tuple sort, we could support that, because we can
	 * always extract the original datum from the tuple if needed.  Here, we
	 * can't, because a datum sort only stores a single copy of the datum; the
	 * "tuple" field of each SortTuple is NULL.
	 */
1082 state->sortKeys->abbreviate = !typbyval;
1084 PrepareSortSupportFromOrderingOp(sortOperator, state->sortKeys);
1087 * The "onlyKey" optimization cannot be used with abbreviated keys, since
1088 * tie-breaker comparisons may be required. Typically, the optimization
1089 * is only of value to pass-by-value types anyway, whereas abbreviated
	 * keys are typically only of value to pass-by-reference types.
	 */
1092 if (!state->sortKeys->abbrev_converter)
1093 state->onlyKey = state->sortKeys;
	MemoryContextSwitchTo(oldcontext);

	return state;
}
/*
 * tuplesort_set_bound
 *
1103 * Advise tuplesort that at most the first N result tuples are required.
1105 * Must be called before inserting any tuples. (Actually, we could allow it
1106 * as long as the sort hasn't spilled to disk, but there seems no need for
1107 * delayed calls at the moment.)
 * This is a hint only.  The tuplesort may still return more tuples than
 * requested.
 */
void
tuplesort_set_bound(Tuplesortstate *state, int64 bound)
{
1115 /* Assert we're called before loading any tuples */
1116 Assert(state->status == TSS_INITIAL);
1117 Assert(state->memtupcount == 0);
1118 Assert(!state->bounded);
1120 #ifdef DEBUG_BOUNDED_SORT
1121 /* Honor GUC setting that disables the feature (for easy testing) */
	if (!optimize_bounded_sort)
		return;
#endif
1126 /* We want to be able to compute bound * 2, so limit the setting */
	if (bound > (int64) (INT_MAX / 2))
		return;
1130 state->bounded = true;
1131 state->bound = (int) bound;
	/*
	 * Bounded sorts are not an effective target for abbreviated key
	 * optimization.  Disable by setting state to be consistent with no
	 * abbreviation support.
	 */
1138 state->sortKeys->abbrev_converter = NULL;
1139 if (state->sortKeys->abbrev_full_comparator)
1140 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
1142 /* Not strictly necessary, but be tidy */
1143 state->sortKeys->abbrev_abort = NULL;
	state->sortKeys->abbrev_full_comparator = NULL;
}
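
/*
 * For example (a sketch): a node executing ORDER BY ... LIMIT 100 would
 * call, right after its tuplesort_begin_heap():
 *
 *		tuplesort_set_bound(sortstate, 100);
 */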
/*
 * tuplesort_end
 *
 *	Release resources and clean up.
1152 * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
1153 * pointing to garbage. Be careful not to attempt to use or free such
 * pointers afterwards!
 */
void
tuplesort_end(Tuplesortstate *state)
{
1159 /* context swap probably not needed, but let's be safe */
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
	long		spaceUsed;

	if (state->tapeset)
		spaceUsed = LogicalTapeSetBlocks(state->tapeset);
	else
		spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
#endif
	/*
	 * Delete temporary "tape" files, if any.
	 *
	 * Note: want to include this in reported total cost of sort, hence need
	 * for two #ifdef TRACE_SORT sections.
	 */
	if (state->tapeset)
		LogicalTapeSetClose(state->tapeset);
1184 elog(LOG, "external sort ended, %ld disk blocks used: %s",
1185 spaceUsed, pg_rusage_show(&state->ru_start));
1187 elog(LOG, "internal sort ended, %ld KB used: %s",
1188 spaceUsed, pg_rusage_show(&state->ru_start));
1191 TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
	/*
	 * If you disabled TRACE_SORT, you can still probe sort__done, but you
	 * ain't getting space-used stats.
	 */
	TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
#endif
1201 /* Free any execution state created for CLUSTER case */
	if (state->estate != NULL)
	{
		ExprContext *econtext = GetPerTupleExprContext(state->estate);

		ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
		FreeExecutorState(state->estate);
	}
1210 MemoryContextSwitchTo(oldcontext);
	/*
	 * Free the per-sort memory context, thereby releasing all working memory,
	 * including the Tuplesortstate struct itself.
	 */
	MemoryContextDelete(state->sortcontext);
}
/*
 * Grow the memtuples[] array, if possible within our memory constraint.  We
1221 * must not exceed INT_MAX tuples in memory or the caller-provided memory
1222 * limit. Return TRUE if we were able to enlarge the array, FALSE if not.
1224 * Normally, at each increment we double the size of the array. When doing
1225 * that would exceed a limit, we attempt one last, smaller increase (and then
1226 * clear the growmemtuples flag so we don't try any more). That allows us to
1227 * use memory as fully as permitted; sticking to the pure doubling rule could
1228 * result in almost half going unused. Because availMem moves around with
1229 * tuple addition/removal, we need some rule to prevent making repeated small
1230 * increases in memtupsize, which would just be useless thrashing. The
1231 * growmemtuples flag accomplishes that and also prevents useless
 * recalculations in this function.
 */
static bool
grow_memtuples(Tuplesortstate *state)
{
	int			newmemtupsize;
	int			memtupsize = state->memtupsize;
	int64		memNowUsed = state->allowedMem - state->availMem;
1241 /* Forget it if we've already maxed out memtuples, per comment above */
	if (!state->growmemtuples)
		return false;
1245 /* Select new value of memtupsize */
	if (memNowUsed <= state->availMem)
	{
		/*
		 * We've used no more than half of allowedMem; double our usage,
		 * clamping at INT_MAX tuples.
		 */
		if (memtupsize < INT_MAX / 2)
			newmemtupsize = memtupsize * 2;
		else
		{
			newmemtupsize = INT_MAX;
			state->growmemtuples = false;
		}
	}
	else
	{
		/*
		 * This will be the last increment of memtupsize.  Abandon doubling
1264 * strategy and instead increase as much as we safely can.
1266 * To stay within allowedMem, we can't increase memtupsize by more
1267 * than availMem / sizeof(SortTuple) elements. In practice, we want
1268 * to increase it by considerably less, because we need to leave some
1269 * space for the tuples to which the new array slots will refer. We
1270 * assume the new tuples will be about the same size as the tuples
1271 * we've already seen, and thus we can extrapolate from the space
1272 * consumption so far to estimate an appropriate new size for the
1273 * memtuples array. The optimal value might be higher or lower than
1274 * this estimate, but it's hard to know that in advance. We again
1275 * clamp at INT_MAX tuples.
1277 * This calculation is safe against enlarging the array so much that
1278 * LACKMEM becomes true, because the memory currently used includes
1279 * the present array; thus, there would be enough allowedMem for the
1280 * new array elements even if no other memory were currently used.
1282 * We do the arithmetic in float8, because otherwise the product of
1283 * memtupsize and allowedMem could overflow. Any inaccuracy in the
1284 * result should be insignificant; but even if we computed a
1285 * completely insane result, the checks below will prevent anything
		 * really bad from happening.
		 */
		double		grow_ratio;

		grow_ratio = (double) state->allowedMem / (double) memNowUsed;
		if (memtupsize * grow_ratio < INT_MAX)
			newmemtupsize = (int) (memtupsize * grow_ratio);
		else
			newmemtupsize = INT_MAX;
1296 /* We won't make any further enlargement attempts */
		state->growmemtuples = false;
	}
1300 /* Must enlarge array by at least one element, else report failure */
	if (newmemtupsize <= memtupsize)
		goto noalloc;

	/*
	 * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
1306 * to ensure our request won't be rejected. Note that we can easily
1307 * exhaust address space before facing this outcome. (This is presently
1308 * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
	 * don't rely on that at this distance.)
	 */
	if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
	{
		newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
		state->growmemtuples = false;	/* can't grow any more */
	}

	/*
	 * We need to be sure that we do not cause LACKMEM to become true, else
1319 * the space management algorithm will go nuts. The code above should
1320 * never generate a dangerous request, but to be safe, check explicitly
1321 * that the array growth fits within availMem. (We could still cause
1322 * LACKMEM if the memory chunk overhead associated with the memtuples
1323 * array were to increase. That shouldn't happen because we chose the
1324 * initial array size large enough to ensure that palloc will be treating
1325 * both old and new arrays as separate chunks. But we'll check LACKMEM
	 * explicitly below just in case.)
	 */
	if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
		goto noalloc;

	/* OK, do it */
1332 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1333 state->memtupsize = newmemtupsize;
1334 state->memtuples = (SortTuple *)
1335 repalloc_huge(state->memtuples,
1336 state->memtupsize * sizeof(SortTuple));
	USEMEM(state, GetMemoryChunkSpace(state->memtuples));
	if (LACKMEM(state))
		elog(ERROR, "unexpected out-of-memory situation in tuplesort");
	return true;

noalloc:
	/* If for any reason we didn't realloc, shut off future attempts */
	state->growmemtuples = false;
	return false;
}
/*
 * Accept one tuple while collecting input data for sort.
 *
 * Note that the input data is always copied; the caller need not save it.
 */
void
tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;
	/*
	 * Copy the given tuple into memory we control, and decrease availMem.
	 * Then call the common code.
	 */
	COPYTUP(state, &stup, (void *) slot);
1365 puttuple_common(state, &stup);
	MemoryContextSwitchTo(oldcontext);
}
/*
 * Accept one tuple while collecting input data for sort.
 *
 * Note that the input data is always copied; the caller need not save it.
 */
void
tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;
	/*
	 * Copy the given tuple into memory we control, and decrease availMem.
	 * Then call the common code.
	 */
	COPYTUP(state, &stup, (void *) tup);
1387 puttuple_common(state, &stup);
	MemoryContextSwitchTo(oldcontext);
}
/*
 * Collect one index tuple while collecting input data for sort, building
 * it from caller-supplied values.
 */
void
tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
							  ItemPointer self, Datum *values,
							  bool *isnull)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
	SortTuple	stup;
	Datum		original;
	IndexTuple	tuple;

	stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull);
1407 tuple = ((IndexTuple) stup.tuple);
1408 tuple->t_tid = *self;
1409 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1410 /* set up first-column key value */
	original = index_getattr(tuple,
							 1,
							 RelationGetDescr(state->indexRel),
							 &stup.isnull1);

	MemoryContextSwitchTo(state->sortcontext);
	if (!state->sortKeys || !state->sortKeys->abbrev_converter || stup.isnull1)
	{
		/*
		 * Store ordinary Datum representation, or NULL value.  If there is a
1422 * converter it won't expect NULL values, and cost model is not
1423 * required to account for NULL, so in that case we avoid calling
1424 * converter and just set datum1 to zeroed representation (to be
1425 * consistent, and to support cheap inequality tests for NULL
		 * abbreviated keys).
		 */
		stup.datum1 = original;
	}
	else if (!consider_abort_common(state))
	{
		/* Store abbreviated key representation */
		stup.datum1 = state->sortKeys->abbrev_converter(original,
														state->sortKeys);
	}
	else
	{
		/* Abort abbreviation */
		int			i;

		stup.datum1 = original;

		/*
		 * Set state to be consistent with never trying abbreviation.
1446 * Alter datum1 representation in already-copied tuples, so as to
1447 * ensure a consistent representation (current tuple was just
1448 * handled). It does not matter if some dumped tuples are already
1449 * sorted on tape, since serialized tuples lack abbreviated keys
		 * (TSS_BUILDRUNS state prevents control reaching here in any case).
		 */
		for (i = 0; i < state->memtupcount; i++)
		{
			SortTuple  *mtup = &state->memtuples[i];

			tuple = mtup->tuple;
			mtup->datum1 = index_getattr(tuple,
										 1,
										 RelationGetDescr(state->indexRel),
										 &mtup->isnull1);
		}
	}
1464 puttuple_common(state, &stup);
	MemoryContextSwitchTo(oldcontext);
}
/*
 * Accept one Datum while collecting input data for sort.
 *
 * If the Datum is pass-by-ref type, the value will be copied.
 */
void
tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
	SortTuple	stup;
	/*
	 * Pass-by-value types or null values are just stored directly in
1482 * stup.datum1 (and stup.tuple is not used and set to NULL).
1484 * Non-null pass-by-reference values need to be copied into memory we
1485 * control, and possibly abbreviated. The copied value is pointed to by
1486 * stup.tuple and is treated as the canonical copy (e.g. to return via
1487 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
1488 * abbreviated value if abbreviation is happening, otherwise it's
	 * identical to stup.tuple.
	 */
	if (isNull || !state->tuples)
	{
		/*
		 * Set datum1 to zeroed representation for NULLs (to be consistent,
		 * and to support cheap inequality tests for NULL abbreviated keys).
		 */
1498 stup.datum1 = !isNull ? val : (Datum) 0;
1499 stup.isnull1 = isNull;
1500 stup.tuple = NULL; /* no separate storage */
		MemoryContextSwitchTo(state->sortcontext);
	}
	else
	{
1505 Datum original = datumCopy(val, false, state->datumTypeLen);
1507 stup.isnull1 = false;
1508 stup.tuple = DatumGetPointer(original);
1509 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1510 MemoryContextSwitchTo(state->sortcontext);
1512 if (!state->sortKeys->abbrev_converter)
1514 stup.datum1 = original;
		else if (!consider_abort_common(state))
		{
			/* Store abbreviated key representation */
			stup.datum1 = state->sortKeys->abbrev_converter(original,
															state->sortKeys);
		}
		else
		{
			/* Abort abbreviation */
			int			i;

			stup.datum1 = original;

			/*
			 * Set state to be consistent with never trying abbreviation.
1532 * Alter datum1 representation in already-copied tuples, so as to
1533 * ensure a consistent representation (current tuple was just
1534 * handled). It does not matter if some dumped tuples are already
1535 * sorted on tape, since serialized tuples lack abbreviated keys
			 * (TSS_BUILDRUNS state prevents control reaching here in any
			 * case).
			 */
			for (i = 0; i < state->memtupcount; i++)
			{
				SortTuple  *mtup = &state->memtuples[i];

				mtup->datum1 = PointerGetDatum(mtup->tuple);
			}
		}
	}
1548 puttuple_common(state, &stup);
	MemoryContextSwitchTo(oldcontext);
}
/*
 * Shared code for tuple and datum cases.
 */
static void
puttuple_common(Tuplesortstate *state, SortTuple *tuple)
{
	switch (state->status)
	{
		case TSS_INITIAL:

			/*
			 * Save the tuple into the unsorted array.  First, grow the array
1565 * as needed. Note that we try to grow the array when there is
1566 * still one free slot remaining --- if we fail, there'll still be
1567 * room to store the incoming tuple, and then we'll switch to
			 * tape-based operation.
			 */
			if (state->memtupcount >= state->memtupsize - 1)
			{
				(void) grow_memtuples(state);
				Assert(state->memtupcount < state->memtupsize);
			}
			state->memtuples[state->memtupcount++] = *tuple;

			/*
			 * Check if it's time to switch over to a bounded heapsort.  We do
1579 * so if the input tuple count exceeds twice the desired tuple
1580 * count (this is a heuristic for where heapsort becomes cheaper
1581 * than a quicksort), or if we've just filled workMem and have
1582 * enough tuples to meet the bound.
1584 * Note that once we enter TSS_BOUNDED state we will always try to
1585 * complete the sort that way. In the worst case, if later input
1586 * tuples are larger than earlier ones, this might cause us to
			 * exceed workMem significantly.
			 */
1589 if (state->bounded &&
1590 (state->memtupcount > state->bound * 2 ||
				 (state->memtupcount > state->bound && LACKMEM(state))))
			{
#ifdef TRACE_SORT
				if (trace_sort)
					elog(LOG, "switching to bounded heapsort at %d tuples: %s",
						 state->memtupcount,
						 pg_rusage_show(&state->ru_start));
#endif
				make_bounded_heap(state);
				return;
			}
			/*
			 * Done if we still fit in available memory and have array slots.
			 */
			if (state->memtupcount < state->memtupsize && !LACKMEM(state))
				return;

			/*
			 * Nope; time to switch to tape-based operation.
			 */
			inittapes(state);

			/*
			 * Dump tuples until we are back under the limit.
			 */
			dumptuples(state, false);
			break;

		case TSS_BOUNDED:

			/*
			 * We don't want to grow the array here, so check whether the new
1624 * tuple can be discarded before putting it in. This should be a
1625 * good speed optimization, too, since when there are many more
1626 * input tuples than the bound, most input tuples can be discarded
1627 * with just this one comparison. Note that because we currently
			 * have the sort direction reversed, we must check for <= not >=.
			 */
			if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
			{
				/* new tuple <= top of the heap, so we can discard it */
				free_sort_tuple(state, tuple);
				CHECK_FOR_INTERRUPTS();
			}
			else
			{
1638 /* discard top of heap, replacing it with the new tuple */
1639 free_sort_tuple(state, &state->memtuples[0]);
1640 tuple->tupindex = 0; /* not used */
1641 tuplesort_heap_replace_top(state, tuple, false);
1648 * Insert the tuple into the heap, with run number currentRun if
1649 * it can go into the current run, else HEAP_RUN_NEXT. The tuple
1650 * can go into the current run if it is >= the first
1651 * not-yet-output tuple. (Actually, it could go into the current
1652 * run if it is >= the most recently output tuple ... but that
1653 * would require keeping around the tuple we last output, and it's
1654 * simplest to let writetup free each tuple as soon as it's written.)
1657 * Note that this only applies when:
1659 * - currentRun is RUN_FIRST
1661 * - Replacement selection is in use (typically it is never used).
1663 * When these two conditions are not both true, all tuples are
1664 * appended indifferently, much like the TSS_INITIAL case.
1666 * There should always be room to store the incoming tuple.
1668 Assert(!state->replaceActive || state->memtupcount > 0);
1669 if (state->replaceActive &&
1670 COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1672 Assert(state->currentRun == RUN_FIRST);
1675 * Insert tuple into first, fully heapified run.
1677 * Unlike classic replacement selection, which this module was
1678 * previously based on, only RUN_FIRST tuples are fully
1679 * heapified. Any second/next run tuples are appended
1680 * indifferently. While HEAP_RUN_NEXT tuples may be sifted
1681 * out of the way of first run tuples, COMPARETUP() will never
1682 * be called for the run's tuples during sifting (only our
1683 * initial COMPARETUP() call is required for the tuple, to
1684 * determine that the tuple does not belong in RUN_FIRST).
1686 tuple->tupindex = state->currentRun;
1687 tuplesort_heap_insert(state, tuple, true);
1692 * Tuple was determined to not belong to heapified RUN_FIRST,
1693 * or replacement selection not in play. Append the tuple to
1694 * memtuples indifferently.
1696 * dumptuples() does not trust that the next run's tuples are
1697 * heapified. Anything past the first run will always be
1698 * quicksorted even when replacement selection is initially
1699 * used. (When it's never used, every tuple still takes this path.)
1702 tuple->tupindex = HEAP_RUN_NEXT;
1703 state->memtuples[state->memtupcount++] = *tuple;
1707 * If we are over the memory limit, dump tuples till we're under.
1709 dumptuples(state, false);
1713 elog(ERROR, "invalid tuplesort state");
1719 consider_abort_common(Tuplesortstate *state)
1721 Assert(state->sortKeys[0].abbrev_converter != NULL);
1722 Assert(state->sortKeys[0].abbrev_abort != NULL);
1723 Assert(state->sortKeys[0].abbrev_full_comparator != NULL);
1726 * Check effectiveness of abbreviation optimization. Consider aborting
1727 * when still within memory limit.
1729 if (state->status == TSS_INITIAL &&
1730 state->memtupcount >= state->abbrevNext)
1732 state->abbrevNext *= 2;
1735 * Check opclass-supplied abbreviation abort routine. It may indicate
1736 * that abbreviation should not proceed.
1738 if (!state->sortKeys->abbrev_abort(state->memtupcount,
1743 * Finally, restore authoritative comparator, and indicate that
1744 * abbreviation is not in play by setting abbrev_converter to NULL
1746 state->sortKeys[0].comparator = state->sortKeys[0].abbrev_full_comparator;
1747 state->sortKeys[0].abbrev_converter = NULL;
1748 /* Not strictly necessary, but be tidy */
1749 state->sortKeys[0].abbrev_abort = NULL;
1750 state->sortKeys[0].abbrev_full_comparator = NULL;
1752 /* Give up - expect original pass-by-value representation */
1760 * All tuples have been provided; finish the sort.
1763 tuplesort_performsort(Tuplesortstate *state)
1765 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1769 elog(LOG, "performsort starting: %s",
1770 pg_rusage_show(&state->ru_start));
1773 switch (state->status)
1778 * We were able to accumulate all the tuples within the allowed
1779 * amount of memory. Just qsort 'em and we're done.
1781 tuplesort_sort_memtuples(state);
1783 state->eof_reached = false;
1784 state->markpos_offset = 0;
1785 state->markpos_eof = false;
1786 state->status = TSS_SORTEDINMEM;
1792 * We were able to accumulate all the tuples required for output
1793 * in memory, using a heap to eliminate excess tuples. Now we
1794 * have to transform the heap to a properly-sorted array.
1796 sort_bounded_heap(state);
1798 state->eof_reached = false;
1799 state->markpos_offset = 0;
1800 state->markpos_eof = false;
1801 state->status = TSS_SORTEDINMEM;
1807 * Finish tape-based sort. First, flush all tuples remaining in
1808 * memory out to tape; then merge until we have a single remaining
1809 * run (or, if !randomAccess, one run per tape). Note that
1810 * mergeruns sets the correct state->status.
1812 dumptuples(state, true);
1814 state->eof_reached = false;
1815 state->markpos_block = 0L;
1816 state->markpos_offset = 0;
1817 state->markpos_eof = false;
1821 elog(ERROR, "invalid tuplesort state");
1828 if (state->status == TSS_FINALMERGE)
1829 elog(LOG, "performsort done (except %d-way final merge): %s",
1831 pg_rusage_show(&state->ru_start));
1833 elog(LOG, "performsort done: %s",
1834 pg_rusage_show(&state->ru_start));
1838 MemoryContextSwitchTo(oldcontext);
1842 * Internal routine to fetch the next tuple in either forward or back
1843 * direction into *stup. Returns FALSE if no more tuples.
1844 * If *should_free is set, the caller must pfree stup.tuple when done with it.
1845 * Otherwise, caller should not use tuple following next call here.
1848 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
1849 SortTuple *stup, bool *should_free)
1851 unsigned int tuplen;
1853 switch (state->status)
1855 case TSS_SORTEDINMEM:
1856 Assert(forward || state->randomAccess);
1857 Assert(!state->slabAllocatorUsed);
1858 *should_free = false;
1861 if (state->current < state->memtupcount)
1863 *stup = state->memtuples[state->current++];
1866 state->eof_reached = true;
1869 * Complain if caller tries to retrieve more tuples than
1870 * originally asked for in a bounded sort. This is because
1871 * returning EOF here might be the wrong thing.
1873 if (state->bounded && state->current >= state->bound)
1874 elog(ERROR, "retrieved too many tuples in a bounded sort");
1880 if (state->current <= 0)
1884 * If all tuples have already been fetched, return the last
1885 * tuple; otherwise, return the tuple before the last one returned.
1887 if (state->eof_reached)
1888 state->eof_reached = false;
1891 state->current--; /* last returned tuple */
1892 if (state->current <= 0)
1895 *stup = state->memtuples[state->current - 1];
1900 case TSS_SORTEDONTAPE:
1901 Assert(forward || state->randomAccess);
1902 Assert(state->slabAllocatorUsed);
1905 * The slot that held the tuple that we returned in previous
1906 * gettuple call can now be reused.
1908 if (state->lastReturnedTuple)
1910 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1911 state->lastReturnedTuple = NULL;
1916 if (state->eof_reached)
1919 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1921 READTUP(state, stup, state->result_tape, tuplen);
1924 * Remember the tuple we return, so that we can recycle
1925 * its memory on next call. (This can be NULL, in the
1926 * !state->tuples case).
1928 state->lastReturnedTuple = stup->tuple;
1930 *should_free = false;
1935 state->eof_reached = true;
1943 * If all tuples have already been fetched, return the last tuple;
1944 * otherwise, return the tuple before the last one returned.
1946 if (state->eof_reached)
1949 * Seek position is pointing just past the zero tuplen at the
1950 * end of file; back up to fetch last tuple's ending length
1951 * word. If seek fails we must have a completely empty file.
1953 if (!LogicalTapeBackspace(state->tapeset,
1955 2 * sizeof(unsigned int)))
1957 state->eof_reached = false;
1962 * Back up and fetch previously-returned tuple's ending length
1963 * word. If seek fails, assume we are at start of file.
1965 if (!LogicalTapeBackspace(state->tapeset,
1967 sizeof(unsigned int)))
1969 tuplen = getlen(state, state->result_tape, false);
1972 * Back up to get ending length word of tuple before it.
1974 if (!LogicalTapeBackspace(state->tapeset,
1976 tuplen + 2 * sizeof(unsigned int)))
1979 * If that fails, presumably the prev tuple is the first
1980 * in the file. Back up so that it becomes next to read
1981 * in forward direction (not obviously right, but that is
1982 * what in-memory case does).
1984 if (!LogicalTapeBackspace(state->tapeset,
1986 tuplen + sizeof(unsigned int)))
1987 elog(ERROR, "bogus tuple length in backward scan");
1992 tuplen = getlen(state, state->result_tape, false);
1995 * Now we have the length of the prior tuple, back up and read it.
1996 * Note: READTUP expects we are positioned after the initial
1997 * length word of the tuple, so back up to that point.
1999 if (!LogicalTapeBackspace(state->tapeset,
2002 elog(ERROR, "bogus tuple length in backward scan");
2003 READTUP(state, stup, state->result_tape, tuplen);
2006 * Remember the tuple we return, so that we can recycle its memory
2007 * on next call. (This can be NULL, in the Datum case).
2009 state->lastReturnedTuple = stup->tuple;
2011 *should_free = false;
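/*
 * On-tape layout assumed by the backspace arithmetic above, in the
 * randomAccess case (see writetup_heap() and friends below): each tuple
 * is written as
 *
 *     [length word][tuple body][trailing length word]
 *
 * Backing up "tuplen + 2 * sizeof(unsigned int)" from just past one
 * tuple's trailing length word therefore skips that word, the whole tuple
 * (tuplen covers the body plus the leading length word), and lands just
 * before the trailing length word of the tuple preceding it, which is
 * exactly what the next getlen() call wants to read.
 */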
2014 case TSS_FINALMERGE:
2016 /* We are managing memory ourselves, with the slab allocator. */
2017 Assert(state->slabAllocatorUsed);
2018 *should_free = false;
2021 * The slab slot holding the tuple that we returned in previous
2022 * gettuple call can now be reused.
2024 if (state->lastReturnedTuple)
2026 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
2027 state->lastReturnedTuple = NULL;
2031 * This code should match the inner loop of mergeonerun().
2033 if (state->memtupcount > 0)
2035 int srcTape = state->memtuples[0].tupindex;
2038 *stup = state->memtuples[0];
2041 * Remember the tuple we return, so that we can recycle its
2042 * memory on next call. (This can be NULL, in the Datum case).
2044 state->lastReturnedTuple = stup->tuple;
2047 * Pull next tuple from tape, and replace the returned tuple
2048 * at top of the heap with it.
2050 if (!mergereadnext(state, srcTape, &newtup))
2053 * If no more data, we've reached end of run on this tape.
2054 * Remove the top node from the heap.
2056 tuplesort_heap_delete_top(state, false);
2059 * Rewind to free the read buffer. It'd go away at the
2060 * end of the sort anyway, but better to release the memory early.
2063 LogicalTapeRewindForWrite(state->tapeset, srcTape);
2066 newtup.tupindex = srcTape;
2067 tuplesort_heap_replace_top(state, &newtup, false);
2073 elog(ERROR, "invalid tuplesort state");
2074 return false; /* keep compiler quiet */
2079 * Fetch the next tuple in either forward or back direction.
2080 * If successful, put tuple in slot and return TRUE; else, clear the slot
2083 * Caller may optionally be passed back abbreviated value (on TRUE return
2084 * value) when abbreviation was used, which can be used to cheaply avoid
2085 * equality checks that might otherwise be required. Caller can safely make a
2086 * determination of "non-equal tuple" based on simple binary inequality. A
2087 * NULL value in the leading attribute will set the abbreviated value to a zeroed
2088 * representation, which the caller may rely on in abbreviated inequality checks.
2090 * The slot receives a copied tuple (sometimes allocated in caller memory
2091 * context) that will stay valid regardless of future manipulations of the
2092 * tuplesort's state.
2095 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
2096 TupleTableSlot *slot, Datum *abbrev)
2098 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2102 if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
2105 MemoryContextSwitchTo(oldcontext);
2109 /* Record abbreviated key for caller */
2110 if (state->sortKeys->abbrev_converter && abbrev)
2111 *abbrev = stup.datum1;
2115 stup.tuple = heap_copy_minimal_tuple((MinimalTuple) stup.tuple);
2118 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free);
2123 ExecClearTuple(slot);
2129 * Fetch the next tuple in either forward or back direction.
2130 * Returns NULL if no more tuples. If *should_free is set, the
2131 * caller must pfree the returned tuple when done with it.
2132 * If it is not set, caller should not use tuple following next call here.
2136 tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
2138 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2141 if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
2144 MemoryContextSwitchTo(oldcontext);
2150 * Fetch the next index tuple in either forward or back direction.
2151 * Returns NULL if no more tuples. If *should_free is set, the
2152 * caller must pfree the returned tuple when done with it.
2153 * If it is not set, caller should not use tuple following next call here.
2157 tuplesort_getindextuple(Tuplesortstate *state, bool forward,
2160 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2163 if (!tuplesort_gettuple_common(state, forward, &stup, should_free))
2166 MemoryContextSwitchTo(oldcontext);
2168 return (IndexTuple) stup.tuple;
2172 * Fetch the next Datum in either forward or back direction.
2173 * Returns FALSE if no more datums.
2175 * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
2176 * and is now owned by the caller.
2178 * Caller may optionally be passed back abbreviated value (on TRUE return
2179 * value) when abbreviation was used, which can be used to cheaply avoid
2180 * equality checks that might otherwise be required. Caller can safely make a
2181 * determination of "non-equal tuple" based on simple binary inequality. A
2182 * NULL value will have a zeroed abbreviated value representation, which caller
2183 * may rely on in abbreviated inequality check.
2186 tuplesort_getdatum(Tuplesortstate *state, bool forward,
2187 Datum *val, bool *isNull, Datum *abbrev)
2189 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
2193 if (!tuplesort_gettuple_common(state, forward, &stup, &should_free))
2195 MemoryContextSwitchTo(oldcontext);
2199 /* Record abbreviated key for caller */
2200 if (state->sortKeys->abbrev_converter && abbrev)
2201 *abbrev = stup.datum1;
2203 if (stup.isnull1 || !state->tuples)
2206 *isNull = stup.isnull1;
2210 /* use stup.tuple because stup.datum1 may be an abbreviation */
2213 *val = PointerGetDatum(stup.tuple);
2215 *val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen);
2219 MemoryContextSwitchTo(oldcontext);
2225 * Advance over N tuples in either forward or back direction,
2226 * without returning any data. N==0 is a no-op.
2227 * Returns TRUE if successful, FALSE if ran out of tuples.
2230 tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
2232 MemoryContext oldcontext;
2235 * We don't actually support backwards skip yet, because no callers need
2236 * it. The API is designed to allow for that later, though.
2239 Assert(ntuples >= 0);
2241 switch (state->status)
2243 case TSS_SORTEDINMEM:
2244 if (state->memtupcount - state->current >= ntuples)
2246 state->current += ntuples;
2249 state->current = state->memtupcount;
2250 state->eof_reached = true;
2253 * Complain if caller tries to retrieve more tuples than
2254 * originally asked for in a bounded sort. This is because
2255 * returning EOF here might be the wrong thing.
2257 if (state->bounded && state->current >= state->bound)
2258 elog(ERROR, "retrieved too many tuples in a bounded sort");
2262 case TSS_SORTEDONTAPE:
2263 case TSS_FINALMERGE:
2266 * We could probably optimize these cases better, but for now it's
2267 * not worth the trouble.
2269 oldcontext = MemoryContextSwitchTo(state->sortcontext);
2270 while (ntuples-- > 0)
2275 if (!tuplesort_gettuple_common(state, forward,
2276 &stup, &should_free))
2278 MemoryContextSwitchTo(oldcontext);
2281 if (should_free && stup.tuple)
2283 CHECK_FOR_INTERRUPTS();
2285 MemoryContextSwitchTo(oldcontext);
2289 elog(ERROR, "invalid tuplesort state");
2290 return false; /* keep compiler quiet */
2295 * tuplesort_merge_order - report merge order we'll use for given memory
2296 * (note: "merge order" just means the number of input tapes in the merge).
2298 * This is exported for use by the planner. allowedMem is in bytes.
2301 tuplesort_merge_order(int64 allowedMem)
2306 * We need one tape for each merge input, plus another one for the output,
2307 * and each of these tapes needs buffer space. In addition we want
2308 * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't need so much).
2311 * Note: you might be thinking we need to account for the memtuples[]
2312 * array in this calculation, but we effectively treat that as part of the
2313 * MERGE_BUFFER_SIZE workspace.
2315 mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
2316 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
2319 * Even in minimum memory, use at least a MINORDER merge. On the other
2320 * hand, even when we have lots of memory, do not use more than a MAXORDER
2321 * merge. Tapes are pretty cheap, but they're not entirely free. Each
2322 * additional tape reduces the amount of memory available to build runs,
2323 * which in turn can cause the same sort to need more runs, which makes
2324 * merging slower even if it can still be done in a single pass. Also,
2325 * high order merges are quite slow due to CPU cache effects; it can be
2326 * faster to pay the I/O cost of a polyphase merge than to perform a single
2327 * merge pass across many hundreds of tapes.
2329 mOrder = Max(mOrder, MINORDER);
2330 mOrder = Min(mOrder, MAXORDER);
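/*
 * Illustrative arithmetic (assuming the usual definitions: 8kB blocks,
 * so TAPE_BUFFER_OVERHEAD = BLCKSZ * 3 = 24kB and MERGE_BUFFER_SIZE =
 * BLCKSZ * 32 = 256kB): with allowedMem = 4MB,
 *
 *     mOrder = (4096kB - 24kB) / (256kB + 24kB) = 14
 *
 * so a 4MB sort can merge up to 14 input tapes per pass, subject to the
 * MINORDER/MAXORDER clamps applied above.
 */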
2336 * useselection - determine algorithm to use to sort first run.
2338 * It can sometimes be useful to use the replacement selection algorithm if it
2339 * results in one large run, and there is little available workMem. See
2340 * remarks on RUN_SECOND optimization within dumptuples().
2343 useselection(Tuplesortstate *state)
2346 * memtupsize might be noticeably higher than memtupcount here in atypical
2347 * cases. It seems slightly preferable to not allow recent outliers to
2348 * impact this determination. Note that caller's trace_sort output
2349 * reports memtupcount instead.
2351 if (state->memtupsize <= replacement_sort_tuples)
2358 * inittapes - initialize for tape sorting.
2360 * This is called only if we have found we don't have room to sort in memory.
2363 inittapes(Tuplesortstate *state)
2369 /* Compute number of tapes to use: merge order plus 1 */
2370 maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
2372 state->maxTapes = maxTapes;
2373 state->tapeRange = maxTapes - 1;
2377 elog(LOG, "switching to external sort with %d tapes: %s",
2378 maxTapes, pg_rusage_show(&state->ru_start));
2382 * Decrease availMem to reflect the space needed for tape buffers, when
2383 * writing the initial runs; but don't decrease it to the point that we
2384 * have no room for tuples. (That case is only likely to occur if sorting
2385 * pass-by-value Datums; in all other scenarios the memtuples[] array is
2386 * unlikely to occupy more than half of allowedMem. In the pass-by-value
2387 * case it's not important to account for tuple space, so we don't care if
2388 * LACKMEM becomes inaccurate.)
2390 tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
2392 if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
2393 USEMEM(state, tapeSpace);
2396 * Make sure that the temp file(s) underlying the tape set are created in
2397 * suitable temp tablespaces.
2399 PrepareTempTablespaces();
2402 * Create the tape set and allocate the per-tape data arrays.
2404 state->tapeset = LogicalTapeSetCreate(maxTapes);
2406 state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
2407 state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
2408 state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
2409 state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
2410 state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
2413 * Give replacement selection a try based on user setting. There will be
2414 * a switch to a simple hybrid sort-merge strategy after the first run
2415 * (iff we could not output one long run).
2417 state->replaceActive = useselection(state);
2419 if (state->replaceActive)
2422 * Convert the unsorted contents of memtuples[] into a heap. Each
2423 * tuple is marked as belonging to run number zero.
2425 * NOTE: we pass false for checkIndex since there's no point in
2426 * comparing indexes in this step, even though we do intend the
2427 * indexes to be part of the sort key...
2429 int ntuples = state->memtupcount;
2433 elog(LOG, "replacement selection will sort %d first run tuples",
2434 state->memtupcount);
2436 state->memtupcount = 0; /* make the heap empty */
2438 for (j = 0; j < ntuples; j++)
2440 /* Must copy source tuple to avoid possible overwrite */
2441 SortTuple stup = state->memtuples[j];
2443 stup.tupindex = RUN_FIRST;
2444 tuplesort_heap_insert(state, &stup, false);
2446 Assert(state->memtupcount == ntuples);
2449 state->currentRun = RUN_FIRST;
2452 * Initialize variables of Algorithm D (step D1).
2454 for (j = 0; j < maxTapes; j++)
2456 state->tp_fib[j] = 1;
2457 state->tp_runs[j] = 0;
2458 state->tp_dummy[j] = 1;
2459 state->tp_tapenum[j] = j;
2461 state->tp_fib[state->tapeRange] = 0;
2462 state->tp_dummy[state->tapeRange] = 0;
2465 state->destTape = 0;
2467 state->status = TSS_BUILDRUNS;
2471 * selectnewtape -- select new tape for new initial run.
2473 * This is called after finishing a run when we know another run
2474 * must be started. This implements steps D3, D4 of Algorithm D.
2477 selectnewtape(Tuplesortstate *state)
2482 /* Step D3: advance j (destTape) */
2483 if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
2488 if (state->tp_dummy[state->destTape] != 0)
2490 state->destTape = 0;
2494 /* Step D4: increase level */
2496 a = state->tp_fib[0];
2497 for (j = 0; j < state->tapeRange; j++)
2499 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
2500 state->tp_fib[j] = a + state->tp_fib[j + 1];
2502 state->destTape = 0;
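/*
 * Sketch of the run distribution this produces, for tapeRange = 3 input
 * tapes: tp_fib[] holds generalized Fibonacci targets.  Starting from
 * (1,1,1), each step-D4 level advance yields (2,2,1), then (4,3,2), then
 * (7,6,4), and so on; tp_dummy[] absorbs the difference whenever the
 * input runs out between perfect levels.
 */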
2506 * Initialize the slab allocation arena, for the given number of slots.
2509 init_slab_allocator(Tuplesortstate *state, int numSlots)
2516 state->slabMemoryBegin = palloc(numSlots * SLAB_SLOT_SIZE);
2517 state->slabMemoryEnd = state->slabMemoryBegin +
2518 numSlots * SLAB_SLOT_SIZE;
2519 state->slabFreeHead = (SlabSlot *) state->slabMemoryBegin;
2520 USEMEM(state, numSlots * SLAB_SLOT_SIZE);
2522 p = state->slabMemoryBegin;
2523 for (i = 0; i < numSlots - 1; i++)
2525 ((SlabSlot *) p)->nextfree = (SlabSlot *) (p + SLAB_SLOT_SIZE);
2526 p += SLAB_SLOT_SIZE;
2528 ((SlabSlot *) p)->nextfree = NULL;
2532 state->slabMemoryBegin = state->slabMemoryEnd = NULL;
2533 state->slabFreeHead = NULL;
2535 state->slabAllocatorUsed = true;
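/*
 * Resulting arena layout (a sketch): slabMemoryBegin points at numSlots
 * contiguous SLAB_SLOT_SIZE chunks, each chunk's nextfree pointing at the
 * following one and the last set to NULL, with slabFreeHead at slot 0.
 * readtup_alloc() (below) pops slots off this free list, and the
 * RELEASE_SLAB_SLOT macro used elsewhere in this file pushes them back
 * once a tuple has been consumed.
 */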
2539 * mergeruns -- merge all the completed initial runs.
2541 * This implements steps D5, D6 of Algorithm D. All input data has
2542 * already been written to initial runs on tape (see dumptuples).
2545 mergeruns(Tuplesortstate *state)
2554 Assert(state->status == TSS_BUILDRUNS);
2555 Assert(state->memtupcount == 0);
2557 if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
2560 * If there are multiple runs to be merged, when we go to read back
2561 * tuples from disk, abbreviated keys will not have been stored, and
2562 * we don't care to regenerate them. Disable abbreviation from this point on.
2565 state->sortKeys->abbrev_converter = NULL;
2566 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
2568 /* Not strictly necessary, but be tidy */
2569 state->sortKeys->abbrev_abort = NULL;
2570 state->sortKeys->abbrev_full_comparator = NULL;
2574 * Reset tuple memory. We've freed all the tuples that we previously
2575 * allocated. We will use the slab allocator from now on.
2577 MemoryContextDelete(state->tuplecontext);
2578 state->tuplecontext = NULL;
2581 * We no longer need a large memtuples array. (We will allocate a smaller
2582 * one for the heap later.)
2584 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
2585 pfree(state->memtuples);
2586 state->memtuples = NULL;
2589 * If we had fewer runs than tapes, refund the memory that we imagined we
2590 * would need for the tape buffers of the unused tapes.
2592 * numTapes and numInputTapes reflect the actual number of tapes we will
2593 * use. Note that the output tape's tape number is maxTapes - 1, so the
2594 * tape numbers of the used tapes are not consecutive, and you cannot just
2595 * loop from 0 to numTapes to visit all used tapes!
2597 if (state->Level == 1)
2599 numInputTapes = state->currentRun;
2600 numTapes = numInputTapes + 1;
2601 FREEMEM(state, (state->maxTapes - numTapes) * TAPE_BUFFER_OVERHEAD);
2605 numInputTapes = state->tapeRange;
2606 numTapes = state->maxTapes;
2610 * Initialize the slab allocator. We need one slab slot per input tape,
2611 * for the tuples in the heap, plus one to hold the tuple last returned
2612 * from tuplesort_gettuple. (If we're sorting pass-by-val Datums,
2613 * however, we don't need to allocate anything.)
2615 * From this point on, we no longer use the USEMEM()/LACKMEM() mechanism
2616 * to track memory usage of individual tuples.
2619 init_slab_allocator(state, numInputTapes + 1);
2621 init_slab_allocator(state, 0);
2624 * If we produced only one initial run (quite likely if the total data
2625 * volume is between 1X and 2X workMem when replacement selection is used,
2626 * but something we particularly count on when input is presorted), we can
2627 * just use that tape as the finished output, rather than doing a useless
2628 * merge. (This obvious optimization is not in Knuth's algorithm.)
2630 if (state->currentRun == RUN_SECOND)
2632 state->result_tape = state->tp_tapenum[state->destTape];
2633 /* must freeze and rewind the finished output tape */
2634 LogicalTapeFreeze(state->tapeset, state->result_tape);
2635 state->status = TSS_SORTEDONTAPE;
2640 * Allocate a new 'memtuples' array, for the heap. It will hold one tuple
2641 * from each input tape.
2643 state->memtupsize = numInputTapes;
2644 state->memtuples = (SortTuple *) palloc(numInputTapes * sizeof(SortTuple));
2645 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
2648 * Use all the remaining memory we have available for read buffers among the input tapes.
2651 * We do this only after checking for the case that we produced only one
2652 * initial run, because there is no need to use a large read buffer when
2653 * we're reading from a single tape. With one tape, the I/O pattern will
2654 * be the same regardless of the buffer size.
2656 * We don't try to "rebalance" the memory among tapes, when we start a new
2657 * merge phase, even if some tapes are inactive in the new phase. That
2658 * would be hard, because logtape.c doesn't know where one run ends and
2659 * another begins. When a new merge phase begins, and a tape doesn't
2660 * participate in it, its buffer nevertheless already contains tuples from
2661 * the next run on the same tape, so we cannot release the buffer. That's
2662 * OK in practice: merge performance isn't that sensitive to the amount of
2663 * buffers used, and most merge phases use all or almost all tapes, anyway.
2668 elog(LOG, "using " INT64_FORMAT " KB of memory for read buffers among %d input tapes",
2669 (state->availMem) / 1024, numInputTapes);
2672 state->read_buffer_size = Max(state->availMem / numInputTapes, 0);
2673 USEMEM(state, state->availMem);
2675 /* End of step D2: rewind all output tapes to prepare for merging */
2676 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2677 LogicalTapeRewindForRead(state->tapeset, tapenum, state->read_buffer_size);
2682 * At this point we know that tape[T] is empty. If there's just one
2683 * (real or dummy) run left on each input tape, then only one merge
2684 * pass remains. If we don't have to produce a materialized sorted
2685 * tape, we can stop at this point and do the final merge on-the-fly.
2687 if (!state->randomAccess)
2689 bool allOneRun = true;
2691 Assert(state->tp_runs[state->tapeRange] == 0);
2692 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2694 if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
2702 /* Tell logtape.c we won't be writing anymore */
2703 LogicalTapeSetForgetFreeSpace(state->tapeset);
2704 /* Initialize for the final merge pass */
2706 state->status = TSS_FINALMERGE;
2711 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
2712 while (state->tp_runs[state->tapeRange - 1] ||
2713 state->tp_dummy[state->tapeRange - 1])
2715 bool allDummy = true;
2717 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2719 if (state->tp_dummy[tapenum] == 0)
2728 state->tp_dummy[state->tapeRange]++;
2729 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2730 state->tp_dummy[tapenum]--;
2736 /* Step D6: decrease level */
2737 if (--state->Level == 0)
2739 /* rewind output tape T to use as new input */
2740 LogicalTapeRewindForRead(state->tapeset, state->tp_tapenum[state->tapeRange],
2741 state->read_buffer_size);
2742 /* rewind used-up input tape P, and prepare it for write pass */
2743 LogicalTapeRewindForWrite(state->tapeset, state->tp_tapenum[state->tapeRange - 1]);
2744 state->tp_runs[state->tapeRange - 1] = 0;
2747 * reassign tape units per step D6; note we no longer care about A[]
2749 svTape = state->tp_tapenum[state->tapeRange];
2750 svDummy = state->tp_dummy[state->tapeRange];
2751 svRuns = state->tp_runs[state->tapeRange];
2752 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
2754 state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
2755 state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
2756 state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
2758 state->tp_tapenum[0] = svTape;
2759 state->tp_dummy[0] = svDummy;
2760 state->tp_runs[0] = svRuns;
2764 * Done. Knuth says that the result is on TAPE[1], but since we exited
2765 * the loop without performing the last iteration of step D6, we have not
2766 * rearranged the tape unit assignment, and therefore the result is on
2767 * TAPE[T]. We need to do it this way so that we can freeze the final
2768 * output tape while rewinding it. The last iteration of step D6 would be
2769 * a waste of cycles anyway...
2771 state->result_tape = state->tp_tapenum[state->tapeRange];
2772 LogicalTapeFreeze(state->tapeset, state->result_tape);
2773 state->status = TSS_SORTEDONTAPE;
2775 /* Release the read buffers of all the other tapes, by rewinding them. */
2776 for (tapenum = 0; tapenum < state->maxTapes; tapenum++)
2778 if (tapenum != state->result_tape)
2779 LogicalTapeRewindForWrite(state->tapeset, tapenum);
2784 * Merge one run from each input tape, except ones with dummy runs.
2786 * This is the inner loop of Algorithm D step D5. We know that the
2787 * output tape is TAPE[T].
2790 mergeonerun(Tuplesortstate *state)
2792 int destTape = state->tp_tapenum[state->tapeRange];
2796 * Start the merge by loading one tuple from each active source tape into
2797 * the heap. We can also decrease the input run/dummy run counts.
2802 * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2803 * out, and replacing it with the next tuple from the same tape (if there is another one).
2806 while (state->memtupcount > 0)
2810 /* write the tuple to destTape */
2811 srcTape = state->memtuples[0].tupindex;
2812 WRITETUP(state, destTape, &state->memtuples[0]);
2814 /* recycle the slot of the tuple we just wrote out, for the next read */
2815 RELEASE_SLAB_SLOT(state, state->memtuples[0].tuple);
2818 * pull the next tuple from the tape, and replace the written-out tuple in the heap with it.
2821 if (mergereadnext(state, srcTape, &stup))
2823 stup.tupindex = srcTape;
2824 tuplesort_heap_replace_top(state, &stup, false);
2828 tuplesort_heap_delete_top(state, false);
2832 * When the heap empties, we're done. Write an end-of-run marker on the
2833 * output tape, and increment its count of real runs.
2835 markrunend(state, destTape);
2836 state->tp_runs[state->tapeRange]++;
2840 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
2841 pg_rusage_show(&state->ru_start));
2846 * beginmerge - initialize for a merge pass
2848 * We decrease the counts of real and dummy runs for each tape, and mark
2849 * which tapes contain active input runs in mergeactive[]. Then, fill the
2850 * merge heap with the first tuple from each active tape.
2853 beginmerge(Tuplesortstate *state)
2859 /* Heap should be empty here */
2860 Assert(state->memtupcount == 0);
2862 /* Adjust run counts and mark the active tapes */
2863 memset(state->mergeactive, 0,
2864 state->maxTapes * sizeof(*state->mergeactive));
2866 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2868 if (state->tp_dummy[tapenum] > 0)
2869 state->tp_dummy[tapenum]--;
2872 Assert(state->tp_runs[tapenum] > 0);
2873 state->tp_runs[tapenum]--;
2874 srcTape = state->tp_tapenum[tapenum];
2875 state->mergeactive[srcTape] = true;
2879 Assert(activeTapes > 0);
2880 state->activeTapes = activeTapes;
2882 /* Load the merge heap with the first tuple from each input tape */
2883 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2887 if (mergereadnext(state, srcTape, &tup))
2889 tup.tupindex = srcTape;
2890 tuplesort_heap_insert(state, &tup, false);
2896 * mergereadnext - read next tuple from one merge input tape
2898 * Returns false on EOF.
2901 mergereadnext(Tuplesortstate *state, int srcTape, SortTuple *stup)
2903 unsigned int tuplen;
2905 if (!state->mergeactive[srcTape])
2906 return false; /* tape's run is already exhausted */
2908 /* read next tuple, if any */
2909 if ((tuplen = getlen(state, srcTape, true)) == 0)
2911 state->mergeactive[srcTape] = false;
2914 READTUP(state, stup, srcTape, tuplen);
2920 * dumptuples - remove tuples from memtuples and write to tape
2922 * This is used during initial-run building, but not during merging.
2924 * When alltuples = false and replacement selection is still active, dump
2925 * only enough tuples to get under the availMem limit (and leave at least
2926 * one tuple in memtuples, since puttuple will then assume it is a heap that
2927 * has a tuple to compare to). We always insist there be at least one free
2928 * slot in the memtuples[] array.
2930 * When alltuples = true, dump everything currently in memory. (This
2931 * case is only used at end of input data, although in practice only the
2932 * first run could fail to dump all tuples when we LACKMEM(), and only
2933 * when replacement selection is active.)
2935 * If, when replacement selection is active, we see that the tuple run
2936 * number at the top of the heap has changed, start a new run. This must be
2937 * the first run, because replacement selection is always abandoned for all further runs.
2941 dumptuples(Tuplesortstate *state, bool alltuples)
2944 (LACKMEM(state) && state->memtupcount > 1) ||
2945 state->memtupcount >= state->memtupsize)
2947 if (state->replaceActive)
2950 * Still holding out for a case favorable to replacement
2951 * selection. Still incrementally spilling using heap.
2953 * Dump the heap's frontmost entry, and remove it from the heap.
2955 Assert(state->memtupcount > 0);
2956 WRITETUP(state, state->tp_tapenum[state->destTape],
2957 &state->memtuples[0]);
2958 tuplesort_heap_delete_top(state, true);
2963 * Once committed to quicksorting runs, never incrementally spill
2965 dumpbatch(state, alltuples);
2970 * If top run number has changed, we've finished the current run (this
2971 * can only be the first run), and will no longer spill incrementally.
2973 if (state->memtupcount == 0 ||
2974 state->memtuples[0].tupindex == HEAP_RUN_NEXT)
2976 markrunend(state, state->tp_tapenum[state->destTape]);
2977 Assert(state->currentRun == RUN_FIRST);
2978 state->currentRun++;
2979 state->tp_runs[state->destTape]++;
2980 state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
2984 elog(LOG, "finished incrementally writing %s run %d to tape %d: %s",
2985 (state->memtupcount == 0) ? "only" : "first",
2986 state->currentRun, state->destTape,
2987 pg_rusage_show(&state->ru_start));
2991 * Done if heap is empty, which is possible when there is only one long run.
2994 Assert(state->currentRun == RUN_SECOND);
2995 if (state->memtupcount == 0)
2998 * Replacement selection best case; no final merge required,
2999 * because there was only one initial run (second run has no
3000 * tuples). See RUN_SECOND case in mergeruns().
3006 * Abandon replacement selection for second run (as well as any subsequent runs).
3009 state->replaceActive = false;
3012 * First tuple of next run should not be heapified, and so will
3013 * bear placeholder run number. In practice this must actually be
3014 * the second run, which just became the currentRun, so we're
3015 * clear to quicksort and dump the tuples in batch next time
3016 * memtuples becomes full.
3018 Assert(state->memtuples[0].tupindex == HEAP_RUN_NEXT);
3019 selectnewtape(state);
3025 * dumpbatch - sort and dump all memtuples, forming one run on tape
3027 * Second or subsequent runs are never heapified by this module (although
3028 * heapification still respects run number differences between the first and
3029 * second runs), and a heap (replacement selection priority queue) is often
3030 * avoided in the first place.
3033 dumpbatch(Tuplesortstate *state, bool alltuples)
3039 * Final call might require no sorting, in rare cases where we just so
3040 * happen to have previously LACKMEM()'d at the point where exactly all
3041 * remaining tuples are loaded into memory, just before input was exhausted.
3044 * In general, short final runs are quite possible. Rather than allowing
3045 * a special case where there was a superfluous selectnewtape() call (i.e.
3046 * a call with no subsequent run actually written to destTape), we prefer
3047 * to write out a 0 tuple run.
3049 * mergereadnext() is prepared for 0 tuple runs, and will reliably mark
3050 * the tape inactive for the merge when called from beginmerge(). This
3051 * case is therefore similar to the case where mergeonerun() finds a dummy
3052 * run for the tape, and so doesn't need to merge a run from the tape (or
3053 * conceptually "merges" the dummy run, if you prefer). According to
3054 * Knuth, Algorithm D "isn't strictly optimal" in its method of
3055 * distribution and dummy run assignment; this edge case seems very
3056 * unlikely to make that appreciably worse.
3058 Assert(state->status == TSS_BUILDRUNS);
3061 * It seems unlikely that this limit will ever be exceeded, but take no chances.
3064 if (state->currentRun == INT_MAX)
3066 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
3067 errmsg("cannot have more than %d runs for an external sort",
3070 state->currentRun++;
3074 elog(LOG, "starting quicksort of run %d: %s",
3075 state->currentRun, pg_rusage_show(&state->ru_start));
3079 * Sort all tuples accumulated within the allowed amount of memory for
3080 * this run using quicksort
3082 tuplesort_sort_memtuples(state);
3086 elog(LOG, "finished quicksort of run %d: %s",
3087 state->currentRun, pg_rusage_show(&state->ru_start));
3090 memtupwrite = state->memtupcount;
3091 for (i = 0; i < memtupwrite; i++)
3093 WRITETUP(state, state->tp_tapenum[state->destTape],
3094 &state->memtuples[i]);
3095 state->memtupcount--;
3099 * Reset tuple memory. We've freed all of the tuples that we previously
3100 * allocated. It's important to avoid fragmentation when there is a stark
3101 * change in the sizes of incoming tuples. Fragmentation due to
3102 * AllocSetFree's bucketing by size class might be particularly bad if
3103 * this step wasn't taken.
3105 MemoryContextReset(state->tuplecontext);
3107 markrunend(state, state->tp_tapenum[state->destTape]);
3108 state->tp_runs[state->destTape]++;
3109 state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
3113 elog(LOG, "finished writing run %d to tape %d: %s",
3114 state->currentRun, state->destTape,
3115 pg_rusage_show(&state->ru_start));
3119 selectnewtape(state);
3123 * tuplesort_rescan - rewind and replay the scan
3126 tuplesort_rescan(Tuplesortstate *state)
3128 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3130 Assert(state->randomAccess);
3132 switch (state->status)
3134 case TSS_SORTEDINMEM:
3136 state->eof_reached = false;
3137 state->markpos_offset = 0;
3138 state->markpos_eof = false;
3140 case TSS_SORTEDONTAPE:
3141 LogicalTapeRewindForRead(state->tapeset,
3144 state->eof_reached = false;
3145 state->markpos_block = 0L;
3146 state->markpos_offset = 0;
3147 state->markpos_eof = false;
3150 elog(ERROR, "invalid tuplesort state");
3154 MemoryContextSwitchTo(oldcontext);
3158 * tuplesort_markpos - saves current position in the merged sort file
3161 tuplesort_markpos(Tuplesortstate *state)
3163 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3165 Assert(state->randomAccess);
3167 switch (state->status)
3169 case TSS_SORTEDINMEM:
3170 state->markpos_offset = state->current;
3171 state->markpos_eof = state->eof_reached;
3173 case TSS_SORTEDONTAPE:
3174 LogicalTapeTell(state->tapeset,
3176 &state->markpos_block,
3177 &state->markpos_offset);
3178 state->markpos_eof = state->eof_reached;
3181 elog(ERROR, "invalid tuplesort state");
3185 MemoryContextSwitchTo(oldcontext);
3189 * tuplesort_restorepos - restores current position in merged sort file to
3190 * last saved position
3193 tuplesort_restorepos(Tuplesortstate *state)
3195 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3197 Assert(state->randomAccess);
3199 switch (state->status)
3201 case TSS_SORTEDINMEM:
3202 state->current = state->markpos_offset;
3203 state->eof_reached = state->markpos_eof;
3205 case TSS_SORTEDONTAPE:
3206 if (!LogicalTapeSeek(state->tapeset,
3208 state->markpos_block,
3209 state->markpos_offset))
3210 elog(ERROR, "tuplesort_restorepos failed");
3211 state->eof_reached = state->markpos_eof;
3214 elog(ERROR, "invalid tuplesort state");
3218 MemoryContextSwitchTo(oldcontext);
3222 * tuplesort_get_stats - extract summary statistics
3224 * This can be called after tuplesort_performsort() finishes to obtain
3225 * printable summary information about how the sort was performed.
3226 * spaceUsed is measured in kilobytes.
3229 tuplesort_get_stats(Tuplesortstate *state,
3230 const char **sortMethod,
3231 const char **spaceType,
3235 * Note: it might seem we should provide both memory and disk usage for a
3236 * disk-based sort. However, the current code doesn't track memory space
3237 * accurately once we have begun to return tuples to the caller (since we
3238 * don't account for pfree's the caller is expected to do), so we cannot
3239 * rely on availMem in a disk sort. This does not seem worth the overhead
3240 * to fix. Is it worth creating an API for the memory context code to
3241 * tell us how much is actually used in sortcontext?
3245 *spaceType = "Disk";
3246 *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
3250 *spaceType = "Memory";
3251 *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
3254 switch (state->status)
3256 case TSS_SORTEDINMEM:
3257 if (state->boundUsed)
3258 *sortMethod = "top-N heapsort";
3260 *sortMethod = "quicksort";
3262 case TSS_SORTEDONTAPE:
3263 *sortMethod = "external sort";
3265 case TSS_FINALMERGE:
3266 *sortMethod = "external merge";
3269 *sortMethod = "still in progress";
3276 * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
3278 * Compare two SortTuples. If checkIndex is true, use the tuple index
3279 * as the front of the sort key; otherwise, no.
3281 * Note that for checkIndex callers, the heap invariant is never
3282 * maintained beyond the first run, and so there are no COMPARETUP()
3283 * calls needed to distinguish tuples in HEAP_RUN_NEXT.
3286 #define HEAPCOMPARE(tup1,tup2) \
3287 (checkIndex && ((tup1)->tupindex != (tup2)->tupindex || \
3288 (tup1)->tupindex == HEAP_RUN_NEXT) ? \
3289 ((tup1)->tupindex) - ((tup2)->tupindex) : \
3290 COMPARETUP(state, tup1, tup2))
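/*
 * Example of the macro's effect (illustrative): with checkIndex true, a
 * RUN_FIRST tuple compared against a HEAP_RUN_NEXT tuple is ordered purely
 * by the tupindex difference, so COMPARETUP() is never called; two
 * HEAP_RUN_NEXT tuples compare as equal for the same reason.  Only two
 * RUN_FIRST tuples fall through to a real key comparison.
 */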
3293 * Convert the existing unordered array of SortTuples to a bounded heap,
3294 * discarding all but the smallest "state->bound" tuples.
3296 * When working with a bounded heap, we want to keep the largest entry
3297 * at the root (array entry zero), instead of the smallest as in the normal
3298 * sort case. This allows us to discard the largest entry cheaply.
3299 * Therefore, we temporarily reverse the sort direction.
3301 * We assume that all entries in a bounded heap will always have tupindex
3302 * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
3303 * the direction of comparison for tupindexes.
3306 make_bounded_heap(Tuplesortstate *state)
3308 int tupcount = state->memtupcount;
3311 Assert(state->status == TSS_INITIAL);
3312 Assert(state->bounded);
3313 Assert(tupcount >= state->bound);
3315 /* Reverse sort direction so largest entry will be at root */
3316 reversedirection(state);
3318 state->memtupcount = 0; /* make the heap empty */
3319 for (i = 0; i < tupcount; i++)
3321 if (state->memtupcount < state->bound)
3323 /* Insert next tuple into heap */
3324 /* Must copy source tuple to avoid possible overwrite */
3325 SortTuple stup = state->memtuples[i];
3327 stup.tupindex = 0; /* not used */
3328 tuplesort_heap_insert(state, &stup, false);
3333 * The heap is full. Replace the largest entry with the new
3334 * tuple, or just discard it, if it's larger than anything already
3337 if (COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
3339 free_sort_tuple(state, &state->memtuples[i]);
3340 CHECK_FOR_INTERRUPTS();
3343 tuplesort_heap_replace_top(state, &state->memtuples[i], false);
3347 Assert(state->memtupcount == state->bound);
3348 state->status = TSS_BOUNDED;
3352 * Convert the bounded heap to a properly-sorted array
3355 sort_bounded_heap(Tuplesortstate *state)
3357 int tupcount = state->memtupcount;
3359 Assert(state->status == TSS_BOUNDED);
3360 Assert(state->bounded);
3361 Assert(tupcount == state->bound);
3364 * We can unheapify in place because each delete-top call will remove the
3365 * largest entry, which we can promptly store in the newly freed slot at
3366 * the end. Once we're down to a single-entry heap, we're done.
3368 while (state->memtupcount > 1)
3370 SortTuple stup = state->memtuples[0];
3372 /* this sifts-up the next-largest entry and decreases memtupcount */
3373 tuplesort_heap_delete_top(state, false);
3374 state->memtuples[state->memtupcount] = stup;
3376 state->memtupcount = tupcount;
3379 * Reverse sort direction back to the original state. This is not
3380 * actually necessary but seems like a good idea for tidiness.
3382 reversedirection(state);
3384 state->status = TSS_SORTEDINMEM;
3385 state->boundUsed = true;
3389 * Sort all memtuples using specialized qsort() routines.
3391 * Quicksort is used for small in-memory sorts. Quicksort is also generally
3392 * preferred to replacement selection for generating runs during external sort
3393 * operations, although replacement selection is sometimes used for the first run.
3397 tuplesort_sort_memtuples(Tuplesortstate *state)
3399 if (state->memtupcount > 1)
3401 /* Can we use the single-key sort function? */
3402 if (state->onlyKey != NULL)
3403 qsort_ssup(state->memtuples, state->memtupcount,
3406 qsort_tuple(state->memtuples,
3414 * Insert a new tuple into an empty or existing heap, maintaining the
3415 * heap invariant. Caller is responsible for ensuring there's room.
3417 * Note: For some callers, tuple points to a memtuples[] entry above the
3418 * end of the heap. This is safe as long as it's not immediately adjacent
3419 * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
3420 * is, it might get overwritten before being moved into the heap!
3423 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
3426 SortTuple *memtuples;
3429 memtuples = state->memtuples;
3430 Assert(state->memtupcount < state->memtupsize);
3431 Assert(!checkIndex || tuple->tupindex == RUN_FIRST);
3433 CHECK_FOR_INTERRUPTS();
3436 * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
3437 * using 1-based array indexes, not 0-based.
3439 j = state->memtupcount++;
3442 int i = (j - 1) >> 1;
3444 if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
3446 memtuples[j] = memtuples[i];
3449 memtuples[j] = *tuple;
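/*
 * The same sift-up loop on plain ints, as a self-contained illustration
 * of the technique (a hypothetical helper, not used by this module):
 */
#ifdef TUPLESORT_USAGE_EXAMPLE
static void
int_heap_insert(int *heap, int *count, int value)
{
	int			j = (*count)++;	/* new entry starts at the end */

	while (j > 0)
	{
		int			i = (j - 1) >> 1;	/* parent of slot j */

		if (value >= heap[i])
			break;				/* heap invariant holds; stop */
		heap[j] = heap[i];		/* pull parent down into the hole */
		j = i;
	}
	heap[j] = value;
}
#endif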
3453 * Remove the tuple at state->memtuples[0] from the heap. Decrement
3454 * memtupcount, and sift up to maintain the heap invariant.
3456 * The caller has already free'd the tuple the top node points to, if necessary.
3460 tuplesort_heap_delete_top(Tuplesortstate *state, bool checkIndex)
3462 SortTuple *memtuples = state->memtuples;
3465 Assert(!checkIndex || state->currentRun == RUN_FIRST);
3466 if (--state->memtupcount <= 0)
3470 * Remove the last tuple in the heap, and re-insert it, by replacing the
3471 * current top node with it.
3473 tuple = &memtuples[state->memtupcount];
3474 tuplesort_heap_replace_top(state, tuple, checkIndex);
3478 * Replace the tuple at state->memtuples[0] with a new tuple. Sift up to
3479 * maintain the heap invariant.
3481 * This corresponds to Knuth's "sift-up" algorithm (Algorithm 5.2.3H,
3482 * Heapsort, steps H3-H8).
3485 tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple,
3488 SortTuple *memtuples = state->memtuples;
3492 Assert(!checkIndex || state->currentRun == RUN_FIRST);
3493 Assert(state->memtupcount >= 1);
3495 CHECK_FOR_INTERRUPTS();
3497 n = state->memtupcount;
3498 i = 0; /* i is where the "hole" is */
3506 HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
3508 if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
3510 memtuples[i] = memtuples[j];
3513 memtuples[i] = *tuple;
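/*
 * Matching sift-down on plain ints (again a hypothetical helper, shown
 * only to illustrate the loop structure used above):
 */
#ifdef TUPLESORT_USAGE_EXAMPLE
static void
int_heap_replace_top(int *heap, int count, int value)
{
	int			i = 0;			/* the "hole" starts at the root */

	for (;;)
	{
		int			j = 2 * i + 1;	/* left child of slot i */

		if (j >= count)
			break;
		if (j + 1 < count && heap[j + 1] < heap[j])
			j++;				/* pick the smaller child */
		if (value <= heap[j])
			break;				/* heap invariant holds; stop */
		heap[i] = heap[j];		/* pull child up into the hole */
		i = j;
	}
	heap[i] = value;
}
#endif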
3517 * Function to reverse the sort direction from its current state
3519 * It is not safe to call this when performing hash tuplesorts
3522 reversedirection(Tuplesortstate *state)
3524 SortSupport sortKey = state->sortKeys;
3527 for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
3529 sortKey->ssup_reverse = !sortKey->ssup_reverse;
3530 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
3536 * Tape interface routines
3540 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
3544 if (LogicalTapeRead(state->tapeset, tapenum,
3545 &len, sizeof(len)) != sizeof(len))
3546 elog(ERROR, "unexpected end of tape");
3547 if (len == 0 && !eofOK)
3548 elog(ERROR, "unexpected end of data");
3553 markrunend(Tuplesortstate *state, int tapenum)
3555 unsigned int len = 0;
3557 LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
3561 * Get memory for tuple from within READTUP() routine.
3563 * We use next free slot from the slab allocator, or palloc() if the tuple
3564 * is too large for that.
3567 readtup_alloc(Tuplesortstate *state, Size tuplen)
3572 * We pre-allocate enough slots in the slab arena that we should never run out.
3575 Assert(state->slabFreeHead);
3577 if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead)
3578 return MemoryContextAlloc(state->sortcontext, tuplen);
3581 buf = state->slabFreeHead;
3582 /* Reuse this slot */
3583 state->slabFreeHead = buf->nextfree;
3591 * Routines specialized for HeapTuple (actually MinimalTuple) case
3595 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3597 SortSupport sortKey = state->sortKeys;
3610 /* Compare the leading sort key */
3611 compare = ApplySortComparator(a->datum1, a->isnull1,
3612 b->datum1, b->isnull1,
3617 /* Compare additional sort keys */
3618 ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3619 ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
3620 rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3621 rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
3622 tupDesc = state->tupDesc;
3624 if (sortKey->abbrev_converter)
3626 attno = sortKey->ssup_attno;
3628 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3629 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3631 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
3639 for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
3641 attno = sortKey->ssup_attno;
3643 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3644 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
3646 compare = ApplySortComparator(datum1, isnull1,
3657 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
3660 * We expect the passed "tup" to be a TupleTableSlot, and form a
3661 * MinimalTuple using the exported interface for that.
3663 TupleTableSlot *slot = (TupleTableSlot *) tup;
3667 MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
3669 /* copy the tuple into sort storage */
3670 tuple = ExecCopySlotMinimalTuple(slot);
3671 stup->tuple = (void *) tuple;
3672 USEMEM(state, GetMemoryChunkSpace(tuple));
3673 /* set up first-column key value */
3674 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3675 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3676 original = heap_getattr(&htup,
3677 state->sortKeys[0].ssup_attno,
3681 MemoryContextSwitchTo(oldcontext);
3683 if (!state->sortKeys->abbrev_converter || stup->isnull1)
3686 * Store ordinary Datum representation, or NULL value. If there is a
3687 * converter it won't expect NULL values, and cost model is not
3688 * required to account for NULL, so in that case we avoid calling
3689 * converter and just set datum1 to zeroed representation (to be
3690 * consistent, and to support cheap inequality tests for NULL
3691 * abbreviated keys).
3693 stup->datum1 = original;
3695 else if (!consider_abort_common(state))
3697 /* Store abbreviated key representation */
3698 stup->datum1 = state->sortKeys->abbrev_converter(original,
3703 /* Abort abbreviation */
3706 stup->datum1 = original;
3709 * Set state to be consistent with never trying abbreviation.
3711 * Alter datum1 representation in already-copied tuples, so as to
3712 * ensure a consistent representation (current tuple was just
3713 * handled). It does not matter if some dumped tuples are already
3714 * sorted on tape, since serialized tuples lack abbreviated keys
3715 * (TSS_BUILDRUNS state prevents control reaching here in any case).
3717 for (i = 0; i < state->memtupcount; i++)
3719 SortTuple *mtup = &state->memtuples[i];
3721 htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
3722 MINIMAL_TUPLE_OFFSET;
3723 htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
3724 MINIMAL_TUPLE_OFFSET);
3726 mtup->datum1 = heap_getattr(&htup,
3727 state->sortKeys[0].ssup_attno,
3735 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
3737 MinimalTuple tuple = (MinimalTuple) stup->tuple;
3739 /* the part of the MinimalTuple we'll write: */
3740 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3741 unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
3743 /* total on-disk footprint: */
3744 unsigned int tuplen = tupbodylen + sizeof(int);
3746 LogicalTapeWrite(state->tapeset, tapenum,
3747 (void *) &tuplen, sizeof(tuplen));
3748 LogicalTapeWrite(state->tapeset, tapenum,
3749 (void *) tupbody, tupbodylen);
3750 if (state->randomAccess) /* need trailing length word? */
3751 LogicalTapeWrite(state->tapeset, tapenum,
3752 (void *) &tuplen, sizeof(tuplen));
3754 if (!state->slabAllocatorUsed)
3756 FREEMEM(state, GetMemoryChunkSpace(tuple));
3757 heap_free_minimal_tuple(tuple);
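/*
 * Resulting on-tape layout for one tuple (randomAccess case):
 *
 *     +--------+----------------------------+--------+
 *     | tuplen | tuple body                 | tuplen |
 *     +--------+----------------------------+--------+
 *       uint     tuplen - sizeof(int)         uint
 *
 * The trailing copy of the length word is what makes the backward scan in
 * tuplesort_gettuple_common() possible; it is omitted when the caller did
 * not request random access.
 */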
3762 readtup_heap(Tuplesortstate *state, SortTuple *stup,
3763 int tapenum, unsigned int len)
3765 unsigned int tupbodylen = len - sizeof(int);
3766 unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
3767 MinimalTuple tuple = (MinimalTuple) readtup_alloc(state, tuplen);
3768 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3771 /* read in the tuple proper */
3772 tuple->t_len = tuplen;
3773 LogicalTapeReadExact(state->tapeset, tapenum,
3774 tupbody, tupbodylen);
3775 if (state->randomAccess) /* need trailing length word? */
3776 LogicalTapeReadExact(state->tapeset, tapenum,
3777 &tuplen, sizeof(tuplen));
3778 stup->tuple = (void *) tuple;
3779 /* set up first-column key value */
3780 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3781 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3782 stup->datum1 = heap_getattr(&htup,
3783 state->sortKeys[0].ssup_attno,
3789 * Routines specialized for the CLUSTER case (HeapTuple data, with
3790 * comparisons per a btree index definition)
static int
comparetup_cluster(const SortTuple *a, const SortTuple *b,
				   Tuplesortstate *state)
{
	SortSupport sortKey = state->sortKeys;
	HeapTuple	ltup;
	HeapTuple	rtup;
	TupleDesc	tupDesc;
	int			nkey;
	int32		compare;
	Datum		datum1, datum2;
	bool		isnull1, isnull2;
	AttrNumber	leading = state->indexInfo->ii_KeyAttrNumbers[0];

	/* Be prepared to compare additional sort keys */
	ltup = (HeapTuple) a->tuple;
	rtup = (HeapTuple) b->tuple;
	tupDesc = state->tupDesc;

	/* Compare the leading sort key, if it's simple */
	if (leading != 0)
	{
		compare = ApplySortComparator(a->datum1, a->isnull1,
									  b->datum1, b->isnull1,
									  sortKey);
		if (compare != 0)
			return compare;
		if (sortKey->abbrev_converter)
		{
			datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
			datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
			compare = ApplySortAbbrevFullComparator(datum1, isnull1,
													datum2, isnull2,
													sortKey);
		}
		if (compare != 0 || state->nKeys == 1)
			return compare;
		/* Compare additional columns the hard way */
		sortKey++;
		nkey = 1;
	}
	else
	{
		/* Must compare all keys the hard way */
		nkey = 0;
	}

	if (state->indexInfo->ii_Expressions == NULL)
	{
		/* If not expression index, just compare the proper heap attrs */
		for (; nkey < state->nKeys; nkey++, sortKey++)
		{
			AttrNumber	attno = state->indexInfo->ii_KeyAttrNumbers[nkey];

			datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
			datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
			compare = ApplySortComparator(datum1, isnull1,
										  datum2, isnull2,
										  sortKey);
			if (compare != 0)
				return compare;
		}
	}
	else
	{
		/*
		 * In the expression index case, compute the whole index tuple and
		 * then compare values.  It would perhaps be faster to compute only
		 * as many columns as we need to compare, but that would require
		 * duplicating all the logic in FormIndexDatum.
		 */
		Datum		l_index_values[INDEX_MAX_KEYS];
		bool		l_index_isnull[INDEX_MAX_KEYS];
		Datum		r_index_values[INDEX_MAX_KEYS];
		bool		r_index_isnull[INDEX_MAX_KEYS];
		TupleTableSlot *ecxt_scantuple;

		/* Reset context each time to prevent memory leakage */
		ResetPerTupleExprContext(state->estate);

		ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;

		ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
		FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
					   l_index_values, l_index_isnull);
		ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
		FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
					   r_index_values, r_index_isnull);

		for (; nkey < state->nKeys; nkey++, sortKey++)
		{
			compare = ApplySortComparator(l_index_values[nkey],
										  l_index_isnull[nkey],
										  r_index_values[nkey],
										  r_index_isnull[nkey],
										  sortKey);
			if (compare != 0)
				return compare;
		}
	}

	return 0;
}
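/*
 * Worked example for the two paths above (table and index definitions are
 * hypothetical): for CLUSTER on an index built over plain columns, say
 * (col2, col5), each comparison fetches heap attributes 2 and 5 directly
 * with heap_getattr().  For an expression index such as one over
 * (lower(name)), there is no heap column holding lower(name), so both tuples
 * are pushed through FormIndexDatum() to evaluate the expressions, and the
 * resulting index values are compared instead.
 */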
static void
copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	HeapTuple	tuple = (HeapTuple) tup;
	Datum		original;
	MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);

	/* copy the tuple into sort storage */
	tuple = heap_copytuple(tuple);
	stup->tuple = (void *) tuple;
	USEMEM(state, GetMemoryChunkSpace(tuple));

	MemoryContextSwitchTo(oldcontext);

	/*
	 * set up first-column key value, and potentially abbreviate, if it's a
	 * simple column
	 */
	if (state->indexInfo->ii_KeyAttrNumbers[0] == 0)
		return;

	original = heap_getattr(tuple,
							state->indexInfo->ii_KeyAttrNumbers[0],
							state->tupDesc,
							&stup->isnull1);

	if (!state->sortKeys->abbrev_converter || stup->isnull1)
	{
		/*
		 * Store ordinary Datum representation, or NULL value.  If there is a
		 * converter it won't expect NULL values, and cost model is not
		 * required to account for NULL, so in that case we avoid calling
		 * converter and just set datum1 to zeroed representation (to be
		 * consistent, and to support cheap inequality tests for NULL
		 * abbreviated keys).
		 */
		stup->datum1 = original;
	}
	else if (!consider_abort_common(state))
	{
		/* Store abbreviated key representation */
		stup->datum1 = state->sortKeys->abbrev_converter(original,
														 state->sortKeys);
	}
	else
	{
		/* Abort abbreviation */
		int			i;

		stup->datum1 = original;

		/*
		 * Set state to be consistent with never trying abbreviation.
		 *
		 * Alter datum1 representation in already-copied tuples, so as to
		 * ensure a consistent representation (current tuple was just
		 * handled).  It does not matter if some dumped tuples are already
		 * sorted on tape, since serialized tuples lack abbreviated keys
		 * (TSS_BUILDRUNS state prevents control reaching here in any case).
		 */
		for (i = 0; i < state->memtupcount; i++)
		{
			SortTuple  *mtup = &state->memtuples[i];

			tuple = (HeapTuple) mtup->tuple;
			mtup->datum1 = heap_getattr(tuple,
										state->indexInfo->ii_KeyAttrNumbers[0],
										state->tupDesc,
										&mtup->isnull1);
		}
	}
}
static void
writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	HeapTuple	tuple = (HeapTuple) stup->tuple;
	unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);

	/* We need to store t_self, but not other fields of HeapTupleData */
	LogicalTapeWrite(state->tapeset, tapenum,
					 &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 &tuple->t_self, sizeof(ItemPointerData));
	LogicalTapeWrite(state->tapeset, tapenum,
					 tuple->t_data, tuple->t_len);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 &tuplen, sizeof(tuplen));

	if (!state->slabAllocatorUsed)
	{
		FREEMEM(state, GetMemoryChunkSpace(tuple));
		heap_freetuple(tuple);
	}
}
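/*
 * Sketch of the record layout written above: unlike the heap case, the
 * length word is followed first by the ItemPointerData (t_self) and then by
 * t_len bytes of tuple data, so the stored length is
 * t_len + sizeof(ItemPointerData) + sizeof(int).  readtup_cluster() below
 * relies on exactly this layout to rebuild a HeapTupleData header.
 */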
static void
readtup_cluster(Tuplesortstate *state, SortTuple *stup,
				int tapenum, unsigned int tuplen)
{
	unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
	HeapTuple	tuple = (HeapTuple) readtup_alloc(state,
												  t_len + HEAPTUPLESIZE);

	/* Reconstruct the HeapTupleData header */
	tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
	tuple->t_len = t_len;
	LogicalTapeReadExact(state->tapeset, tapenum,
						 &tuple->t_self, sizeof(ItemPointerData));
	/* We don't currently bother to reconstruct t_tableOid */
	tuple->t_tableOid = InvalidOid;
	/* Read in the tuple body */
	LogicalTapeReadExact(state->tapeset, tapenum,
						 tuple->t_data, tuple->t_len);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
	stup->tuple = (void *) tuple;
	/* set up first-column key value, if it's a simple column */
	if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
		stup->datum1 = heap_getattr(tuple,
									state->indexInfo->ii_KeyAttrNumbers[0],
									state->tupDesc,
									&stup->isnull1);
}
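/*
 * Note on the single allocation above: readtup_alloc() is asked for
 * t_len + HEAPTUPLESIZE bytes, so the HeapTupleData header and the tuple
 * body live in one chunk, with t_data pointing just past the header.
 * Keeping the whole tuple in one chunk is what allows it to be handed back
 * through a single readtup_alloc() slot during merging.
 */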
/*
 * Routines specialized for IndexTuple case
 *
 * The btree and hash cases require separate comparison functions, but the
 * IndexTuple representation is the same so the copy/write/read support
 * functions can be shared.
 */
static int
comparetup_index_btree(const SortTuple *a, const SortTuple *b,
					   Tuplesortstate *state)
{
	/*
	 * This is similar to comparetup_heap(), but expects index tuples.  There
	 * is also special handling for enforcing uniqueness, and special
	 * treatment for equal keys at the end.
	 */
	SortSupport sortKey = state->sortKeys;
	IndexTuple	tuple1;
	IndexTuple	tuple2;
	int			keysz;
	TupleDesc	tupDes;
	bool		equal_hasnull = false;
	int			nkey;
	int32		compare;
	Datum		datum1, datum2;
	bool		isnull1, isnull2;

	/* Compare the leading sort key */
	compare = ApplySortComparator(a->datum1, a->isnull1,
								  b->datum1, b->isnull1,
								  sortKey);
	if (compare != 0)
		return compare;

	/* Compare additional sort keys */
	tuple1 = (IndexTuple) a->tuple;
	tuple2 = (IndexTuple) b->tuple;
	keysz = state->nKeys;
	tupDes = RelationGetDescr(state->indexRel);

	if (sortKey->abbrev_converter)
	{
		datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
		datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
		compare = ApplySortAbbrevFullComparator(datum1, isnull1,
												datum2, isnull2,
												sortKey);
		if (compare != 0)
			return compare;
	}

	/* they are equal, so we only need to examine one null flag */
	if (a->isnull1)
		equal_hasnull = true;

	sortKey++;
	for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
	{
		datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
		datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
		compare = ApplySortComparator(datum1, isnull1,
									  datum2, isnull2,
									  sortKey);
		if (compare != 0)
			return compare;		/* done when we find unequal attributes */

		/* they are equal, so we only need to examine one null flag */
		if (isnull1)
			equal_hasnull = true;
	}

	/*
	 * If btree has asked us to enforce uniqueness, complain if two equal
	 * tuples are detected (unless there was at least one NULL field).
	 *
	 * It is sufficient to make the test here, because if two tuples are
	 * equal they *must* get compared at some stage of the sort --- otherwise
	 * the sort algorithm wouldn't have checked whether one must appear
	 * before the other.
	 */
	if (state->enforceUnique && !equal_hasnull)
	{
		Datum		values[INDEX_MAX_KEYS];
		bool		isnull[INDEX_MAX_KEYS];
		char	   *key_desc;

		/*
		 * Some rather brain-dead implementations of qsort (such as the one
		 * in QNX 4) will sometimes call the comparison routine to compare a
		 * value to itself, but we always use our own implementation, which
		 * does not.
		 */
		Assert(tuple1 != tuple2);

		index_deform_tuple(tuple1, tupDes, values, isnull);
		key_desc = BuildIndexValueDescription(state->indexRel, values, isnull);

		ereport(ERROR,
				(errcode(ERRCODE_UNIQUE_VIOLATION),
				 errmsg("could not create unique index \"%s\"",
						RelationGetRelationName(state->indexRel)),
				 key_desc ? errdetail("Key %s is duplicated.", key_desc) :
				 errdetail("Duplicate keys exist."),
				 errtableconstraint(state->heapRel,
									RelationGetRelationName(state->indexRel))));
	}

	/*
	 * If key values are equal, we sort on ItemPointer.  This does not affect
	 * validity of the finished index, but it may be useful to have index
	 * scans in physical order.
	 */
	{
		BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
		BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

		if (blk1 != blk2)
			return (blk1 < blk2) ? -1 : 1;
	}
	{
		OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
		OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

		if (pos1 != pos2)
			return (pos1 < pos2) ? -1 : 1;
	}
	return 0;
}
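/*
 * Illustration of the ItemPointer tiebreak above (the TIDs are made up):
 * with equal keys, a tuple at TID (block 7, offset 3) sorts before one at
 * (block 7, offset 9), and both sort before (block 8, offset 1).  The
 * resulting order is fully deterministic, so the output never depends on
 * how the sort algorithm happens to break ties.
 */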
static int
comparetup_index_hash(const SortTuple *a, const SortTuple *b,
					  Tuplesortstate *state)
{
	uint32		hash1, hash2;
	IndexTuple	tuple1;
	IndexTuple	tuple2;

	/*
	 * Fetch hash keys and mask off bits we don't want to sort by. We know
	 * that the first column of the index tuple is the hash key.
	 */
	Assert(!a->isnull1);
	hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
	Assert(!b->isnull1);
	hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;

	if (hash1 > hash2)
		return 1;
	else if (hash1 < hash2)
		return -1;

	/*
	 * If hash values are equal, we sort on ItemPointer.  This does not
	 * affect validity of the finished index, but it may be useful to have
	 * index scans in physical order.
	 */
	tuple1 = (IndexTuple) a->tuple;
	tuple2 = (IndexTuple) b->tuple;
	{
		BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
		BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

		if (blk1 != blk2)
			return (blk1 < blk2) ? -1 : 1;
	}
	{
		OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
		OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

		if (pos1 != pos2)
			return (pos1 < pos2) ? -1 : 1;
	}
	return 0;
}
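/*
 * Example of the masking above (the mask value is hypothetical): with
 * hash_mask = 0x3FF, only the low 10 bits participate, so hash codes
 * 0x12345 and 0xAB745 both reduce to 0x345 and compare equal here, falling
 * through to the ItemPointer tiebreak.  Sorting by the masked value groups
 * tuples by the hash bucket they will land in.
 */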
static void
copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	IndexTuple	tuple = (IndexTuple) tup;
	unsigned int tuplen = IndexTupleSize(tuple);
	IndexTuple	newtuple;
	Datum		original;

	/* copy the tuple into sort storage */
	newtuple = (IndexTuple) MemoryContextAlloc(state->tuplecontext, tuplen);
	memcpy(newtuple, tuple, tuplen);
	USEMEM(state, GetMemoryChunkSpace(newtuple));
	stup->tuple = (void *) newtuple;
	/* set up first-column key value */
	original = index_getattr(newtuple,
							 1,
							 RelationGetDescr(state->indexRel),
							 &stup->isnull1);

	if (!state->sortKeys->abbrev_converter || stup->isnull1)
	{
		/*
		 * Store ordinary Datum representation, or NULL value.  If there is a
		 * converter it won't expect NULL values, and cost model is not
		 * required to account for NULL, so in that case we avoid calling
		 * converter and just set datum1 to zeroed representation (to be
		 * consistent, and to support cheap inequality tests for NULL
		 * abbreviated keys).
		 */
		stup->datum1 = original;
	}
	else if (!consider_abort_common(state))
	{
		/* Store abbreviated key representation */
		stup->datum1 = state->sortKeys->abbrev_converter(original,
														 state->sortKeys);
	}
	else
	{
		/* Abort abbreviation */
		int			i;

		stup->datum1 = original;

		/*
		 * Set state to be consistent with never trying abbreviation.
		 *
		 * Alter datum1 representation in already-copied tuples, so as to
		 * ensure a consistent representation (current tuple was just
		 * handled).  It does not matter if some dumped tuples are already
		 * sorted on tape, since serialized tuples lack abbreviated keys
		 * (TSS_BUILDRUNS state prevents control reaching here in any case).
		 */
		for (i = 0; i < state->memtupcount; i++)
		{
			SortTuple  *mtup = &state->memtuples[i];

			tuple = (IndexTuple) mtup->tuple;
			mtup->datum1 = index_getattr(tuple,
										 1,
										 RelationGetDescr(state->indexRel),
										 &mtup->isnull1);
		}
	}
}
static void
writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	IndexTuple	tuple = (IndexTuple) stup->tuple;
	unsigned int tuplen;

	tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) tuple, IndexTupleSize(tuple));
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &tuplen, sizeof(tuplen));

	if (!state->slabAllocatorUsed)
	{
		FREEMEM(state, GetMemoryChunkSpace(tuple));
		pfree(tuple);
	}
}
static void
readtup_index(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len)
{
	unsigned int tuplen = len - sizeof(unsigned int);
	IndexTuple	tuple = (IndexTuple) readtup_alloc(state, tuplen);

	LogicalTapeReadExact(state->tapeset, tapenum,
						 tuple, tuplen);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
	stup->tuple = (void *) tuple;
	/* set up first-column key value */
	stup->datum1 = index_getattr(tuple,
								 1,
								 RelationGetDescr(state->indexRel),
								 &stup->isnull1);
}
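/*
 * Unlike a MinimalTuple, an IndexTuple records its own length in its header
 * (available via IndexTupleSize()), so writetup_index() and readtup_index()
 * can ship the structure verbatim; only the tape framing --- the leading and
 * optional trailing length words --- is added around it.
 */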
/*
 * Routines specialized for DatumTuple case
 */
static int
comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
	int			compare;

	compare = ApplySortComparator(a->datum1, a->isnull1,
								  b->datum1, b->isnull1,
								  state->sortKeys);
	if (compare != 0)
		return compare;

	/* if we have abbreviations, then "tuple" has the original value */
	if (state->sortKeys->abbrev_converter)
		compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
												PointerGetDatum(b->tuple), b->isnull1,
												state->sortKeys);
	return compare;
}
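/*
 * A sketch of the datum-case invariant used above: when abbreviation is in
 * effect, datum1 holds the abbreviated key while "tuple" retains the
 * original pass-by-reference value, so ties on the abbreviation are
 * re-checked against the authoritative values.  Without abbreviation,
 * datum1 is authoritative by itself and the second comparison is skipped.
 */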
static void
copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	/* Not currently needed */
	elog(ERROR, "copytup_datum() should not be called");
}
static void
writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	void	   *waddr;
	unsigned int tuplen;
	unsigned int writtenlen;

	if (stup->isnull1)
	{
		waddr = NULL;
		tuplen = 0;
	}
	else if (!state->tuples)
	{
		waddr = &stup->datum1;
		tuplen = sizeof(Datum);
	}
	else
	{
		waddr = stup->tuple;
		tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, state->datumTypeLen);
		Assert(tuplen != 0);
	}

	writtenlen = tuplen + sizeof(unsigned int);
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &writtenlen, sizeof(writtenlen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 waddr, tuplen);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &writtenlen, sizeof(writtenlen));

	if (!state->slabAllocatorUsed && stup->tuple)
	{
		FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
		pfree(stup->tuple);
	}
}
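/*
 * The record written above takes one of three forms (lengths in bytes):
 *
 *	 NULL datum:		 writtenlen = sizeof(unsigned int), no payload
 *	 pass-by-value:		 writtenlen = sizeof(unsigned int) + sizeof(Datum)
 *	 pass-by-reference:	 writtenlen = sizeof(unsigned int) + datumGetSize()
 *
 * readtup_datum() below distinguishes the three cases from the payload
 * length alone: zero means NULL, and state->tuples tells by-value apart
 * from by-reference.
 */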
static void
readtup_datum(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len)
{
	unsigned int tuplen = len - sizeof(unsigned int);

	if (tuplen == 0)
	{
		/* it's NULL */
		stup->datum1 = (Datum) 0;
		stup->isnull1 = true;
		stup->tuple = NULL;
	}
	else if (!state->tuples)
	{
		Assert(tuplen == sizeof(Datum));
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &stup->datum1, tuplen);
		stup->isnull1 = false;
		stup->tuple = NULL;
	}
	else
	{
		void	   *raddr = readtup_alloc(state, tuplen);

		LogicalTapeReadExact(state->tapeset, tapenum,
							 raddr, tuplen);
		stup->datum1 = PointerGetDatum(raddr);
		stup->isnull1 = false;
		stup->tuple = raddr;
	}

	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
}
/*
 * Convenience routine to free a tuple previously loaded into sort memory
 */
static void
free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
{
	FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
	pfree(stup->tuple);
}