1 /*-------------------------------------------------------------------------
4 * Generalized tuple sorting routines.
6 * This module handles sorting of heap tuples, index tuples, or single
7 * Datums (and could easily support other kinds of sortable objects,
8 * if necessary). It works efficiently for both small and large amounts
9 * of data. Small amounts are sorted in-memory using qsort(). Large
10 * amounts are sorted using temporary files and a standard external sort
11 * algorithm.
13 * See Knuth, volume 3, for more than you want to know about the external
14 * sorting algorithm. Historically, we divided the input into sorted runs
15 * using replacement selection, in the form of a priority tree implemented
16 * as a heap (essentially his Algorithm 5.2.3H), but now we only do that
17 * for the first run, and only if the run would otherwise end up being very
18 * short. We merge the runs using polyphase merge, Knuth's Algorithm
19 * 5.4.2D. The logical "tapes" used by Algorithm D are implemented by
20 * logtape.c, which avoids space wastage by recycling disk space as soon
21 * as each block is read from its "tape".
23 * We do not use Knuth's recommended data structure (Algorithm 5.4.1R) for
24 * the replacement selection, because it uses a fixed number of records
25 * in memory at all times. Since we are dealing with tuples that may vary
26 * considerably in size, we want to be able to vary the number of records
27 * kept in memory to ensure full utilization of the allowed sort memory
28 * space. So, we keep the tuples in a variable-size heap, with the next
29 * record to go out at the top of the heap. Like Algorithm 5.4.1R, each
30 * record is stored with the run number that it must go into, and we use
31 * (run number, key) as the ordering key for the heap. When the run number
32 * at the top of the heap changes, we know that no more records of the prior
33 * run are left in the heap. Note that there are in practice only ever two
34 * distinct run numbers, because since PostgreSQL 9.6, we only use
35 * replacement selection to form the first run.
37 * In PostgreSQL 9.6, a heap (based on Knuth's Algorithm H, with some small
38 * customizations) is only used with the aim of producing just one run,
39 * thereby avoiding all merging. Only the first run can use replacement
40 * selection, which is why there are now only two possible valid run
41 * numbers, and why heapification is customized to not distinguish between
42 * tuples in the second run (those will be quicksorted). We generally
43 * prefer a simple hybrid sort-merge strategy, where runs are sorted in much
44 * the same way as the entire input of an internal sort is sorted (using
45 * qsort()). The replacement_sort_tuples GUC controls the limited remaining
46 * use of replacement selection for the first run.
48 * There are several reasons to favor a hybrid sort-merge strategy.
49 * Maintaining a priority tree/heap has poor CPU cache characteristics.
50 * Furthermore, the growth in main memory sizes has greatly diminished the
51 * value of having runs that are larger than available memory, even in the
52 * case where there is partially sorted input and runs can be made far
53 * larger by using a heap. In most cases, a single-pass merge step is all
54 * that is required even when runs are no larger than available memory.
55 * Avoiding multiple merge passes was traditionally considered to be the
56 * major advantage of using replacement selection.
58 * The approximate amount of memory allowed for any one sort operation
59 * is specified in kilobytes by the caller (most pass work_mem). Initially,
60 * we absorb tuples and simply store them in an unsorted array as long as
61 * we haven't exceeded workMem. If we reach the end of the input without
62 * exceeding workMem, we sort the array using qsort() and subsequently return
63 * tuples just by scanning the tuple array sequentially. If we do exceed
64 * workMem, we begin to emit tuples into sorted runs in temporary tapes.
65 * When tuples are dumped in batch after quicksorting, we begin a new run
66 * with a new output tape (selected per Algorithm D). After the end of the
67 * input is reached, we dump out remaining tuples in memory into a final run
68 * (or two, when replacement selection is still used), then merge the runs
69 * using Algorithm D.
71 * When merging runs, we use a heap containing just the frontmost tuple from
72 * each source run; we repeatedly output the smallest tuple and replace it
73 * with the next tuple from its source tape (if any). When the heap empties,
74 * the merge is complete. The basic merge algorithm thus needs very little
75 * memory --- only M tuples for an M-way merge, and M is constrained to a
76 * small number. However, we can still make good use of our full workMem
77 * allocation by pre-reading additional blocks from each source tape. Without
78 * prereading, our access pattern to the temporary file would be very erratic;
79 * on average we'd read one block from each of M source tapes during the same
80 * time that we're writing M blocks to the output tape, so there is no
81 * sequentiality of access at all, defeating the read-ahead methods used by
82 * most Unix kernels. Worse, the output tape gets written into a very random
83 * sequence of blocks of the temp file, ensuring that things will be even
84 * worse when it comes time to read that tape. A straightforward merge pass
85 * thus ends up doing a lot of waiting for disk seeks. We can improve matters
86 * by prereading from each source tape sequentially, loading about workMem/M
87 * bytes from each tape in turn, and making the sequential blocks immediately
88 * available for reuse. This approach helps to localize both read and write
89 * accesses.  The pre-reading is handled by logtape.c; we just tell it how
90 * much memory to use for the buffers.
92 * When the caller requests random access to the sort result, we form
93 * the final sorted run on a logical tape which is then "frozen", so
94 * that we can access it randomly. When the caller does not need random
95 * access, we return from tuplesort_performsort() as soon as we are down
96 * to one run per logical tape. The final merge is then performed
97 * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
98 * saves one cycle of writing all the data out to disk and reading it in.
100 * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
101 * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
102 * to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
103 * tape drives are expensive beasts, and in particular that there will always
104 * be many more runs than tape drives. In our implementation a "tape drive"
105 * doesn't cost much more than a few Kb of memory buffers, so we can afford
106 * to have lots of them. In particular, if we can have as many tape drives
107 * as sorted runs, we can eliminate any repeated I/O at all. In the current
108 * code we determine the number of tapes M on the basis of workMem: we want
109 * workMem/M to be large enough that we read a fair amount of data each time
110 * we preread from a tape, so as to maintain the locality of access described
111 * above. Nonetheless, with large workMem we can have many tapes (but not
112 * too many -- see the comments in tuplesort_merge_order).
115 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
116 * Portions Copyright (c) 1994, Regents of the University of California
119 * src/backend/utils/sort/tuplesort.c
121 *-------------------------------------------------------------------------
124 #include "postgres.h"
128 #include "access/htup_details.h"
129 #include "access/nbtree.h"
130 #include "catalog/index.h"
131 #include "catalog/pg_am.h"
132 #include "commands/tablespace.h"
133 #include "executor/executor.h"
134 #include "miscadmin.h"
135 #include "pg_trace.h"
136 #include "utils/datum.h"
137 #include "utils/logtape.h"
138 #include "utils/lsyscache.h"
139 #include "utils/memutils.h"
140 #include "utils/pg_rusage.h"
141 #include "utils/rel.h"
142 #include "utils/sortsupport.h"
143 #include "utils/tuplesort.h"
146 /* sort-type codes for sort__start probes */
147 #define HEAP_SORT 0
148 #define INDEX_SORT 1
149 #define DATUM_SORT 2
150 #define CLUSTER_SORT 3
154 bool trace_sort = false;
157 #ifdef DEBUG_BOUNDED_SORT
158 bool optimize_bounded_sort = true;
159 #endif
163 * The objects we actually sort are SortTuple structs. These contain
164 * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
165 * which is a separate palloc chunk --- we assume it is just one chunk and
166 * can be freed by a simple pfree() (except during merge, when we use a
167 * simple slab allocator). SortTuples also contain the tuple's first key
168 * column in Datum/nullflag format, and an index integer.
170 * Storing the first key column lets us save heap_getattr or index_getattr
171 * calls during tuple comparisons. We could extract and save all the key
172 * columns not just the first, but this would increase code complexity and
173 * overhead, and wouldn't actually save any comparison cycles in the common
174 * case where the first key determines the comparison result. Note that
175 * for a pass-by-reference datatype, datum1 points into the "tuple" storage.
177 * There is one special case: when the sort support infrastructure provides an
178 * "abbreviated key" representation, where the key is (typically) a pass by
179 * value proxy for a pass by reference type. In this case, the abbreviated key
180 * is stored in datum1 in place of the actual first key column.
182 * When sorting single Datums, the data value is represented directly by
183 * datum1/isnull1 for pass by value types (or null values). If the datatype is
184 * pass-by-reference and isnull1 is false, then "tuple" points to a separately
185 * palloc'd data value, otherwise "tuple" is NULL. The value of datum1 is then
186 * either the same pointer as "tuple", or is an abbreviated key value as
187 * described above. Accordingly, "tuple" is always used in preference to
188 * datum1 as the authoritative value for pass-by-reference cases.
190 * While building initial runs, tupindex holds the tuple's run number.
191 * Historically, the run number could meaningfully distinguish many runs, but
192 * it now only distinguishes RUN_FIRST and HEAP_RUN_NEXT, since replacement
193 * selection is always abandoned after the first run; no other run number
194 * should be represented here. During merge passes, we re-use it to hold the
195 * input tape number that each tuple in the heap was read from. tupindex goes
196 * unused if the sort occurs entirely in memory.
198 typedef struct SortTuple
199 {
200 void *tuple; /* the tuple itself */
201 Datum datum1; /* value of first key column */
202 bool isnull1; /* is first key column NULL? */
203 int tupindex; /* see notes above */
204 } SortTuple;
207 * During merge, we use a pre-allocated set of fixed-size slots to hold
208 * tuples, to avoid palloc/pfree overhead.
210 * Merge doesn't require a lot of memory, so we can afford to waste some,
211 * by using gratuitously-sized slots. If a tuple is larger than 1 kB, the
212 * palloc() overhead is not significant anymore.
214 * 'nextfree' is valid when this chunk is in the free list. When in use, the
215 * slot holds a tuple.
217 #define SLAB_SLOT_SIZE 1024
219 typedef union SlabSlot
220 {
221 union SlabSlot *nextfree;
222 char buffer[SLAB_SLOT_SIZE];
223 } SlabSlot;
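/*
 * Illustrative sketch of the free-list discipline described above: free
 * slots are threaded into a singly linked list through their own storage.
 * This is a standalone example (the "example_" names and the
 * TUPLESORT_EXAMPLES guard are hypothetical, not part of this file's API);
 * the real code manipulates state->slabFreeHead inline.
 */
#ifdef TUPLESORT_EXAMPLES
static SlabSlot *example_free_head = NULL;

static void
example_slab_release(SlabSlot *slot)
{
    /* push the slot onto the head of the free list */
    slot->nextfree = example_free_head;
    example_free_head = slot;
}

static SlabSlot *
example_slab_acquire(void)
{
    /* pop a slot, or return NULL if the free list is empty */
    SlabSlot *slot = example_free_head;

    if (slot != NULL)
        example_free_head = slot->nextfree;
    return slot;
}
#endif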
226 * Possible states of a Tuplesort object. These denote the states that
227 * persist between calls of Tuplesort routines.
229 typedef enum
230 {
231 TSS_INITIAL, /* Loading tuples; still within memory limit */
232 TSS_BOUNDED, /* Loading tuples into bounded-size heap */
233 TSS_BUILDRUNS, /* Loading tuples; writing to tape */
234 TSS_SORTEDINMEM, /* Sort completed entirely in memory */
235 TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */
236 TSS_FINALMERGE /* Performing final merge on-the-fly */
237 } TupSortStatus;
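/*
 * Hypothetical debug helper, shown only to summarize the state machine:
 * TSS_INITIAL can move to TSS_BOUNDED or TSS_BUILDRUNS while loading, and
 * the three "sorted" states are reached via tuplesort_performsort().  Not
 * part of this file's API.
 */
#ifdef TUPLESORT_EXAMPLES
static const char *
example_status_name(TupSortStatus status)
{
    switch (status)
    {
        case TSS_INITIAL:
            return "loading (in memory)";
        case TSS_BOUNDED:
            return "loading (bounded heap)";
        case TSS_BUILDRUNS:
            return "loading (writing runs to tape)";
        case TSS_SORTEDINMEM:
            return "sorted in memory";
        case TSS_SORTEDONTAPE:
            return "sorted on tape";
        case TSS_FINALMERGE:
            return "final on-the-fly merge";
    }
    return "unknown";
}
#endif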
240 * Parameters for calculation of number of tapes to use --- see inittapes()
241 * and tuplesort_merge_order().
243 * In this calculation we assume that each tape will cost us about 1 block's
244 * worth of buffer space. This ignores the overhead of all the other data
245 * structures needed for each tape, but it's probably close enough.
247 * MERGE_BUFFER_SIZE is how much data we'd like to read from each input
248 * tape during a preread cycle (see discussion at top of file).
250 #define MINORDER 6 /* minimum merge order */
251 #define MAXORDER 500 /* maximum merge order */
252 #define TAPE_BUFFER_OVERHEAD BLCKSZ
253 #define MERGE_BUFFER_SIZE (BLCKSZ * 32)
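/*
 * Illustrative sketch of how these constants combine: a simplified version
 * of the reasoning in tuplesort_merge_order() (defined later in the file,
 * outside this excerpt).  Each tape needs TAPE_BUFFER_OVERHEAD plus a
 * MERGE_BUFFER_SIZE preread workspace, and the result is clamped to
 * [MINORDER, MAXORDER].  Hypothetical helper, not the real function.
 */
#ifdef TUPLESORT_EXAMPLES
static int
example_merge_order(int64 allowedMem)
{
    int         mOrder;

    mOrder = (int) (allowedMem / (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD));
    mOrder = Max(mOrder, MINORDER);
    mOrder = Min(mOrder, MAXORDER);
    return mOrder;
}
#endif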
256 * Run numbers, used during external sort operations.
258 * HEAP_RUN_NEXT is only used for SortTuple.tupindex, never state.currentRun.
259 */
260 #define RUN_FIRST 0
261 #define HEAP_RUN_NEXT INT_MAX
262 #define RUN_SECOND 1
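/*
 * Illustrative sketch of the (run number, key) heap ordering described in
 * the file header: the run number dominates, so every RUN_FIRST tuple
 * drains from the heap before any HEAP_RUN_NEXT tuple can surface.  Plain
 * ints stand in for real keys; this hypothetical comparator is not used
 * anywhere in the file.
 */
#ifdef TUPLESORT_EXAMPLES
static int
example_run_key_cmp(int runA, int keyA, int runB, int keyB)
{
    if (runA != runB)
        return (runA < runB) ? -1 : 1;  /* run number dominates */
    if (keyA != keyB)
        return (keyA < keyB) ? -1 : 1;  /* then the sort key proper */
    return 0;
}
#endif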
264 typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
265 Tuplesortstate *state);
268 * Private state of a Tuplesort operation.
270 struct Tuplesortstate
271 {
272 TupSortStatus status; /* enumerated value as shown above */
273 int nKeys; /* number of columns in sort key */
274 bool randomAccess; /* did caller request random access? */
275 bool bounded; /* did caller specify a maximum number of
276 * tuples to return? */
277 bool boundUsed; /* true if we made use of a bounded heap */
278 int bound; /* if bounded, the maximum number of tuples */
279 bool tuples; /* Can SortTuple.tuple ever be set? */
280 int64 availMem; /* remaining memory available, in bytes */
281 int64 allowedMem; /* total memory allowed, in bytes */
282 int maxTapes; /* number of tapes (Knuth's T) */
283 int tapeRange; /* maxTapes-1 (Knuth's P) */
284 MemoryContext sortcontext; /* memory context holding most sort data */
285 MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
286 LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
289 * These function pointers decouple the routines that must know what kind
290 * of tuple we are sorting from the routines that don't need to know it.
291 * They are set up by the tuplesort_begin_xxx routines.
293 * Function to compare two tuples; result is per qsort() convention, ie:
294 * <0, 0, >0 according as a<b, a=b, a>b. The API must match
295 * qsort_arg_comparator.
297 SortTupleComparator comparetup;
300 * Function to copy a supplied input tuple into palloc'd space and set up
301 * its SortTuple representation (ie, set tuple/datum1/isnull1). Also,
302 * state->availMem must be decreased by the amount of space used for the
303 * tuple copy (note the SortTuple struct itself is not counted).
305 void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
308 * Function to write a stored tuple onto tape. The representation of the
309 * tuple on tape need not be the same as it is in memory; requirements on
310 * the tape representation are given below. Unless the slab allocator is
311 * used, after writing the tuple, pfree() the out-of-line data (not the
312 * SortTuple struct!), and increase state->availMem by the amount of
313 * memory space thereby released.
315 void (*writetup) (Tuplesortstate *state, int tapenum,
316 SortTuple *stup);
319 * Function to read a stored tuple from tape back into memory. 'len' is
320 * the already-read length of the stored tuple. The tuple is allocated
321 * from the slab memory arena, or is palloc'd, see readtup_alloc().
323 void (*readtup) (Tuplesortstate *state, SortTuple *stup,
324 int tapenum, unsigned int len);
327 * This array holds the tuples now in sort memory. If we are in state
328 * INITIAL, the tuples are in no particular order; if we are in state
329 * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
330 * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
331 * H. In state SORTEDONTAPE, the array is not used.
333 SortTuple *memtuples; /* array of SortTuple structs */
334 int memtupcount; /* number of tuples currently present */
335 int memtupsize; /* allocated length of memtuples array */
336 bool growmemtuples; /* memtuples' growth still underway? */
339 * Memory for tuples is sometimes allocated using a simple slab allocator,
340 * rather than with palloc(). Currently, we switch to slab allocation
341 * when we start merging. Merging only needs to keep a small, fixed
342 * number of tuples in memory at any time, so we can avoid the
343 * palloc/pfree overhead by recycling a fixed number of fixed-size slots
344 * to hold the tuples.
346 * For the slab, we use one large allocation, divided into SLAB_SLOT_SIZE
347 * slots. The allocation is sized to have one slot per tape, plus one
348 * additional slot. We need that many slots to hold all the tuples kept
349 * in the heap during merge, plus the one most recently returned from the
350 * sort via tuplesort_gettuple.
352 * Initially, all the slots are kept in a linked list of free slots. When
353 * a tuple is read from a tape, it is put into the next available slot, if
354 * it fits.  If the tuple is larger than SLAB_SLOT_SIZE, it is palloc'd
355 * instead.
357 * When we're done processing a tuple, we return the slot back to the free
358 * list, or pfree() if it was palloc'd. We know that a tuple was
359 * allocated from the slab, if its pointer value is between
360 * slabMemoryBegin and -End.
362 * When the slab allocator is used, the USEMEM/LACKMEM mechanism of
363 * tracking memory usage is not used.
365 bool slabAllocatorUsed;
367 char *slabMemoryBegin; /* beginning of slab memory arena */
368 char *slabMemoryEnd; /* end of slab memory arena */
369 SlabSlot *slabFreeHead; /* head of free list */
371 /* Buffer size to use for reading input tapes, during merge. */
372 size_t read_buffer_size;
375 * When we return a tuple to the caller in tuplesort_gettuple_XXX, that
376 * came from a tape (that is, in TSS_SORTEDONTAPE or TSS_FINALMERGE
377 * modes), we remember the tuple in 'lastReturnedTuple', so that we can
378 * recycle the memory on next gettuple call.
380 void *lastReturnedTuple;
383 * While building initial runs, this indicates if the replacement
384 * selection strategy is in use. When it isn't, then a simple hybrid
385 * sort-merge strategy is in use instead (runs are quicksorted).
386 */
387 bool replaceActive;
390 * While building initial runs, this is the current output run number
391 * (starting at RUN_FIRST).  Afterwards, it is the number of initial runs
392 * we made.
393 */
394 int currentRun;
397 * Unless otherwise noted, all pointer variables below are pointers to
398 * arrays of length maxTapes, holding per-tape data.
402 * This variable is only used during merge passes. mergeactive[i] is true
403 * if we are reading an input run from (actual) tape number i and have not
404 * yet exhausted that run.
406 bool *mergeactive; /* active input run source? */
409 * Variables for Algorithm D. Note that destTape is a "logical" tape
410 * number, ie, an index into the tp_xxx[] arrays. Be careful to keep
411 * "logical" and "actual" tape numbers straight!
413 int Level; /* Knuth's l */
414 int destTape; /* current output tape (Knuth's j, less 1) */
415 int *tp_fib; /* Target Fibonacci run counts (A[]) */
416 int *tp_runs; /* # of real runs on each tape */
417 int *tp_dummy; /* # of dummy runs for each tape (D[]) */
418 int *tp_tapenum; /* Actual tape numbers (TAPE[]) */
419 int activeTapes; /* # of active input tapes in merge pass */
422 * These variables are used after completion of sorting to keep track of
423 * the next tuple to return. (In the tape case, the tape's current read
424 * position is also critical state.)
426 int result_tape; /* actual tape number of finished output */
427 int current; /* array index (only used if SORTEDINMEM) */
428 bool eof_reached; /* reached EOF (needed for cursors) */
430 /* markpos_xxx holds marked position for mark and restore */
431 long markpos_block; /* tape block# (only used if SORTEDONTAPE) */
432 int markpos_offset; /* saved "current", or offset in tape block */
433 bool markpos_eof; /* saved "eof_reached" */
436 * The sortKeys variable is used by every case other than the hash index
437 * case; it is set by tuplesort_begin_xxx. tupDesc is only used by the
438 * MinimalTuple and CLUSTER routines, though.
441 SortSupport sortKeys; /* array of length nKeys */
444 * This variable is shared by the single-key MinimalTuple case and the
445 * Datum case (which both use qsort_ssup()).  Otherwise it's NULL.
446 */
447 SortSupport onlyKey;
450 * Additional state for managing "abbreviated key" sortsupport routines
451 * (which currently may be used by all cases except the hash index case).
452 * Tracks the intervals at which the optimization's effectiveness is
453 * checked.
454 */
455 int64 abbrevNext; /* Tuple # at which to next check
456 * effectiveness */
459 * These variables are specific to the CLUSTER case; they are set by
460 * tuplesort_begin_cluster.
462 IndexInfo *indexInfo; /* info about index being used for reference */
463 EState *estate; /* for evaluating index expressions */
466 * These variables are specific to the IndexTuple case; they are set by
467 * tuplesort_begin_index_xxx and used only by the IndexTuple routines.
469 Relation heapRel; /* table the index is being built on */
470 Relation indexRel; /* index being built */
472 /* These are specific to the index_btree subcase: */
473 bool enforceUnique; /* complain if we find duplicate tuples */
475 /* These are specific to the index_hash subcase: */
476 uint32 hash_mask; /* mask for sortable part of hash code */
479 * These variables are specific to the Datum case; they are set by
480 * tuplesort_begin_datum and used only by the DatumTuple routines.
481 */
482 Oid datumType;
483 /* we need typelen in order to know how to copy the Datums. */
484 int datumTypeLen;
486 /*
487 * Resource snapshot for time of sort start.
488 */
489 #ifdef TRACE_SORT
490 PGRUsage ru_start;
491 #endif
492 };
495 * Is the given tuple allocated from the slab memory arena?
497 #define IS_SLAB_SLOT(state, tuple) \
498 ((char *) (tuple) >= (state)->slabMemoryBegin && \
499 (char *) (tuple) < (state)->slabMemoryEnd)
502 * Return the given tuple to the slab memory free list, or free it
503 * if it was palloc'd.
505 #define RELEASE_SLAB_SLOT(state, tuple) \
506 do { \
507 SlabSlot *buf = (SlabSlot *) tuple; \
509 if (IS_SLAB_SLOT((state), buf)) \
510 { \
511 buf->nextfree = (state)->slabFreeHead; \
512 (state)->slabFreeHead = buf; \
513 } \
514 else \
515 pfree(buf); \
516 } while(0)
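/*
 * Illustrative sketch (hypothetical helper) of how the macros above are
 * used: before overwriting lastReturnedTuple, the previous tuple is either
 * pushed back on the slab free list or pfree'd, depending on where it was
 * allocated.  The equivalent logic is inlined in tuplesort_gettuple_common()
 * (outside this excerpt).
 */
#ifdef TUPLESORT_EXAMPLES
static void
example_recycle_last_returned(Tuplesortstate *state)
{
    if (state->lastReturnedTuple != NULL)
    {
        RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
        state->lastReturnedTuple = NULL;
    }
}
#endif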
517 #define COMPARETUP(state,a,b) ((*(state)->comparetup) (a, b, state))
518 #define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
519 #define WRITETUP(state,tape,stup) ((*(state)->writetup) (state, tape, stup))
520 #define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
521 #define LACKMEM(state) ((state)->availMem < 0 && !(state)->slabAllocatorUsed)
522 #define USEMEM(state,amt) ((state)->availMem -= (amt))
523 #define FREEMEM(state,amt) ((state)->availMem += (amt))
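/*
 * Standalone illustrative sketch of the merge strategy described in the
 * file header: keep only the frontmost element of each source run in a
 * small binary min-heap, repeatedly emit the smallest, and replace it from
 * the run it came from.  Plain ints stand in for tuples and arrays for
 * tapes; all "example_" names are hypothetical.
 */
#ifdef TUPLESORT_EXAMPLES
typedef struct ExampleRun
{
    const int  *vals;           /* a presorted input run */
    int         nvals;
    int         next;           /* next unread position in the run */
} ExampleRun;

typedef struct ExampleEntry
{
    int         val;            /* frontmost value of one run */
    int         srcrun;         /* which run it came from */
} ExampleEntry;

static void
example_sift_down(ExampleEntry *heap, int n, int i)
{
    for (;;)
    {
        int         left = 2 * i + 1;
        int         right = left + 1;
        int         smallest = i;
        ExampleEntry tmp;

        if (left < n && heap[left].val < heap[smallest].val)
            smallest = left;
        if (right < n && heap[right].val < heap[smallest].val)
            smallest = right;
        if (smallest == i)
            break;
        tmp = heap[i];
        heap[i] = heap[smallest];
        heap[smallest] = tmp;
        i = smallest;
    }
}

static void
example_merge_runs(ExampleRun *runs, int nruns, int *out)
{
    ExampleEntry heap[MINORDER]; /* assumes nruns <= MINORDER in the sketch */
    int         n = 0;
    int         nout = 0;
    int         i;

    /* prime the heap with the frontmost element of each nonempty run */
    for (i = 0; i < nruns; i++)
    {
        if (runs[i].next < runs[i].nvals)
        {
            heap[n].val = runs[i].vals[runs[i].next++];
            heap[n].srcrun = i;
            n++;
        }
    }
    for (i = n / 2 - 1; i >= 0; i--)
        example_sift_down(heap, n, i);

    /* emit the smallest, refilling its slot from the same source run */
    while (n > 0)
    {
        ExampleRun *src = &runs[heap[0].srcrun];

        out[nout++] = heap[0].val;
        if (src->next < src->nvals)
            heap[0].val = src->vals[src->next++]; /* replace heap top */
        else
            heap[0] = heap[--n];    /* run exhausted; shrink the heap */
        example_sift_down(heap, n, 0);
    }
}
#endif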
525 /*
526 * NOTES about on-tape representation of tuples:
528 * We require the first "unsigned int" of a stored tuple to be the total size
529 * on-tape of the tuple, including itself (so it is never zero; an all-zero
530 * unsigned int is used to delimit runs). The remainder of the stored tuple
531 * may or may not match the in-memory representation of the tuple ---
532 * any conversion needed is the job of the writetup and readtup routines.
534 * If state->randomAccess is true, then the stored representation of the
535 * tuple must be followed by another "unsigned int" that is a copy of the
536 * length --- so the total tape space used is actually sizeof(unsigned int)
537 * more than the stored length value. This allows read-backwards. When
538 * randomAccess is not true, the write/read routines may omit the extra
541 * writetup is expected to write both length words as well as the tuple
542 * data. When readtup is called, the tape is positioned just after the
543 * front length word; readtup must read the tuple data and advance past
544 * the back length word (if present).
546 * The write/read routines can make use of the tuple description data
547 * stored in the Tuplesortstate record, if needed. They are also expected
548 * to adjust state->availMem by the amount of memory space (not tape space!)
549 * released or consumed. There is no error return from either writetup
550 * or readtup; they should ereport() on failure.
553 * NOTES about memory consumption calculations:
555 * We count space allocated for tuples against the workMem limit, plus
556 * the space used by the variable-size memtuples array. Fixed-size space
557 * is not counted; it's small enough to not be interesting.
559 * Note that we count actual space used (as shown by GetMemoryChunkSpace)
560 * rather than the originally-requested size. This is important since
561 * palloc can add substantial overhead. It's not a complete answer since
562 * we won't count any wasted space in palloc allocation blocks, but it's
563 * a lot better than what we were doing before 7.3. As of 9.6, a
564 * separate memory context is used for caller passed tuples. Resetting
565 * it at certain key increments significantly ameliorates fragmentation.
566 * Note that this places a responsibility on readtup and copytup routines
567 * to use the right memory context for these tuples (and to not use the
568 * reset context for anything whose lifetime needs to span multiple
569 * external sort runs).
570 */
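/*
 * Illustrative sketch of the accounting convention just described: charge
 * availMem with the space actually allocated, as reported by
 * GetMemoryChunkSpace(), rather than the requested size.  Hypothetical
 * helper; the real copytup/readtup routines do this inline.
 */
#ifdef TUPLESORT_EXAMPLES
static void *
example_charged_alloc(Tuplesortstate *state, Size len)
{
    void       *ptr = MemoryContextAlloc(state->tuplecontext, len);

    USEMEM(state, GetMemoryChunkSpace(ptr));
    return ptr;
}
#endif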
572 /* When using this macro, beware of double evaluation of len */
573 #define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
574 do { \
575 if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
576 elog(ERROR, "unexpected end of data"); \
577 } while(0)
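/*
 * Illustrative sketch of the on-tape layout described in the notes above:
 * a leading length word (counting itself), the payload, and, when
 * randomAccess is requested, a trailing copy of the length word to permit
 * reading backwards.  Hypothetical helper, simplified from the real
 * writetup_* routines.
 */
#ifdef TUPLESORT_EXAMPLES
static void
example_writetup_raw(Tuplesortstate *state, int tapenum,
                     char *data, unsigned int datalen)
{
    unsigned int tuplen = datalen + sizeof(tuplen);

    LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
    LogicalTapeWrite(state->tapeset, tapenum, data, datalen);
    if (state->randomAccess)    /* need trailing length word? */
        LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen));
}
#endif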
580 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
581 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
582 static bool consider_abort_common(Tuplesortstate *state);
583 static bool useselection(Tuplesortstate *state);
584 static void inittapes(Tuplesortstate *state);
585 static void selectnewtape(Tuplesortstate *state);
586 static void init_slab_allocator(Tuplesortstate *state, int numSlots);
587 static void mergeruns(Tuplesortstate *state);
588 static void mergeonerun(Tuplesortstate *state);
589 static void beginmerge(Tuplesortstate *state);
590 static bool mergereadnext(Tuplesortstate *state, int srcTape, SortTuple *stup);
591 static void dumptuples(Tuplesortstate *state, bool alltuples);
592 static void dumpbatch(Tuplesortstate *state, bool alltuples);
593 static void make_bounded_heap(Tuplesortstate *state);
594 static void sort_bounded_heap(Tuplesortstate *state);
595 static void tuplesort_sort_memtuples(Tuplesortstate *state);
596 static void tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
597 bool checkIndex);
598 static void tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple,
599 bool checkIndex);
600 static void tuplesort_heap_delete_top(Tuplesortstate *state, bool checkIndex);
601 static void reversedirection(Tuplesortstate *state);
602 static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
603 static void markrunend(Tuplesortstate *state, int tapenum);
604 static void *readtup_alloc(Tuplesortstate *state, Size tuplen);
605 static int comparetup_heap(const SortTuple *a, const SortTuple *b,
606 Tuplesortstate *state);
607 static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
608 static void writetup_heap(Tuplesortstate *state, int tapenum,
609 SortTuple *stup);
610 static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
611 int tapenum, unsigned int len);
612 static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
613 Tuplesortstate *state);
614 static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup);
615 static void writetup_cluster(Tuplesortstate *state, int tapenum,
616 SortTuple *stup);
617 static void readtup_cluster(Tuplesortstate *state, SortTuple *stup,
618 int tapenum, unsigned int len);
619 static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
620 Tuplesortstate *state);
621 static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
622 Tuplesortstate *state);
623 static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
624 static void writetup_index(Tuplesortstate *state, int tapenum,
625 SortTuple *stup);
626 static void readtup_index(Tuplesortstate *state, SortTuple *stup,
627 int tapenum, unsigned int len);
628 static int comparetup_datum(const SortTuple *a, const SortTuple *b,
629 Tuplesortstate *state);
630 static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
631 static void writetup_datum(Tuplesortstate *state, int tapenum,
632 SortTuple *stup);
633 static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
634 int tapenum, unsigned int len);
635 static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
638 * Special versions of qsort just for SortTuple objects. qsort_tuple() sorts
639 * any variant of SortTuples, using the appropriate comparetup function.
640 * qsort_ssup() is specialized for the case where the comparetup function
641 * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts
642 * and Datum sorts.
643 */
644 #include "qsort_tuple.c"
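/*
 * Illustrative sketch of how the generated specializations are dispatched,
 * modeled on tuplesort_sort_memtuples() (declared above, defined outside
 * this excerpt): the onlyKey fast path goes to qsort_ssup(), everything
 * else to qsort_tuple().  Hypothetical duplicate for exposition only.
 */
#ifdef TUPLESORT_EXAMPLES
static void
example_sort_memtuples(Tuplesortstate *state)
{
    if (state->memtupcount > 1)
    {
        if (state->onlyKey != NULL)
            qsort_ssup(state->memtuples, state->memtupcount,
                       state->onlyKey);
        else
            qsort_tuple(state->memtuples, state->memtupcount,
                        state->comparetup, state);
    }
}
#endif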
647 /*
648 * tuplesort_begin_xxx
650 * Initialize for a tuple sort operation.
652 * After calling tuplesort_begin, the caller should call tuplesort_putXXX
653 * zero or more times, then call tuplesort_performsort when all the tuples
654 * have been supplied. After performsort, retrieve the tuples in sorted
655 * order by calling tuplesort_getXXX until it returns false/NULL. (If random
656 * access was requested, rescan, markpos, and restorepos can also be called.)
657 * Call tuplesort_end to terminate the operation and release memory/disk space.
659 * Each variant of tuplesort_begin has a workMem parameter specifying the
660 * maximum number of kilobytes of RAM to use before spilling data to disk.
661 * (The normal value of this parameter is work_mem, but some callers use
662 * other values.) Each variant also has a randomAccess parameter specifying
663 * whether the caller needs non-sequential access to the sort result.
664 */
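/*
 * Illustrative (hypothetical) caller sketch of the protocol described
 * above: begin, put zero or more tuples, performsort, fetch until
 * exhausted, end.  Real callers live in the executor; slot handling is
 * simplified, and the trailing abbreviated-key argument of
 * tuplesort_gettupleslot (NULL here) is assumed per this era's API.
 */
#ifdef TUPLESORT_EXAMPLES
static void
example_sort_lifecycle(TupleDesc tupDesc, int nkeys, AttrNumber *attNums,
                       Oid *sortOperators, Oid *sortCollations,
                       bool *nullsFirstFlags, TupleTableSlot *slot)
{
    Tuplesortstate *sortstate;

    sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums,
                                     sortOperators, sortCollations,
                                     nullsFirstFlags,
                                     work_mem, false);
    /* load phase: feed every input tuple to the sort */
    tuplesort_puttupleslot(sortstate, slot);
    /* ... further tuplesort_puttupleslot() calls ... */
    tuplesort_performsort(sortstate);
    /* fetch phase: returns false when no tuples remain */
    while (tuplesort_gettupleslot(sortstate, true, slot, NULL))
    {
        /* process one tuple in sorted order */
    }
    tuplesort_end(sortstate);
}
#endif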
666 static Tuplesortstate *
667 tuplesort_begin_common(int workMem, bool randomAccess)
669 Tuplesortstate *state;
670 MemoryContext sortcontext;
671 MemoryContext tuplecontext;
672 MemoryContext oldcontext;
675 * Create a working memory context for this sort operation. All data
676 * needed by the sort will live inside this context.
678 sortcontext = AllocSetContextCreate(CurrentMemoryContext,
679 "TupleSort main",
680 ALLOCSET_DEFAULT_SIZES);
683 * Caller tuple (e.g. IndexTuple) memory context.
685 * A dedicated child context used exclusively for caller passed tuples
686 * eases memory management. Resetting at key points reduces
687 * fragmentation. Note that the memtuples array of SortTuples is allocated
688 * in the parent context, not this context, because there is no need to
689 * free memtuples early.
691 tuplecontext = AllocSetContextCreate(sortcontext,
692 "Caller tuples",
693 ALLOCSET_DEFAULT_SIZES);
696 * Make the Tuplesortstate within the per-sort context. This way, we
697 * don't need a separate pfree() operation for it at shutdown.
699 oldcontext = MemoryContextSwitchTo(sortcontext);
701 state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
703 #ifdef TRACE_SORT
704 if (trace_sort)
705 pg_rusage_init(&state->ru_start);
706 #endif
708 state->status = TSS_INITIAL;
709 state->randomAccess = randomAccess;
710 state->bounded = false;
711 state->tuples = true;
712 state->boundUsed = false;
713 state->allowedMem = workMem * (int64) 1024;
714 state->availMem = state->allowedMem;
715 state->sortcontext = sortcontext;
716 state->tuplecontext = tuplecontext;
717 state->tapeset = NULL;
719 state->memtupcount = 0;
722 * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD;
723 * see comments in grow_memtuples().
725 state->memtupsize = Max(1024,
726 ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1);
728 state->growmemtuples = true;
729 state->slabAllocatorUsed = false;
730 state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));
732 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
734 /* workMem must be large enough for the minimal memtuples array */
735 if (LACKMEM(state))
736 elog(ERROR, "insufficient memory allowed for sort");
738 state->currentRun = RUN_FIRST;
741 * maxTapes, tapeRange, and Algorithm D variables will be initialized by
742 * inittapes(), if needed
745 state->result_tape = -1; /* flag that result tape has not been formed */
747 MemoryContextSwitchTo(oldcontext);
749 return state;
750 }
752 Tuplesortstate *
753 tuplesort_begin_heap(TupleDesc tupDesc,
754 int nkeys, AttrNumber *attNums,
755 Oid *sortOperators, Oid *sortCollations,
756 bool *nullsFirstFlags,
757 int workMem, bool randomAccess)
759 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
760 MemoryContext oldcontext;
763 oldcontext = MemoryContextSwitchTo(state->sortcontext);
765 AssertArg(nkeys > 0);
770 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
771 nkeys, workMem, randomAccess ? 't' : 'f');
774 state->nKeys = nkeys;
776 TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
777 false, /* no unique check */
782 state->comparetup = comparetup_heap;
783 state->copytup = copytup_heap;
784 state->writetup = writetup_heap;
785 state->readtup = readtup_heap;
787 state->tupDesc = tupDesc; /* assume we need not copy tupDesc */
788 state->abbrevNext = 10;
790 /* Prepare SortSupport data for each column */
791 state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
793 for (i = 0; i < nkeys; i++)
795 SortSupport sortKey = state->sortKeys + i;
797 AssertArg(attNums[i] != 0);
798 AssertArg(sortOperators[i] != 0);
800 sortKey->ssup_cxt = CurrentMemoryContext;
801 sortKey->ssup_collation = sortCollations[i];
802 sortKey->ssup_nulls_first = nullsFirstFlags[i];
803 sortKey->ssup_attno = attNums[i];
804 /* Convey if abbreviation optimization is applicable in principle */
805 sortKey->abbreviate = (i == 0);
807 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
811 * The "onlyKey" optimization cannot be used with abbreviated keys, since
812 * tie-breaker comparisons may be required. Typically, the optimization
813 * is only of value to pass-by-value types anyway, whereas abbreviated
814 * keys are typically only of value to pass-by-reference types.
816 if (nkeys == 1 && !state->sortKeys->abbrev_converter)
817 state->onlyKey = state->sortKeys;
819 MemoryContextSwitchTo(oldcontext);
821 return state;
822 }
824 Tuplesortstate *
825 tuplesort_begin_cluster(TupleDesc tupDesc,
826 Relation indexRel,
827 int workMem, bool randomAccess)
828 {
829 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
830 ScanKey indexScanKey;
831 MemoryContext oldcontext;
834 Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
836 oldcontext = MemoryContextSwitchTo(state->sortcontext);
841 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
842 RelationGetNumberOfAttributes(indexRel),
843 workMem, randomAccess ? 't' : 'f');
846 state->nKeys = RelationGetNumberOfAttributes(indexRel);
848 TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
849 false, /* no unique check */
854 state->comparetup = comparetup_cluster;
855 state->copytup = copytup_cluster;
856 state->writetup = writetup_cluster;
857 state->readtup = readtup_cluster;
858 state->abbrevNext = 10;
860 state->indexInfo = BuildIndexInfo(indexRel);
862 state->tupDesc = tupDesc; /* assume we need not copy tupDesc */
864 indexScanKey = _bt_mkscankey_nodata(indexRel);
866 if (state->indexInfo->ii_Expressions != NULL)
868 TupleTableSlot *slot;
869 ExprContext *econtext;
872 * We will need to use FormIndexDatum to evaluate the index
873 * expressions. To do that, we need an EState, as well as a
874 * TupleTableSlot to put the table tuples into. The econtext's
875 * scantuple has to point to that slot, too.
877 state->estate = CreateExecutorState();
878 slot = MakeSingleTupleTableSlot(tupDesc);
879 econtext = GetPerTupleExprContext(state->estate);
880 econtext->ecxt_scantuple = slot;
883 /* Prepare SortSupport data for each column */
884 state->sortKeys = (SortSupport) palloc0(state->nKeys *
885 sizeof(SortSupportData));
887 for (i = 0; i < state->nKeys; i++)
889 SortSupport sortKey = state->sortKeys + i;
890 ScanKey scanKey = indexScanKey + i;
893 sortKey->ssup_cxt = CurrentMemoryContext;
894 sortKey->ssup_collation = scanKey->sk_collation;
895 sortKey->ssup_nulls_first =
896 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
897 sortKey->ssup_attno = scanKey->sk_attno;
898 /* Convey if abbreviation optimization is applicable in principle */
899 sortKey->abbreviate = (i == 0);
901 AssertState(sortKey->ssup_attno != 0);
903 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
904 BTGreaterStrategyNumber : BTLessStrategyNumber;
906 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
909 _bt_freeskey(indexScanKey);
911 MemoryContextSwitchTo(oldcontext);
913 return state;
914 }
916 Tuplesortstate *
917 tuplesort_begin_index_btree(Relation heapRel,
918 Relation indexRel,
919 bool enforceUnique,
920 int workMem, bool randomAccess)
921 {
922 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
923 ScanKey indexScanKey;
924 MemoryContext oldcontext;
927 oldcontext = MemoryContextSwitchTo(state->sortcontext);
932 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
933 enforceUnique ? 't' : 'f',
934 workMem, randomAccess ? 't' : 'f');
937 state->nKeys = RelationGetNumberOfAttributes(indexRel);
939 TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
945 state->comparetup = comparetup_index_btree;
946 state->copytup = copytup_index;
947 state->writetup = writetup_index;
948 state->readtup = readtup_index;
949 state->abbrevNext = 10;
951 state->heapRel = heapRel;
952 state->indexRel = indexRel;
953 state->enforceUnique = enforceUnique;
955 indexScanKey = _bt_mkscankey_nodata(indexRel);
958 /* Prepare SortSupport data for each column */
959 state->sortKeys = (SortSupport) palloc0(state->nKeys *
960 sizeof(SortSupportData));
962 for (i = 0; i < state->nKeys; i++)
964 SortSupport sortKey = state->sortKeys + i;
965 ScanKey scanKey = indexScanKey + i;
968 sortKey->ssup_cxt = CurrentMemoryContext;
969 sortKey->ssup_collation = scanKey->sk_collation;
970 sortKey->ssup_nulls_first =
971 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
972 sortKey->ssup_attno = scanKey->sk_attno;
973 /* Convey if abbreviation optimization is applicable in principle */
974 sortKey->abbreviate = (i == 0);
976 AssertState(sortKey->ssup_attno != 0);
978 strategy = (scanKey->sk_flags & SK_BT_DESC) != 0 ?
979 BTGreaterStrategyNumber : BTLessStrategyNumber;
981 PrepareSortSupportFromIndexRel(indexRel, strategy, sortKey);
984 _bt_freeskey(indexScanKey);
986 MemoryContextSwitchTo(oldcontext);
988 return state;
989 }
991 Tuplesortstate *
992 tuplesort_begin_index_hash(Relation heapRel,
993 Relation indexRel,
994 uint32 hash_mask,
995 int workMem, bool randomAccess)
996 {
997 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
998 MemoryContext oldcontext;
1000 oldcontext = MemoryContextSwitchTo(state->sortcontext);
1005 "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c",
1007 workMem, randomAccess ? 't' : 'f');
1010 state->nKeys = 1; /* Only one sort column, the hash code */
1012 state->comparetup = comparetup_index_hash;
1013 state->copytup = copytup_index;
1014 state->writetup = writetup_index;
1015 state->readtup = readtup_index;
1017 state->heapRel = heapRel;
1018 state->indexRel = indexRel;
1020 state->hash_mask = hash_mask;
1022 MemoryContextSwitchTo(oldcontext);
1024 return state;
1025 }
1027 Tuplesortstate *
1028 tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
1029 bool nullsFirstFlag,
1030 int workMem, bool randomAccess)
1031 {
1032 Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
1033 MemoryContext oldcontext;
1034 int16 typlen;
1035 bool typbyval;
1037 oldcontext = MemoryContextSwitchTo(state->sortcontext);
1042 "begin datum sort: workMem = %d, randomAccess = %c",
1043 workMem, randomAccess ? 't' : 'f');
1046 state->nKeys = 1; /* always a one-column sort */
1048 TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
1049 false, /* no unique check */
1054 state->comparetup = comparetup_datum;
1055 state->copytup = copytup_datum;
1056 state->writetup = writetup_datum;
1057 state->readtup = readtup_datum;
1058 state->abbrevNext = 10;
1060 state->datumType = datumType;
1062 /* lookup necessary attributes of the datum type */
1063 get_typlenbyval(datumType, &typlen, &typbyval);
1064 state->datumTypeLen = typlen;
1065 state->tuples = !typbyval;
1067 /* Prepare SortSupport data */
1068 state->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
1070 state->sortKeys->ssup_cxt = CurrentMemoryContext;
1071 state->sortKeys->ssup_collation = sortCollation;
1072 state->sortKeys->ssup_nulls_first = nullsFirstFlag;
1075 * Abbreviation is possible here only for by-reference types. In theory,
1076 * a pass-by-value datatype could have an abbreviated form that is cheaper
1077 * to compare. In a tuple sort, we could support that, because we can
1078 * always extract the original datum from the tuple as needed.  Here, we
1079 * can't, because a datum sort only stores a single copy of the datum; the
1080 * "tuple" field of each sortTuple is NULL.
1082 state->sortKeys->abbreviate = !typbyval;
1084 PrepareSortSupportFromOrderingOp(sortOperator, state->sortKeys);
1087 * The "onlyKey" optimization cannot be used with abbreviated keys, since
1088 * tie-breaker comparisons may be required. Typically, the optimization
1089 * is only of value to pass-by-value types anyway, whereas abbreviated
1090 * keys are typically only of value to pass-by-reference types.
1092 if (!state->sortKeys->abbrev_converter)
1093 state->onlyKey = state->sortKeys;
1095 MemoryContextSwitchTo(oldcontext);
1097 return state;
1098 }
1101 * tuplesort_set_bound
1103 * Advise tuplesort that at most the first N result tuples are required.
1105 * Must be called before inserting any tuples. (Actually, we could allow it
1106 * as long as the sort hasn't spilled to disk, but there seems no need for
1107 * delayed calls at the moment.)
1109 * This is a hint only. The tuplesort may still return more tuples than
1110 * requested.
1111 */
1112 void
1113 tuplesort_set_bound(Tuplesortstate *state, int64 bound)
1114 {
1115 /* Assert we're called before loading any tuples */
1116 Assert(state->status == TSS_INITIAL);
1117 Assert(state->memtupcount == 0);
1118 Assert(!state->bounded);
1120 #ifdef DEBUG_BOUNDED_SORT
1121 /* Honor GUC setting that disables the feature (for easy testing) */
1122 if (!optimize_bounded_sort)
1123 return;
1124 #endif
1126 /* We want to be able to compute bound * 2, so limit the setting */
1127 if (bound > (int64) (INT_MAX / 2))
1128 return;
1130 state->bounded = true;
1131 state->bound = (int) bound;
1134 * Bounded sorts are not an effective target for abbreviated key
1135 * optimization. Disable by setting state to be consistent with no
1136 * abbreviation support.
1138 state->sortKeys->abbrev_converter = NULL;
1139 if (state->sortKeys->abbrev_full_comparator)
1140 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
1142 /* Not strictly necessary, but be tidy */
1143 state->sortKeys->abbrev_abort = NULL;
1144 state->sortKeys->abbrev_full_comparator = NULL;
1145 }
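/*
 * Illustrative (hypothetical) caller sketch for a bounded sort, as used
 * for ORDER BY ... LIMIT n: the bound must be declared before any tuples
 * are loaded, letting the implementation switch to the bounded heap once
 * enough input has been seen.
 */
#ifdef TUPLESORT_EXAMPLES
static Tuplesortstate *
example_begin_bounded_sort(TupleDesc tupDesc, int nkeys, AttrNumber *attNums,
                           Oid *sortOperators, Oid *sortCollations,
                           bool *nullsFirstFlags, int64 limit)
{
    Tuplesortstate *sortstate;

    sortstate = tuplesort_begin_heap(tupDesc, nkeys, attNums,
                                     sortOperators, sortCollations,
                                     nullsFirstFlags,
                                     work_mem, false);
    /* must precede any puttuple calls; a hint, not a hard cutoff */
    tuplesort_set_bound(sortstate, limit);
    return sortstate;
}
#endif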
1150 * Release resources and clean up.
1152 * NOTE: after calling this, any pointers returned by tuplesort_getXXX are
1153 * pointing to garbage. Be careful not to attempt to use or free such
1154 * pointers afterwards!
1156 void
1157 tuplesort_end(Tuplesortstate *state)
1158 {
1159 /* context swap probably not needed, but let's be safe */
1160 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1162 #ifdef TRACE_SORT
1163 long spaceUsed;
1165 if (state->tapeset)
1166 spaceUsed = LogicalTapeSetBlocks(state->tapeset);
1167 else
1168 spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
1169 #endif
1172 * Delete temporary "tape" files, if any.
1174 * Note: want to include this in reported total cost of sort, hence need
1175 * for two #ifdef TRACE_SORT sections.
1177 if (state->tapeset)
1178 LogicalTapeSetClose(state->tapeset);
1180 #ifdef TRACE_SORT
1181 if (trace_sort)
1182 {
1183 if (state->tapeset)
1184 elog(LOG, "external sort ended, %ld disk blocks used: %s",
1185 spaceUsed, pg_rusage_show(&state->ru_start));
1186 else
1187 elog(LOG, "internal sort ended, %ld KB used: %s",
1188 spaceUsed, pg_rusage_show(&state->ru_start));
1189 }
1191 TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
1192 #else
1195 * If you disabled TRACE_SORT, you can still probe sort__done, but you
1196 * ain't getting space-used stats.
1198 TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
1199 #endif
1201 /* Free any execution state created for CLUSTER case */
1202 if (state->estate != NULL)
1204 ExprContext *econtext = GetPerTupleExprContext(state->estate);
1206 ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
1207 FreeExecutorState(state->estate);
1210 MemoryContextSwitchTo(oldcontext);
1213 * Free the per-sort memory context, thereby releasing all working memory,
1214 * including the Tuplesortstate struct itself.
1216 MemoryContextDelete(state->sortcontext);
1217 }
1220 * Grow the memtuples[] array, if possible within our memory constraint. We
1221 * must not exceed INT_MAX tuples in memory or the caller-provided memory
1222 * limit. Return TRUE if we were able to enlarge the array, FALSE if not.
1224 * Normally, at each increment we double the size of the array. When doing
1225 * that would exceed a limit, we attempt one last, smaller increase (and then
1226 * clear the growmemtuples flag so we don't try any more). That allows us to
1227 * use memory as fully as permitted; sticking to the pure doubling rule could
1228 * result in almost half going unused. Because availMem moves around with
1229 * tuple addition/removal, we need some rule to prevent making repeated small
1230 * increases in memtupsize, which would just be useless thrashing. The
1231 * growmemtuples flag accomplishes that and also prevents useless
1232 * recalculations in this function.
1234 static bool
1235 grow_memtuples(Tuplesortstate *state)
1236 {
1237 int newmemtupsize;
1238 int memtupsize = state->memtupsize;
1239 int64 memNowUsed = state->allowedMem - state->availMem;
1241 /* Forget it if we've already maxed out memtuples, per comment above */
1242 if (!state->growmemtuples)
1243 return false;
1245 /* Select new value of memtupsize */
1246 if (memNowUsed <= state->availMem)
1249 * We've used no more than half of allowedMem; double our usage,
1250 * clamping at INT_MAX tuples.
1252 if (memtupsize < INT_MAX / 2)
1253 newmemtupsize = memtupsize * 2;
1256 newmemtupsize = INT_MAX;
1257 state->growmemtuples = false;
1263 * This will be the last increment of memtupsize. Abandon doubling
1264 * strategy and instead increase as much as we safely can.
1266 * To stay within allowedMem, we can't increase memtupsize by more
1267 * than availMem / sizeof(SortTuple) elements. In practice, we want
1268 * to increase it by considerably less, because we need to leave some
1269 * space for the tuples to which the new array slots will refer. We
1270 * assume the new tuples will be about the same size as the tuples
1271 * we've already seen, and thus we can extrapolate from the space
1272 * consumption so far to estimate an appropriate new size for the
1273 * memtuples array. The optimal value might be higher or lower than
1274 * this estimate, but it's hard to know that in advance. We again
1275 * clamp at INT_MAX tuples.
1277 * This calculation is safe against enlarging the array so much that
1278 * LACKMEM becomes true, because the memory currently used includes
1279 * the present array; thus, there would be enough allowedMem for the
1280 * new array elements even if no other memory were currently used.
1282 * We do the arithmetic in float8, because otherwise the product of
1283 * memtupsize and allowedMem could overflow. Any inaccuracy in the
1284 * result should be insignificant; but even if we computed a
1285 * completely insane result, the checks below will prevent anything
1286 * really bad from happening.
1290 grow_ratio = (double) state->allowedMem / (double) memNowUsed;
1291 if (memtupsize * grow_ratio < INT_MAX)
1292 newmemtupsize = (int) (memtupsize * grow_ratio);
1293 else
1294 newmemtupsize = INT_MAX;
1296 /* We won't make any further enlargement attempts */
1297 state->growmemtuples = false;
1300 /* Must enlarge array by at least one element, else report failure */
1301 if (newmemtupsize <= memtupsize)
1302 goto noalloc;
1305 * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize. Clamp
1306 * to ensure our request won't be rejected. Note that we can easily
1307 * exhaust address space before facing this outcome. (This is presently
1308 * impossible due to guc.c's MAX_KILOBYTES limitation on work_mem, but
1309 * don't rely on that at this distance.)
1311 if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
1312 {
1313 newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
1314 state->growmemtuples = false; /* can't grow any more */
1315 }
1318 * We need to be sure that we do not cause LACKMEM to become true, else
1319 * the space management algorithm will go nuts. The code above should
1320 * never generate a dangerous request, but to be safe, check explicitly
1321 * that the array growth fits within availMem. (We could still cause
1322 * LACKMEM if the memory chunk overhead associated with the memtuples
1323 * array were to increase. That shouldn't happen because we chose the
1324 * initial array size large enough to ensure that palloc will be treating
1325 * both old and new arrays as separate chunks. But we'll check LACKMEM
1326 * explicitly below just in case.)
1328 if (state->availMem < (int64) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
1329 goto noalloc;
1331 /* OK, do it */
1332 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
1333 state->memtupsize = newmemtupsize;
1334 state->memtuples = (SortTuple *)
1335 repalloc_huge(state->memtuples,
1336 state->memtupsize * sizeof(SortTuple));
1337 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
1338 if (state->memtuples == NULL)
1339 elog(ERROR, "unexpected out-of-memory situation in tuplesort");
1340 return true;
1342 noalloc:
1343 /* If for any reason we didn't realloc, shut off future attempts */
1344 state->growmemtuples = false;
1345 return false;
1346 }
1349 * Accept one tuple while collecting input data for sort.
1351 * Note that the input data is always copied; the caller need not save it.
1353 void
1354 tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
1355 {
1356 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1357 SortTuple stup;
1360 * Copy the given tuple into memory we control, and decrease availMem.
1361 * Then call the common code.
1363 COPYTUP(state, &stup, (void *) slot);
1365 puttuple_common(state, &stup);
1367 MemoryContextSwitchTo(oldcontext);
1371 * Accept one tuple while collecting input data for sort.
1373 * Note that the input data is always copied; the caller need not save it.
1375 void
1376 tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
1377 {
1378 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1379 SortTuple stup;
1382 * Copy the given tuple into memory we control, and decrease availMem.
1383 * Then call the common code.
1385 COPYTUP(state, &stup, (void *) tup);
1387 puttuple_common(state, &stup);
1389 MemoryContextSwitchTo(oldcontext);
1393 * Collect one index tuple while collecting input data for sort, building
1394 * it from caller-supplied values.
1396 void
1397 tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
1398 ItemPointer self, Datum *values,
1399 bool *isnull)
1400 {
1401 MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
1402 SortTuple stup;
1403 Datum original;
1404 IndexTuple tuple;
1406 stup.tuple = index_form_tuple(RelationGetDescr(rel), values, isnull);
1407 tuple = ((IndexTuple) stup.tuple);
1408 tuple->t_tid = *self;
1409 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1410 /* set up first-column key value */
1411 original = index_getattr(tuple,
1412 1,
1413 RelationGetDescr(state->indexRel),
1414 &stup.isnull1);
1416 MemoryContextSwitchTo(state->sortcontext);
1418 if (!state->sortKeys || !state->sortKeys->abbrev_converter || stup.isnull1)
1419 {
1420 /*
1421 * Store ordinary Datum representation, or NULL value. If there is a
1422 * converter it won't expect NULL values, and cost model is not
1423 * required to account for NULL, so in that case we avoid calling
1424 * converter and just set datum1 to zeroed representation (to be
1425 * consistent, and to support cheap inequality tests for NULL
1426 * abbreviated keys).
1428 stup.datum1 = original;
1429 }
1430 else if (!consider_abort_common(state))
1431 {
1432 /* Store abbreviated key representation */
1433 stup.datum1 = state->sortKeys->abbrev_converter(original,
1434 state->sortKeys);
1435 }
1436 else
1437 {
1438 /* Abort abbreviation */
1439 int i;
1441 stup.datum1 = original;
1444 * Set state to be consistent with never trying abbreviation.
1446 * Alter datum1 representation in already-copied tuples, so as to
1447 * ensure a consistent representation (current tuple was just
1448 * handled). It does not matter if some dumped tuples are already
1449 * sorted on tape, since serialized tuples lack abbreviated keys
1450 * (TSS_BUILDRUNS state prevents control reaching here in any case).
1452 for (i = 0; i < state->memtupcount; i++)
1453 {
1454 SortTuple *mtup = &state->memtuples[i];
1456 tuple = mtup->tuple;
1457 mtup->datum1 = index_getattr(tuple,
1458 1,
1459 RelationGetDescr(state->indexRel),
1460 &mtup->isnull1);
1461 }
1462 }
1464 puttuple_common(state, &stup);
1466 MemoryContextSwitchTo(oldcontext);
1470 * Accept one Datum while collecting input data for sort.
1472 * If the Datum is pass-by-ref type, the value will be copied.
1474 void
1475 tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
1476 {
1477 MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
1478 SortTuple stup;
1481 * Pass-by-value types or null values are just stored directly in
1482 * stup.datum1 (and stup.tuple is not used and set to NULL).
1484 * Non-null pass-by-reference values need to be copied into memory we
1485 * control, and possibly abbreviated. The copied value is pointed to by
1486 * stup.tuple and is treated as the canonical copy (e.g. to return via
1487 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
1488 * abbreviated value if abbreviation is happening, otherwise it's
1489 * identical to stup.tuple.
1492 if (isNull || !state->tuples)
1493 {
1495 * Set datum1 to zeroed representation for NULLs (to be consistent,
1496 * and to support cheap inequality tests for NULL abbreviated keys).
1498 stup.datum1 = !isNull ? val : (Datum) 0;
1499 stup.isnull1 = isNull;
1500 stup.tuple = NULL; /* no separate storage */
1501 MemoryContextSwitchTo(state->sortcontext);
1502 }
1503 else
1504 {
1505 Datum original = datumCopy(val, false, state->datumTypeLen);
1507 stup.isnull1 = false;
1508 stup.tuple = DatumGetPointer(original);
1509 USEMEM(state, GetMemoryChunkSpace(stup.tuple));
1510 MemoryContextSwitchTo(state->sortcontext);
1512 if (!state->sortKeys->abbrev_converter)
1514 stup.datum1 = original;
1516 else if (!consider_abort_common(state))
1517 {
1518 /* Store abbreviated key representation */
1519 stup.datum1 = state->sortKeys->abbrev_converter(original,
1520 state->sortKeys);
1521 }
1522 else
1523 {
1524 /* Abort abbreviation */
1525 int i;
1527 stup.datum1 = original;
1530 * Set state to be consistent with never trying abbreviation.
1532 * Alter datum1 representation in already-copied tuples, so as to
1533 * ensure a consistent representation (current tuple was just
1534 * handled). It does not matter if some dumped tuples are already
1535 * sorted on tape, since serialized tuples lack abbreviated keys
1536 * (TSS_BUILDRUNS state prevents control reaching here in any
1539 for (i = 0; i < state->memtupcount; i++)
1540 {
1541 SortTuple *mtup = &state->memtuples[i];
1543 mtup->datum1 = PointerGetDatum(mtup->tuple);
1544 }
1545 }
1546 }
1548 puttuple_common(state, &stup);
1550 MemoryContextSwitchTo(oldcontext);
1551 }
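/*
 * Illustrative (hypothetical) sketch of the Datum-sort API used above:
 * sort bare int4 Datums.  INT4OID and Int4LessOperator are assumed to be
 * available from pg_type.h and pg_operator.h (not included by this file),
 * and the trailing abbreviated-key argument of tuplesort_getdatum (NULL
 * here) is assumed per this era's API.
 */
#ifdef TUPLESORT_EXAMPLES
static void
example_datum_sort(int32 *vals, int nvals, int32 *sorted)
{
    Tuplesortstate *sortstate;
    int         i;

    sortstate = tuplesort_begin_datum(INT4OID, Int4LessOperator,
                                      InvalidOid, false,
                                      work_mem, false);
    for (i = 0; i < nvals; i++)
        tuplesort_putdatum(sortstate, Int32GetDatum(vals[i]), false);
    tuplesort_performsort(sortstate);
    for (i = 0; i < nvals; i++)
    {
        Datum       val;
        bool        isnull;

        if (!tuplesort_getdatum(sortstate, true, &val, &isnull, NULL))
            break;
        sorted[i] = DatumGetInt32(val);
    }
    tuplesort_end(sortstate);
}
#endif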
1554 * Shared code for tuple and datum cases.
1556 static void
1557 puttuple_common(Tuplesortstate *state, SortTuple *tuple)
1558 {
1559 switch (state->status)
1560 {
1561 case TSS_INITIAL:
1563 /*
1564 * Save the tuple into the unsorted array. First, grow the array
1565 * as needed. Note that we try to grow the array when there is
1566 * still one free slot remaining --- if we fail, there'll still be
1567 * room to store the incoming tuple, and then we'll switch to
1568 * tape-based operation.
1570 if (state->memtupcount >= state->memtupsize - 1)
1571 {
1572 (void) grow_memtuples(state);
1573 Assert(state->memtupcount < state->memtupsize);
1574 }
1575 state->memtuples[state->memtupcount++] = *tuple;
1578 * Check if it's time to switch over to a bounded heapsort. We do
1579 * so if the input tuple count exceeds twice the desired tuple
1580 * count (this is a heuristic for where heapsort becomes cheaper
1581 * than a quicksort), or if we've just filled workMem and have
1582 * enough tuples to meet the bound.
1584 * Note that once we enter TSS_BOUNDED state we will always try to
1585 * complete the sort that way. In the worst case, if later input
1586 * tuples are larger than earlier ones, this might cause us to
1587 * exceed workMem significantly.
1589 if (state->bounded &&
1590 (state->memtupcount > state->bound * 2 ||
1591 (state->memtupcount > state->bound && LACKMEM(state))))
1592 {
1593 #ifdef TRACE_SORT
1594 if (trace_sort)
1595 elog(LOG, "switching to bounded heapsort at %d tuples: %s",
1596 state->memtupcount,
1597 pg_rusage_show(&state->ru_start));
1598 #endif
1599 make_bounded_heap(state);
1600 return;
1601 }
1603 /*
1604 * Done if we still fit in available memory and have array slots.
1605 */
1606 if (state->memtupcount < state->memtupsize && !LACKMEM(state))
1607 return;
1609 /*
1610 * Nope; time to switch to tape-based operation.
1611 */
1612 inittapes(state);
1614 /*
1615 * Dump tuples until we are back under the limit.
1616 */
1617 dumptuples(state, false);
1618 break;
1620 case TSS_BOUNDED:
1622 /*
1623 * We don't want to grow the array here, so check whether the new
1624 * tuple can be discarded before putting it in. This should be a
1625 * good speed optimization, too, since when there are many more
1626 * input tuples than the bound, most input tuples can be discarded
1627 * with just this one comparison. Note that because we currently
1628 * have the sort direction reversed, we must check for <= not >=.
1629 */
1630 if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
1631 {
1632 /* new tuple <= top of the heap, so we can discard it */
1633 free_sort_tuple(state, tuple);
1634 CHECK_FOR_INTERRUPTS();
1635 }
1636 else
1637 {
1638 /* discard top of heap, replacing it with the new tuple */
1639 free_sort_tuple(state, &state->memtuples[0]);
1640 tuple->tupindex = 0; /* not used */
1641 tuplesort_heap_replace_top(state, tuple, false);
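			/*
			 * Worked example (illustrative): for an ascending sort with
			 * bound 3 and heap contents {10, 7, 9} -- 10 at the top, since
			 * the sort direction is reversed -- an incoming tuple with key
			 * 12 compares <= the top under the reversed comparator and is
			 * discarded, while an incoming 5 replaces the 10 at the top and
			 * is sifted into place, leaving {9, 7, 5} as the best three
			 * seen so far.
			 */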
			break;

		case TSS_BUILDRUNS:

			 * Insert the tuple into the heap, with run number currentRun if
1649 * it can go into the current run, else HEAP_RUN_NEXT. The tuple
1650 * can go into the current run if it is >= the first
1651 * not-yet-output tuple. (Actually, it could go into the current
1652 * run if it is >= the most recently output tuple ... but that
1653 * would require keeping around the tuple we last output, and it's
			 * simplest to let writetup free each tuple as soon as it's
			 * written.)
1657 * Note that this only applies when:
1659 * - currentRun is RUN_FIRST
1661 * - Replacement selection is in use (typically it is never used).
1663 * When these two conditions are not both true, all tuples are
1664 * appended indifferently, much like the TSS_INITIAL case.
1666 * There should always be room to store the incoming tuple.
1668 Assert(!state->replaceActive || state->memtupcount > 0);
1669 if (state->replaceActive &&
1670 COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
1672 Assert(state->currentRun == RUN_FIRST);
1675 * Insert tuple into first, fully heapified run.
1677 * Unlike classic replacement selection, which this module was
1678 * previously based on, only RUN_FIRST tuples are fully
1679 * heapified. Any second/next run tuples are appended
1680 * indifferently. While HEAP_RUN_NEXT tuples may be sifted
1681 * out of the way of first run tuples, COMPARETUP() will never
1682 * be called for the run's tuples during sifting (only our
1683 * initial COMPARETUP() call is required for the tuple, to
1684 * determine that the tuple does not belong in RUN_FIRST).
1686 tuple->tupindex = state->currentRun;
1687 tuplesort_heap_insert(state, tuple, true);
1692 * Tuple was determined to not belong to heapified RUN_FIRST,
1693 * or replacement selection not in play. Append the tuple to
1694 * memtuples indifferently.
1696 * dumptuples() does not trust that the next run's tuples are
1697 * heapified. Anything past the first run will always be
1698 * quicksorted even when replacement selection is initially
			 * used.  (When it's never used, every tuple still takes this
			 * path.)
1702 tuple->tupindex = HEAP_RUN_NEXT;
1703 state->memtuples[state->memtupcount++] = *tuple;
1707 * If we are over the memory limit, dump tuples till we're under.
1709 dumptuples(state, false);
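			/*
			 * Illustrative walk-through (not part of the code): suppose
			 * replacement selection is active and the heap's top tuple has
			 * key 40.  Incoming keys 55 and 60 compare >= 40, so they are
			 * heapified into the current (first) run; an incoming 37 sorts
			 * before the first not-yet-output tuple, so it cannot extend
			 * the run and is appended with tupindex = HEAP_RUN_NEXT, to be
			 * quicksorted with the rest of the second run later.
			 */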
			break;

		default:
			elog(ERROR, "invalid tuplesort state");
			break;
	}
}
1719 consider_abort_common(Tuplesortstate *state)
1721 Assert(state->sortKeys[0].abbrev_converter != NULL);
1722 Assert(state->sortKeys[0].abbrev_abort != NULL);
1723 Assert(state->sortKeys[0].abbrev_full_comparator != NULL);
1726 * Check effectiveness of abbreviation optimization. Consider aborting
1727 * when still within memory limit.
1729 if (state->status == TSS_INITIAL &&
1730 state->memtupcount >= state->abbrevNext)
1732 state->abbrevNext *= 2;
1735 * Check opclass-supplied abbreviation abort routine. It may indicate
1736 * that abbreviation should not proceed.
1738 if (!state->sortKeys->abbrev_abort(state->memtupcount,
1743 * Finally, restore authoritative comparator, and indicate that
1744 * abbreviation is not in play by setting abbrev_converter to NULL
1746 state->sortKeys[0].comparator = state->sortKeys[0].abbrev_full_comparator;
1747 state->sortKeys[0].abbrev_converter = NULL;
1748 /* Not strictly necessary, but be tidy */
1749 state->sortKeys[0].abbrev_abort = NULL;
1750 state->sortKeys[0].abbrev_full_comparator = NULL;
		/* Give up - expect original pass-by-value representation */
		return true;
1760 * All tuples have been provided; finish the sort.
1763 tuplesort_performsort(Tuplesortstate *state)
1765 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
1769 elog(LOG, "performsort starting: %s",
1770 pg_rusage_show(&state->ru_start));
1773 switch (state->status)
	{
		case TSS_INITIAL:

			 * We were able to accumulate all the tuples within the allowed
1779 * amount of memory. Just qsort 'em and we're done.
1781 tuplesort_sort_memtuples(state);
1783 state->eof_reached = false;
1784 state->markpos_offset = 0;
1785 state->markpos_eof = false;
1786 state->status = TSS_SORTEDINMEM;
			break;

		case TSS_BOUNDED:

			 * We were able to accumulate all the tuples required for output
1793 * in memory, using a heap to eliminate excess tuples. Now we
1794 * have to transform the heap to a properly-sorted array.
1796 sort_bounded_heap(state);
1798 state->eof_reached = false;
1799 state->markpos_offset = 0;
1800 state->markpos_eof = false;
1801 state->status = TSS_SORTEDINMEM;
			break;

		case TSS_BUILDRUNS:

			 * Finish tape-based sort.  First, flush all tuples remaining in
1808 * memory out to tape; then merge until we have a single remaining
1809 * run (or, if !randomAccess, one run per tape). Note that
1810 * mergeruns sets the correct state->status.
			dumptuples(state, true);
			mergeruns(state);
1814 state->eof_reached = false;
1815 state->markpos_block = 0L;
1816 state->markpos_offset = 0;
1817 state->markpos_eof = false;
			break;

		default:
			elog(ERROR, "invalid tuplesort state");
			break;
	}
1828 if (state->status == TSS_FINALMERGE)
		elog(LOG, "performsort done (except %d-way final merge): %s",
			 state->activeTapes,
			 pg_rusage_show(&state->ru_start));
1833 elog(LOG, "performsort done: %s",
1834 pg_rusage_show(&state->ru_start));
1838 MemoryContextSwitchTo(oldcontext);
1842 * Internal routine to fetch the next tuple in either forward or back
1843 * direction into *stup. Returns FALSE if no more tuples.
1844 * Returned tuple belongs to tuplesort memory context, and must not be freed
1845 * by caller. Caller should not use tuple following next call here.
1848 tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
	unsigned int tuplen;
	size_t		nmoved;
1854 switch (state->status)
1856 case TSS_SORTEDINMEM:
1857 Assert(forward || state->randomAccess);
1858 Assert(!state->slabAllocatorUsed);
1861 if (state->current < state->memtupcount)
				*stup = state->memtuples[state->current++];
				return true;
1866 state->eof_reached = true;
1869 * Complain if caller tries to retrieve more tuples than
1870 * originally asked for in a bounded sort. This is because
1871 * returning EOF here might be the wrong thing.
1873 if (state->bounded && state->current >= state->bound)
					elog(ERROR, "retrieved too many tuples in a bounded sort");

				return false;
			if (state->current <= 0)
				return false;
			 * If all tuples were fetched already, return the last tuple;
			 * otherwise, return the tuple just before the last one
			 * returned.
			if (state->eof_reached)
				state->eof_reached = false;
			else
			{
				state->current--;	/* last returned tuple */
				if (state->current <= 0)
					return false;
			}
			*stup = state->memtuples[state->current - 1];
			return true;
1900 case TSS_SORTEDONTAPE:
1901 Assert(forward || state->randomAccess);
1902 Assert(state->slabAllocatorUsed);
1905 * The slot that held the tuple that we returned in previous
1906 * gettuple call can now be reused.
1908 if (state->lastReturnedTuple)
1910 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
1911 state->lastReturnedTuple = NULL;
				if (state->eof_reached)
					return false;
1919 if ((tuplen = getlen(state, state->result_tape, true)) != 0)
1921 READTUP(state, stup, state->result_tape, tuplen);
1924 * Remember the tuple we return, so that we can recycle
1925 * its memory on next call. (This can be NULL, in the
1926 * !state->tuples case).
					state->lastReturnedTuple = stup->tuple;
					return true;
				}
				else
				{
					state->eof_reached = true;
					return false;
				}
		 * If all tuples were fetched already, return the last tuple;
		 * otherwise, return the tuple just before the last one returned.
1945 if (state->eof_reached)
1948 * Seek position is pointing just past the zero tuplen at the
1949 * end of file; back up to fetch last tuple's ending length
1950 * word. If seek fails we must have a completely empty file.
				nmoved = LogicalTapeBackspace(state->tapeset,
											  state->result_tape,
											  2 * sizeof(unsigned int));
				if (nmoved == 0)
					return false;
				else if (nmoved != 2 * sizeof(unsigned int))
					elog(ERROR, "unexpected tape position");
1959 state->eof_reached = false;
1964 * Back up and fetch previously-returned tuple's ending length
1965 * word. If seek fails, assume we are at start of file.
				nmoved = LogicalTapeBackspace(state->tapeset,
											  state->result_tape,
											  sizeof(unsigned int));
				if (nmoved == 0)
					return false;
				else if (nmoved != sizeof(unsigned int))
					elog(ERROR, "unexpected tape position");
1974 tuplen = getlen(state, state->result_tape, false);
1977 * Back up to get ending length word of tuple before it.
			nmoved = LogicalTapeBackspace(state->tapeset,
										  state->result_tape,
										  tuplen + 2 * sizeof(unsigned int));
1982 if (nmoved == tuplen + sizeof(unsigned int))
1985 * We backed up over the previous tuple, but there was no
1986 * ending length word before it. That means that the prev
1987 * tuple is the first tuple in the file. It is now the
1988 * next to read in forward direction (not obviously right,
			 * but that is what the in-memory case does).
1993 else if (nmoved != tuplen + 2 * sizeof(unsigned int))
1994 elog(ERROR, "bogus tuple length in backward scan");
1997 tuplen = getlen(state, state->result_tape, false);
2000 * Now we have the length of the prior tuple, back up and read it.
2001 * Note: READTUP expects we are positioned after the initial
2002 * length word of the tuple, so back up to that point.
			nmoved = LogicalTapeBackspace(state->tapeset,
										  state->result_tape,
										  tuplen);
2007 if (nmoved != tuplen)
2008 elog(ERROR, "bogus tuple length in backward scan");
2009 READTUP(state, stup, state->result_tape, tuplen);
2012 * Remember the tuple we return, so that we can recycle its memory
2013 * on next call. (This can be NULL, in the Datum case).
			state->lastReturnedTuple = stup->tuple;

			return true;
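			/*
			 * Sketch of the on-tape record layout that the backspacing
			 * arithmetic above relies on (randomAccess case; see
			 * writetup_heap below):
			 *
			 *   [len][tuple body][len] [len][tuple body][len] ... [0]
			 *
			 * Every record is framed by its length word on both sides, so
			 * a backward scan can read a trailing length word, backspace
			 * over the body plus the adjacent framing words, and repeat;
			 * the terminating zero length word marks the end of the run.
			 */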
2019 case TSS_FINALMERGE:
2021 /* We are managing memory ourselves, with the slab allocator. */
2022 Assert(state->slabAllocatorUsed);
2025 * The slab slot holding the tuple that we returned in previous
2026 * gettuple call can now be reused.
2028 if (state->lastReturnedTuple)
2030 RELEASE_SLAB_SLOT(state, state->lastReturnedTuple);
2031 state->lastReturnedTuple = NULL;
2035 * This code should match the inner loop of mergeonerun().
2037 if (state->memtupcount > 0)
2039 int srcTape = state->memtuples[0].tupindex;
2042 *stup = state->memtuples[0];
2045 * Remember the tuple we return, so that we can recycle its
2046 * memory on next call. (This can be NULL, in the Datum case).
2048 state->lastReturnedTuple = stup->tuple;
2051 * Pull next tuple from tape, and replace the returned tuple
2052 * at top of the heap with it.
2054 if (!mergereadnext(state, srcTape, &newtup))
2057 * If no more data, we've reached end of run on this tape.
2058 * Remove the top node from the heap.
2060 tuplesort_heap_delete_top(state, false);
2063 * Rewind to free the read buffer. It'd go away at the
2064 * end of the sort anyway, but better to release the
2067 LogicalTapeRewindForWrite(state->tapeset, srcTape);
				else
				{
					newtup.tupindex = srcTape;
2071 tuplesort_heap_replace_top(state, &newtup, false);
				}
				return true;
			}
			return false;

		default:
			elog(ERROR, "invalid tuplesort state");
2078 return false; /* keep compiler quiet */
2083 * Fetch the next tuple in either forward or back direction.
 * If successful, put tuple in slot and return TRUE; else, clear the slot
 * and return FALSE.
2087 * Caller may optionally be passed back abbreviated value (on TRUE return
2088 * value) when abbreviation was used, which can be used to cheaply avoid
2089 * equality checks that might otherwise be required. Caller can safely make a
2090 * determination of "non-equal tuple" based on simple binary inequality. A
2091 * NULL value in leading attribute will set abbreviated value to zeroed
2092 * representation, which caller may rely on in abbreviated inequality check.
2094 * The slot receives a copied tuple (sometimes allocated in caller memory
2095 * context) that will stay valid regardless of future manipulations of the
2096 * tuplesort's state.
2099 tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
2100 TupleTableSlot *slot, Datum *abbrev)
2102 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	if (!tuplesort_gettuple_common(state, forward, &stup))
		stup.tuple = NULL;

	MemoryContextSwitchTo(oldcontext);

	if (stup.tuple)
	{
		/* Record abbreviated key for caller */
		if (state->sortKeys->abbrev_converter && abbrev)
			*abbrev = stup.datum1;

		stup.tuple = heap_copy_minimal_tuple((MinimalTuple) stup.tuple);
		ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, true);
		return true;
	}
	else
	{
		ExecClearTuple(slot);
		return false;
	}
}
2128 * Fetch the next tuple in either forward or back direction.
2129 * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
2130 * context, and must not be freed by caller. Caller should not use tuple
2131 * following next call here.
2134 tuplesort_getheaptuple(Tuplesortstate *state, bool forward)
2136 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	if (!tuplesort_gettuple_common(state, forward, &stup))
		stup.tuple = NULL;

	MemoryContextSwitchTo(oldcontext);

	return stup.tuple;
}
2148 * Fetch the next index tuple in either forward or back direction.
2149 * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
2150 * context, and must not be freed by caller. Caller should not use tuple
2151 * following next call here.
2154 tuplesort_getindextuple(Tuplesortstate *state, bool forward)
2156 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	if (!tuplesort_gettuple_common(state, forward, &stup))
		stup.tuple = NULL;

	MemoryContextSwitchTo(oldcontext);
2164 return (IndexTuple) stup.tuple;
2168 * Fetch the next Datum in either forward or back direction.
2169 * Returns FALSE if no more datums.
2171 * If the Datum is pass-by-ref type, the returned value is freshly palloc'd
2172 * and is now owned by the caller (this differs from similar routines for
2173 * other types of tuplesorts).
2175 * Caller may optionally be passed back abbreviated value (on TRUE return
2176 * value) when abbreviation was used, which can be used to cheaply avoid
2177 * equality checks that might otherwise be required. Caller can safely make a
2178 * determination of "non-equal tuple" based on simple binary inequality. A
2179 * NULL value will have a zeroed abbreviated value representation, which caller
2180 * may rely on in abbreviated inequality check.
2183 tuplesort_getdatum(Tuplesortstate *state, bool forward,
2184 Datum *val, bool *isNull, Datum *abbrev)
2186 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
	SortTuple	stup;

	if (!tuplesort_gettuple_common(state, forward, &stup))
	{
		MemoryContextSwitchTo(oldcontext);
		return false;
	}

	/* Record abbreviated key for caller */
	if (state->sortKeys->abbrev_converter && abbrev)
		*abbrev = stup.datum1;

	if (stup.isnull1 || !state->tuples)
	{
		*val = stup.datum1;
		*isNull = stup.isnull1;
	}
	else
	{
		/* use stup.tuple because stup.datum1 may be an abbreviation */
		*val = datumCopy(PointerGetDatum(stup.tuple), false, state->datumTypeLen);
		*isNull = false;
	}

	MemoryContextSwitchTo(oldcontext);

	return true;
}
2217 * Advance over N tuples in either forward or back direction,
2218 * without returning any data. N==0 is a no-op.
2219 * Returns TRUE if successful, FALSE if ran out of tuples.
2222 tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
2224 MemoryContext oldcontext;
2227 * We don't actually support backwards skip yet, because no callers need
2228 * it. The API is designed to allow for that later, though.
2231 Assert(ntuples >= 0);
2233 switch (state->status)
2235 case TSS_SORTEDINMEM:
			if (state->memtupcount - state->current >= ntuples)
			{
				state->current += ntuples;
				return true;
			}
			state->current = state->memtupcount;
			state->eof_reached = true;
2245 * Complain if caller tries to retrieve more tuples than
2246 * originally asked for in a bounded sort. This is because
2247 * returning EOF here might be the wrong thing.
2249 if (state->bounded && state->current >= state->bound)
				elog(ERROR, "retrieved too many tuples in a bounded sort");

			return false;
2254 case TSS_SORTEDONTAPE:
2255 case TSS_FINALMERGE:
2258 * We could probably optimize these cases better, but for now it's
2259 * not worth the trouble.
2261 oldcontext = MemoryContextSwitchTo(state->sortcontext);
2262 while (ntuples-- > 0)
2266 if (!tuplesort_gettuple_common(state, forward, &stup))
				{
					MemoryContextSwitchTo(oldcontext);
					return false;
				}
2271 CHECK_FOR_INTERRUPTS();
			MemoryContextSwitchTo(oldcontext);
			return true;
2277 elog(ERROR, "invalid tuplesort state");
2278 return false; /* keep compiler quiet */
2283 * tuplesort_merge_order - report merge order we'll use for given memory
2284 * (note: "merge order" just means the number of input tapes in the merge).
2286 * This is exported for use by the planner. allowedMem is in bytes.
2289 tuplesort_merge_order(int64 allowedMem)
2294 * We need one tape for each merge input, plus another one for the output,
2295 * and each of these tapes needs buffer space. In addition we want
 * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
 * need so much).
2299 * Note: you might be thinking we need to account for the memtuples[]
2300 * array in this calculation, but we effectively treat that as part of the
2301 * MERGE_BUFFER_SIZE workspace.
2303 mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
2304 (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
2307 * Even in minimum memory, use at least a MINORDER merge. On the other
2308 * hand, even when we have lots of memory, do not use more than a MAXORDER
2309 * merge. Tapes are pretty cheap, but they're not entirely free. Each
2310 * additional tape reduces the amount of memory available to build runs,
2311 * which in turn can cause the same sort to need more runs, which makes
2312 * merging slower even if it can still be done in a single pass. Also,
2313 * high order merges are quite slow due to CPU cache effects; it can be
2314 * faster to pay the I/O cost of a polyphase merge than to perform a single
2315 * merge pass across many hundreds of tapes.
2317 mOrder = Max(mOrder, MINORDER);
	mOrder = Min(mOrder, MAXORDER);

	return mOrder;
}
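/*
 * Worked example (figures are illustrative, assuming 8kB-block-based
 * defaults of roughly 24kB of TAPE_BUFFER_OVERHEAD and a 256kB
 * MERGE_BUFFER_SIZE): with allowedMem of 64MB, the formula above gives
 * (65536kB - 24kB) / (256kB + 24kB), i.e. a merge order of about 233,
 * which the clamps leave untouched since it falls between MINORDER and
 * MAXORDER.
 */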
2324 * useselection - determine algorithm to use to sort first run.
2326 * It can sometimes be useful to use the replacement selection algorithm if it
2327 * results in one large run, and there is little available workMem. See
2328 * remarks on RUN_SECOND optimization within dumptuples().
2331 useselection(Tuplesortstate *state)
2334 * memtupsize might be noticeably higher than memtupcount here in atypical
2335 * cases. It seems slightly preferable to not allow recent outliers to
2336 * impact this determination. Note that caller's trace_sort output
2337 * reports memtupcount instead.
	if (state->memtupsize <= replacement_sort_tuples)
		return true;

	return false;
}
2346 * inittapes - initialize for tape sorting.
2348 * This is called only if we have found we don't have room to sort in memory.
2351 inittapes(Tuplesortstate *state)
2357 /* Compute number of tapes to use: merge order plus 1 */
2358 maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
2360 state->maxTapes = maxTapes;
2361 state->tapeRange = maxTapes - 1;
2365 elog(LOG, "switching to external sort with %d tapes: %s",
2366 maxTapes, pg_rusage_show(&state->ru_start));
2370 * Decrease availMem to reflect the space needed for tape buffers, when
2371 * writing the initial runs; but don't decrease it to the point that we
2372 * have no room for tuples. (That case is only likely to occur if sorting
2373 * pass-by-value Datums; in all other scenarios the memtuples[] array is
2374 * unlikely to occupy more than half of allowedMem. In the pass-by-value
2375 * case it's not important to account for tuple space, so we don't care if
2376 * LACKMEM becomes inaccurate.)
	tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
2380 if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
2381 USEMEM(state, tapeSpace);
2384 * Make sure that the temp file(s) underlying the tape set are created in
2385 * suitable temp tablespaces.
2387 PrepareTempTablespaces();
2390 * Create the tape set and allocate the per-tape data arrays.
2392 state->tapeset = LogicalTapeSetCreate(maxTapes);
2394 state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool));
2395 state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
2396 state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
2397 state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
2398 state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
2401 * Give replacement selection a try based on user setting. There will be
2402 * a switch to a simple hybrid sort-merge strategy after the first run
2403 * (iff we could not output one long run).
2405 state->replaceActive = useselection(state);
2407 if (state->replaceActive)
2410 * Convert the unsorted contents of memtuples[] into a heap. Each
2411 * tuple is marked as belonging to run number zero.
2413 * NOTE: we pass false for checkIndex since there's no point in
2414 * comparing indexes in this step, even though we do intend the
2415 * indexes to be part of the sort key...
2417 int ntuples = state->memtupcount;
2421 elog(LOG, "replacement selection will sort %d first run tuples",
2422 state->memtupcount);
2424 state->memtupcount = 0; /* make the heap empty */
2426 for (j = 0; j < ntuples; j++)
2428 /* Must copy source tuple to avoid possible overwrite */
2429 SortTuple stup = state->memtuples[j];
2431 stup.tupindex = RUN_FIRST;
2432 tuplesort_heap_insert(state, &stup, false);
2434 Assert(state->memtupcount == ntuples);
2437 state->currentRun = RUN_FIRST;
2440 * Initialize variables of Algorithm D (step D1).
2442 for (j = 0; j < maxTapes; j++)
2444 state->tp_fib[j] = 1;
2445 state->tp_runs[j] = 0;
2446 state->tp_dummy[j] = 1;
2447 state->tp_tapenum[j] = j;
2449 state->tp_fib[state->tapeRange] = 0;
2450 state->tp_dummy[state->tapeRange] = 0;
2453 state->destTape = 0;
2455 state->status = TSS_BUILDRUNS;
2459 * selectnewtape -- select new tape for new initial run.
2461 * This is called after finishing a run when we know another run
2462 * must be started. This implements steps D3, D4 of Algorithm D.
2465 selectnewtape(Tuplesortstate *state)
2470 /* Step D3: advance j (destTape) */
	if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
	{
		state->destTape++;
		return;
	}
	if (state->tp_dummy[state->destTape] != 0)
	{
		state->destTape = 0;
		return;
	}
2482 /* Step D4: increase level */
2484 a = state->tp_fib[0];
2485 for (j = 0; j < state->tapeRange; j++)
2487 state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
2488 state->tp_fib[j] = a + state->tp_fib[j + 1];
2490 state->destTape = 0;
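/*
 * Example of the distribution this produces (illustrative): with four
 * tapes (tapeRange = 3), successive levels target per-tape run counts
 * following generalized Fibonacci numbers -- (1,1,1), then (2,2,1),
 * (4,3,2), (7,6,4), and so on -- with tp_dummy[] making up the
 * difference until enough real runs have actually been written.
 */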
2494 * Initialize the slab allocation arena, for the given number of slots.
static void
init_slab_allocator(Tuplesortstate *state, int numSlots)
{
	if (numSlots > 0)
	{
		char	   *p;
		int			i;

		state->slabMemoryBegin = palloc(numSlots * SLAB_SLOT_SIZE);
		state->slabMemoryEnd = state->slabMemoryBegin +
			numSlots * SLAB_SLOT_SIZE;
		state->slabFreeHead = (SlabSlot *) state->slabMemoryBegin;
		USEMEM(state, numSlots * SLAB_SLOT_SIZE);

		/* Chain the slots into the initial freelist */
		p = state->slabMemoryBegin;
		for (i = 0; i < numSlots - 1; i++)
		{
			((SlabSlot *) p)->nextfree = (SlabSlot *) (p + SLAB_SLOT_SIZE);
			p += SLAB_SLOT_SIZE;
		}
		((SlabSlot *) p)->nextfree = NULL;
	}
	else
	{
		state->slabMemoryBegin = state->slabMemoryEnd = NULL;
		state->slabFreeHead = NULL;
	}
	state->slabAllocatorUsed = true;
}
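/*
 * Resulting arena layout (sketch): numSlots fixed-size slots carved out
 * of a single palloc'd chunk and threaded into a singly-linked freelist:
 *
 *   slabFreeHead -> [slot 0] -> [slot 1] -> ... -> [slot N-1] -> NULL
 *
 * readtup_alloc() pops a slot from the head and RELEASE_SLAB_SLOT()
 * pushes one back, so both allocation and release are O(1), with no
 * per-tuple palloc/pfree traffic during the merge.
 */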
2527 * mergeruns -- merge all the completed initial runs.
2529 * This implements steps D5, D6 of Algorithm D. All input data has
2530 * already been written to initial runs on tape (see dumptuples).
2533 mergeruns(Tuplesortstate *state)
2542 Assert(state->status == TSS_BUILDRUNS);
2543 Assert(state->memtupcount == 0);
2545 if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
2548 * If there are multiple runs to be merged, when we go to read back
2549 * tuples from disk, abbreviated keys will not have been stored, and
2550 * we don't care to regenerate them. Disable abbreviation from this
2553 state->sortKeys->abbrev_converter = NULL;
2554 state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
2556 /* Not strictly necessary, but be tidy */
2557 state->sortKeys->abbrev_abort = NULL;
2558 state->sortKeys->abbrev_full_comparator = NULL;
2562 * Reset tuple memory. We've freed all the tuples that we previously
2563 * allocated. We will use the slab allocator from now on.
2565 MemoryContextDelete(state->tuplecontext);
2566 state->tuplecontext = NULL;
2569 * We no longer need a large memtuples array. (We will allocate a smaller
2570 * one for the heap later.)
2572 FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
2573 pfree(state->memtuples);
2574 state->memtuples = NULL;
2577 * If we had fewer runs than tapes, refund the memory that we imagined we
2578 * would need for the tape buffers of the unused tapes.
2580 * numTapes and numInputTapes reflect the actual number of tapes we will
2581 * use. Note that the output tape's tape number is maxTapes - 1, so the
2582 * tape numbers of the used tapes are not consecutive, and you cannot just
2583 * loop from 0 to numTapes to visit all used tapes!
2585 if (state->Level == 1)
2587 numInputTapes = state->currentRun;
2588 numTapes = numInputTapes + 1;
2589 FREEMEM(state, (state->maxTapes - numTapes) * TAPE_BUFFER_OVERHEAD);
2593 numInputTapes = state->tapeRange;
2594 numTapes = state->maxTapes;
2598 * Initialize the slab allocator. We need one slab slot per input tape,
2599 * for the tuples in the heap, plus one to hold the tuple last returned
2600 * from tuplesort_gettuple. (If we're sorting pass-by-val Datums,
	 * however, we don't need to allocate anything.)
2603 * From this point on, we no longer use the USEMEM()/LACKMEM() mechanism
2604 * to track memory usage of individual tuples.
	if (state->tuples)
		init_slab_allocator(state, numInputTapes + 1);
	else
		init_slab_allocator(state, 0);
2612 * If we produced only one initial run (quite likely if the total data
2613 * volume is between 1X and 2X workMem when replacement selection is used,
	 * but something we particularly count on when input is presorted), we can
2615 * just use that tape as the finished output, rather than doing a useless
2616 * merge. (This obvious optimization is not in Knuth's algorithm.)
2618 if (state->currentRun == RUN_SECOND)
2620 state->result_tape = state->tp_tapenum[state->destTape];
2621 /* must freeze and rewind the finished output tape */
2622 LogicalTapeFreeze(state->tapeset, state->result_tape);
		state->status = TSS_SORTEDONTAPE;
		return;
	}
2628 * Allocate a new 'memtuples' array, for the heap. It will hold one tuple
2629 * from each input tape.
2631 state->memtupsize = numInputTapes;
2632 state->memtuples = (SortTuple *) palloc(numInputTapes * sizeof(SortTuple));
2633 USEMEM(state, GetMemoryChunkSpace(state->memtuples));
	 * Use all the remaining memory we have available for read buffers among
	 * the input tapes.
2639 * We do this only after checking for the case that we produced only one
2640 * initial run, because there is no need to use a large read buffer when
2641 * we're reading from a single tape. With one tape, the I/O pattern will
2642 * be the same regardless of the buffer size.
2644 * We don't try to "rebalance" the memory among tapes, when we start a new
2645 * merge phase, even if some tapes are inactive in the new phase. That
2646 * would be hard, because logtape.c doesn't know where one run ends and
2647 * another begins. When a new merge phase begins, and a tape doesn't
2648 * participate in it, its buffer nevertheless already contains tuples from
2649 * the next run on same tape, so we cannot release the buffer. That's OK
	 * in practice; merge performance isn't that sensitive to the amount of
	 * buffers used, and most merge phases use all or almost all tapes anyway.
2656 elog(LOG, "using " INT64_FORMAT " KB of memory for read buffers among %d input tapes",
2657 (state->availMem) / 1024, numInputTapes);
2660 state->read_buffer_size = Max(state->availMem / numInputTapes, 0);
2661 USEMEM(state, state->read_buffer_size * numInputTapes);
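	/*
	 * For instance (illustrative numbers): with 28MB of availMem remaining
	 * and 12 input tapes, each tape gets a read buffer of about 2.3MB,
	 * letting logtape.c fetch many consecutive blocks per read instead of
	 * one block at a time.
	 */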
2663 /* End of step D2: rewind all output tapes to prepare for merging */
2664 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
2665 LogicalTapeRewindForRead(state->tapeset, tapenum, state->read_buffer_size);
	for (;;)
	{
		 * At this point we know that tape[T] is empty.  If there's just one
2671 * (real or dummy) run left on each input tape, then only one merge
2672 * pass remains. If we don't have to produce a materialized sorted
2673 * tape, we can stop at this point and do the final merge on-the-fly.
2675 if (!state->randomAccess)
2677 bool allOneRun = true;
2679 Assert(state->tp_runs[state->tapeRange] == 0);
2680 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
				if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
				{
					allOneRun = false;
					break;
				}

			if (allOneRun)
			{
				/* Tell logtape.c we won't be writing anymore */
				LogicalTapeSetForgetFreeSpace(state->tapeset);
				/* Initialize for the final merge pass */
				beginmerge(state);
				state->status = TSS_FINALMERGE;
				return;
			}
		}
2699 /* Step D5: merge runs onto tape[T] until tape[P] is empty */
2700 while (state->tp_runs[state->tapeRange - 1] ||
2701 state->tp_dummy[state->tapeRange - 1])
		{
			bool		allDummy = true;

			for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
				if (state->tp_dummy[tapenum] == 0)
				{
					allDummy = false;
					break;
				}

			if (allDummy)
			{
				state->tp_dummy[state->tapeRange]++;
				for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
					state->tp_dummy[tapenum]--;
			}
			else
				mergeonerun(state);
		}
2724 /* Step D6: decrease level */
		if (--state->Level == 0)
			break;
2727 /* rewind output tape T to use as new input */
2728 LogicalTapeRewindForRead(state->tapeset, state->tp_tapenum[state->tapeRange],
2729 state->read_buffer_size);
2730 /* rewind used-up input tape P, and prepare it for write pass */
2731 LogicalTapeRewindForWrite(state->tapeset, state->tp_tapenum[state->tapeRange - 1]);
2732 state->tp_runs[state->tapeRange - 1] = 0;
2735 * reassign tape units per step D6; note we no longer care about A[]
2737 svTape = state->tp_tapenum[state->tapeRange];
2738 svDummy = state->tp_dummy[state->tapeRange];
2739 svRuns = state->tp_runs[state->tapeRange];
2740 for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
2742 state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
2743 state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
2744 state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
2746 state->tp_tapenum[0] = svTape;
2747 state->tp_dummy[0] = svDummy;
2748 state->tp_runs[0] = svRuns;
	}

	 * Done.  Knuth says that the result is on TAPE[1], but since we exited
2753 * the loop without performing the last iteration of step D6, we have not
2754 * rearranged the tape unit assignment, and therefore the result is on
2755 * TAPE[T]. We need to do it this way so that we can freeze the final
2756 * output tape while rewinding it. The last iteration of step D6 would be
2757 * a waste of cycles anyway...
2759 state->result_tape = state->tp_tapenum[state->tapeRange];
2760 LogicalTapeFreeze(state->tapeset, state->result_tape);
2761 state->status = TSS_SORTEDONTAPE;
2763 /* Release the read buffers of all the other tapes, by rewinding them. */
2764 for (tapenum = 0; tapenum < state->maxTapes; tapenum++)
2766 if (tapenum != state->result_tape)
2767 LogicalTapeRewindForWrite(state->tapeset, tapenum);
2772 * Merge one run from each input tape, except ones with dummy runs.
2774 * This is the inner loop of Algorithm D step D5. We know that the
2775 * output tape is TAPE[T].
2778 mergeonerun(Tuplesortstate *state)
2780 int destTape = state->tp_tapenum[state->tapeRange];
2784 * Start the merge by loading one tuple from each active source tape into
2785 * the heap. We can also decrease the input run/dummy run counts.
2790 * Execute merge by repeatedly extracting lowest tuple in heap, writing it
2791 * out, and replacing it with next tuple from same tape (if there is
2794 while (state->memtupcount > 0)
2798 /* write the tuple to destTape */
2799 srcTape = state->memtuples[0].tupindex;
2800 WRITETUP(state, destTape, &state->memtuples[0]);
2802 /* recycle the slot of the tuple we just wrote out, for the next read */
2803 RELEASE_SLAB_SLOT(state, state->memtuples[0].tuple);
2806 * pull next tuple from the tape, and replace the written-out tuple in
2809 if (mergereadnext(state, srcTape, &stup))
		{
			stup.tupindex = srcTape;
			tuplesort_heap_replace_top(state, &stup, false);
		}
		else
			tuplesort_heap_delete_top(state, false);
2820 * When the heap empties, we're done. Write an end-of-run marker on the
2821 * output tape, and increment its count of real runs.
2823 markrunend(state, destTape);
2824 state->tp_runs[state->tapeRange]++;
2828 elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
2829 pg_rusage_show(&state->ru_start));
2834 * beginmerge - initialize for a merge pass
2836 * We decrease the counts of real and dummy runs for each tape, and mark
2837 * which tapes contain active input runs in mergeactive[]. Then, fill the
2838 * merge heap with the first tuple from each active tape.
2841 beginmerge(Tuplesortstate *state)
2847 /* Heap should be empty here */
2848 Assert(state->memtupcount == 0);
2850 /* Adjust run counts and mark the active tapes */
2851 memset(state->mergeactive, 0,
2852 state->maxTapes * sizeof(*state->mergeactive));
2854 for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
	{
		if (state->tp_dummy[tapenum] > 0)
			state->tp_dummy[tapenum]--;
		else
		{
			Assert(state->tp_runs[tapenum] > 0);
			state->tp_runs[tapenum]--;
			srcTape = state->tp_tapenum[tapenum];
			state->mergeactive[srcTape] = true;
			activeTapes++;
		}
	}
2867 Assert(activeTapes > 0);
2868 state->activeTapes = activeTapes;
2870 /* Load the merge heap with the first tuple from each input tape */
2871 for (srcTape = 0; srcTape < state->maxTapes; srcTape++)
2875 if (mergereadnext(state, srcTape, &tup))
2877 tup.tupindex = srcTape;
2878 tuplesort_heap_insert(state, &tup, false);
2884 * mergereadnext - read next tuple from one merge input tape
2886 * Returns false on EOF.
2889 mergereadnext(Tuplesortstate *state, int srcTape, SortTuple *stup)
2891 unsigned int tuplen;
2893 if (!state->mergeactive[srcTape])
2894 return false; /* tape's run is already exhausted */
2896 /* read next tuple, if any */
2897 if ((tuplen = getlen(state, srcTape, true)) == 0)
	{
		state->mergeactive[srcTape] = false;
		return false;
	}
	READTUP(state, stup, srcTape, tuplen);

	return true;
}
2908 * dumptuples - remove tuples from memtuples and write to tape
2910 * This is used during initial-run building, but not during merging.
2912 * When alltuples = false and replacement selection is still active, dump
2913 * only enough tuples to get under the availMem limit (and leave at least
2914 * one tuple in memtuples, since puttuple will then assume it is a heap that
2915 * has a tuple to compare to). We always insist there be at least one free
2916 * slot in the memtuples[] array.
2918 * When alltuples = true, dump everything currently in memory. (This
2919 * case is only used at end of input data, although in practice only the
2920 * first run could fail to dump all tuples when we LACKMEM(), and only
2921 * when replacement selection is active.)
2923 * If, when replacement selection is active, we see that the tuple run
2924 * number at the top of the heap has changed, start a new run. This must be
2925 * the first run, because replacement selection is always abandoned for all
2929 dumptuples(Tuplesortstate *state, bool alltuples)
{
	while (alltuples ||
		   (LACKMEM(state) && state->memtupcount > 1) ||
2933 state->memtupcount >= state->memtupsize)
2935 if (state->replaceActive)
2938 * Still holding out for a case favorable to replacement
2939 * selection. Still incrementally spilling using heap.
2941 * Dump the heap's frontmost entry, and remove it from the heap.
2943 Assert(state->memtupcount > 0);
2944 WRITETUP(state, state->tp_tapenum[state->destTape],
2945 &state->memtuples[0]);
2946 tuplesort_heap_delete_top(state, true);
		}
		else
		{
			/*
			 * Once committed to quicksorting runs, never incrementally spill
			 */
			dumpbatch(state, alltuples);
			break;
		}
2958 * If top run number has changed, we've finished the current run (this
2959 * can only be the first run), and will no longer spill incrementally.
2961 if (state->memtupcount == 0 ||
2962 state->memtuples[0].tupindex == HEAP_RUN_NEXT)
2964 markrunend(state, state->tp_tapenum[state->destTape]);
2965 Assert(state->currentRun == RUN_FIRST);
2966 state->currentRun++;
2967 state->tp_runs[state->destTape]++;
2968 state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
2972 elog(LOG, "finished incrementally writing %s run %d to tape %d: %s",
2973 (state->memtupcount == 0) ? "only" : "first",
2974 state->currentRun, state->destTape,
2975 pg_rusage_show(&state->ru_start));
		 * Done if heap is empty, which is possible when there is only one
		 * long run.
2982 Assert(state->currentRun == RUN_SECOND);
2983 if (state->memtupcount == 0)
2986 * Replacement selection best case; no final merge required,
2987 * because there was only one initial run (second run has no
			 * tuples).  See RUN_SECOND case in mergeruns().
			break;
		}
		 * Abandon replacement selection for second run (as well as any
		 * subsequent runs).
2997 state->replaceActive = false;
3000 * First tuple of next run should not be heapified, and so will
3001 * bear placeholder run number. In practice this must actually be
3002 * the second run, which just became the currentRun, so we're
3003 * clear to quicksort and dump the tuples in batch next time
3004 * memtuples becomes full.
3006 Assert(state->memtuples[0].tupindex == HEAP_RUN_NEXT);
			selectnewtape(state);
		}
	}
}
3013 * dumpbatch - sort and dump all memtuples, forming one run on tape
3015 * Second or subsequent runs are never heapified by this module (although
3016 * heapification still respects run number differences between the first and
3017 * second runs), and a heap (replacement selection priority queue) is often
3018 * avoided in the first place.
3021 dumpbatch(Tuplesortstate *state, bool alltuples)
3027 * Final call might require no sorting, in rare cases where we just so
3028 * happen to have previously LACKMEM()'d at the point where exactly all
	 * remaining tuples are loaded into memory, just before input was
	 * exhausted.
3032 * In general, short final runs are quite possible. Rather than allowing
3033 * a special case where there was a superfluous selectnewtape() call (i.e.
3034 * a call with no subsequent run actually written to destTape), we prefer
3035 * to write out a 0 tuple run.
3037 * mergereadnext() is prepared for 0 tuple runs, and will reliably mark
3038 * the tape inactive for the merge when called from beginmerge(). This
3039 * case is therefore similar to the case where mergeonerun() finds a dummy
3040 * run for the tape, and so doesn't need to merge a run from the tape (or
3041 * conceptually "merges" the dummy run, if you prefer). According to
3042 * Knuth, Algorithm D "isn't strictly optimal" in its method of
3043 * distribution and dummy run assignment; this edge case seems very
3044 * unlikely to make that appreciably worse.
3046 Assert(state->status == TSS_BUILDRUNS);
3049 * It seems unlikely that this limit will ever be exceeded, but take no
3052 if (state->currentRun == INT_MAX)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("cannot have more than %d runs for an external sort",
						INT_MAX)));
3058 state->currentRun++;
3062 elog(LOG, "starting quicksort of run %d: %s",
3063 state->currentRun, pg_rusage_show(&state->ru_start));
3067 * Sort all tuples accumulated within the allowed amount of memory for
3068 * this run using quicksort
3070 tuplesort_sort_memtuples(state);
3074 elog(LOG, "finished quicksort of run %d: %s",
3075 state->currentRun, pg_rusage_show(&state->ru_start));
3078 memtupwrite = state->memtupcount;
3079 for (i = 0; i < memtupwrite; i++)
3081 WRITETUP(state, state->tp_tapenum[state->destTape],
3082 &state->memtuples[i]);
3083 state->memtupcount--;
3087 * Reset tuple memory. We've freed all of the tuples that we previously
3088 * allocated. It's important to avoid fragmentation when there is a stark
3089 * change in the sizes of incoming tuples. Fragmentation due to
3090 * AllocSetFree's bucketing by size class might be particularly bad if
3091 * this step wasn't taken.
3093 MemoryContextReset(state->tuplecontext);
3095 markrunend(state, state->tp_tapenum[state->destTape]);
3096 state->tp_runs[state->destTape]++;
3097 state->tp_dummy[state->destTape]--; /* per Alg D step D2 */
3101 elog(LOG, "finished writing run %d to tape %d: %s",
3102 state->currentRun, state->destTape,
3103 pg_rusage_show(&state->ru_start));
3107 selectnewtape(state);
3111 * tuplesort_rescan - rewind and replay the scan
3114 tuplesort_rescan(Tuplesortstate *state)
3116 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3118 Assert(state->randomAccess);
3120 switch (state->status)
3122 case TSS_SORTEDINMEM:
			state->current = 0;
			state->eof_reached = false;
3125 state->markpos_offset = 0;
3126 state->markpos_eof = false;
3128 case TSS_SORTEDONTAPE:
			LogicalTapeRewindForRead(state->tapeset,
									 state->result_tape,
									 0);
3132 state->eof_reached = false;
3133 state->markpos_block = 0L;
3134 state->markpos_offset = 0;
3135 state->markpos_eof = false;
3138 elog(ERROR, "invalid tuplesort state");
3142 MemoryContextSwitchTo(oldcontext);
3146 * tuplesort_markpos - saves current position in the merged sort file
3149 tuplesort_markpos(Tuplesortstate *state)
3151 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3153 Assert(state->randomAccess);
3155 switch (state->status)
3157 case TSS_SORTEDINMEM:
3158 state->markpos_offset = state->current;
3159 state->markpos_eof = state->eof_reached;
3161 case TSS_SORTEDONTAPE:
			LogicalTapeTell(state->tapeset,
							state->result_tape,
							&state->markpos_block,
							&state->markpos_offset);
3166 state->markpos_eof = state->eof_reached;
3169 elog(ERROR, "invalid tuplesort state");
3173 MemoryContextSwitchTo(oldcontext);
3177 * tuplesort_restorepos - restores current position in merged sort file to
3178 * last saved position
3181 tuplesort_restorepos(Tuplesortstate *state)
3183 MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);
3185 Assert(state->randomAccess);
3187 switch (state->status)
3189 case TSS_SORTEDINMEM:
3190 state->current = state->markpos_offset;
3191 state->eof_reached = state->markpos_eof;
3193 case TSS_SORTEDONTAPE:
			LogicalTapeSeek(state->tapeset,
							state->result_tape,
							state->markpos_block,
							state->markpos_offset);
3198 state->eof_reached = state->markpos_eof;
3201 elog(ERROR, "invalid tuplesort state");
3205 MemoryContextSwitchTo(oldcontext);
3209 * tuplesort_get_stats - extract summary statistics
3211 * This can be called after tuplesort_performsort() finishes to obtain
3212 * printable summary information about how the sort was performed.
3213 * spaceUsed is measured in kilobytes.
3216 tuplesort_get_stats(Tuplesortstate *state,
3217 const char **sortMethod,
3218 const char **spaceType,
3222 * Note: it might seem we should provide both memory and disk usage for a
3223 * disk-based sort. However, the current code doesn't track memory space
3224 * accurately once we have begun to return tuples to the caller (since we
3225 * don't account for pfree's the caller is expected to do), so we cannot
3226 * rely on availMem in a disk sort. This does not seem worth the overhead
3227 * to fix. Is it worth creating an API for the memory context code to
3228 * tell us how much is actually used in sortcontext?
	if (state->tapeset)
	{
		*spaceType = "Disk";
		*spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024);
	}
	else
	{
		*spaceType = "Memory";
		*spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
	}
3241 switch (state->status)
3243 case TSS_SORTEDINMEM:
3244 if (state->boundUsed)
3245 *sortMethod = "top-N heapsort";
3247 *sortMethod = "quicksort";
3249 case TSS_SORTEDONTAPE:
3250 *sortMethod = "external sort";
3252 case TSS_FINALMERGE:
3253 *sortMethod = "external merge";
3256 *sortMethod = "still in progress";
3263 * Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
3265 * Compare two SortTuples. If checkIndex is true, use the tuple index
3266 * as the front of the sort key; otherwise, no.
3268 * Note that for checkIndex callers, the heap invariant is never
3269 * maintained beyond the first run, and so there are no COMPARETUP()
3270 * calls needed to distinguish tuples in HEAP_RUN_NEXT.
3273 #define HEAPCOMPARE(tup1,tup2) \
3274 (checkIndex && ((tup1)->tupindex != (tup2)->tupindex || \
3275 (tup1)->tupindex == HEAP_RUN_NEXT) ? \
3276 ((tup1)->tupindex) - ((tup2)->tupindex) : \
3277 COMPARETUP(state, tup1, tup2))
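/*
 * Example (illustrative): with checkIndex true, a tuple marked RUN_FIRST
 * sorts before one marked HEAP_RUN_NEXT regardless of key values, since
 * the tupindex difference alone decides the comparison.  Two
 * HEAP_RUN_NEXT tuples also short-circuit (the macro yields 0 without
 * calling COMPARETUP), which is fine because their relative order is
 * irrelevant: they will be quicksorted later.
 */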
3280 * Convert the existing unordered array of SortTuples to a bounded heap,
3281 * discarding all but the smallest "state->bound" tuples.
3283 * When working with a bounded heap, we want to keep the largest entry
3284 * at the root (array entry zero), instead of the smallest as in the normal
3285 * sort case. This allows us to discard the largest entry cheaply.
3286 * Therefore, we temporarily reverse the sort direction.
3288 * We assume that all entries in a bounded heap will always have tupindex
3289 * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse
3290 * the direction of comparison for tupindexes.
3293 make_bounded_heap(Tuplesortstate *state)
3295 int tupcount = state->memtupcount;
3298 Assert(state->status == TSS_INITIAL);
3299 Assert(state->bounded);
3300 Assert(tupcount >= state->bound);
3302 /* Reverse sort direction so largest entry will be at root */
3303 reversedirection(state);
3305 state->memtupcount = 0; /* make the heap empty */
3306 for (i = 0; i < tupcount; i++)
3308 if (state->memtupcount < state->bound)
3310 /* Insert next tuple into heap */
3311 /* Must copy source tuple to avoid possible overwrite */
3312 SortTuple stup = state->memtuples[i];
3314 stup.tupindex = 0; /* not used */
3315 tuplesort_heap_insert(state, &stup, false);
3320 * The heap is full. Replace the largest entry with the new
			 * tuple, or just discard it, if it's larger than anything
			 * already in the heap.
3324 if (COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0)
			{
				free_sort_tuple(state, &state->memtuples[i]);
				CHECK_FOR_INTERRUPTS();
			}
			else
				tuplesort_heap_replace_top(state, &state->memtuples[i], false);
3334 Assert(state->memtupcount == state->bound);
3335 state->status = TSS_BOUNDED;
3339 * Convert the bounded heap to a properly-sorted array
3342 sort_bounded_heap(Tuplesortstate *state)
3344 int tupcount = state->memtupcount;
3346 Assert(state->status == TSS_BOUNDED);
3347 Assert(state->bounded);
3348 Assert(tupcount == state->bound);
3351 * We can unheapify in place because each delete-top call will remove the
3352 * largest entry, which we can promptly store in the newly freed slot at
3353 * the end. Once we're down to a single-entry heap, we're done.
3355 while (state->memtupcount > 1)
3357 SortTuple stup = state->memtuples[0];
3359 /* this sifts-up the next-largest entry and decreases memtupcount */
3360 tuplesort_heap_delete_top(state, false);
3361 state->memtuples[state->memtupcount] = stup;
3363 state->memtupcount = tupcount;
3366 * Reverse sort direction back to the original state. This is not
3367 * actually necessary but seems like a good idea for tidiness.
3369 reversedirection(state);
3371 state->status = TSS_SORTEDINMEM;
3372 state->boundUsed = true;
3376 * Sort all memtuples using specialized qsort() routines.
3378 * Quicksort is used for small in-memory sorts. Quicksort is also generally
3379 * preferred to replacement selection for generating runs during external sort
3380 * operations, although replacement selection is sometimes used for the first
3384 tuplesort_sort_memtuples(Tuplesortstate *state)
3386 if (state->memtupcount > 1)
3388 /* Can we use the single-key sort function? */
3389 if (state->onlyKey != NULL)
			qsort_ssup(state->memtuples, state->memtupcount,
					   state->onlyKey);
		else
			qsort_tuple(state->memtuples,
						state->memtupcount,
						state->comparetup,
						state);
3401 * Insert a new tuple into an empty or existing heap, maintaining the
3402 * heap invariant. Caller is responsible for ensuring there's room.
3404 * Note: For some callers, tuple points to a memtuples[] entry above the
3405 * end of the heap. This is safe as long as it's not immediately adjacent
3406 * to the end of the heap (ie, in the [memtupcount] array entry) --- if it
3407 * is, it might get overwritten before being moved into the heap!
3410 tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
					   bool checkIndex)
{
	SortTuple  *memtuples;
	int			j;
3416 memtuples = state->memtuples;
3417 Assert(state->memtupcount < state->memtupsize);
3418 Assert(!checkIndex || tuple->tupindex == RUN_FIRST);
3420 CHECK_FOR_INTERRUPTS();
3423 * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
3424 * using 1-based array indexes, not 0-based.
	j = state->memtupcount++;
	while (j > 0)
	{
		int			i = (j - 1) >> 1;

		if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
			break;
		memtuples[j] = memtuples[i];
		j = i;
	}
	memtuples[j] = *tuple;
}
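/*
 * Worked example (illustrative): inserting key 2 into the heap [1, 5, 3]
 * places it at j = 3; its parent, at (3 - 1) >> 1 = 1, holds 5, so 5
 * moves down and j becomes 1; the next parent (1, at index 0) is not
 * greater, so the loop stops and 2 lands at index 1, giving [1, 2, 3, 5].
 */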
3440 * Remove the tuple at state->memtuples[0] from the heap. Decrement
3441 * memtupcount, and sift up to maintain the heap invariant.
3443 * The caller has already free'd the tuple the top node points to,
3447 tuplesort_heap_delete_top(Tuplesortstate *state, bool checkIndex)
{
	SortTuple  *memtuples = state->memtuples;
	SortTuple  *tuple;
3452 Assert(!checkIndex || state->currentRun == RUN_FIRST);
	if (--state->memtupcount <= 0)
		return;
3457 * Remove the last tuple in the heap, and re-insert it, by replacing the
3458 * current top node with it.
3460 tuple = &memtuples[state->memtupcount];
3461 tuplesort_heap_replace_top(state, tuple, checkIndex);
3465 * Replace the tuple at state->memtuples[0] with a new tuple. Sift up to
3466 * maintain the heap invariant.
3468 * This corresponds to Knuth's "sift-up" algorithm (Algorithm 5.2.3H,
3469 * Heapsort, steps H3-H8).
3472 tuplesort_heap_replace_top(Tuplesortstate *state, SortTuple *tuple,
						   bool checkIndex)
{
	SortTuple  *memtuples = state->memtuples;
	unsigned int i,
				n;
3479 Assert(!checkIndex || state->currentRun == RUN_FIRST);
3480 Assert(state->memtupcount >= 1);
3482 CHECK_FOR_INTERRUPTS();
	n = state->memtupcount;
	i = 0;						/* i is where the "hole" is */
	for (;;)
	{
		unsigned int j = 2 * i + 1;

		if (j >= n)
			break;
		if (j + 1 < n &&
			HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
			j++;
		if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
			break;
		memtuples[i] = memtuples[j];
		i = j;
	}
	memtuples[i] = *tuple;
}
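/*
 * Worked example (illustrative): replacing the top of [1, 2, 3, 5] with
 * key 6 starts with the hole at index 0.  Children 2 and 3 are compared
 * and the smaller (2) moves up, the hole descending to index 1; its only
 * child, 5, is also smaller than 6, so 5 moves up as well, leaving 6 at
 * index 3 and the heap [2, 5, 3, 6].
 */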
3504 * Function to reverse the sort direction from its current state
3506 * It is not safe to call this when performing hash tuplesorts
3509 reversedirection(Tuplesortstate *state)
3511 SortSupport sortKey = state->sortKeys;
3514 for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
3516 sortKey->ssup_reverse = !sortKey->ssup_reverse;
3517 sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first;
3523 * Tape interface routines
3527 getlen(Tuplesortstate *state, int tapenum, bool eofOK)
3531 if (LogicalTapeRead(state->tapeset, tapenum,
3532 &len, sizeof(len)) != sizeof(len))
3533 elog(ERROR, "unexpected end of tape");
	if (len == 0 && !eofOK)
		elog(ERROR, "unexpected end of data");
	return len;
}
3540 markrunend(Tuplesortstate *state, int tapenum)
3542 unsigned int len = 0;
3544 LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len));
3548 * Get memory for tuple from within READTUP() routine.
3550 * We use next free slot from the slab allocator, or palloc() if the tuple
3551 * is too large for that.
static void *
readtup_alloc(Tuplesortstate *state, Size tuplen)
{
	SlabSlot   *buf;

	/*
	 * We pre-allocate enough slots in the slab arena that we should never
	 * run out.
	 */
	Assert(state->slabFreeHead);

	if (tuplen > SLAB_SLOT_SIZE || !state->slabFreeHead)
		return MemoryContextAlloc(state->sortcontext, tuplen);
	else
	{
		buf = state->slabFreeHead;
		/* Reuse this slot */
		state->slabFreeHead = buf->nextfree;
		return buf;
	}
}
3578 * Routines specialized for HeapTuple (actually MinimalTuple) case
3582 comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
3584 SortSupport sortKey = state->sortKeys;
3597 /* Compare the leading sort key */
3598 compare = ApplySortComparator(a->datum1, a->isnull1,
								  b->datum1, b->isnull1,
								  sortKey);
	if (compare != 0)
		return compare;
3604 /* Compare additional sort keys */
3605 ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3606 ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
3607 rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
3608 rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
3609 tupDesc = state->tupDesc;
3611 if (sortKey->abbrev_converter)
3613 attno = sortKey->ssup_attno;
		datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3616 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
		compare = ApplySortAbbrevFullComparator(datum1, isnull1,
												datum2, isnull2,
												sortKey);
		if (compare != 0)
			return compare;
	}

	sortKey++;
3626 for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++)
3628 attno = sortKey->ssup_attno;
		datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
3631 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
		compare = ApplySortComparator(datum1, isnull1,
									  datum2, isnull2,
									  sortKey);
		if (compare != 0)
			return compare;
	}

	return 0;
}
3644 copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
3647 * We expect the passed "tup" to be a TupleTableSlot, and form a
3648 * MinimalTuple using the exported interface for that.
3650 TupleTableSlot *slot = (TupleTableSlot *) tup;
3654 MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);
3656 /* copy the tuple into sort storage */
3657 tuple = ExecCopySlotMinimalTuple(slot);
3658 stup->tuple = (void *) tuple;
3659 USEMEM(state, GetMemoryChunkSpace(tuple));
3660 /* set up first-column key value */
3661 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3662 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3663 original = heap_getattr(&htup,
							state->sortKeys[0].ssup_attno,
							state->tupDesc,
							&stup->isnull1);
3668 MemoryContextSwitchTo(oldcontext);
3670 if (!state->sortKeys->abbrev_converter || stup->isnull1)
3673 * Store ordinary Datum representation, or NULL value. If there is a
3674 * converter it won't expect NULL values, and cost model is not
3675 * required to account for NULL, so in that case we avoid calling
3676 * converter and just set datum1 to zeroed representation (to be
3677 * consistent, and to support cheap inequality tests for NULL
3678 * abbreviated keys).
3680 stup->datum1 = original;
3682 else if (!consider_abort_common(state))
3684 /* Store abbreviated key representation */
		stup->datum1 = state->sortKeys->abbrev_converter(original,
														 state->sortKeys);
3690 /* Abort abbreviation */
3693 stup->datum1 = original;
3696 * Set state to be consistent with never trying abbreviation.
3698 * Alter datum1 representation in already-copied tuples, so as to
3699 * ensure a consistent representation (current tuple was just
3700 * handled). It does not matter if some dumped tuples are already
3701 * sorted on tape, since serialized tuples lack abbreviated keys
3702 * (TSS_BUILDRUNS state prevents control reaching here in any case).
3704 for (i = 0; i < state->memtupcount; i++)
3706 SortTuple *mtup = &state->memtuples[i];
3708 htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
3709 MINIMAL_TUPLE_OFFSET;
3710 htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
3711 MINIMAL_TUPLE_OFFSET);
3713 mtup->datum1 = heap_getattr(&htup,
										state->sortKeys[0].ssup_attno,
										state->tupDesc,
										&mtup->isnull1);
		}
	}
}
3722 writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup)
3724 MinimalTuple tuple = (MinimalTuple) stup->tuple;
3726 /* the part of the MinimalTuple we'll write: */
3727 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3728 unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
3730 /* total on-disk footprint: */
3731 unsigned int tuplen = tupbodylen + sizeof(int);
3733 LogicalTapeWrite(state->tapeset, tapenum,
3734 (void *) &tuplen, sizeof(tuplen));
3735 LogicalTapeWrite(state->tapeset, tapenum,
3736 (void *) tupbody, tupbodylen);
3737 if (state->randomAccess) /* need trailing length word? */
3738 LogicalTapeWrite(state->tapeset, tapenum,
3739 (void *) &tuplen, sizeof(tuplen));
3741 if (!state->slabAllocatorUsed)
3743 FREEMEM(state, GetMemoryChunkSpace(tuple));
		heap_free_minimal_tuple(tuple);
	}
}
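/*
 * On-disk record format produced above and consumed by readtup_heap
 * below: a length word whose value counts the body plus one length word,
 * the MinimalTuple body with its first MINIMAL_TUPLE_DATA_OFFSET bytes
 * stripped, and, for randomAccess sorts only, a duplicate trailing
 * length word that makes backward scans possible:
 *
 *   [tuplen][tuple body]([tuplen])
 */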
3749 readtup_heap(Tuplesortstate *state, SortTuple *stup,
3750 int tapenum, unsigned int len)
3752 unsigned int tupbodylen = len - sizeof(int);
3753 unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
3754 MinimalTuple tuple = (MinimalTuple) readtup_alloc(state, tuplen);
3755 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
3758 /* read in the tuple proper */
3759 tuple->t_len = tuplen;
3760 LogicalTapeReadExact(state->tapeset, tapenum,
3761 tupbody, tupbodylen);
3762 if (state->randomAccess) /* need trailing length word? */
3763 LogicalTapeReadExact(state->tapeset, tapenum,
3764 &tuplen, sizeof(tuplen));
3765 stup->tuple = (void *) tuple;
3766 /* set up first-column key value */
3767 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
3768 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
3769 stup->datum1 = heap_getattr(&htup,
								state->sortKeys[0].ssup_attno,
								state->tupDesc,
								&stup->isnull1);
}
3776 * Routines specialized for the CLUSTER case (HeapTuple data, with
3777 * comparisons per a btree index definition)
3781 comparetup_cluster(const SortTuple *a, const SortTuple *b,
3782 Tuplesortstate *state)
3784 SortSupport sortKey = state->sortKeys;
3794 AttrNumber leading = state->indexInfo->ii_KeyAttrNumbers[0];
3796 /* Be prepared to compare additional sort keys */
3797 ltup = (HeapTuple) a->tuple;
3798 rtup = (HeapTuple) b->tuple;
3799 tupDesc = state->tupDesc;
3801 /* Compare the leading sort key, if it's simple */
	if (leading != 0)
	{
		compare = ApplySortComparator(a->datum1, a->isnull1,
									  b->datum1, b->isnull1,
									  sortKey);
		if (compare != 0)
			return compare;
3810 if (sortKey->abbrev_converter)
3812 datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
3813 datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
			compare = ApplySortAbbrevFullComparator(datum1, isnull1,
													datum2, isnull2,
													sortKey);
		}
3819 if (compare != 0 || state->nKeys == 1)
3821 /* Compare additional columns the hard way */
3827 /* Must compare all keys the hard way */
3831 if (state->indexInfo->ii_Expressions == NULL)
3833 /* If not expression index, just compare the proper heap attrs */
3835 for (; nkey < state->nKeys; nkey++, sortKey++)
3837 AttrNumber attno = state->indexInfo->ii_KeyAttrNumbers[nkey];
3839 datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
3840 datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
3842 compare = ApplySortComparator(datum1, isnull1,
3852 * In the expression index case, compute the whole index tuple and
3853 * then compare values. It would perhaps be faster to compute only as
3854 * many columns as we need to compare, but that would require
3855 * duplicating all the logic in FormIndexDatum.
3857 Datum l_index_values[INDEX_MAX_KEYS];
3858 bool l_index_isnull[INDEX_MAX_KEYS];
3859 Datum r_index_values[INDEX_MAX_KEYS];
3860 bool r_index_isnull[INDEX_MAX_KEYS];
3861 TupleTableSlot *ecxt_scantuple;
3863 /* Reset context each time to prevent memory leakage */
3864 ResetPerTupleExprContext(state->estate);
3866 ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple;
3868 ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false);
3869 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3870 l_index_values, l_index_isnull);
3872 ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false);
3873 FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate,
3874 r_index_values, r_index_isnull);
3876 for (; nkey < state->nKeys; nkey++, sortKey++)
3878 compare = ApplySortComparator(l_index_values[nkey],
3879 l_index_isnull[nkey],
3880 r_index_values[nkey],
3881 r_index_isnull[nkey],
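
/*
 * Example of the dispatch above (the DDL is hypothetical): for CLUSTER
 * using an index built with
 *
 *     CREATE INDEX ... ON tab (a, b);
 *
 * ii_KeyAttrNumbers[0] is a's attribute number, so "leading" is nonzero
 * and the cached datum1 fast path applies.  For an expression index such
 * as
 *
 *     CREATE INDEX ... ON tab ((a + b));
 *
 * ii_KeyAttrNumbers[0] is 0, and every comparison must go through
 * FormIndexDatum() as in the else-branch.
 */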

static void
copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	HeapTuple	tuple = (HeapTuple) tup;
	Datum		original;
	MemoryContext oldcontext = MemoryContextSwitchTo(state->tuplecontext);

	/* copy the tuple into sort storage */
	tuple = heap_copytuple(tuple);
	stup->tuple = (void *) tuple;
	USEMEM(state, GetMemoryChunkSpace(tuple));

	MemoryContextSwitchTo(oldcontext);

	/*
	 * set up first-column key value, and potentially abbreviate, if it's a
	 * simple column
	 */
	if (state->indexInfo->ii_KeyAttrNumbers[0] == 0)
		return;

	original = heap_getattr(tuple,
							state->indexInfo->ii_KeyAttrNumbers[0],
							state->tupDesc,
							&stup->isnull1);

	if (!state->sortKeys->abbrev_converter || stup->isnull1)
	{
		/*
		 * Store ordinary Datum representation, or NULL value.  If there is a
		 * converter it won't expect NULL values, and cost model is not
		 * required to account for NULL, so in that case we avoid calling
		 * converter and just set datum1 to zeroed representation (to be
		 * consistent, and to support cheap inequality tests for NULL
		 * abbreviated keys).
		 */
		stup->datum1 = original;
	}
	else if (!consider_abort_common(state))
	{
		/* Store abbreviated key representation */
		stup->datum1 = state->sortKeys->abbrev_converter(original,
														 state->sortKeys);
	}
	else
	{
		/* Abort abbreviation */
		int			i;

		stup->datum1 = original;

		/*
		 * Set state to be consistent with never trying abbreviation.
		 *
		 * Alter datum1 representation in already-copied tuples, so as to
		 * ensure a consistent representation (current tuple was just
		 * handled).  It does not matter if some dumped tuples are already
		 * sorted on tape, since serialized tuples lack abbreviated keys
		 * (TSS_BUILDRUNS state prevents control reaching here in any case).
		 */
		for (i = 0; i < state->memtupcount; i++)
		{
			SortTuple  *mtup = &state->memtuples[i];

			tuple = (HeapTuple) mtup->tuple;
			mtup->datum1 = heap_getattr(tuple,
										state->indexInfo->ii_KeyAttrNumbers[0],
										state->tupDesc,
										&mtup->isnull1);
		}
	}
}

static void
writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	HeapTuple	tuple = (HeapTuple) stup->tuple;
	unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);

	/* We need to store t_self, but not other fields of HeapTupleData */
	LogicalTapeWrite(state->tapeset, tapenum,
					 &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 &tuple->t_self, sizeof(ItemPointerData));
	LogicalTapeWrite(state->tapeset, tapenum,
					 tuple->t_data, tuple->t_len);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 &tuplen, sizeof(tuplen));

	if (!state->slabAllocatorUsed)
	{
		FREEMEM(state, GetMemoryChunkSpace(tuple));
		heap_freetuple(tuple);
	}
}
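
/*
 * On-tape record shape produced above, for reference:
 *
 *     [tuplen][t_self, sizeof(ItemPointerData) bytes][t_data, t_len bytes]
 *     [tuplen, randomAccess only]
 *
 * Unlike the heap case, the tuple's original t_self is preserved across
 * the round trip; readtup_cluster() reconstitutes the remaining
 * HeapTupleData fields itself (t_tableOid is simply reset to InvalidOid).
 */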

static void
readtup_cluster(Tuplesortstate *state, SortTuple *stup,
				int tapenum, unsigned int tuplen)
{
	unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
	HeapTuple	tuple = (HeapTuple) readtup_alloc(state,
												  t_len + HEAPTUPLESIZE);

	/* Reconstruct the HeapTupleData header */
	tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
	tuple->t_len = t_len;
	LogicalTapeReadExact(state->tapeset, tapenum,
						 &tuple->t_self, sizeof(ItemPointerData));
	/* We don't currently bother to reconstruct t_tableOid */
	tuple->t_tableOid = InvalidOid;
	/* Read in the tuple body */
	LogicalTapeReadExact(state->tapeset, tapenum,
						 tuple->t_data, tuple->t_len);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
	stup->tuple = (void *) tuple;
	/* set up first-column key value, if it's a simple column */
	if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
		stup->datum1 = heap_getattr(tuple,
									state->indexInfo->ii_KeyAttrNumbers[0],
									state->tupDesc,
									&stup->isnull1);
}

/*
 * Routines specialized for IndexTuple case
 *
 * The btree and hash cases require separate comparison functions, but the
 * IndexTuple representation is the same so the copy/write/read support
 * functions can be shared.
 */

static int
comparetup_index_btree(const SortTuple *a, const SortTuple *b,
					   Tuplesortstate *state)
{
	/*
	 * This is similar to comparetup_heap(), but expects index tuples.  There
	 * is also special handling for enforcing uniqueness, and special
	 * treatment for equal keys at the end.
	 */
	SortSupport sortKey = state->sortKeys;
	IndexTuple	tuple1;
	IndexTuple	tuple2;
	int			keysz;
	TupleDesc	tupDes;
	bool		equal_hasnull = false;
	int			nkey;
	int32		compare;
	Datum		datum1, datum2;
	bool		isnull1, isnull2;

	/* Compare the leading sort key */
	compare = ApplySortComparator(a->datum1, a->isnull1,
								  b->datum1, b->isnull1,
								  sortKey);
	if (compare != 0)
		return compare;

	/* Compare additional sort keys */
	tuple1 = (IndexTuple) a->tuple;
	tuple2 = (IndexTuple) b->tuple;
	keysz = state->nKeys;
	tupDes = RelationGetDescr(state->indexRel);

	if (sortKey->abbrev_converter)
	{
		datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
		datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
		compare = ApplySortAbbrevFullComparator(datum1, isnull1,
												datum2, isnull2,
												sortKey);
		if (compare != 0)
			return compare;
	}

	/* they are equal, so we only need to examine one null flag */
	if (a->isnull1)
		equal_hasnull = true;

	sortKey++;
	for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
	{
		datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
		datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
		compare = ApplySortComparator(datum1, isnull1,
									  datum2, isnull2,
									  sortKey);
		if (compare != 0)
			return compare;		/* done when we find unequal attributes */

		/* they are equal, so we only need to examine one null flag */
		if (isnull1)
			equal_hasnull = true;
	}

	/*
	 * If btree has asked us to enforce uniqueness, complain if two equal
	 * tuples are detected (unless there was at least one NULL field).
	 *
	 * It is sufficient to make the test here, because if two tuples are equal
	 * they *must* get compared at some stage of the sort --- otherwise the
	 * sort algorithm wouldn't have checked whether one must appear before the
	 * other.
	 */
	if (state->enforceUnique && !equal_hasnull)
	{
		Datum		values[INDEX_MAX_KEYS];
		bool		isnull[INDEX_MAX_KEYS];
		char	   *key_desc;

		/*
		 * Some rather brain-dead implementations of qsort (such as the one in
		 * QNX 4) will sometimes call the comparison routine to compare a
		 * value to itself, but we always use our own implementation, which
		 * does not.
		 */
		Assert(tuple1 != tuple2);

		index_deform_tuple(tuple1, tupDes, values, isnull);

		key_desc = BuildIndexValueDescription(state->indexRel, values, isnull);

		ereport(ERROR,
				(errcode(ERRCODE_UNIQUE_VIOLATION),
				 errmsg("could not create unique index \"%s\"",
						RelationGetRelationName(state->indexRel)),
				 key_desc ? errdetail("Key %s is duplicated.", key_desc) :
				 errdetail("Duplicate keys exist."),
				 errtableconstraint(state->heapRel,
									RelationGetRelationName(state->indexRel))));
	}

	/*
	 * If key values are equal, we sort on ItemPointer.  This does not affect
	 * validity of the finished index, but it may be useful to have index
	 * scans in physical order.
	 */
	{
		BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
		BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

		if (blk1 != blk2)
			return (blk1 < blk2) ? -1 : 1;
	}
	{
		OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
		OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

		if (pos1 != pos2)
			return (pos1 < pos2) ? -1 : 1;
	}

	return 0;
}
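
/*
 * Worked example of the TID tie-break above (TID values invented): keys
 * being equal, a tuple at (block 3, offset 17) sorts before one at
 * (block 4, offset 1), which sorts before (block 4, offset 2).  That is,
 * duplicates emerge in physical heap order: block first, then line
 * pointer.
 */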

static int
comparetup_index_hash(const SortTuple *a, const SortTuple *b,
					  Tuplesortstate *state)
{
	uint32		hash1, hash2;
	IndexTuple	tuple1, tuple2;

	/*
	 * Fetch hash keys and mask off bits we don't want to sort by. We know
	 * that the first column of the index tuple is the hash key.
	 */
	Assert(!a->isnull1);
	hash1 = DatumGetUInt32(a->datum1) & state->hash_mask;
	Assert(!b->isnull1);
	hash2 = DatumGetUInt32(b->datum1) & state->hash_mask;

	if (hash1 > hash2)
		return 1;
	else if (hash1 < hash2)
		return -1;

	/*
	 * If hash values are equal, we sort on ItemPointer.  This does not affect
	 * validity of the finished index, but it may be useful to have index
	 * scans in physical order.
	 */
	tuple1 = (IndexTuple) a->tuple;
	tuple2 = (IndexTuple) b->tuple;

	{
		BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
		BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

		if (blk1 != blk2)
			return (blk1 < blk2) ? -1 : 1;
	}
	{
		OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
		OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

		if (pos1 != pos2)
			return (pos1 < pos2) ? -1 : 1;
	}

	return 0;
}
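
/*
 * Worked example of the masking above (the mask value is hypothetical):
 * with hash_mask = 0x3FF only the low 10 bits of each hash code are
 * compared, so tuples hashing to 0x12345 and 0x8F745 are "equal" here
 * (both mask to 0x345) and fall through to the TID tie-break.  The intent,
 * per the comment above, is to sort only on the bits that matter for the
 * index build, grouping tuples that will land near each other.
 */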

static void
copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	IndexTuple	tuple = (IndexTuple) tup;
	unsigned int tuplen = IndexTupleSize(tuple);
	IndexTuple	newtuple;
	Datum		original;

	/* copy the tuple into sort storage */
	newtuple = (IndexTuple) MemoryContextAlloc(state->tuplecontext, tuplen);
	memcpy(newtuple, tuple, tuplen);
	USEMEM(state, GetMemoryChunkSpace(newtuple));
	stup->tuple = (void *) newtuple;
	/* set up first-column key value */
	original = index_getattr(newtuple,
							 1,
							 RelationGetDescr(state->indexRel),
							 &stup->isnull1);

	if (!state->sortKeys->abbrev_converter || stup->isnull1)
	{
		/*
		 * Store ordinary Datum representation, or NULL value.  If there is a
		 * converter it won't expect NULL values, and cost model is not
		 * required to account for NULL, so in that case we avoid calling
		 * converter and just set datum1 to zeroed representation (to be
		 * consistent, and to support cheap inequality tests for NULL
		 * abbreviated keys).
		 */
		stup->datum1 = original;
	}
	else if (!consider_abort_common(state))
	{
		/* Store abbreviated key representation */
		stup->datum1 = state->sortKeys->abbrev_converter(original,
														 state->sortKeys);
	}
	else
	{
		/* Abort abbreviation */
		int			i;

		stup->datum1 = original;

		/*
		 * Set state to be consistent with never trying abbreviation.
		 *
		 * Alter datum1 representation in already-copied tuples, so as to
		 * ensure a consistent representation (current tuple was just
		 * handled).  It does not matter if some dumped tuples are already
		 * sorted on tape, since serialized tuples lack abbreviated keys
		 * (TSS_BUILDRUNS state prevents control reaching here in any case).
		 */
		for (i = 0; i < state->memtupcount; i++)
		{
			SortTuple  *mtup = &state->memtuples[i];

			tuple = (IndexTuple) mtup->tuple;
			mtup->datum1 = index_getattr(tuple,
										 1,
										 RelationGetDescr(state->indexRel),
										 &mtup->isnull1);
		}
	}
}

static void
writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	IndexTuple	tuple = (IndexTuple) stup->tuple;
	unsigned int tuplen;

	tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &tuplen, sizeof(tuplen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) tuple, IndexTupleSize(tuple));
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &tuplen, sizeof(tuplen));

	if (!state->slabAllocatorUsed)
	{
		FREEMEM(state, GetMemoryChunkSpace(tuple));
		pfree(tuple);
	}
}
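
/*
 * Note the contrast with writetup_heap(): an IndexTuple records its own
 * size in its header (that is what IndexTupleSize() reads), so the tuple
 * is written verbatim and readtup_index() can reload it with a single read
 * of len - sizeof(unsigned int) bytes, with no header surgery required.
 */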

static void
readtup_index(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len)
{
	unsigned int tuplen = len - sizeof(unsigned int);
	IndexTuple	tuple = (IndexTuple) readtup_alloc(state, tuplen);

	LogicalTapeReadExact(state->tapeset, tapenum,
						 tuple, tuplen);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
	stup->tuple = (void *) tuple;
	/* set up first-column key value */
	stup->datum1 = index_getattr(tuple,
								 1,
								 RelationGetDescr(state->indexRel),
								 &stup->isnull1);
}

/*
 * Routines specialized for DatumTuple case
 */

static int
comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
	int			compare;

	compare = ApplySortComparator(a->datum1, a->isnull1,
								  b->datum1, b->isnull1,
								  state->sortKeys);
	if (compare != 0)
		return compare;

	/* if we have abbreviations, then "tuple" has the original value */
	if (state->sortKeys->abbrev_converter)
		compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
												PointerGetDatum(b->tuple), b->isnull1,
												state->sortKeys);

	return compare;
}
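
/*
 * For orientation, a sketch of how a caller drives the datum-sort path
 * that the routines in this section serve (argument lists abridged;
 * tuplesort.h has the authoritative declarations, and the element type and
 * flags shown are arbitrary examples):
 *
 *     Tuplesortstate *s = tuplesort_begin_datum(INT8OID, sortop, collation,
 *                                               false, work_mem, false);
 *     tuplesort_putdatum(s, value, isnull);     -- once per input value
 *     tuplesort_performsort(s);
 *     ... fetch sorted values with tuplesort_getdatum() ...
 *     tuplesort_end(s);
 */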

static void
copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup)
{
	/* Not currently needed */
	elog(ERROR, "copytup_datum() should not be called");
}

static void
writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup)
{
	void	   *waddr;
	unsigned int tuplen;
	unsigned int writtenlen;

	if (stup->isnull1)
	{
		waddr = NULL;
		tuplen = 0;
	}
	else if (!state->tuples)
	{
		waddr = &stup->datum1;
		tuplen = sizeof(Datum);
	}
	else
	{
		waddr = stup->tuple;
		tuplen = datumGetSize(PointerGetDatum(stup->tuple), false,
							  state->datumTypeLen);
		Assert(tuplen != 0);
	}

	writtenlen = tuplen + sizeof(unsigned int);

	LogicalTapeWrite(state->tapeset, tapenum,
					 (void *) &writtenlen, sizeof(writtenlen));
	LogicalTapeWrite(state->tapeset, tapenum,
					 waddr, tuplen);
	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeWrite(state->tapeset, tapenum,
						 (void *) &writtenlen, sizeof(writtenlen));

	if (!state->slabAllocatorUsed && stup->tuple)
	{
		FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
		pfree(stup->tuple);
	}
}

static void
readtup_datum(Tuplesortstate *state, SortTuple *stup,
			  int tapenum, unsigned int len)
{
	unsigned int tuplen = len - sizeof(unsigned int);

	if (tuplen == 0)
	{
		/* it's NULL */
		stup->datum1 = (Datum) 0;
		stup->isnull1 = true;
		stup->tuple = NULL;
	}
	else if (!state->tuples)
	{
		Assert(tuplen == sizeof(Datum));
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &stup->datum1, tuplen);
		stup->isnull1 = false;
		stup->tuple = NULL;
	}
	else
	{
		void	   *raddr = readtup_alloc(state, tuplen);

		LogicalTapeReadExact(state->tapeset, tapenum,
							 raddr, tuplen);
		stup->datum1 = PointerGetDatum(raddr);
		stup->isnull1 = false;
		stup->tuple = raddr;
	}

	if (state->randomAccess)	/* need trailing length word? */
		LogicalTapeReadExact(state->tapeset, tapenum,
							 &tuplen, sizeof(tuplen));
}
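
/*
 * Summary of the three record shapes the datum routines exchange (widths
 * follow from the code; the leading length word counts itself, and
 * sizeof(unsigned int) is 4 on the usual platforms):
 *
 *     NULL value:    [writtenlen = 4]                          no body
 *     by-value:      [writtenlen = 4 + sizeof(Datum)][datum bytes]
 *     by-reference:  [writtenlen = 4 + datumGetSize()][flattened value]
 *
 * each followed by the usual trailing copy of the length word when
 * randomAccess.
 */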

/*
 * Convenience routine to free a tuple previously loaded into sort memory
 */
static void
free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
{
	FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
	pfree(stup->tuple);
}