1 /*-------------------------------------------------------------------------
3 * nodeHash.c
4 * Routines to hash relations for hashjoin
6 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
10 * IDENTIFICATION
11 * src/backend/executor/nodeHash.c
13 * See note on parallelism in nodeHashjoin.c.
15 *-------------------------------------------------------------------------
18 * INTERFACE ROUTINES
19 * MultiExecHash - generate an in-memory hash table of the relation
20 * ExecInitHash - initialize node and subnodes
21 * ExecEndHash - shutdown node and subnodes
23 #include "postgres.h"
25 #include <math.h>
26 #include <limits.h>
29 #include "access/htup_details.h"
30 #include "access/parallel.h"
31 #include "catalog/pg_statistic.h"
32 #include "commands/tablespace.h"
33 #include "executor/execdebug.h"
34 #include "executor/hashjoin.h"
35 #include "executor/nodeHash.h"
36 #include "executor/nodeHashjoin.h"
37 #include "miscadmin.h"
39 #include "port/atomics.h"
40 #include "utils/dynahash.h"
41 #include "utils/memutils.h"
42 #include "utils/lsyscache.h"
43 #include "utils/syscache.h"
46 static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
47 static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
48 static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
49 static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
50 static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
52 static void ExecHashSkewTableInsert(HashJoinTable hashtable,
56 static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
58 static void *dense_alloc(HashJoinTable hashtable, Size size);
59 static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
62 static void MultiExecPrivateHash(HashState *node);
63 static void MultiExecParallelHash(HashState *node);
64 static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable table,
66 static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable table,
68 static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
70 dsa_pointer tuple_shared);
71 static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
72 static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
73 static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
74 static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
75 static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable table,
77 static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
80 static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
81 static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
84 /* ----------------------------------------------------------------
85 * ExecHash
87 * stub for pro forma compliance
88 * ----------------------------------------------------------------
90 static TupleTableSlot *
91 ExecHash(PlanState *pstate)
93 elog(ERROR, "Hash node does not support ExecProcNode call convention");
97 /* ----------------------------------------------------------------
98 * MultiExecHash
100 * build hash table for hashjoin, doing partitioning if more
101 * than one batch is required.
102 * ----------------------------------------------------------------
105 MultiExecHash(HashState *node)
107 /* must provide our own instrumentation support */
108 if (node->ps.instrument)
109 InstrStartNode(node->ps.instrument);
111 if (node->parallel_state != NULL)
112 MultiExecParallelHash(node);
114 MultiExecPrivateHash(node);
116 /* must provide our own instrumentation support */
117 if (node->ps.instrument)
118 InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
121 * We do not return the hash table directly because it's not a subtype of
122 * Node, and so would violate the MultiExecProcNode API. Instead, our
123 * parent Hashjoin node is expected to know how to fish it out of our node
124 * state. Ugly but not really worth cleaning up, since Hashjoin knows
125 * quite a bit more about Hash besides that.
130 /* ----------------------------------------------------------------
131 * MultiExecPrivateHash
133 * parallel-oblivious version, building a backend-private
134 * hash table and (if necessary) batch files.
135 * ----------------------------------------------------------------
138 MultiExecPrivateHash(HashState *node)
140 PlanState *outerNode;
142 HashJoinTable hashtable;
143 TupleTableSlot *slot;
144 ExprContext *econtext;
148 * get state info from node
150 outerNode = outerPlanState(node);
151 hashtable = node->hashtable;
154 * set expression context
156 hashkeys = node->hashkeys;
157 econtext = node->ps.ps_ExprContext;
160 * get all inner tuples and insert into the hash table (or temp files)
164 slot = ExecProcNode(outerNode);
167 /* We have to compute the hash value */
168 econtext->ecxt_innertuple = slot;
169 if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
170 false, hashtable->keepNulls,
175 bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
176 if (bucketNumber != INVALID_SKEW_BUCKET_NO)
178 /* It's a skew tuple, so put it into that hash table */
179 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
181 hashtable->skewTuples += 1;
185 /* Not subject to skew optimization, so insert normally */
186 ExecHashTableInsert(hashtable, slot, hashvalue);
188 hashtable->totalTuples += 1;
192 /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
193 if (hashtable->nbuckets != hashtable->nbuckets_optimal)
194 ExecHashIncreaseNumBuckets(hashtable);
196 /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
197 hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
198 if (hashtable->spaceUsed > hashtable->spacePeak)
199 hashtable->spacePeak = hashtable->spaceUsed;
201 hashtable->partialTuples = hashtable->totalTuples;
204 /* ----------------------------------------------------------------
205 * MultiExecParallelHash
207 * parallel-aware version, building a shared hash table and
208 * (if necessary) batch files using the combined effort of
209 * a set of co-operating backends.
210 * ----------------------------------------------------------------
213 MultiExecParallelHash(HashState *node)
215 ParallelHashJoinState *pstate;
216 PlanState *outerNode;
218 HashJoinTable hashtable;
219 TupleTableSlot *slot;
220 ExprContext *econtext;
222 Barrier *build_barrier;
226 * get state info from node
228 outerNode = outerPlanState(node);
229 hashtable = node->hashtable;
232 * set expression context
234 hashkeys = node->hashkeys;
235 econtext = node->ps.ps_ExprContext;
238 * Synchronize the parallel hash table build. At this stage we know that
239 * the shared hash table has been or is being set up by
240 * ExecHashTableCreate(), but we don't know if our peers have returned
241 * from there or are here in MultiExecParallelHash(), and if so how far
242 * through they are. To find out, we check the build_barrier phase and
243 * then jump to the right step in the build algorithm.
245 pstate = hashtable->parallel_state;
246 build_barrier = &pstate->build_barrier;
247 Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATING);
248 switch (BarrierPhase(build_barrier))
250 case PHJ_BUILD_ALLOCATING:
253 * Either I just allocated the initial hash table in
254 * ExecHashTableCreate(), or someone else is doing that. Either
255 * way, wait for everyone to arrive here so we can proceed.
257 BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATING);
260 case PHJ_BUILD_HASHING_INNER:
263 * It's time to begin hashing, or if we just arrived here then
264 * hashing is already underway, so join in that effort. While
265 * hashing we have to be prepared to help increase the number of
266 * batches or buckets at any time, and if we arrived here when
267 * that was already underway we'll have to help complete that work
268 * immediately so that it's safe to access batches and buckets below.
271 if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
272 PHJ_GROW_BATCHES_ELECTING)
273 ExecParallelHashIncreaseNumBatches(hashtable);
274 if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
275 PHJ_GROW_BUCKETS_ELECTING)
276 ExecParallelHashIncreaseNumBuckets(hashtable);
277 ExecParallelHashEnsureBatchAccessors(hashtable);
278 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
281 slot = ExecProcNode(outerNode);
284 econtext->ecxt_innertuple = slot;
285 if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
286 false, hashtable->keepNulls,
288 ExecParallelHashTableInsert(hashtable, slot, hashvalue);
289 hashtable->partialTuples++;
293 * Make sure that any tuples we wrote to disk are visible to
294 * others before anyone tries to load them.
296 for (i = 0; i < hashtable->nbatch; ++i)
297 sts_end_write(hashtable->batches[i].inner_tuples);
300 * Update shared counters. We need an accurate total tuple count
301 * to control the empty table optimization.
303 ExecParallelHashMergeCounters(hashtable);
305 BarrierDetach(&pstate->grow_buckets_barrier);
306 BarrierDetach(&pstate->grow_batches_barrier);
309 * Wait for everyone to finish building and flushing files and counters.
312 if (BarrierArriveAndWait(build_barrier,
313 WAIT_EVENT_HASH_BUILD_HASHING_INNER))
316 * Elect one backend to disable any further growth. Batches
317 * are now fixed. While building them we made sure they'd fit
318 * in our memory budget when we load them back in later (or we
319 * tried to do that and gave up because we detected extreme skew).
322 pstate->growth = PHJ_GROWTH_DISABLED;
327 * We're not yet attached to a batch. We all agree on the dimensions and
328 * number of inner tuples (for the empty table optimization).
330 hashtable->curbatch = -1;
331 hashtable->nbuckets = pstate->nbuckets;
332 hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
333 hashtable->totalTuples = pstate->total_tuples;
334 ExecParallelHashEnsureBatchAccessors(hashtable);
337 * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
338 * case, which will bring the build phase to PHJ_BUILD_DONE (if it isn't there already).
341 Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASHING_OUTER ||
342 BarrierPhase(build_barrier) == PHJ_BUILD_DONE);
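/*
 * A rough sketch of the build-side synchronization, pieced together from the
 * phase names used above and in nodeHashjoin.c (assuming a single normal pass
 * through them):
 *
 *   PHJ_BUILD_ELECTING       one backend is chosen to create the shared
 *                            SharedHashJoinBatch objects and batch 0's hash
 *                            table (ExecHashTableCreate)
 *   PHJ_BUILD_ALLOCATING     everyone waits for that allocation to finish
 *   PHJ_BUILD_HASHING_INNER  all attached backends hash the inner relation,
 *                            helping with any bucket or batch growth
 *   PHJ_BUILD_HASHING_OUTER  the outer side is partitioned (driven from
 *                            ExecHashJoin in nodeHashjoin.c)
 *   PHJ_BUILD_DONE           batches are ready to be probed
 */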
345 /* ----------------------------------------------------------------
346 * ExecInitHash
348 * Init routine for Hash node
349 * ----------------------------------------------------------------
352 ExecInitHash(Hash *node, EState *estate, int eflags)
354 HashState *hashstate;
356 /* check for unsupported flags */
357 Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
360 * create state structure
362 hashstate = makeNode(HashState);
363 hashstate->ps.plan = (Plan *) node;
364 hashstate->ps.state = estate;
365 hashstate->ps.ExecProcNode = ExecHash;
366 hashstate->hashtable = NULL;
367 hashstate->hashkeys = NIL; /* will be set by parent HashJoin */
370 * Miscellaneous initialization
372 * create expression context for node
374 ExecAssignExprContext(estate, &hashstate->ps);
377 * initialize child nodes
379 outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
382 * initialize our result slot and type. No need to build projection
383 * because this node doesn't do projections.
385 ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
386 hashstate->ps.ps_ProjInfo = NULL;
389 * initialize child expressions
392 ExecInitQual(node->plan.qual, (PlanState *) hashstate);
397 /* ---------------------------------------------------------------
398 * ExecEndHash
400 * clean up routine for Hash node
401 * ----------------------------------------------------------------
404 ExecEndHash(HashState *node)
406 PlanState *outerPlan;
411 ExecFreeExprContext(&node->ps);
414 * shut down the subplan
416 outerPlan = outerPlanState(node);
417 ExecEndNode(outerPlan);
421 /* ----------------------------------------------------------------
422 * ExecHashTableCreate
424 * create an empty hashtable data structure for hashjoin.
425 * ----------------------------------------------------------------
428 ExecHashTableCreate(HashState *state, List *hashOperators, bool keepNulls)
431 HashJoinTable hashtable;
433 size_t space_allowed;
442 MemoryContext oldcxt;
445 * Get information about the size of the relation to be hashed (it's the
446 * "outer" subtree of this node, but the inner relation of the hashjoin).
447 * Compute the appropriate size of the hash table.
449 node = (Hash *) state->ps.plan;
450 outerNode = outerPlan(node);
453 * If this is a shared hash table with a partial plan, then we can't use
454 * outerNode->plan_rows to estimate its size. We need an estimate of the
455 * total number of rows across all copies of the partial plan.
457 rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
459 ExecChooseHashTableSize(rows, outerNode->plan_width,
460 OidIsValid(node->skewTable),
461 state->parallel_state != NULL,
462 state->parallel_state != NULL ?
463 state->parallel_state->nparticipants - 1 : 0,
465 &nbuckets, &nbatch, &num_skew_mcvs);
467 /* nbuckets must be a power of 2 */
468 log2_nbuckets = my_log2(nbuckets);
469 Assert(nbuckets == (1 << log2_nbuckets));
472 * Initialize the hash table control block.
474 * The hashtable control block is just palloc'd from the executor's
475 * per-query memory context. Everything else should be kept inside the
476 * subsidiary hashCxt or batchCxt.
478 hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
479 hashtable->nbuckets = nbuckets;
480 hashtable->nbuckets_original = nbuckets;
481 hashtable->nbuckets_optimal = nbuckets;
482 hashtable->log2_nbuckets = log2_nbuckets;
483 hashtable->log2_nbuckets_optimal = log2_nbuckets;
484 hashtable->buckets.unshared = NULL;
485 hashtable->keepNulls = keepNulls;
486 hashtable->skewEnabled = false;
487 hashtable->skewBucket = NULL;
488 hashtable->skewBucketLen = 0;
489 hashtable->nSkewBuckets = 0;
490 hashtable->skewBucketNums = NULL;
491 hashtable->nbatch = nbatch;
492 hashtable->curbatch = 0;
493 hashtable->nbatch_original = nbatch;
494 hashtable->nbatch_outstart = nbatch;
495 hashtable->growEnabled = true;
496 hashtable->totalTuples = 0;
497 hashtable->partialTuples = 0;
498 hashtable->skewTuples = 0;
499 hashtable->innerBatchFile = NULL;
500 hashtable->outerBatchFile = NULL;
501 hashtable->spaceUsed = 0;
502 hashtable->spacePeak = 0;
503 hashtable->spaceAllowed = space_allowed;
504 hashtable->spaceUsedSkew = 0;
505 hashtable->spaceAllowedSkew =
506 hashtable->spaceAllowed * SKEW_WORK_MEM_PERCENT / 100;
507 hashtable->chunks = NULL;
508 hashtable->current_chunk = NULL;
509 hashtable->parallel_state = state->parallel_state;
510 hashtable->area = state->ps.state->es_query_dsa;
511 hashtable->batches = NULL;
514 printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
515 hashtable, nbatch, nbuckets);
519 * Create temporary memory contexts in which to keep the hashtable working
520 * storage. See notes in executor/hashjoin.h.
522 hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
524 ALLOCSET_DEFAULT_SIZES);
526 hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
528 ALLOCSET_DEFAULT_SIZES);
530 /* Allocate data that will live for the life of the hashjoin */
532 oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
535 * Get info about the hash functions to be used for each hash key. Also
536 * remember whether the join operators are strict.
538 nkeys = list_length(hashOperators);
539 hashtable->outer_hashfunctions =
540 (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
541 hashtable->inner_hashfunctions =
542 (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
543 hashtable->hashStrict = (bool *) palloc(nkeys * sizeof(bool));
545 foreach(ho, hashOperators)
547 Oid hashop = lfirst_oid(ho);
551 if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
552 elog(ERROR, "could not find hash function for hash operator %u",
554 fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
555 fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
556 hashtable->hashStrict[i] = op_strict(hashop);
560 if (nbatch > 1 && hashtable->parallel_state == NULL)
563 * allocate and initialize the file arrays in hashCxt (not needed for
564 * parallel case which uses shared tuplestores instead of raw files)
566 hashtable->innerBatchFile = (BufFile **)
567 palloc0(nbatch * sizeof(BufFile *));
568 hashtable->outerBatchFile = (BufFile **)
569 palloc0(nbatch * sizeof(BufFile *));
570 /* The files will not be opened until needed... */
571 /* ... but make sure we have temp tablespaces established for them */
572 PrepareTempTablespaces();
575 MemoryContextSwitchTo(oldcxt);
577 if (hashtable->parallel_state)
579 ParallelHashJoinState *pstate = hashtable->parallel_state;
580 Barrier *build_barrier;
583 * Attach to the build barrier. The corresponding detach operation is
584 * in ExecHashTableDetach. Note that we won't attach to the
585 * batch_barrier for batch 0 yet. We'll attach later and start it out
586 * in PHJ_BATCH_PROBING phase, because batch 0 is allocated up front
587 * and then loaded while hashing (the standard hybrid hash join
588 * algorithm), and we'll coordinate that using build_barrier.
590 build_barrier = &pstate->build_barrier;
591 BarrierAttach(build_barrier);
594 * So far we have no idea whether there are any other participants,
595 * and if so, what phase they are working on. The only thing we care
596 * about at this point is whether someone has already created the
597 * SharedHashJoinBatch objects and the hash table for batch 0. One
598 * backend will be elected to do that now if necessary.
600 if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECTING &&
601 BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECTING))
603 pstate->nbatch = nbatch;
604 pstate->space_allowed = space_allowed;
605 pstate->growth = PHJ_GROWTH_OK;
607 /* Set up the shared state for coordinating batches. */
608 ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
611 * Allocate batch 0's hash table up front so we can load it
612 * directly while hashing.
614 pstate->nbuckets = nbuckets;
615 ExecParallelHashTableAlloc(hashtable, 0);
619 * The next Parallel Hash synchronization point is in
620 * MultiExecParallelHash(), which will progress it all the way to
621 * PHJ_BUILD_DONE. The caller must not return control from this
622 * executor node between now and then.
628 * Prepare context for the first-scan space allocations; allocate the
629 * hashbucket array therein, and set each bucket "empty".
631 MemoryContextSwitchTo(hashtable->batchCxt);
633 hashtable->buckets.unshared = (HashJoinTuple *)
634 palloc0(nbuckets * sizeof(HashJoinTuple));
637 * Set up for skew optimization, if possible and there's a need for
638 * more than one batch. (In a one-batch join, there's no point in it.)
641 if (nbatch > 1)
642 ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
644 MemoryContextSwitchTo(oldcxt);
652 * Compute appropriate size for hashtable given the estimated size of the
653 * relation to be hashed (number of rows and average row width).
655 * This is exported so that the planner's costsize.c can use it.
658 /* Target bucket loading (tuples per bucket) */
659 #define NTUP_PER_BUCKET 1
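/*
 * A rough worked example of the sizing below (illustrative figures only;
 * exact header and alignment sizes vary by build): hashing 1 million tuples
 * of width 40 with work_mem = 4MB gives tupsize of roughly 16 + 24 + 40 = 80
 * bytes and inner_rel_bytes of roughly 80MB, far more than the 4MB budget,
 * so multiple batches are needed.  Ignoring the small skew-table carve-out,
 * bucket_size is then 88 bytes, nbuckets rounds up to 65536 (512kB of bucket
 * headers), and dbatch comes out around 22, which rounds up to the power of
 * two nbatch = 32.
 */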
662 ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
663 bool try_combined_work_mem,
664 int parallel_workers,
665 size_t *space_allowed,
671 double inner_rel_bytes;
673 long hash_table_bytes;
674 long skew_table_bytes;
681 /* Force a plausible relation size if no info */
686 * Estimate tupsize based on footprint of tuple in hashtable... note this
687 * does not allow for any palloc overhead. The manipulations of spaceUsed
688 * don't count palloc overhead either.
690 tupsize = HJTUPLE_OVERHEAD +
691 MAXALIGN(SizeofMinimalTupleHeader) +
693 inner_rel_bytes = ntuples * tupsize;
696 * Target in-memory hashtable size is work_mem kilobytes.
698 hash_table_bytes = work_mem * 1024L;
701 * Parallel Hash tries to use the combined work_mem of all workers to
702 * avoid the need to batch. If that won't work, it falls back to work_mem
703 * per worker and tries to process batches in parallel.
705 if (try_combined_work_mem)
706 hash_table_bytes += hash_table_bytes * parallel_workers;
708 *space_allowed = hash_table_bytes;
711 * If skew optimization is possible, estimate the number of skew buckets
712 * that will fit in the memory allowed, and decrement the assumed space
713 * available for the main hash table accordingly.
715 * We make the optimistic assumption that each skew bucket will contain
716 * one inner-relation tuple. If that turns out to be low, we will recover
717 * at runtime by reducing the number of skew buckets.
719 * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
720 * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
721 * will round up to the next power of 2 and then multiply by 4 to reduce collisions.
726 skew_table_bytes = hash_table_bytes * SKEW_WORK_MEM_PERCENT / 100;
730 * The divisor is: the size of a hash tuple +
731 * worst-case size of skewBucket[] per MCV +
732 * size of skewBucketNums[] entry +
733 * size of skew bucket struct itself
736 *num_skew_mcvs = skew_table_bytes / (tupsize +
737 (8 * sizeof(HashSkewBucket *)) +
739 SKEW_BUCKET_OVERHEAD);
740 if (*num_skew_mcvs > 0)
741 hash_table_bytes -= skew_table_bytes;
747 * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
748 * memory is filled, assuming a single batch; but limit the value so that
749 * the pointer arrays we'll try to allocate do not exceed work_mem nor MaxAllocSize.
752 * Note that both nbuckets and nbatch must be powers of 2 to make
753 * ExecHashGetBucketAndBatch fast.
755 max_pointers = *space_allowed / sizeof(HashJoinTuple);
756 max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
757 /* If max_pointers isn't a power of 2, must round it down to one */
758 mppow2 = 1L << my_log2(max_pointers);
759 if (max_pointers != mppow2)
760 max_pointers = mppow2 / 2;
762 /* Also ensure we avoid integer overflow in nbatch and nbuckets */
763 /* (this step is redundant given the current value of MaxAllocSize) */
764 max_pointers = Min(max_pointers, INT_MAX / 2);
766 dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
767 dbuckets = Min(dbuckets, max_pointers);
768 nbuckets = (int) dbuckets;
769 /* don't let nbuckets be really small, though ... */
770 nbuckets = Max(nbuckets, 1024);
771 /* ... and force it to be a power of 2. */
772 nbuckets = 1 << my_log2(nbuckets);
775 * If there's not enough space to store the projected number of tuples and
776 * the required bucket headers, we will need multiple batches.
778 bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
779 if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
781 /* We'll need multiple batches */
788 * If Parallel Hash with combined work_mem would still need multiple
789 * batches, we'll have to fall back to regular work_mem budget.
791 if (try_combined_work_mem)
793 ExecChooseHashTableSize(ntuples, tupwidth, useskew,
794 false, parallel_workers,
803 * Estimate the number of buckets we'll want to have when work_mem is
804 * entirely full. Each bucket will contain a bucket pointer plus
805 * NTUP_PER_BUCKET tuples, whose projected size already includes
806 * overhead for the hash code, pointer to the next tuple, etc.
808 bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
809 lbuckets = 1L << my_log2(hash_table_bytes / bucket_size);
810 lbuckets = Min(lbuckets, max_pointers);
811 nbuckets = (int) lbuckets;
812 nbuckets = 1 << my_log2(nbuckets);
813 bucket_bytes = nbuckets * sizeof(HashJoinTuple);
816 * Buckets are simple pointers to hashjoin tuples, while tupsize
817 * includes the pointer, hash code, and MinimalTupleData. So buckets
818 * should never really exceed 25% of work_mem (even for
819 * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
820 * 2^N bytes, where we might get more because of doubling. So let's allow up to 50% here.
823 Assert(bucket_bytes <= hash_table_bytes / 2);
825 /* Calculate required number of batches. */
826 dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
827 dbatch = Min(dbatch, max_pointers);
828 minbatch = (int) dbatch;
829 nbatch = 2;
830 while (nbatch < minbatch)
831 nbatch <<= 1;
834 Assert(nbuckets > 0);
837 *numbuckets = nbuckets;
838 *numbatches = nbatch;
842 /* ----------------------------------------------------------------
843 * ExecHashTableDestroy
845 * destroy a hash table
846 * ----------------------------------------------------------------
849 ExecHashTableDestroy(HashJoinTable hashtable)
854 * Make sure all the temp files are closed. We skip batch 0, since it
855 * can't have any temp files (and the arrays might not even exist if
856 * nbatch is only 1). Parallel hash joins don't use these files.
858 if (hashtable->innerBatchFile != NULL)
860 for (i = 1; i < hashtable->nbatch; i++)
862 if (hashtable->innerBatchFile[i])
863 BufFileClose(hashtable->innerBatchFile[i]);
864 if (hashtable->outerBatchFile[i])
865 BufFileClose(hashtable->outerBatchFile[i]);
869 /* Release working memory (batchCxt is a child, so it goes away too) */
870 MemoryContextDelete(hashtable->hashCxt);
872 /* And drop the control block */
877 * ExecHashIncreaseNumBatches
878 * increase the original number of batches in order to reduce
879 * current memory consumption
882 ExecHashIncreaseNumBatches(HashJoinTable hashtable)
884 int oldnbatch = hashtable->nbatch;
885 int curbatch = hashtable->curbatch;
887 MemoryContext oldcxt;
890 HashMemoryChunk oldchunks;
892 /* do nothing if we've decided to shut off growth */
893 if (!hashtable->growEnabled)
896 /* safety check to avoid overflow */
897 if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
900 nbatch = oldnbatch * 2;
904 printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
905 hashtable, nbatch, hashtable->spaceUsed);
908 oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
910 if (hashtable->innerBatchFile == NULL)
912 /* we had no file arrays before */
913 hashtable->innerBatchFile = (BufFile **)
914 palloc0(nbatch * sizeof(BufFile *));
915 hashtable->outerBatchFile = (BufFile **)
916 palloc0(nbatch * sizeof(BufFile *));
917 /* time to establish the temp tablespaces, too */
918 PrepareTempTablespaces();
922 /* enlarge arrays and zero out added entries */
923 hashtable->innerBatchFile = (BufFile **)
924 repalloc(hashtable->innerBatchFile, nbatch * sizeof(BufFile *));
925 hashtable->outerBatchFile = (BufFile **)
926 repalloc(hashtable->outerBatchFile, nbatch * sizeof(BufFile *));
927 MemSet(hashtable->innerBatchFile + oldnbatch, 0,
928 (nbatch - oldnbatch) * sizeof(BufFile *));
929 MemSet(hashtable->outerBatchFile + oldnbatch, 0,
930 (nbatch - oldnbatch) * sizeof(BufFile *));
933 MemoryContextSwitchTo(oldcxt);
935 hashtable->nbatch = nbatch;
938 * Scan through the existing hash table entries and dump out any that no
939 * longer belong to the current batch.
941 ninmemory = nfreed = 0;
943 /* If we know we need to resize nbuckets, we can do it while rebatching. */
944 if (hashtable->nbuckets_optimal != hashtable->nbuckets)
946 /* we never decrease the number of buckets */
947 Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
949 hashtable->nbuckets = hashtable->nbuckets_optimal;
950 hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
952 hashtable->buckets.unshared =
953 repalloc(hashtable->buckets.unshared,
954 sizeof(HashJoinTuple) * hashtable->nbuckets);
958 * We will scan through the chunks directly, so that we can reset the
959 * buckets now and not have to keep track of which tuples in the buckets
960 * have already been processed. We will free the old chunks as we go.
962 memset(hashtable->buckets.unshared, 0,
963 sizeof(HashJoinTuple) * hashtable->nbuckets);
964 oldchunks = hashtable->chunks;
965 hashtable->chunks = NULL;
967 /* so, let's scan through the old chunks, and all tuples in each chunk */
968 while (oldchunks != NULL)
970 HashMemoryChunk nextchunk = oldchunks->next.unshared;
972 /* position within the buffer (up to oldchunks->used) */
975 /* process all tuples stored in this chunk (and then free it) */
976 while (idx < oldchunks->used)
978 HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
979 MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
980 int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
985 ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
986 &bucketno, &batchno);
988 if (batchno == curbatch)
990 /* keep tuple in memory - copy it into the new chunk */
991 HashJoinTuple copyTuple;
993 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
994 memcpy(copyTuple, hashTuple, hashTupleSize);
996 /* and add it back to the appropriate bucket */
997 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
998 hashtable->buckets.unshared[bucketno] = copyTuple;
1003 Assert(batchno > curbatch);
1004 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1005 hashTuple->hashvalue,
1006 &hashtable->innerBatchFile[batchno]);
1008 hashtable->spaceUsed -= hashTupleSize;
1012 /* next tuple in this chunk */
1013 idx += MAXALIGN(hashTupleSize);
1015 /* allow this loop to be cancellable */
1016 CHECK_FOR_INTERRUPTS();
1019 /* we're done with this chunk - free it and proceed to the next one */
1021 oldchunks = nextchunk;
1025 printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1026 hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1030 * If we dumped out either all or none of the tuples in the table, disable
1031 * further expansion of nbatch. This situation implies that we have
1032 * enough tuples of identical hashvalues to overflow spaceAllowed.
1033 * Increasing nbatch will not fix it since there's no way to subdivide the
1034 * group any more finely. We have to just gut it out and hope the server has enough RAM.
1037 if (nfreed == 0 || nfreed == ninmemory)
1039 hashtable->growEnabled = false;
1041 printf("Hashjoin %p: disabling further increase of nbatch\n",
1048 * ExecParallelHashIncreaseNumBatches
1049 * Every participant attached to grow_batches_barrier must run this
1050 * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1053 ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1055 ParallelHashJoinState *pstate = hashtable->parallel_state;
1058 Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
1061 * It's unlikely, but we need to be prepared for new participants to show
1062 * up while we're in the middle of this operation, so we need to switch on
1063 * the barrier phase here.
1065 switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
1067 case PHJ_GROW_BATCHES_ELECTING:
1070 * Elect one participant to prepare to grow the number of batches.
1071 * This involves reallocating or resetting the buckets of batch 0
1072 * in preparation for all participants to begin repartitioning the tuples.
1075 if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1076 WAIT_EVENT_HASH_GROW_BATCHES_ELECTING))
1078 dsa_pointer_atomic *buckets;
1079 ParallelHashJoinBatch *old_batch0;
1083 /* Move the old batch out of the way. */
1084 old_batch0 = hashtable->batches[0].shared;
1085 pstate->old_batches = pstate->batches;
1086 pstate->old_nbatch = hashtable->nbatch;
1087 pstate->batches = InvalidDsaPointer;
1089 /* Free this backend's old accessors. */
1090 ExecParallelHashCloseBatchAccessors(hashtable);
1092 /* Figure out how many batches to use. */
1093 if (hashtable->nbatch == 1)
1096 * We are going from single-batch to multi-batch. We need
1097 * to switch from one large combined memory budget to the
1098 * regular work_mem budget.
1100 pstate->space_allowed = work_mem * 1024L;
1103 * The combined work_mem of all participants wasn't
1104 * enough. Therefore one batch per participant would be
1105 * approximately equivalent and would probably also be
1106 * insufficient. So try two batches per participant,
1107 * rounded up to a power of two.
1109 new_nbatch = 1 << my_log2(pstate->nparticipants * 2);
1114 * We were already multi-batched. Try doubling the number of batches.
1117 new_nbatch = hashtable->nbatch * 2;
1120 /* Allocate new larger generation of batches. */
1121 Assert(hashtable->nbatch == pstate->nbatch);
1122 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1123 Assert(hashtable->nbatch == pstate->nbatch);
1125 /* Replace or recycle batch 0's bucket array. */
1126 if (pstate->old_nbatch == 1)
1133 * We probably also need a smaller bucket array. How many
1134 * tuples do we expect per batch, assuming we have only
1135 * half of them so far? Normally we don't need to change
1136 * the bucket array's size, because the size of each batch
1137 * stays the same as we add more batches, but in this
1138 * special case we move from a large batch to many smaller
1139 * batches and it would be wasteful to keep the large bucket array.
1142 dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1143 dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
1144 dbuckets = Min(dbuckets,
1145 MaxAllocSize / sizeof(dsa_pointer_atomic));
1146 new_nbuckets = (int) dbuckets;
1147 new_nbuckets = Max(new_nbuckets, 1024);
1148 new_nbuckets = 1 << my_log2(new_nbuckets);
1149 dsa_free(hashtable->area, old_batch0->buckets);
1150 hashtable->batches[0].shared->buckets =
1151 dsa_allocate(hashtable->area,
1152 sizeof(dsa_pointer_atomic) * new_nbuckets);
1153 buckets = (dsa_pointer_atomic *)
1154 dsa_get_address(hashtable->area,
1155 hashtable->batches[0].shared->buckets);
1156 for (i = 0; i < new_nbuckets; ++i)
1157 dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1158 pstate->nbuckets = new_nbuckets;
1162 /* Recycle the existing bucket array. */
1163 hashtable->batches[0].shared->buckets = old_batch0->buckets;
1164 buckets = (dsa_pointer_atomic *)
1165 dsa_get_address(hashtable->area, old_batch0->buckets);
1166 for (i = 0; i < hashtable->nbuckets; ++i)
1167 dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
1170 /* Move all chunks to the work queue for parallel processing. */
1171 pstate->chunk_work_queue = old_batch0->chunks;
1173 /* Disable further growth temporarily while we're growing. */
1174 pstate->growth = PHJ_GROWTH_DISABLED;
1178 /* All other participants just flush their tuples to disk. */
1179 ExecParallelHashCloseBatchAccessors(hashtable);
1183 case PHJ_GROW_BATCHES_ALLOCATING:
1184 /* Wait for the above to be finished. */
1185 BarrierArriveAndWait(&pstate->grow_batches_barrier,
1186 WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING);
1189 case PHJ_GROW_BATCHES_REPARTITIONING:
1190 /* Make sure that we have the current dimensions and buckets. */
1191 ExecParallelHashEnsureBatchAccessors(hashtable);
1192 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1193 /* Then partition, flush counters. */
1194 ExecParallelHashRepartitionFirst(hashtable);
1195 ExecParallelHashRepartitionRest(hashtable);
1196 ExecParallelHashMergeCounters(hashtable);
1197 /* Wait for the above to be finished. */
1198 BarrierArriveAndWait(&pstate->grow_batches_barrier,
1199 WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING);
1202 case PHJ_GROW_BATCHES_DECIDING:
1205 * Elect one participant to clean up and decide whether further
1206 * repartitioning is needed, or should be disabled because it's not helping.
1209 if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1210 WAIT_EVENT_HASH_GROW_BATCHES_DECIDING))
1212 bool space_exhausted = false;
1213 bool extreme_skew_detected = false;
1215 /* Make sure that we have the current dimensions and buckets. */
1216 ExecParallelHashEnsureBatchAccessors(hashtable);
1217 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1219 /* Are any of the new generation of batches exhausted? */
1220 for (i = 0; i < hashtable->nbatch; ++i)
1222 ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
1224 if (batch->space_exhausted ||
1225 batch->estimated_size > pstate->space_allowed)
1229 space_exhausted = true;
1232 * Did this batch receive ALL of the tuples from its
1233 * parent batch? That would indicate that further
1234 * repartitioning isn't going to help (the hash values
1235 * are probably all the same).
1237 parent = i % pstate->old_nbatch;
1238 if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1239 extreme_skew_detected = true;
1243 /* Don't keep growing if it's not helping or we'd overflow. */
1244 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1245 pstate->growth = PHJ_GROWTH_DISABLED;
1246 else if (space_exhausted)
1247 pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
1249 pstate->growth = PHJ_GROWTH_OK;
1251 /* Free the old batches in shared memory. */
1252 dsa_free(hashtable->area, pstate->old_batches);
1253 pstate->old_batches = InvalidDsaPointer;
1257 case PHJ_GROW_BATCHES_FINISHING:
1258 /* Wait for the above to complete. */
1259 BarrierArriveAndWait(&pstate->grow_batches_barrier,
1260 WAIT_EVENT_HASH_GROW_BATCHES_FINISHING);
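/*
 * For orientation (a summary of the case labels above, nothing more): the
 * grow_batches_barrier cycles through ELECTING (one backend moves the old
 * batch 0 aside, sets the new nbatch, and replaces or recycles its bucket
 * array), ALLOCATING (everyone waits for that), REPARTITIONING (all backends
 * spread the old tuples across the new generation of batches), DECIDING (one
 * backend checks whether the new batches fit and whether further growth would
 * help), and FINISHING.
 */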
1265 * Repartition the tuples currently loaded into memory for inner batch 0
1266 * because the number of batches has been increased. Some tuples are retained
1267 * in memory and some are written out to a later batch.
1270 ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1272 dsa_pointer chunk_shared;
1273 HashMemoryChunk chunk;
1275 Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1277 while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1281 /* Repartition all tuples in this chunk. */
1282 while (idx < chunk->used)
1284 HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1285 MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1286 HashJoinTuple copyTuple;
1291 ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1292 &bucketno, &batchno);
1294 Assert(batchno < hashtable->nbatch);
1297 /* It still belongs in batch 0. Copy to a new chunk. */
1299 ExecParallelHashTupleAlloc(hashtable,
1300 HJTUPLE_OVERHEAD + tuple->t_len,
1302 copyTuple->hashvalue = hashTuple->hashvalue;
1303 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1304 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1310 MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1312 /* It belongs in a later batch. */
1313 hashtable->batches[batchno].estimated_size += tuple_size;
1314 sts_puttuple(hashtable->batches[batchno].inner_tuples,
1315 &hashTuple->hashvalue, tuple);
1318 /* Count this tuple. */
1319 ++hashtable->batches[0].old_ntuples;
1320 ++hashtable->batches[batchno].ntuples;
1322 idx += MAXALIGN(HJTUPLE_OVERHEAD +
1323 HJTUPLE_MINTUPLE(hashTuple)->t_len);
1326 /* Free this chunk. */
1327 dsa_free(hashtable->area, chunk_shared);
1329 CHECK_FOR_INTERRUPTS();
1334 * Help repartition inner batches 1..n.
1337 ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1339 ParallelHashJoinState *pstate = hashtable->parallel_state;
1340 int old_nbatch = pstate->old_nbatch;
1341 SharedTuplestoreAccessor **old_inner_tuples;
1342 ParallelHashJoinBatch *old_batches;
1345 /* Get our hands on the previous generation of batches. */
1346 old_batches = (ParallelHashJoinBatch *)
1347 dsa_get_address(hashtable->area, pstate->old_batches);
1348 old_inner_tuples = palloc0(sizeof(SharedTuplestoreAccessor *) * old_nbatch);
1349 for (i = 1; i < old_nbatch; ++i)
1351 ParallelHashJoinBatch *shared =
1352 NthParallelHashJoinBatch(old_batches, i);
1354 old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1355 ParallelWorkerNumber + 1,
1359 /* Join in the effort to repartition them. */
1360 for (i = 1; i < old_nbatch; ++i)
1365 /* Scan one partition from the previous generation. */
1366 sts_begin_parallel_scan(old_inner_tuples[i]);
1367 while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1369 size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1373 /* Decide which partition it goes to in the new generation. */
1374 ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1377 hashtable->batches[batchno].estimated_size += tuple_size;
1378 ++hashtable->batches[batchno].ntuples;
1379 ++hashtable->batches[i].old_ntuples;
1381 /* Store the tuple in its new batch. */
1382 sts_puttuple(hashtable->batches[batchno].inner_tuples,
1385 CHECK_FOR_INTERRUPTS();
1387 sts_end_parallel_scan(old_inner_tuples[i]);
1390 pfree(old_inner_tuples);
1394 * Transfer the backend-local per-batch counters to the shared totals.
1397 ExecParallelHashMergeCounters(HashJoinTable hashtable)
1399 ParallelHashJoinState *pstate = hashtable->parallel_state;
1402 LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1403 pstate->total_tuples = 0;
1404 for (i = 0; i < hashtable->nbatch; ++i)
1406 ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1408 batch->shared->size += batch->size;
1409 batch->shared->estimated_size += batch->estimated_size;
1410 batch->shared->ntuples += batch->ntuples;
1411 batch->shared->old_ntuples += batch->old_ntuples;
1413 batch->estimated_size = 0;
1415 batch->old_ntuples = 0;
1416 pstate->total_tuples += batch->shared->ntuples;
1418 LWLockRelease(&pstate->lock);
1422 * ExecHashIncreaseNumBuckets
1423 * increase the original number of buckets in order to reduce the
1424 * number of tuples per bucket
1427 ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1429 HashMemoryChunk chunk;
1431 /* do nothing if not an increase (it's called increase for a reason) */
1432 if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
1436 printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1437 hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1440 hashtable->nbuckets = hashtable->nbuckets_optimal;
1441 hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1443 Assert(hashtable->nbuckets > 1);
1444 Assert(hashtable->nbuckets <= (INT_MAX / 2));
1445 Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1448 * Just reallocate the proper number of buckets - we don't need to walk
1449 * through them - we can walk the dense-allocated chunks (just like in
1450 * ExecHashIncreaseNumBatches, but without all the copying into new chunks).
1453 hashtable->buckets.unshared =
1454 (HashJoinTuple *) repalloc(hashtable->buckets.unshared,
1455 hashtable->nbuckets * sizeof(HashJoinTuple));
1457 memset(hashtable->buckets.unshared, 0,
1458 hashtable->nbuckets * sizeof(HashJoinTuple));
1460 /* scan through all tuples in all chunks to rebuild the hash table */
1461 for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1463 /* process all tuples stored in this chunk */
1466 while (idx < chunk->used)
1468 HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1472 ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1473 &bucketno, &batchno);
1475 /* add the tuple to the proper bucket */
1476 hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1477 hashtable->buckets.unshared[bucketno] = hashTuple;
1479 /* advance index past the tuple */
1480 idx += MAXALIGN(HJTUPLE_OVERHEAD +
1481 HJTUPLE_MINTUPLE(hashTuple)->t_len);
1484 /* allow this loop to be cancellable */
1485 CHECK_FOR_INTERRUPTS();
1490 ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1492 ParallelHashJoinState *pstate = hashtable->parallel_state;
1494 HashMemoryChunk chunk;
1495 dsa_pointer chunk_s;
1497 Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
1500 * It's unlikely, but we need to be prepared for new participants to show
1501 * up while we're in the middle of this operation, so we need to switch on
1502 * the barrier phase here.
1504 switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
1506 case PHJ_GROW_BUCKETS_ELECTING:
1507 /* Elect one participant to prepare to increase nbuckets. */
1508 if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1509 WAIT_EVENT_HASH_GROW_BUCKETS_ELECTING))
1512 dsa_pointer_atomic *buckets;
1514 /* Double the size of the bucket array. */
1515 pstate->nbuckets *= 2;
1516 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1517 hashtable->batches[0].shared->size += size / 2;
1518 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1519 hashtable->batches[0].shared->buckets =
1520 dsa_allocate(hashtable->area, size);
1521 buckets = (dsa_pointer_atomic *)
1522 dsa_get_address(hashtable->area,
1523 hashtable->batches[0].shared->buckets);
1524 for (i = 0; i < pstate->nbuckets; ++i)
1525 dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1527 /* Put the chunk list onto the work queue. */
1528 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1530 /* Clear the flag. */
1531 pstate->growth = PHJ_GROWTH_OK;
1535 case PHJ_GROW_BUCKETS_ALLOCATING:
1536 /* Wait for the above to complete. */
1537 BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1538 WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING);
1541 case PHJ_GROW_BUCKETS_REINSERTING:
1542 /* Reinsert all tuples into the hash table. */
1543 ExecParallelHashEnsureBatchAccessors(hashtable);
1544 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1545 while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1549 while (idx < chunk->used)
1551 HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1552 dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1556 ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1557 &bucketno, &batchno);
1558 Assert(batchno == 0);
1560 /* add the tuple to the proper bucket */
1561 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1564 /* advance index past the tuple */
1565 idx += MAXALIGN(HJTUPLE_OVERHEAD +
1566 HJTUPLE_MINTUPLE(hashTuple)->t_len);
1569 /* allow this loop to be cancellable */
1570 CHECK_FOR_INTERRUPTS();
1572 BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1573 WAIT_EVENT_HASH_GROW_BUCKETS_REINSERTING);
1578 * ExecHashTableInsert
1579 * insert a tuple into the hash table depending on the hash value
1580 * it may just go to a temp file for later batches
1582 * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1583 * tuple; the minimal case in particular is certain to happen while reloading
1584 * tuples from batch files. We could save some cycles in the regular-tuple
1585 * case by not forcing the slot contents into minimal form; not clear if it's
1586 * worth the messiness required.
1589 ExecHashTableInsert(HashJoinTable hashtable,
1590 TupleTableSlot *slot,
1594 MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1598 ExecHashGetBucketAndBatch(hashtable, hashvalue,
1599 &bucketno, &batchno);
1602 * decide whether to put the tuple in the hash table or a temp file
1604 if (batchno == hashtable->curbatch)
1607 * put the tuple in the hash table
1609 HashJoinTuple hashTuple;
1611 double ntuples = (hashtable->totalTuples - hashtable->skewTuples);
1613 /* Create the HashJoinTuple */
1614 hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
1615 hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1617 hashTuple->hashvalue = hashvalue;
1618 memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1621 * We always reset the tuple-matched flag on insertion. This is okay
1622 * even when reloading a tuple from a batch file, since the tuple
1623 * could not possibly have been matched to an outer tuple before it
1624 * went into the batch file.
1626 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1628 /* Push it onto the front of the bucket's list */
1629 hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1630 hashtable->buckets.unshared[bucketno] = hashTuple;
1633 * Increase the (optimal) number of buckets if we just exceeded the
1634 * NTUP_PER_BUCKET threshold, but only when there's still a single batch.
1637 if (hashtable->nbatch == 1 &&
1638 ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1640 /* Guard against integer overflow and alloc size overflow */
1641 if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1642 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1644 hashtable->nbuckets_optimal *= 2;
1645 hashtable->log2_nbuckets_optimal += 1;
1649 /* Account for space used, and back off if we've used too much */
1650 hashtable->spaceUsed += hashTupleSize;
1651 if (hashtable->spaceUsed > hashtable->spacePeak)
1652 hashtable->spacePeak = hashtable->spaceUsed;
1653 if (hashtable->spaceUsed +
1654 hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
1655 > hashtable->spaceAllowed)
1656 ExecHashIncreaseNumBatches(hashtable);
1661 * put the tuple into a temp file for later batches
1663 Assert(batchno > hashtable->curbatch);
1664 ExecHashJoinSaveTuple(tuple,
1666 &hashtable->innerBatchFile[batchno]);
1670 heap_free_minimal_tuple(tuple);
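/*
 * For example (illustrative numbers only): with NTUP_PER_BUCKET = 1 and a
 * single batch, once the number of inserted tuples grows past
 * nbuckets_optimal (say past 1024 in a 1024-bucket table), nbuckets_optimal
 * doubles to 2048.  The bucket array itself is only rebuilt later, by
 * ExecHashIncreaseNumBuckets(), after the build loop in
 * MultiExecPrivateHash() has consumed the whole inner relation.
 */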
1674 * ExecParallelHashTableInsert
1675 * insert a tuple into a shared hash table or shared batch tuplestore
1678 ExecParallelHashTableInsert(HashJoinTable hashtable,
1679 TupleTableSlot *slot,
1683 MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1689 ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1693 HashJoinTuple hashTuple;
1695 /* Try to load it into memory. */
1696 Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
1697 PHJ_BUILD_HASHING_INNER);
1698 hashTuple = ExecParallelHashTupleAlloc(hashtable,
1699 HJTUPLE_OVERHEAD + tuple->t_len,
1701 if (hashTuple == NULL)
1704 /* Store the hash value in the HashJoinTuple header. */
1705 hashTuple->hashvalue = hashvalue;
1706 memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1708 /* Push it onto the front of the bucket's list */
1709 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1714 size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1716 Assert(batchno > 0);
1718 /* Try to preallocate space in the batch if necessary. */
1719 if (hashtable->batches[batchno].preallocated < tuple_size)
1721 if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1725 Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1726 hashtable->batches[batchno].preallocated -= tuple_size;
1727 sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1730 ++hashtable->batches[batchno].ntuples;
1733 heap_free_minimal_tuple(tuple);
1737 * Insert a tuple into the current hash table. Unlike
1738 * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1739 * to other batches or to run out of memory, and should only be called with
1740 * tuples that belong in the current batch once growth has been disabled.
1743 ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1744 TupleTableSlot *slot,
1748 MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1749 HashJoinTuple hashTuple;
1754 ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1755 Assert(batchno == hashtable->curbatch);
1756 hashTuple = ExecParallelHashTupleAlloc(hashtable,
1757 HJTUPLE_OVERHEAD + tuple->t_len,
1759 hashTuple->hashvalue = hashvalue;
1760 memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1761 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1762 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1766 heap_free_minimal_tuple(tuple);
1770 * ExecHashGetHashValue
1771 * Compute the hash value for a tuple
1773 * The tuple to be tested must be in either econtext->ecxt_outertuple or
1774 * econtext->ecxt_innertuple. Vars in the hashkeys expressions should have
1775 * varno either OUTER_VAR or INNER_VAR.
1777 * A true result means the tuple's hash value has been successfully computed
1778 * and stored at *hashvalue. A false result means the tuple cannot match
1779 * because it contains a null attribute, and hence it should be discarded
1780 * immediately. (If keep_nulls is true then false is never returned.)
1783 ExecHashGetHashValue(HashJoinTable hashtable,
1784 ExprContext *econtext,
1791 FmgrInfo *hashfunctions;
1794 MemoryContext oldContext;
1797 * We reset the eval context each time to reclaim any memory leaked in the
1798 * hashkey expressions.
1800 ResetExprContext(econtext);
1802 oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
1805 hashfunctions = hashtable->outer_hashfunctions;
1807 hashfunctions = hashtable->inner_hashfunctions;
1809 foreach(hk, hashkeys)
1811 ExprState *keyexpr = (ExprState *) lfirst(hk);
1815 /* rotate hashkey left 1 bit at each step */
1816 hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
1819 * Get the join attribute value of the tuple
1821 keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
1824 * If the attribute is NULL, and the join operator is strict, then
1825 * this tuple cannot pass the join qual so we can reject it
1826 * immediately (unless we're scanning the outside of an outer join, in
1827 * which case we must not reject it). Otherwise we act like the
1828 * hashcode of NULL is zero (this will support operators that act like
1829 * IS NOT DISTINCT, though not any more-random behavior). We treat
1830 * the hash support function as strict even if the operator is not.
1832 * Note: currently, all hashjoinable operators must be strict since
1833 * the hash index AM assumes that. However, it takes so little extra
1834 * code here to allow non-strict that we may as well do it.
1838 if (hashtable->hashStrict[i] && !keep_nulls)
1840 MemoryContextSwitchTo(oldContext);
1841 return false; /* cannot match */
1843 /* else, leave hashkey unmodified, equivalent to hashcode 0 */
1847 /* Compute the hash function */
1850 hkey = DatumGetUInt32(FunctionCall1(&hashfunctions[i], keyval));
1851 hashkey ^= hkey;
1857 MemoryContextSwitchTo(oldContext);
1859 *hashvalue = hashkey;
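/*
 * To make the combination step concrete (a sketch, not taken verbatim from
 * the code above): for a two-column hash key whose per-column hash values
 * are h1 and h2, the loop computes rot1(h1) ^ h2, where rot1() is a one-bit
 * left rotation and the initial hashkey of 0 rotates to 0.  Because of the
 * rotation, the combined value depends on column order, so the key (a, b)
 * generally hashes differently from (b, a).
 */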
1864 * ExecHashGetBucketAndBatch
1865 * Determine the bucket number and batch number for a hash value
1867 * Note: on-the-fly increases of nbatch must not change the bucket number
1868 * for a given hash code (since we don't move tuples to different hash
1869 * chains), and must only cause the batch number to remain the same or
1870 * increase. Our algorithm is
1871 * bucketno = hashvalue MOD nbuckets
1872 * batchno = (hashvalue DIV nbuckets) MOD nbatch
1873 * where nbuckets and nbatch are both expected to be powers of 2, so we can
1874 * do the computations by shifting and masking. (This assumes that all hash
1875 * functions are good about randomizing all their output bits, else we are
1876 * likely to have very skewed bucket or batch occupancy.)
1878 * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1879 * bucket count growth. Once we start batching, the value is fixed and does
1880 * not change over the course of the join (making it possible to compute batch
1881 * number the way we do here).
1883 * nbatch is always a power of 2; we increase it only by doubling it. This
1884 * effectively adds one more bit to the top of the batchno.
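 * As a worked example (illustrative numbers only): with nbuckets = 1024
 * (log2_nbuckets = 10) and nbatch = 4, the hash value 0xDEADBEEF gives
 *		bucketno = 0xDEADBEEF & 1023 = 751
 *		batchno  = (0xDEADBEEF >> 10) & 3 = 3
 * Doubling nbatch to 8 exposes one more high-order bit, so the same hash
 * value keeps bucketno = 751 while batchno becomes 7; it can never move to a
 * lower-numbered batch.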
1887 ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1892 uint32 nbuckets = (uint32) hashtable->nbuckets;
1893 uint32 nbatch = (uint32) hashtable->nbatch;
1897 /* we can do MOD by masking, DIV by shifting */
1898 *bucketno = hashvalue & (nbuckets - 1);
1899 *batchno = (hashvalue >> hashtable->log2_nbuckets) & (nbatch - 1);
1903 *bucketno = hashvalue & (nbuckets - 1);
1909 * ExecScanHashBucket
1910 * scan a hash bucket for matches to the current outer tuple
1912 * The current outer tuple must be stored in econtext->ecxt_outertuple.
1914 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1915 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot for the latter.
1919 ExecScanHashBucket(HashJoinState *hjstate,
1920 ExprContext *econtext)
1922 ExprState *hjclauses = hjstate->hashclauses;
1923 HashJoinTable hashtable = hjstate->hj_HashTable;
1924 HashJoinTuple hashTuple = hjstate->hj_CurTuple;
1925 uint32 hashvalue = hjstate->hj_CurHashValue;
1928 * hj_CurTuple is the address of the tuple last returned from the current
1929 * bucket, or NULL if it's time to start scanning a new bucket.
1931 * If the tuple hashed to a skew bucket then scan the skew bucket,
1932 * otherwise scan the standard hashtable bucket.
1934 if (hashTuple != NULL)
1935 hashTuple = hashTuple->next.unshared;
1936 else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
1937 hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
1939 hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
1941 while (hashTuple != NULL)
1943 if (hashTuple->hashvalue == hashvalue)
1945 TupleTableSlot *inntuple;
1947 /* insert hashtable's tuple into exec slot so ExecQual sees it */
1948 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
1949 hjstate->hj_HashTupleSlot,
1950 false); /* do not pfree */
1951 econtext->ecxt_innertuple = inntuple;
1953 if (ExecQualAndReset(hjclauses, econtext))
1955 hjstate->hj_CurTuple = hashTuple;
1960 hashTuple = hashTuple->next.unshared;
1970 * ExecParallelScanHashBucket
1971 * scan a hash bucket for matches to the current outer tuple
1973 * The current outer tuple must be stored in econtext->ecxt_outertuple.
1975 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1976 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot for the latter.
1980 ExecParallelScanHashBucket(HashJoinState *hjstate,
1981 ExprContext *econtext)
1983 ExprState *hjclauses = hjstate->hashclauses;
1984 HashJoinTable hashtable = hjstate->hj_HashTable;
1985 HashJoinTuple hashTuple = hjstate->hj_CurTuple;
1986 uint32 hashvalue = hjstate->hj_CurHashValue;
1989 * hj_CurTuple is the address of the tuple last returned from the current
1990 * bucket, or NULL if it's time to start scanning a new bucket.
1992 if (hashTuple != NULL)
1993 hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
1995 hashTuple = ExecParallelHashFirstTuple(hashtable,
1996 hjstate->hj_CurBucketNo);
1998 while (hashTuple != NULL)
2000 if (hashTuple->hashvalue == hashvalue)
2002 TupleTableSlot *inntuple;
2004 /* insert hashtable's tuple into exec slot so ExecQual sees it */
2005 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2006 hjstate->hj_HashTupleSlot,
2007 false); /* do not pfree */
2008 econtext->ecxt_innertuple = inntuple;
2010 if (ExecQualAndReset(hjclauses, econtext))
2012 hjstate->hj_CurTuple = hashTuple;
2017 hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2027 * ExecPrepHashTableForUnmatched
2028 * set up for a series of ExecScanHashTableForUnmatched calls
2031 ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2034 * During this scan we use the HashJoinState fields as follows:
2036 * hj_CurBucketNo: next regular bucket to scan
2037 * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2038 * hj_CurTuple: last tuple returned, or NULL to start next bucket
2041 hjstate->hj_CurBucketNo = 0;
2042 hjstate->hj_CurSkewBucketNo = 0;
2043 hjstate->hj_CurTuple = NULL;
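	/*
	 * Usage sketch (illustrative only; the real caller is the hash join
	 * state machine in nodeHashjoin.c): a right or full join emits the
	 * unmatched inner tuples by preparing once and then draining the
	 * table:
	 *
	 *     ExecPrepHashTableForUnmatched(hjstate);
	 *     while (ExecScanHashTableForUnmatched(hjstate, econtext))
	 *     {
	 *         // project the inner tuple, null-extended on the outer side
	 *     }
	 */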
2047 * ExecScanHashTableForUnmatched
2048 * scan the hash table for unmatched inner tuples
2050 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2051 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2055 ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2057 HashJoinTable hashtable = hjstate->hj_HashTable;
2058 HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2063 * hj_CurTuple is the address of the tuple last returned from the
2064 * current bucket, or NULL if it's time to start scanning a new
2067 if (hashTuple != NULL)
2068 hashTuple = hashTuple->next.unshared;
2069 else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2071 hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2072 hjstate->hj_CurBucketNo++;
2074 else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2076 int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2078 hashTuple = hashtable->skewBucket[j]->tuples;
2079 hjstate->hj_CurSkewBucketNo++;
2082 break; /* finished all buckets */
2084 while (hashTuple != NULL)
2086 if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2088 TupleTableSlot *inntuple;
2090 /* insert hashtable's tuple into exec slot */
2091 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2092 hjstate->hj_HashTupleSlot,
2093 false); /* do not pfree */
2094 econtext->ecxt_innertuple = inntuple;
2097 * Reset temp memory each time; although this function doesn't
2098 * do any qual eval, the caller will, so let's keep it
2099 * parallel to ExecScanHashBucket.
2101 ResetExprContext(econtext);
2103 hjstate->hj_CurTuple = hashTuple;
2107 hashTuple = hashTuple->next.unshared;
2110 /* allow this loop to be cancellable */
2111 CHECK_FOR_INTERRUPTS();
2115 * no more unmatched tuples
2121 * ExecHashTableReset
2123 * reset hash table header for new batch
2126 ExecHashTableReset(HashJoinTable hashtable)
2128 MemoryContext oldcxt;
2129 int nbuckets = hashtable->nbuckets;
2132 * Release all the hash buckets and tuples acquired in the prior pass, and
2133 * reinitialize the context for a new pass.
2135 MemoryContextReset(hashtable->batchCxt);
2136 oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2138 /* Reallocate and reinitialize the hash bucket headers. */
2139 hashtable->buckets.unshared = (HashJoinTuple *)
2140 palloc0(nbuckets * sizeof(HashJoinTuple));
2142 hashtable->spaceUsed = 0;
2144 MemoryContextSwitchTo(oldcxt);
2146 /* Forget the chunks (the memory was freed by the context reset above). */
2147 hashtable->chunks = NULL;
2151 * ExecHashTableResetMatchFlags
2152 * Clear all the HeapTupleHeaderHasMatch flags in the table
2155 ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2157 HashJoinTuple tuple;
2160 /* Reset all flags in the main table ... */
2161 for (i = 0; i < hashtable->nbuckets; i++)
2163 for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2164 tuple = tuple->next.unshared)
2165 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2168 /* ... and the same for the skew buckets, if any */
2169 for (i = 0; i < hashtable->nSkewBuckets; i++)
2171 int j = hashtable->skewBucketNums[i];
2172 HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2174 for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
2175 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2181 ExecReScanHash(HashState *node)
2184 * If chgParam of the subnode is not null, then the plan will be
2185 * re-scanned by the first ExecProcNode.
2187 if (node->ps.lefttree->chgParam == NULL)
2188 ExecReScan(node->ps.lefttree);
2193 * ExecHashBuildSkewHash
2195 * Set up for skew optimization if we can identify the most common values
2196 * (MCVs) of the outer relation's join key. We make a skew hash bucket
2197 * for the hash value of each MCV, up to the number of slots allowed
2198 * based on available memory.
2201 ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
2203 HeapTupleData *statsTuple;
2206 /* Do nothing if planner didn't identify the outer relation's join key */
2207 if (!OidIsValid(node->skewTable))
2209 /* Also, do nothing if we don't have room for at least one skew bucket */
2214 * Try to find the MCV statistics for the outer relation's join key.
2216 statsTuple = SearchSysCache3(STATRELATTINH,
2217 ObjectIdGetDatum(node->skewTable),
2218 Int16GetDatum(node->skewColumn),
2219 BoolGetDatum(node->skewInherit));
2220 if (!HeapTupleIsValid(statsTuple))
2223 if (get_attstatsslot(&sslot, statsTuple,
2224 STATISTIC_KIND_MCV, InvalidOid,
2225 ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2229 FmgrInfo *hashfunctions;
2232 if (mcvsToUse > sslot.nvalues)
2233 mcvsToUse = sslot.nvalues;
2236 * Calculate the expected fraction of the outer relation that will
2237 * participate in the skew optimization. If this isn't at least
2238 * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2241 for (i = 0; i < mcvsToUse; i++)
2242 frac += sslot.numbers[i];
2243 if (frac < SKEW_MIN_OUTER_FRACTION)
2245 free_attstatsslot(&sslot);
2246 ReleaseSysCache(statsTuple);
2251 * Okay, set up the skew hashtable.
2253 * skewBucket[] is an open addressing hashtable with a power of 2 size
2254 * that is greater than the number of MCV values. (This ensures there
2255 * will be at least one null entry, so searches will always
2258 * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2259 * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2260 * since we limit pg_statistic entries to much less than that.
2263 while (nbuckets <= mcvsToUse)
2265 /* use two more bits just to help avoid collisions */
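	/*
	 * For example (illustrative numbers, not from the original source):
	 * with mcvsToUse = 100, the smallest power of 2 strictly greater than
	 * 100 is 128, and the two extra bits raise that to a 512-entry
	 * skewBucket[] array, keeping the open-addressing table sparsely
	 * populated.
	 */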
2268 hashtable->skewEnabled = true;
2269 hashtable->skewBucketLen = nbuckets;
2272 * We allocate the bucket memory in the hashtable's batch context. It
2273 * is only needed during the first batch, and this ensures it will be
2274 * automatically removed once the first batch is done.
2276 hashtable->skewBucket = (HashSkewBucket **)
2277 MemoryContextAllocZero(hashtable->batchCxt,
2278 nbuckets * sizeof(HashSkewBucket *));
2279 hashtable->skewBucketNums = (int *)
2280 MemoryContextAllocZero(hashtable->batchCxt,
2281 mcvsToUse * sizeof(int));
2283 hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2284 + mcvsToUse * sizeof(int);
2285 hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2286 + mcvsToUse * sizeof(int);
2287 if (hashtable->spaceUsed > hashtable->spacePeak)
2288 hashtable->spacePeak = hashtable->spaceUsed;
2291 * Create a skew bucket for each MCV hash value.
2293 * Note: it is very important that we create the buckets in order of
2294 * decreasing MCV frequency. If we have to remove some buckets, they
2295 * must be removed in reverse order of creation (see notes in
2296 * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2299 hashfunctions = hashtable->outer_hashfunctions;
2301 for (i = 0; i < mcvsToUse; i++)
2306 hashvalue = DatumGetUInt32(FunctionCall1(&hashfunctions[0],
2310 * While we have not hit a hole in the hashtable and have not hit
2311 * the desired bucket, we have collided with some previous hash
2312 * value, so try the next bucket location. NB: this code must
2313 * match ExecHashGetSkewBucket.
2315 bucket = hashvalue & (nbuckets - 1);
2316 while (hashtable->skewBucket[bucket] != NULL &&
2317 hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2318 bucket = (bucket + 1) & (nbuckets - 1);
2321 * If we found an existing bucket with the same hashvalue, leave
2322 * it alone. It's okay for two MCVs to share a hashvalue.
2324 if (hashtable->skewBucket[bucket] != NULL)
2327 /* Okay, create a new skew bucket for this hashvalue. */
2328 hashtable->skewBucket[bucket] = (HashSkewBucket *)
2329 MemoryContextAlloc(hashtable->batchCxt,
2330 sizeof(HashSkewBucket));
2331 hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2332 hashtable->skewBucket[bucket]->tuples = NULL;
2333 hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2334 hashtable->nSkewBuckets++;
2335 hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2336 hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
2337 if (hashtable->spaceUsed > hashtable->spacePeak)
2338 hashtable->spacePeak = hashtable->spaceUsed;
2341 free_attstatsslot(&sslot);
2344 ReleaseSysCache(statsTuple);
2348 * ExecHashGetSkewBucket
2350 * Returns the index of the skew bucket for this hashvalue,
2351 * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2352 * associated with any active skew bucket.
2355 ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2360 * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2361 * particular, this happens after the initial batch is done).
2363 if (!hashtable->skewEnabled)
2364 return INVALID_SKEW_BUCKET_NO;
2367 * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2369 bucket = hashvalue & (hashtable->skewBucketLen - 1);
2372 * While we have not hit a hole in the hashtable and have not hit the
2373 * desired bucket, we have collided with some other hash value, so try the
2374 * next bucket location.
2376 while (hashtable->skewBucket[bucket] != NULL &&
2377 hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2378 bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2381 * Found the desired bucket?
2383 if (hashtable->skewBucket[bucket] != NULL)
2387 * There must not be any hashtable entry for this hash value.
2389 return INVALID_SKEW_BUCKET_NO;
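/*
 * Probe example (illustrative, not part of the original code): with
 * skewBucketLen = 8 the mask is 0x7, so hashvalue 0x2B starts at slot 3.
 * If slot 3 holds a bucket for a different hash value, the loop above
 * steps to slots 4, 5, ... (wrapping via the mask) until it either finds
 * a bucket whose hashvalue matches or reaches a NULL slot, in which case
 * the value has no skew bucket.
 */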
2393 * ExecHashSkewTableInsert
2395 * Insert a tuple into the skew hashtable.
2397 * This should generally match up with the current-batch case in
2398 * ExecHashTableInsert.
2401 ExecHashSkewTableInsert(HashJoinTable hashtable,
2402 TupleTableSlot *slot,
2407 MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2408 HashJoinTuple hashTuple;
2411 /* Create the HashJoinTuple */
2412 hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2413 hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2415 hashTuple->hashvalue = hashvalue;
2416 memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
2417 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2419 /* Push it onto the front of the skew bucket's list */
2420 hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
2421 hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
2422 Assert(hashTuple != hashTuple->next.unshared);
2424 /* Account for space used, and back off if we've used too much */
2425 hashtable->spaceUsed += hashTupleSize;
2426 hashtable->spaceUsedSkew += hashTupleSize;
2427 if (hashtable->spaceUsed > hashtable->spacePeak)
2428 hashtable->spacePeak = hashtable->spaceUsed;
2429 while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2430 ExecHashRemoveNextSkewBucket(hashtable);
2432 /* Check we are not over the total spaceAllowed, either */
2433 if (hashtable->spaceUsed > hashtable->spaceAllowed)
2434 ExecHashIncreaseNumBatches(hashtable);
2437 heap_free_minimal_tuple(tuple);
2441 * ExecHashRemoveNextSkewBucket
2443 * Remove the least valuable skew bucket by pushing its tuples into
2444 * the main hash table.
2447 ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2450 HashSkewBucket *bucket;
2454 HashJoinTuple hashTuple;
2456 /* Locate the bucket to remove */
2457 bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2458 bucket = hashtable->skewBucket[bucketToRemove];
2461 * Calculate which bucket and batch the tuples belong to in the main
2462 * hashtable. They all have the same hash value, so it's the same for all
2463 * of them. Also note that it's not possible for nbatch to increase while
2464 * we are processing the tuples.
2466 hashvalue = bucket->hashvalue;
2467 ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2469 /* Process all tuples in the bucket */
2470 hashTuple = bucket->tuples;
2471 while (hashTuple != NULL)
2473 HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2478 * This code must agree with ExecHashTableInsert. We do not use
2479 * ExecHashTableInsert directly as ExecHashTableInsert expects a
2480 * TupleTableSlot while we already have HashJoinTuples.
2482 tuple = HJTUPLE_MINTUPLE(hashTuple);
2483 tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2485 /* Decide whether to put the tuple in the hash table or a temp file */
2486 if (batchno == hashtable->curbatch)
2488 /* Move the tuple to the main hash table */
2489 HashJoinTuple copyTuple;
2492 * We must copy the tuple into the dense storage, else it will not
2493 * be found by, e.g., ExecHashIncreaseNumBatches.
2495 copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2496 memcpy(copyTuple, hashTuple, tupleSize);
2499 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2500 hashtable->buckets.unshared[bucketno] = copyTuple;
2502 /* We have reduced skew space, but overall space doesn't change */
2503 hashtable->spaceUsedSkew -= tupleSize;
2507 /* Put the tuple into a temp file for later batches */
2508 Assert(batchno > hashtable->curbatch);
2509 ExecHashJoinSaveTuple(tuple, hashvalue,
2510 &hashtable->innerBatchFile[batchno]);
2512 hashtable->spaceUsed -= tupleSize;
2513 hashtable->spaceUsedSkew -= tupleSize;
2516 hashTuple = nextHashTuple;
2518 /* allow this loop to be cancellable */
2519 CHECK_FOR_INTERRUPTS();
2523 * Free the bucket struct itself and reset the hashtable entry to NULL.
2525 * NOTE: this is not nearly as simple as it looks on the surface, because
2526 * of the possibility of collisions in the hashtable. Suppose that hash
2527 * values A and B collide at a particular hashtable entry, and that A was
2528 * entered first so B gets shifted to a different table entry. If we were
2529 * to remove A first then ExecHashGetSkewBucket would mistakenly start
2530 * reporting that B is not in the hashtable, because it would hit the NULL
2531 * before finding B. However, we always remove entries in the reverse
2532 * order of creation, so this failure cannot happen.
2534 hashtable->skewBucket[bucketToRemove] = NULL;
2535 hashtable->nSkewBuckets--;
2537 hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2538 hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2541 * If we have removed all skew buckets then give up on skew optimization.
2542 * Release the arrays since they aren't useful any more.
2544 if (hashtable->nSkewBuckets == 0)
2546 hashtable->skewEnabled = false;
2547 pfree(hashtable->skewBucket);
2548 pfree(hashtable->skewBucketNums);
2549 hashtable->skewBucket = NULL;
2550 hashtable->skewBucketNums = NULL;
2551 hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2552 hashtable->spaceUsedSkew = 0;
2557 * Reserve space in the DSM segment for instrumentation data.
2560 ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2564 /* don't need this if not instrumenting or no workers */
2565 if (!node->ps.instrument || pcxt->nworkers == 0)
2568 size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2569 size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2570 shm_toc_estimate_chunk(&pcxt->estimator, size);
2571 shm_toc_estimate_keys(&pcxt->estimator, 1);
2575 * Set up a space in the DSM for all workers to record instrumentation data
2576 * about their hash table.
2579 ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2583 /* don't need this if not instrumenting or no workers */
2584 if (!node->ps.instrument || pcxt->nworkers == 0)
2587 size = offsetof(SharedHashInfo, hinstrument) +
2588 pcxt->nworkers * sizeof(HashInstrumentation);
2589 node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2590 memset(node->shared_info, 0, size);
2591 node->shared_info->num_workers = pcxt->nworkers;
2592 shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2597 * Locate the DSM space for hash table instrumentation data that we'll write
2598 * to at shutdown time.
2601 ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2603 SharedHashInfo *shared_info;
2605 /* don't need this if not instrumenting */
2606 if (!node->ps.instrument)
2609 shared_info = (SharedHashInfo *)
2610 shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2611 node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2615 * Copy instrumentation data from this worker's hash table (if it built one)
2616 * to DSM memory so the leader can retrieve it. This must be done in
2617 * ExecShutdownHash() rather than ExecEndHash() because the latter runs after
2618 * we've detached from the DSM segment.
2621 ExecShutdownHash(HashState *node)
2623 if (node->hinstrument && node->hashtable)
2624 ExecHashGetInstrumentation(node->hinstrument, node->hashtable);
2628 * Retrieve instrumentation data from workers before the DSM segment is
2629 * detached, so that EXPLAIN can access it.
2632 ExecHashRetrieveInstrumentation(HashState *node)
2634 SharedHashInfo *shared_info = node->shared_info;
2637 if (shared_info == NULL)
2640 /* Replace node->shared_info with a copy in backend-local memory. */
2641 size = offsetof(SharedHashInfo, hinstrument) +
2642 shared_info->num_workers * sizeof(HashInstrumentation);
2643 node->shared_info = palloc(size);
2644 memcpy(node->shared_info, shared_info, size);
2648 * Copy the instrumentation data from 'hashtable' into a HashInstrumentation
2652 ExecHashGetInstrumentation(HashInstrumentation *instrument,
2653 HashJoinTable hashtable)
2655 instrument->nbuckets = hashtable->nbuckets;
2656 instrument->nbuckets_original = hashtable->nbuckets_original;
2657 instrument->nbatch = hashtable->nbatch;
2658 instrument->nbatch_original = hashtable->nbatch_original;
2659 instrument->space_peak = hashtable->spacePeak;
2663 * Allocate 'size' bytes from the currently active HashMemoryChunk
2666 dense_alloc(HashJoinTable hashtable, Size size)
2668 HashMemoryChunk newChunk;
2671 /* just in case the size is not already aligned properly */
2672 size = MAXALIGN(size);
2675 * If the tuple size is larger than the threshold, allocate a separate chunk.
2677 if (size > HASH_CHUNK_THRESHOLD)
2679 /* allocate new chunk and put it at the beginning of the list */
2680 newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2681 HASH_CHUNK_HEADER_SIZE + size);
2682 newChunk->maxlen = size;
2683 newChunk->used = size;
2684 newChunk->ntuples = 1;
2687 * Add this chunk to the list after the first existing chunk, so that
2688 * we don't lose the remaining space in the "current" chunk.
2690 if (hashtable->chunks != NULL)
2692 newChunk->next = hashtable->chunks->next;
2693 hashtable->chunks->next.unshared = newChunk;
2697 newChunk->next.unshared = hashtable->chunks;
2698 hashtable->chunks = newChunk;
2701 return HASH_CHUNK_DATA(newChunk);
2705 * See if we have enough space for it in the current chunk (if any). If
2706 * not, allocate a fresh chunk.
2708 if ((hashtable->chunks == NULL) ||
2709 (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2711 /* allocate new chunk and put it at the beginning of the list */
2712 newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2713 HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
2715 newChunk->maxlen = HASH_CHUNK_SIZE;
2716 newChunk->used = size;
2717 newChunk->ntuples = 1;
2719 newChunk->next.unshared = hashtable->chunks;
2720 hashtable->chunks = newChunk;
2722 return HASH_CHUNK_DATA(newChunk);
2725 /* There is enough space in the current chunk, let's add the tuple */
2726 ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
2727 hashtable->chunks->used += size;
2728 hashtable->chunks->ntuples += 1;
2730 /* return pointer to the start of the tuple memory */
2735 * Allocate space for a tuple in shared dense storage. This is equivalent to
2736 * dense_alloc but for Parallel Hash using shared memory.
2738 * While loading a tuple into shared memory, we might run out of memory and
2739 * decide to repartition, or determine that the load factor is too high and
2740 * decide to expand the bucket array, or discover that another participant has
2741 * commanded us to help do that. Return NULL if the number of buckets or
2742 * batches has changed, indicating that the caller must retry (considering the
2743 * possibility that the tuple no longer belongs in the same batch).
2745 static HashJoinTuple
2746 ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
2747 dsa_pointer *shared)
2749 ParallelHashJoinState *pstate = hashtable->parallel_state;
2750 dsa_pointer chunk_shared;
2751 HashMemoryChunk chunk;
2753 HashJoinTuple result;
2754 int curbatch = hashtable->curbatch;
2756 size = MAXALIGN(size);
2759 * Fast path: if there is enough space in this backend's current chunk,
2760 * then we can allocate without any locking.
2762 chunk = hashtable->current_chunk;
2763 if (chunk != NULL &&
2764 size <= HASH_CHUNK_THRESHOLD &&
2765 chunk->maxlen - chunk->used >= size)
2768 chunk_shared = hashtable->current_chunk_shared;
2769 Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
2770 *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
2771 result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
2772 chunk->used += size;
2774 Assert(chunk->used <= chunk->maxlen);
2775 Assert(result == dsa_get_address(hashtable->area, *shared));
2780 /* Slow path: try to allocate a new chunk. */
2781 LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
2784 * Check if we need to help increase the number of buckets or batches.
2786 if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
2787 pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
2789 ParallelHashGrowth growth = pstate->growth;
2791 hashtable->current_chunk = NULL;
2792 LWLockRelease(&pstate->lock);
2794 /* Another participant has commanded us to help grow. */
2795 if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
2796 ExecParallelHashIncreaseNumBatches(hashtable);
2797 else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
2798 ExecParallelHashIncreaseNumBuckets(hashtable);
2800 /* The caller must retry. */
2804 /* Oversized tuples get their own chunk. */
2805 if (size > HASH_CHUNK_THRESHOLD)
2806 chunk_size = size + HASH_CHUNK_HEADER_SIZE;
2808 chunk_size = HASH_CHUNK_SIZE;
2810 /* Check if it's time to grow batches or buckets. */
2811 if (pstate->growth != PHJ_GROWTH_DISABLED)
2813 Assert(curbatch == 0);
2814 Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
2817 * Check if our space limit would be exceeded. To avoid choking on
2818 * very large tuples or a very low work_mem setting, we'll always allow
2819 * each backend to allocate at least one chunk.
2821 if (hashtable->batches[0].at_least_one_chunk &&
2822 hashtable->batches[0].shared->size +
2823 chunk_size > pstate->space_allowed)
2825 pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
2826 hashtable->batches[0].shared->space_exhausted = true;
2827 LWLockRelease(&pstate->lock);
2832 /* Check if our load factor limit would be exceeded. */
2833 if (hashtable->nbatch == 1)
2835 hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
2836 hashtable->batches[0].ntuples = 0;
2837 /* Guard against integer overflow and alloc size overflow */
2838 if (hashtable->batches[0].shared->ntuples + 1 >
2839 hashtable->nbuckets * NTUP_PER_BUCKET &&
2840 hashtable->nbuckets < (INT_MAX / 2) &&
2841 hashtable->nbuckets * 2 <=
2842 MaxAllocSize / sizeof(dsa_pointer_atomic))
2844 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
2845 LWLockRelease(&pstate->lock);
2852 /* We are cleared to allocate a new chunk. */
2853 chunk_shared = dsa_allocate(hashtable->area, chunk_size);
2854 hashtable->batches[curbatch].shared->size += chunk_size;
2855 hashtable->batches[curbatch].at_least_one_chunk = true;
2857 /* Set up the chunk. */
2858 chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
2859 *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
2860 chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
2864 * Push it onto the list of chunks, so that it can be found if we need to
2865 * increase the number of buckets or batches (batch 0 only) and later for
2866 * freeing the memory (all batches).
2868 chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
2869 hashtable->batches[curbatch].shared->chunks = chunk_shared;
2871 if (size <= HASH_CHUNK_THRESHOLD)
2874 * Make this the current chunk so that we can use the fast path to
2875 * fill the rest of it up in future calls.
2877 hashtable->current_chunk = chunk;
2878 hashtable->current_chunk_shared = chunk_shared;
2880 LWLockRelease(&pstate->lock);
2882 Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
2883 result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
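	/*
	 * Caller sketch (a minimal illustration, assuming the retry protocol
	 * described in the header comment of this function and locals such as
	 * hashvalue, bucketno and batchno as in this file's insert paths; the
	 * real callers also route tuples for later batches to tuplestores):
	 *
	 *     HashJoinTuple  tup;
	 *     dsa_pointer    tup_shared;
	 *
	 *     do
	 *     {
	 *         // Recompute, since nbuckets/nbatch may have changed.
	 *         ExecHashGetBucketAndBatch(hashtable, hashvalue,
	 *                                   &bucketno, &batchno);
	 *         tup = ExecParallelHashTupleAlloc(hashtable,
	 *                                          HJTUPLE_OVERHEAD + tuple->t_len,
	 *                                          &tup_shared);
	 *     } while (tup == NULL);   // NULL means "retry after helping grow"
	 */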
2889 * One backend needs to set up the shared batch state including tuplestores.
2890 * Other backends will ensure they have correctly configured accessors by
2891 * calling ExecParallelHashEnsureBatchAccessors().
2894 ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
2896 ParallelHashJoinState *pstate = hashtable->parallel_state;
2897 ParallelHashJoinBatch *batches;
2898 MemoryContext oldcxt;
2901 Assert(hashtable->batches == NULL);
2903 /* Allocate space. */
2905 dsa_allocate0(hashtable->area,
2906 EstimateParallelHashJoinBatch(hashtable) * nbatch);
2907 pstate->nbatch = nbatch;
2908 batches = dsa_get_address(hashtable->area, pstate->batches);
2910 /* Use hash join memory context. */
2911 oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
2913 /* Allocate this backend's accessor array. */
2914 hashtable->nbatch = nbatch;
2915 hashtable->batches = (ParallelHashJoinBatchAccessor *)
2916 palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
2918 /* Set up the shared state, tuplestores and backend-local accessors. */
2919 for (i = 0; i < hashtable->nbatch; ++i)
2921 ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
2922 ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
2923 char name[MAXPGPATH];
2926 * All members of shared were zero-initialized. We just need to set
2929 BarrierInit(&shared->batch_barrier, 0);
2932 /* Batch 0 doesn't need to be loaded. */
2933 BarrierAttach(&shared->batch_barrier);
2934 while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBING)
2935 BarrierArriveAndWait(&shared->batch_barrier, 0);
2936 BarrierDetach(&shared->batch_barrier);
2939 /* Initialize accessor state. All members were zero-initialized. */
2940 accessor->shared = shared;
2942 /* Initialize the shared tuplestores. */
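		/* E.g., batch 3 of 8 gets tuplestores named "i3of8" (inner) and "o3of8" (outer). */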
2943 snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
2944 accessor->inner_tuples =
2945 sts_initialize(ParallelHashJoinBatchInner(shared),
2946 pstate->nparticipants,
2947 ParallelWorkerNumber + 1,
2949 SHARED_TUPLESTORE_SINGLE_PASS,
2952 snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
2953 accessor->outer_tuples =
2954 sts_initialize(ParallelHashJoinBatchOuter(shared,
2955 pstate->nparticipants),
2956 pstate->nparticipants,
2957 ParallelWorkerNumber + 1,
2959 SHARED_TUPLESTORE_SINGLE_PASS,
2964 MemoryContextSwitchTo(oldcxt);
2968 * Free the current set of ParallelHashJoinBatchAccessor objects.
2971 ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
2975 for (i = 0; i < hashtable->nbatch; ++i)
2977 /* Make sure no files are left open. */
2978 sts_end_write(hashtable->batches[i].inner_tuples);
2979 sts_end_write(hashtable->batches[i].outer_tuples);
2980 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
2981 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
2983 pfree(hashtable->batches);
2984 hashtable->batches = NULL;
2988 * Make sure this backend has up-to-date accessors for the current set of
2992 ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
2994 ParallelHashJoinState *pstate = hashtable->parallel_state;
2995 ParallelHashJoinBatch *batches;
2996 MemoryContext oldcxt;
2999 if (hashtable->batches != NULL)
3001 if (hashtable->nbatch == pstate->nbatch)
3003 ExecParallelHashCloseBatchAccessors(hashtable);
3007 * It's possible for a backend to start up very late so that the whole
3008 * join is finished and the shm state for tracking batches has already
3009 * been freed by ExecHashTableDetach(). In that case we'll just leave
3010 * hashtable->batches as NULL so that ExecParallelHashJoinNewBatch() gives
3013 if (!DsaPointerIsValid(pstate->batches))
3016 /* Use hash join memory context. */
3017 oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
3019 /* Allocate this backend's accessor array. */
3020 hashtable->nbatch = pstate->nbatch;
3021 hashtable->batches = (ParallelHashJoinBatchAccessor *)
3022 palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
3024 /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3025 batches = (ParallelHashJoinBatch *)
3026 dsa_get_address(hashtable->area, pstate->batches);
3028 /* Set up the accessor array and attach to the tuplestores. */
3029 for (i = 0; i < hashtable->nbatch; ++i)
3031 ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3032 ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3034 accessor->shared = shared;
3035 accessor->preallocated = 0;
3036 accessor->done = false;
3037 accessor->inner_tuples =
3038 sts_attach(ParallelHashJoinBatchInner(shared),
3039 ParallelWorkerNumber + 1,
3041 accessor->outer_tuples =
3042 sts_attach(ParallelHashJoinBatchOuter(shared,
3043 pstate->nparticipants),
3044 ParallelWorkerNumber + 1,
3048 MemoryContextSwitchTo(oldcxt);
3052 * Allocate an empty shared memory hash table for a given batch.
3055 ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3057 ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3058 dsa_pointer_atomic *buckets;
3059 int nbuckets = hashtable->parallel_state->nbuckets;
3063 dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3064 buckets = (dsa_pointer_atomic *)
3065 dsa_get_address(hashtable->area, batch->buckets);
3066 for (i = 0; i < nbuckets; ++i)
3067 dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3071 * If we are currently attached to a shared hash join batch, detach. If we
3072 * are last to detach, clean up.
3075 ExecHashTableDetachBatch(HashJoinTable hashtable)
3077 if (hashtable->parallel_state != NULL &&
3078 hashtable->curbatch >= 0)
3080 int curbatch = hashtable->curbatch;
3081 ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
3083 /* Make sure any temporary files are closed. */
3084 sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3085 sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3087 /* Detach from the batch we were last working on. */
3088 if (BarrierArriveAndDetach(&batch->batch_barrier))
3091 * Technically we shouldn't access the barrier because we're no
3092 * longer attached, but since there is no way it's moving after
3093 * this point it seems safe to make the following assertion.
3095 Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_DONE);
3097 /* Free shared chunks and buckets. */
3098 while (DsaPointerIsValid(batch->chunks))
3100 HashMemoryChunk chunk =
3101 dsa_get_address(hashtable->area, batch->chunks);
3102 dsa_pointer next = chunk->next.shared;
3104 dsa_free(hashtable->area, batch->chunks);
3105 batch->chunks = next;
3107 if (DsaPointerIsValid(batch->buckets))
3109 dsa_free(hashtable->area, batch->buckets);
3110 batch->buckets = InvalidDsaPointer;
3115 * Track the largest batch we've been attached to. Though each
3116 * backend might see a different subset of batches, explain.c will
3117 * scan the results from all backends to find the largest value.
3119 hashtable->spacePeak =
3120 Max(hashtable->spacePeak,
3121 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3123 /* Remember that we are not attached to a batch. */
3124 hashtable->curbatch = -1;
3129 * Detach from all shared resources. If we are last to detach, clean up.
3132 ExecHashTableDetach(HashJoinTable hashtable)
3134 if (hashtable->parallel_state)
3136 ParallelHashJoinState *pstate = hashtable->parallel_state;
3139 /* Make sure any temporary files are closed. */
3140 if (hashtable->batches)
3142 for (i = 0; i < hashtable->nbatch; ++i)
3144 sts_end_write(hashtable->batches[i].inner_tuples);
3145 sts_end_write(hashtable->batches[i].outer_tuples);
3146 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3147 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3151 /* If we're last to detach, clean up shared memory. */
3152 if (BarrierDetach(&pstate->build_barrier))
3154 if (DsaPointerIsValid(pstate->batches))
3156 dsa_free(hashtable->area, pstate->batches);
3157 pstate->batches = InvalidDsaPointer;
3161 hashtable->parallel_state = NULL;
3166 * Get the first tuple in a given bucket identified by number.
3168 static inline HashJoinTuple
3169 ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3171 HashJoinTuple tuple;
3174 Assert(hashtable->parallel_state);
3175 p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3176 tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3182 * Get the next tuple in the same bucket as 'tuple'.
3184 static inline HashJoinTuple
3185 ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3189 Assert(hashtable->parallel_state);
3190 next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3196 * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3199 ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3200 HashJoinTuple tuple,
3201 dsa_pointer tuple_shared)
3205 tuple->next.shared = dsa_pointer_atomic_read(head);
3206 if (dsa_pointer_atomic_compare_exchange(head,
3207 &tuple->next.shared,
3214 * Prepare to work on a given batch.
3217 ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3219 Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3221 hashtable->curbatch = batchno;
3222 hashtable->buckets.shared = (dsa_pointer_atomic *)
3223 dsa_get_address(hashtable->area,
3224 hashtable->batches[batchno].shared->buckets);
3225 hashtable->nbuckets = hashtable->parallel_state->nbuckets;
3226 hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
3227 hashtable->current_chunk = NULL;
3228 hashtable->current_chunk_shared = InvalidDsaPointer;
3229 hashtable->batches[batchno].at_least_one_chunk = false;
3233 * Take the next available chunk from the queue of chunks being worked on in
3234 * parallel. Return NULL if there are none left. Otherwise return a pointer
3235 * to the chunk, and set *shared to the DSA pointer to the chunk.
3237 static HashMemoryChunk
3238 ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3240 ParallelHashJoinState *pstate = hashtable->parallel_state;
3241 HashMemoryChunk chunk;
3243 LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3244 if (DsaPointerIsValid(pstate->chunk_work_queue))
3246 *shared = pstate->chunk_work_queue;
3247 chunk = (HashMemoryChunk)
3248 dsa_get_address(hashtable->area, *shared);
3249 pstate->chunk_work_queue = chunk->next.shared;
3253 LWLockRelease(&pstate->lock);
3259 * Increase the space preallocated in this backend for a given inner batch by
3260 * at least a given amount. This allows us to track whether a given batch
3261 * would fit in memory when loaded back in. Also increase the number of
3262 * batches or buckets if required.
3264 * This maintains a running estimate of how much space will be taken when we
3265 * load the batch back into memory by simulating the way chunks will be handed
3266 * out to workers. It's not perfectly accurate because the tuples will be
3267 * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3268 * it should be pretty close. It tends to overestimate by a fraction of a
3269 * chunk per worker since all workers gang up to preallocate during hashing,
3270 * but workers tend to reload batches alone if there are enough to go around,
3271 * leaving fewer partially filled chunks. This effect is bounded by
3274 * Return false if the number of batches or buckets has changed, and the
3275 * caller should reconsider which batch a given tuple now belongs in and call
3279 ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3281 ParallelHashJoinState *pstate = hashtable->parallel_state;
3282 ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3283 size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3285 Assert(batchno > 0);
3286 Assert(batchno < hashtable->nbatch);
3287 Assert(size == MAXALIGN(size));
3289 LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3291 /* Has another participant commanded us to help grow? */
3292 if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3293 pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3295 ParallelHashGrowth growth = pstate->growth;
3297 LWLockRelease(&pstate->lock);
3298 if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3299 ExecParallelHashIncreaseNumBatches(hashtable);
3300 else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3301 ExecParallelHashIncreaseNumBuckets(hashtable);
3306 if (pstate->growth != PHJ_GROWTH_DISABLED &&
3307 batch->at_least_one_chunk &&
3308 (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3309 > pstate->space_allowed))
3312 * We have determined that this batch would exceed the space budget if
3313 * loaded into memory. Command all participants to help repartition.
3315 batch->shared->space_exhausted = true;
3316 pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3317 LWLockRelease(&pstate->lock);
3322 batch->at_least_one_chunk = true;
3323 batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3324 batch->preallocated = want;
3325 LWLockRelease(&pstate->lock);