void *coordinate);
</programlisting>
Initialize the dynamic shared memory that will be required for parallel
- operation; <literal>coordinate</> points to an amount of allocated space
- equal to the return value of <function>EstimateDSMCustomScan</>.
+ operation. <literal>coordinate</> points to a shared memory area of
+ size equal to the return value of <function>EstimateDSMCustomScan</>.
This callback is optional, and need only be supplied if this custom
scan provider supports parallel execution.
</para>
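 <para>
  For illustration only (this example is not part of the patch), a provider
  that embeds <structname>CustomScanState</> in its own scan-state struct
  might lay out a small pointer-free structure in the coordinate space; all
  type, field, and function names here are hypothetical:
 </para>
<programlisting>
/* Hypothetical shared state for a parallel custom scan. */
typedef struct MyCustomScanShared
{
    slock_t     mutex;          /* protects next_block */
    BlockNumber next_block;     /* next block a participant should claim */
} MyCustomScanShared;

/* Hypothetical per-process state; CustomScanState must be the first field. */
typedef struct MyScanState
{
    CustomScanState css;
    MyCustomScanShared *shared; /* NULL unless running in parallel */
} MyScanState;

static Size
my_estimate_dsm(CustomScanState *node, ParallelContext *pcxt)
{
    /* Over-estimating is harmless; under-estimating is not. */
    return MAXALIGN(sizeof(MyCustomScanShared));
}

static void
my_initialize_dsm(CustomScanState *node, ParallelContext *pcxt,
                  void *coordinate)
{
    MyCustomScanShared *shared = (MyCustomScanShared *) coordinate;

    SpinLockInit(&shared->mutex);
    shared->next_block = 0;
    ((MyScanState *) node)->shared = shared;
}
</programlisting>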
<para>
<programlisting>
+void (*ReInitializeDSMCustomScan) (CustomScanState *node,
+ ParallelContext *pcxt,
+ void *coordinate);
+</programlisting>
+ Re-initialize the dynamic shared memory required for parallel operation
+ when the custom-scan plan node is about to be re-scanned.
+ This callback is optional, and need only be supplied if this custom
+ scan provider supports parallel execution.
+ Recommended practice is that this callback reset only shared state,
+ while the <function>ReScanCustomScan</> callback resets only local
+ state. Currently, this callback will be called
+ before <function>ReScanCustomScan</>, but it's best not to rely on
+ that ordering.
+ </para>
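 <para>
  Continuing the hypothetical sketch above, the re-initialization callback
  would touch only the shared fields, leaving per-process state to
  <function>ReScanCustomScan</>:
 </para>
<programlisting>
static void
my_reinitialize_dsm(CustomScanState *node, ParallelContext *pcxt,
                    void *coordinate)
{
    MyCustomScanShared *shared = (MyCustomScanShared *) coordinate;

    /* Reset only shared state; local state is reset in ReScanCustomScan. */
    shared->next_block = 0;
}
</programlisting>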
+
+ <para>
+<programlisting>
void (*InitializeWorkerCustomScan) (CustomScanState *node,
shm_toc *toc,
void *coordinate);
</programlisting>
- Initialize a parallel worker's custom state based on the shared state
- set up in the leader by <literal>InitializeDSMCustomScan</>.
- This callback is optional, and needs only be supplied if this
- custom path supports parallel execution.
+ Initialize a parallel worker's local state based on the shared state
+ set up by the leader during <function>InitializeDSMCustomScan</>.
+ This callback is optional, and need only be supplied if this custom
+ scan provider supports parallel execution.
</para>
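 <para>
  In the same hypothetical sketch, the worker-side callback only needs to
  remember where the leader placed the shared area:
 </para>
<programlisting>
static void
my_initialize_worker(CustomScanState *node, shm_toc *toc,
                     void *coordinate)
{
    ((MyScanState *) node)->shared = (MyCustomScanShared *) coordinate;
}
</programlisting>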
<para>
<para>
A <structname>ForeignScan</> node can, optionally, support parallel
execution. A parallel <structname>ForeignScan</> will be executed
- in multiple processes and should return each row only once across
+ in multiple processes and must return each row exactly once across
all cooperating processes. To do this, processes can coordinate through
- fixed size chunks of dynamic shared memory. This shared memory is not
- guaranteed to be mapped at the same address in every process, so pointers
- may not be used. The following callbacks are all optional in general,
- but required if parallel execution is to be supported.
+ fixed-size chunks of dynamic shared memory. This shared memory is not
+ guaranteed to be mapped at the same address in every process, so it
+ must not contain pointers. The following functions are all optional,
+ but most are required if parallel execution is to be supported.
</para>
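 <para>
  As an illustration (not part of this patch), the coordination area could
  be a small flat struct holding only plain values; the type and field
  names below are hypothetical:
 </para>
<programlisting>
/* Hypothetical shared state for a parallel foreign scan.  It deliberately
 * contains no pointers, only value fields and a spinlock. */
typedef struct MyFdwSharedState
{
    slock_t     mutex;          /* protects next_chunk */
    uint32      next_chunk;     /* next chunk of remote results to claim */
} MyFdwSharedState;

/* Hypothetical per-process private state, kept in node->fdw_state. */
typedef struct MyFdwScanState
{
    MyFdwSharedState *shared;   /* NULL unless running in parallel */
    /* ... other per-process fields ... */
} MyFdwScanState;
</programlisting>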
<para>
</para>
<para>
- If this callback is not defined, it is assumed that the scan must take
+ If this function is not defined, it is assumed that the scan must take
place within the parallel leader. Note that returning true does not mean
that the scan itself can be done in parallel, only that the scan can be
performed within a parallel worker. Therefore, it can be useful to define
Estimate the amount of dynamic shared memory that will be required
for parallel operation. This may be higher than the amount that will
actually be used, but it must not be lower. The return value is in bytes.
+ This function is optional, and can be omitted if not needed; but if it
+ is omitted, the next three functions must be omitted as well, because
+ no shared memory will be allocated for the FDW's use.
</para>
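 <para>
  A minimal sketch for the hypothetical FDW above might simply be:
 </para>
<programlisting>
static Size
myEstimateDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt)
{
    return MAXALIGN(sizeof(MyFdwSharedState));
}
</programlisting>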
<para>
void *coordinate);
</programlisting>
Initialize the dynamic shared memory that will be required for parallel
- operation; <literal>coordinate</> points to an amount of allocated space
- equal to the return value of <function>EstimateDSMForeignScan</>.
+ operation. <literal>coordinate</> points to a shared memory area of
+ size equal to the return value of <function>EstimateDSMForeignScan</>.
+ This function is optional, and can be omitted if not needed.
+ </para>
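 <para>
  Continuing the hypothetical sketch, and assuming the FDW's
  <function>BeginForeignScan</> has already stored a
  <literal>MyFdwScanState</> in <literal>node-&gt;fdw_state</>, the leader
  would initialize the shared area and remember where it is:
 </para>
<programlisting>
static void
myInitializeDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt,
                           void *coordinate)
{
    MyFdwSharedState *shared = (MyFdwSharedState *) coordinate;

    SpinLockInit(&shared->mutex);
    shared->next_chunk = 0;
    ((MyFdwScanState *) node->fdw_state)->shared = shared;
}
</programlisting>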
+
+ <para>
+<programlisting>
+void
+ReInitializeDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt,
+ void *coordinate);
+</programlisting>
+ Re-initialize the dynamic shared memory required for parallel operation
+ when the foreign-scan plan node is about to be re-scanned.
+ This function is optional, and can be omitted if not needed.
+ Recommended practice is that this function reset only shared state,
+ while the <function>ReScanForeignScan</> function resets only local
+ state. Currently, this function will be called
+ before <function>ReScanForeignScan</>, but it's best not to rely on
+ that ordering.
</para>
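 <para>
  In the same sketch, re-initialization resets only the shared cursor,
  leaving per-process state to <function>ReScanForeignScan</>:
 </para>
<programlisting>
static void
myReInitializeDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt,
                             void *coordinate)
{
    MyFdwSharedState *shared = (MyFdwSharedState *) coordinate;

    shared->next_chunk = 0;
}
</programlisting>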
<para>
InitializeWorkerForeignScan(ForeignScanState *node, shm_toc *toc,
void *coordinate);
</programlisting>
- Initialize a parallel worker's custom state based on the shared state
- set up in the leader by <literal>InitializeDSMForeignScan</>.
- This callback is optional, and needs only be supplied if this
- custom path supports parallel execution.
+ Initialize a parallel worker's local state based on the shared state
+ set up by the leader during <function>InitializeDSMForeignScan</>.
+ This function is optional, and can be omitted if not needed.
</para>
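 <para>
  And the hypothetical worker-side callback would only record the location
  of the shared area for use while fetching rows, again assuming the FDW
  keeps its private per-scan state in <literal>node-&gt;fdw_state</>:
 </para>
<programlisting>
static void
myInitializeWorkerForeignScan(ForeignScanState *node, shm_toc *toc,
                              void *coordinate)
{
    ((MyFdwScanState *) node->fdw_state)->shared =
        (MyFdwSharedState *) coordinate;
}
</programlisting>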
<para>
* reinitialize scan descriptor
*/
initscan(scan, key, true);
-
- /*
- * reset parallel scan, if present
- */
- if (scan->rs_parallel != NULL)
- {
- ParallelHeapScanDesc parallel_scan;
-
- /*
- * Caller is responsible for making sure that all workers have
- * finished the scan before calling this, so it really shouldn't be
- * necessary to acquire the mutex at all. We acquire it anyway, just
- * to be tidy.
- */
- parallel_scan = scan->rs_parallel;
- SpinLockAcquire(&parallel_scan->phs_mutex);
- parallel_scan->phs_cblock = parallel_scan->phs_startblock;
- SpinLockRelease(&parallel_scan->phs_mutex);
- }
}
/* ----------------
SerializeSnapshot(snapshot, target->phs_snapshot_data);
}
+/* ----------------
+ * heap_parallelscan_reinitialize - reset a parallel scan
+ *
+ * Call this in the leader process. Caller is responsible for
+ * making sure that all workers have finished the scan beforehand.
+ * ----------------
+ */
+void
+heap_parallelscan_reinitialize(ParallelHeapScanDesc parallel_scan)
+{
+ /*
+ * It shouldn't be necessary to acquire the mutex here, but we do it
+ * anyway, just to be tidy.
+ */
+ SpinLockAcquire(&parallel_scan->phs_mutex);
+ parallel_scan->phs_cblock = parallel_scan->phs_startblock;
+ SpinLockRelease(&parallel_scan->phs_mutex);
+}
+
/* ----------------
* heap_beginscan_parallel - join a parallel scan
*
ExecParallelInitializeDSMContext *d);
static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
bool reinitialize);
+static bool ExecParallelReInitializeDSM(PlanState *planstate,
+ ParallelContext *pcxt);
static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation);
return responseq;
}
-/*
- * Re-initialize the parallel executor info such that it can be reused by
- * workers.
- */
-void
-ExecParallelReinitialize(ParallelExecutorInfo *pei)
-{
- ReinitializeParallelDSM(pei->pcxt);
- pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
- pei->finished = false;
-}
-
/*
* Sets up the required infrastructure for backend workers to perform
* execution and return results to the main backend.
ExecParallelInitializeDSM(planstate, &d);
/*
- * Make sure that the world hasn't shifted under our feat. This could
+ * Make sure that the world hasn't shifted under our feet. This could
* probably just be an Assert(), but let's be conservative for now.
*/
if (e.nnodes != d.nnodes)
return pei;
}
+/*
+ * Re-initialize the parallel executor shared memory state before launching
+ * a fresh batch of workers.
+ */
+void
+ExecParallelReinitialize(PlanState *planstate,
+ ParallelExecutorInfo *pei)
+{
+ /* Old workers must already be shut down */
+ Assert(pei->finished);
+
+ ReinitializeParallelDSM(pei->pcxt);
+ pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
+ pei->finished = false;
+
+ /* Traverse plan tree and let each child node reset associated state. */
+ ExecParallelReInitializeDSM(planstate, pei->pcxt);
+}
+
+/*
+ * Traverse plan tree to reinitialize per-node dynamic shared memory state
+ */
+static bool
+ExecParallelReInitializeDSM(PlanState *planstate,
+ ParallelContext *pcxt)
+{
+ if (planstate == NULL)
+ return false;
+
+ /*
+ * Call reinitializers for DSM-using plan nodes.
+ */
+ if (planstate->plan->parallel_aware)
+ {
+ switch (nodeTag(planstate))
+ {
+ case T_SeqScanState:
+ ExecSeqScanReInitializeDSM((SeqScanState *) planstate,
+ pcxt);
+ break;
+ case T_IndexScanState:
+ ExecIndexScanReInitializeDSM((IndexScanState *) planstate,
+ pcxt);
+ break;
+ case T_IndexOnlyScanState:
+ ExecIndexOnlyScanReInitializeDSM((IndexOnlyScanState *) planstate,
+ pcxt);
+ break;
+ case T_ForeignScanState:
+ ExecForeignScanReInitializeDSM((ForeignScanState *) planstate,
+ pcxt);
+ break;
+ case T_CustomScanState:
+ ExecCustomScanReInitializeDSM((CustomScanState *) planstate,
+ pcxt);
+ break;
+ case T_BitmapHeapScanState:
+ ExecBitmapHeapReInitializeDSM((BitmapHeapScanState *) planstate,
+ pcxt);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return planstate_tree_walker(planstate, ExecParallelReInitializeDSM, pcxt);
+}
+
/*
* Copy instrumentation information about this node and its descendants from
* dynamic shared memory.
node->shared_tbmiterator = NULL;
node->shared_prefetch_iterator = NULL;
- /* Reset parallel bitmap state, if present */
- if (node->pstate)
- {
- dsa_area *dsa = node->ss.ps.state->es_query_dsa;
-
- node->pstate->state = BM_INITIAL;
-
- if (DsaPointerIsValid(node->pstate->tbmiterator))
- tbm_free_shared_area(dsa, node->pstate->tbmiterator);
-
- if (DsaPointerIsValid(node->pstate->prefetch_iterator))
- tbm_free_shared_area(dsa, node->pstate->prefetch_iterator);
-
- node->pstate->tbmiterator = InvalidDsaPointer;
- node->pstate->prefetch_iterator = InvalidDsaPointer;
- }
-
ExecScanReScan(&node->ss);
/*
node->pstate = pstate;
}
+/* ----------------------------------------------------------------
+ * ExecBitmapHeapReInitializeDSM
+ *
+ * Reset shared state before beginning a fresh scan.
+ * ----------------------------------------------------------------
+ */
+void
+ExecBitmapHeapReInitializeDSM(BitmapHeapScanState *node,
+ ParallelContext *pcxt)
+{
+ ParallelBitmapHeapState *pstate = node->pstate;
+ dsa_area *dsa = node->ss.ps.state->es_query_dsa;
+
+ pstate->state = BM_INITIAL;
+
+ if (DsaPointerIsValid(pstate->tbmiterator))
+ tbm_free_shared_area(dsa, pstate->tbmiterator);
+
+ if (DsaPointerIsValid(pstate->prefetch_iterator))
+ tbm_free_shared_area(dsa, pstate->prefetch_iterator);
+
+ pstate->tbmiterator = InvalidDsaPointer;
+ pstate->prefetch_iterator = InvalidDsaPointer;
+}
+
/* ----------------------------------------------------------------
* ExecBitmapHeapInitializeWorker
*
}
}
+void
+ExecCustomScanReInitializeDSM(CustomScanState *node, ParallelContext *pcxt)
+{
+ const CustomExecMethods *methods = node->methods;
+
+ if (methods->ReInitializeDSMCustomScan)
+ {
+ int plan_node_id = node->ss.ps.plan->plan_node_id;
+ void *coordinate;
+
+ coordinate = shm_toc_lookup(pcxt->toc, plan_node_id, false);
+ methods->ReInitializeDSMCustomScan(node, pcxt, coordinate);
+ }
+}
+
void
ExecCustomScanInitializeWorker(CustomScanState *node, shm_toc *toc)
{
}
/* ----------------------------------------------------------------
- * ExecForeignScanInitializeDSM
+ * ExecForeignScanReInitializeDSM
+ *
+ * Reset shared state before beginning a fresh scan.
+ * ----------------------------------------------------------------
+ */
+void
+ExecForeignScanReInitializeDSM(ForeignScanState *node, ParallelContext *pcxt)
+{
+ FdwRoutine *fdwroutine = node->fdwroutine;
+
+ if (fdwroutine->ReInitializeDSMForeignScan)
+ {
+ int plan_node_id = node->ss.ps.plan->plan_node_id;
+ void *coordinate;
+
+ coordinate = shm_toc_lookup(pcxt->toc, plan_node_id, false);
+ fdwroutine->ReInitializeDSMForeignScan(node, pcxt, coordinate);
+ }
+}
+
+/* ----------------------------------------------------------------
+ * ExecForeignScanInitializeWorker
*
* Initialization according to the parallel coordination information
* ----------------------------------------------------------------
{
ParallelContext *pcxt;
- /* Initialize the workers required to execute Gather node. */
+ /* Initialize, or re-initialize, shared state needed by workers. */
if (!node->pei)
node->pei = ExecInitParallelPlan(node->ps.lefttree,
estate,
gather->num_workers);
+ else
+ ExecParallelReinitialize(node->ps.lefttree,
+ node->pei);
/*
* Register backend workers. We might not get as many as we
/* ----------------------------------------------------------------
* ExecReScanGather
*
- * Re-initialize the workers and rescans a relation via them.
+ * Prepare to re-scan the result of a Gather.
* ----------------------------------------------------------------
*/
void
Gather *gather = (Gather *) node->ps.plan;
PlanState *outerPlan = outerPlanState(node);
- /*
- * Re-initialize the parallel workers to perform rescan of relation. We
- * want to gracefully shutdown all the workers so that they should be able
- * to propagate any error or other information to master backend before
- * dying. Parallel context will be reused for rescan.
- */
+ /* Make sure any existing workers are gracefully shut down */
ExecShutdownGatherWorkers(node);
+ /* Mark node so that shared state will be rebuilt at next call */
node->initialized = false;
- if (node->pei)
- ExecParallelReinitialize(node->pei);
-
/*
* Set child node's chgParam to tell it that the next scan might deliver a
* different set of rows within the leader process. (The overall rowset
outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
gather->rescan_param);
-
/*
- * if chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode.
+ * If chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode. Note: because this does nothing if we have a
+ * rescan_param, it's currently guaranteed that parallel-aware child nodes
+ * will not see a ReScan call until after they get a ReInitializeDSM call.
+ * That ordering might not be something to rely on, though. A good rule
+ * of thumb is that ReInitializeDSM should reset only shared state, ReScan
+ * should reset only local state, and anything that depends on both of
+ * those steps being finished must wait until the first ExecProcNode call.
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
{
ParallelContext *pcxt;
- /* Initialize data structures for workers. */
+ /* Initialize, or re-initialize, shared state needed by workers. */
if (!node->pei)
node->pei = ExecInitParallelPlan(node->ps.lefttree,
estate,
gm->num_workers);
+ else
+ ExecParallelReinitialize(node->ps.lefttree,
+ node->pei);
/* Try to launch workers. */
pcxt = node->pei->pcxt;
/* ----------------------------------------------------------------
* ExecReScanGatherMerge
*
- * Re-initialize the workers and rescans a relation via them.
+ * Prepare to re-scan the result of a GatherMerge.
* ----------------------------------------------------------------
*/
void
GatherMerge *gm = (GatherMerge *) node->ps.plan;
PlanState *outerPlan = outerPlanState(node);
- /*
- * Re-initialize the parallel workers to perform rescan of relation. We
- * want to gracefully shutdown all the workers so that they should be able
- * to propagate any error or other information to master backend before
- * dying. Parallel context will be reused for rescan.
- */
+ /* Make sure any existing workers are gracefully shut down */
ExecShutdownGatherMergeWorkers(node);
+ /* Mark node so that shared state will be rebuilt at next call */
node->initialized = false;
node->gm_initialized = false;
- if (node->pei)
- ExecParallelReinitialize(node->pei);
-
/*
* Set child node's chgParam to tell it that the next scan might deliver a
* different set of rows within the leader process. (The overall rowset
outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
gm->rescan_param);
-
/*
- * if chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode.
+ * If chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode. Note: because this does nothing if we have a
+ * rescan_param, it's currently guaranteed that parallel-aware child nodes
+ * will not see a ReScan call until after they get a ReInitializeDSM call.
+ * That ordering might not be something to rely on, though. A good rule
+ * of thumb is that ReInitializeDSM should reset only shared state, ReScan
+ * should reset only local state, and anything that depends on both of
+ * those steps being finished must wait until the first ExecProcNode call.
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
* parallel index-only scan
* ExecIndexOnlyScanInitializeDSM initialize DSM for parallel
* index-only scan
+ * ExecIndexOnlyScanReInitializeDSM reinitialize DSM for fresh scan
* ExecIndexOnlyScanInitializeWorker attach to DSM info in parallel worker
*/
#include "postgres.h"
void
ExecReScanIndexOnlyScan(IndexOnlyScanState *node)
{
- bool reset_parallel_scan = true;
-
- /*
- * If we are here to just update the scan keys, then don't reset parallel
- * scan. For detailed reason behind this look in the comments for
- * ExecReScanIndexScan.
- */
- if (node->ioss_NumRuntimeKeys != 0 && !node->ioss_RuntimeKeysReady)
- reset_parallel_scan = false;
-
/*
* If we are doing runtime key calculations (ie, any of the index key
* values weren't simple Consts), compute the new key values. But first,
/* reset index scan */
if (node->ioss_ScanDesc)
- {
-
index_rescan(node->ioss_ScanDesc,
node->ioss_ScanKeys, node->ioss_NumScanKeys,
node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
- if (reset_parallel_scan && node->ioss_ScanDesc->parallel_scan)
- index_parallelrescan(node->ioss_ScanDesc);
- }
ExecScanReScan(&node->ss);
}
node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
}
+/* ----------------------------------------------------------------
+ * ExecIndexOnlyScanReInitializeDSM
+ *
+ * Reset shared state before beginning a fresh scan.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexOnlyScanReInitializeDSM(IndexOnlyScanState *node,
+ ParallelContext *pcxt)
+{
+ index_parallelrescan(node->ioss_ScanDesc);
+}
+
/* ----------------------------------------------------------------
* ExecIndexOnlyScanInitializeWorker
*
* ExecIndexRestrPos restores scan position.
* ExecIndexScanEstimate estimates DSM space needed for parallel index scan
* ExecIndexScanInitializeDSM initialize DSM for parallel indexscan
+ * ExecIndexScanReInitializeDSM reinitialize DSM for fresh scan
* ExecIndexScanInitializeWorker attach to DSM info in parallel worker
*/
#include "postgres.h"
void
ExecReScanIndexScan(IndexScanState *node)
{
- bool reset_parallel_scan = true;
-
- /*
- * If we are here to just update the scan keys, then don't reset parallel
- * scan. We don't want each of the participating process in the parallel
- * scan to update the shared parallel scan state at the start of the scan.
- * It is quite possible that one of the participants has already begun
- * scanning the index when another has yet to start it.
- */
- if (node->iss_NumRuntimeKeys != 0 && !node->iss_RuntimeKeysReady)
- reset_parallel_scan = false;
-
/*
* If we are doing runtime key calculations (ie, any of the index key
* values weren't simple Consts), compute the new key values. But first,
reorderqueue_pop(node);
}
- /*
- * Reset (parallel) index scan. For parallel-aware nodes, the scan
- * descriptor is initialized during actual execution of node and we can
- * reach here before that (ex. during execution of nest loop join). So,
- * avoid updating the scan descriptor at that time.
- */
+ /* reset index scan */
if (node->iss_ScanDesc)
- {
index_rescan(node->iss_ScanDesc,
node->iss_ScanKeys, node->iss_NumScanKeys,
node->iss_OrderByKeys, node->iss_NumOrderByKeys);
-
- if (reset_parallel_scan && node->iss_ScanDesc->parallel_scan)
- index_parallelrescan(node->iss_ScanDesc);
- }
node->iss_ReachedEnd = false;
ExecScanReScan(&node->ss);
node->iss_OrderByKeys, node->iss_NumOrderByKeys);
}
+/* ----------------------------------------------------------------
+ * ExecIndexScanReInitializeDSM
+ *
+ * Reset shared state before beginning a fresh scan.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexScanReInitializeDSM(IndexScanState *node,
+ ParallelContext *pcxt)
+{
+ index_parallelrescan(node->iss_ScanDesc);
+}
+
/* ----------------------------------------------------------------
* ExecIndexScanInitializeWorker
*
*
* ExecSeqScanEstimate estimates DSM space needed for parallel scan
* ExecSeqScanInitializeDSM initialize DSM for parallel scan
+ * ExecSeqScanReInitializeDSM reinitialize DSM for fresh parallel scan
* ExecSeqScanInitializeWorker attach to DSM info in parallel worker
*/
#include "postgres.h"
heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);
}
+/* ----------------------------------------------------------------
+ * ExecSeqScanReInitializeDSM
+ *
+ * Reset shared state before beginning a fresh scan.
+ * ----------------------------------------------------------------
+ */
+void
+ExecSeqScanReInitializeDSM(SeqScanState *node,
+ ParallelContext *pcxt)
+{
+ HeapScanDesc scan = node->ss.ss_currentScanDesc;
+
+ heap_parallelscan_reinitialize(scan->rs_parallel);
+}
+
/* ----------------------------------------------------------------
* ExecSeqScanInitializeWorker
*
extern Size heap_parallelscan_estimate(Snapshot snapshot);
extern void heap_parallelscan_initialize(ParallelHeapScanDesc target,
Relation relation, Snapshot snapshot);
+extern void heap_parallelscan_reinitialize(ParallelHeapScanDesc parallel_scan);
extern HeapScanDesc heap_beginscan_parallel(Relation, ParallelHeapScanDesc);
extern bool heap_fetch(Relation relation, Snapshot snapshot,
EState *estate, int nworkers);
extern void ExecParallelFinish(ParallelExecutorInfo *pei);
extern void ExecParallelCleanup(ParallelExecutorInfo *pei);
-extern void ExecParallelReinitialize(ParallelExecutorInfo *pei);
+extern void ExecParallelReinitialize(PlanState *planstate,
+ ParallelExecutorInfo *pei);
extern void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
ParallelContext *pcxt);
extern void ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node,
ParallelContext *pcxt);
+extern void ExecBitmapHeapReInitializeDSM(BitmapHeapScanState *node,
+ ParallelContext *pcxt);
extern void ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node,
shm_toc *toc);
ParallelContext *pcxt);
extern void ExecCustomScanInitializeDSM(CustomScanState *node,
ParallelContext *pcxt);
+extern void ExecCustomScanReInitializeDSM(CustomScanState *node,
+ ParallelContext *pcxt);
extern void ExecCustomScanInitializeWorker(CustomScanState *node,
shm_toc *toc);
extern void ExecShutdownCustomScan(CustomScanState *node);
ParallelContext *pcxt);
extern void ExecForeignScanInitializeDSM(ForeignScanState *node,
ParallelContext *pcxt);
+extern void ExecForeignScanReInitializeDSM(ForeignScanState *node,
+ ParallelContext *pcxt);
extern void ExecForeignScanInitializeWorker(ForeignScanState *node,
shm_toc *toc);
extern void ExecShutdownForeignScan(ForeignScanState *node);
ParallelContext *pcxt);
extern void ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node,
ParallelContext *pcxt);
+extern void ExecIndexOnlyScanReInitializeDSM(IndexOnlyScanState *node,
+ ParallelContext *pcxt);
extern void ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node,
shm_toc *toc);
extern void ExecReScanIndexScan(IndexScanState *node);
extern void ExecIndexScanEstimate(IndexScanState *node, ParallelContext *pcxt);
extern void ExecIndexScanInitializeDSM(IndexScanState *node, ParallelContext *pcxt);
+extern void ExecIndexScanReInitializeDSM(IndexScanState *node, ParallelContext *pcxt);
extern void ExecIndexScanInitializeWorker(IndexScanState *node, shm_toc *toc);
/*
/* parallel scan support */
extern void ExecSeqScanEstimate(SeqScanState *node, ParallelContext *pcxt);
extern void ExecSeqScanInitializeDSM(SeqScanState *node, ParallelContext *pcxt);
+extern void ExecSeqScanReInitializeDSM(SeqScanState *node, ParallelContext *pcxt);
extern void ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc);
#endif /* NODESEQSCAN_H */
typedef void (*InitializeDSMForeignScan_function) (ForeignScanState *node,
ParallelContext *pcxt,
void *coordinate);
+typedef void (*ReInitializeDSMForeignScan_function) (ForeignScanState *node,
+ ParallelContext *pcxt,
+ void *coordinate);
typedef void (*InitializeWorkerForeignScan_function) (ForeignScanState *node,
shm_toc *toc,
void *coordinate);
IsForeignScanParallelSafe_function IsForeignScanParallelSafe;
EstimateDSMForeignScan_function EstimateDSMForeignScan;
InitializeDSMForeignScan_function InitializeDSMForeignScan;
+ ReInitializeDSMForeignScan_function ReInitializeDSMForeignScan;
InitializeWorkerForeignScan_function InitializeWorkerForeignScan;
ShutdownForeignScan_function ShutdownForeignScan;
} FdwRoutine;
void (*InitializeDSMCustomScan) (CustomScanState *node,
ParallelContext *pcxt,
void *coordinate);
+ void (*ReInitializeDSMCustomScan) (CustomScanState *node,
+ ParallelContext *pcxt,
+ void *coordinate);
void (*InitializeWorkerCustomScan) (CustomScanState *node,
shm_toc *toc,
void *coordinate);