void
blcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
- Selectivity *indexSelectivity, double *indexCorrelation)
+ Selectivity *indexSelectivity, double *indexCorrelation,
+ double *indexPages)
{
IndexOptInfo *index = path->indexinfo;
List *qinfos;
*indexTotalCost = costs.indexTotalCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
+ *indexPages = costs.numIndexPages;
}
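For comparison, an out-of-tree access method would adapt to the widened amcostestimate signature in the same way. A minimal sketch, assuming the AM delegates to genericcostestimate (which computes GenericCosts.numIndexPages); mycostestimate and its trivial body are hypothetical, not part of this patch:

#include "postgres.h"

#include "nodes/relation.h"
#include "utils/selfuncs.h"

void
mycostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
			   Cost *indexStartupCost, Cost *indexTotalCost,
			   Selectivity *indexSelectivity, double *indexCorrelation,
			   double *indexPages)
{
	List	   *qinfos;
	GenericCosts costs;

	/* deconstruct the index quals, as the in-core estimators do */
	qinfos = deconstruct_indexquals(path);

	MemSet(&costs, 0, sizeof(costs));
	genericcostestimate(root, path, loop_count, qinfos, &costs);

	*indexStartupCost = costs.indexStartupCost;
	*indexTotalCost = costs.indexTotalCost;
	*indexSelectivity = costs.indexSelectivity;
	*indexCorrelation = costs.indexCorrelation;
	/* the new output parameter: lets cost_index size a parallel scan */
	*indexPages = costs.numIndexPages;
}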
extern void blcostestimate(PlannerInfo *root, IndexPath *path,
double loop_count, Cost *indexStartupCost,
Cost *indexTotalCost, Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation, double *indexPages);
#endif
amroutine->amstorage = false;
amroutine->amclusterable = false;
amroutine->ampredlocks = false;
+ amroutine->amcanparallel = false;
amroutine->amkeytype = InvalidOid;
amroutine->ambuild = blbuild;
bool amclusterable;
/* does AM handle predicate locks? */
bool ampredlocks;
+ /* does AM support parallel scan? */
+ bool amcanparallel;
/* type of data stored in index, or InvalidOid if variable */
Oid amkeytype;
amroutine->amstorage = true;
amroutine->amclusterable = false;
amroutine->ampredlocks = false;
+ amroutine->amcanparallel = false;
amroutine->amkeytype = InvalidOid;
amroutine->ambuild = brinbuild;
amroutine->amstorage = true;
amroutine->amclusterable = false;
amroutine->ampredlocks = false;
+ amroutine->amcanparallel = false;
amroutine->amkeytype = InvalidOid;
amroutine->ambuild = ginbuild;
amroutine->amstorage = true;
amroutine->amclusterable = true;
amroutine->ampredlocks = false;
+ amroutine->amcanparallel = false;
amroutine->amkeytype = InvalidOid;
amroutine->ambuild = gistbuild;
amroutine->amstorage = false;
amroutine->amclusterable = false;
amroutine->ampredlocks = false;
+ amroutine->amcanparallel = false;
amroutine->amkeytype = INT4OID;
amroutine->ambuild = hashbuild;
amroutine->amstorage = false;
amroutine->amclusterable = true;
amroutine->ampredlocks = true;
+ amroutine->amcanparallel = true;
amroutine->amkeytype = InvalidOid;
amroutine->ambuild = btbuild;
amroutine->amstorage = false;
amroutine->amclusterable = false;
amroutine->ampredlocks = false;
+ amroutine->amcanparallel = false;
amroutine->amkeytype = InvalidOid;
amroutine->ambuild = spgbuild;
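Of the in-core AMs above, only btree advertises amcanparallel = true; the flag is a promise that the AM also provides the parallel-scan callbacks added by the underlying infrastructure. For reference, a sketch of those callback types as they appear in amapi.h (an assumption based on the pre-existing parallel index-scan infrastructure, not introduced by this patch):

/* estimate shared-memory space needed for the AM's parallel scan state */
typedef Size (*amestimateparallelscan_function) (void);

/* initialize the AM-specific shared state */
typedef void (*aminitparallelscan_function) (void *target);

/* reset the shared state for a rescan */
typedef void (*amparallelrescan_function) (IndexScanDesc scan);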
#include "executor/nodeCustom.h"
#include "executor/nodeForeignscan.h"
#include "executor/nodeSeqscan.h"
+#include "executor/nodeIndexscan.h"
#include "executor/tqueue.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/planmain.h"
ExecSeqScanEstimate((SeqScanState *) planstate,
e->pcxt);
break;
+ case T_IndexScanState:
+ ExecIndexScanEstimate((IndexScanState *) planstate,
+ e->pcxt);
+ break;
case T_ForeignScanState:
ExecForeignScanEstimate((ForeignScanState *) planstate,
e->pcxt);
ExecSeqScanInitializeDSM((SeqScanState *) planstate,
d->pcxt);
break;
+ case T_IndexScanState:
+ ExecIndexScanInitializeDSM((IndexScanState *) planstate,
+ d->pcxt);
+ break;
case T_ForeignScanState:
ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
d->pcxt);
case T_SeqScanState:
ExecSeqScanInitializeWorker((SeqScanState *) planstate, toc);
break;
+ case T_IndexScanState:
+ ExecIndexScanInitializeWorker((IndexScanState *) planstate, toc);
+ break;
case T_ForeignScanState:
ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
toc);
* ExecEndIndexScan releases all storage.
* ExecIndexMarkPos marks scan position.
* ExecIndexRestrPos restores scan position.
+ * ExecIndexScanEstimate estimates DSM space needed for parallel index scan
+ * ExecIndexScanInitializeDSM initializes DSM for parallel indexscan
+ * ExecIndexScanInitializeWorker attaches to DSM info in parallel worker
*/
#include "postgres.h"
void
ExecReScanIndexScan(IndexScanState *node)
{
+ bool reset_parallel_scan = true;
+
+ /*
+ * If we are here just to update the scan keys, then don't reset the
+ * parallel scan.  We don't want each of the participating processes in
+ * the parallel scan to update the shared parallel scan state at the
+ * start of the scan.  It is quite possible that one of the participants
+ * has already begun scanning the index when another has yet to start it.
+ */
+ if (node->iss_NumRuntimeKeys != 0 && !node->iss_RuntimeKeysReady)
+ reset_parallel_scan = false;
+
/*
* If we are doing runtime key calculations (ie, any of the index key
* values weren't simple Consts), compute the new key values. But first,
reorderqueue_pop(node);
}
- /* reset index scan */
- index_rescan(node->iss_ScanDesc,
- node->iss_ScanKeys, node->iss_NumScanKeys,
- node->iss_OrderByKeys, node->iss_NumOrderByKeys);
+ /*
+ * Reset (parallel) index scan.  For parallel-aware nodes, the scan
+ * descriptor is initialized during actual execution of the node, and we
+ * can reach here before that (e.g., during execution of a nestloop
+ * join).  In that case, avoid updating the scan descriptor.
+ */
+ if (node->iss_ScanDesc)
+ {
+ index_rescan(node->iss_ScanDesc,
+ node->iss_ScanKeys, node->iss_NumScanKeys,
+ node->iss_OrderByKeys, node->iss_NumOrderByKeys);
+
+ if (reset_parallel_scan && node->iss_ScanDesc->parallel_scan)
+ index_parallelrescan(node->iss_ScanDesc);
+ }
node->iss_ReachedEnd = false;
ExecScanReScan(&node->ss);
}
/*
- * Initialize scan descriptor.
+ * For a parallel-aware node, we initialize the scan descriptor after
+ * initializing the shared memory for parallel execution.
*/
- indexstate->iss_ScanDesc = index_beginscan(currentRelation,
- indexstate->iss_RelationDesc,
- estate->es_snapshot,
- indexstate->iss_NumScanKeys,
+ if (!node->scan.plan.parallel_aware)
+ {
+ /*
+ * Initialize scan descriptor.
+ */
+ indexstate->iss_ScanDesc = index_beginscan(currentRelation,
+ indexstate->iss_RelationDesc,
+ estate->es_snapshot,
+ indexstate->iss_NumScanKeys,
indexstate->iss_NumOrderByKeys);
- /*
- * If no run-time keys to calculate, go ahead and pass the scankeys to the
- * index AM.
- */
- if (indexstate->iss_NumRuntimeKeys == 0)
- index_rescan(indexstate->iss_ScanDesc,
- indexstate->iss_ScanKeys, indexstate->iss_NumScanKeys,
+ /*
+ * If no run-time keys to calculate, go ahead and pass the scankeys to
+ * the index AM.
+ */
+ if (indexstate->iss_NumRuntimeKeys == 0)
+ index_rescan(indexstate->iss_ScanDesc,
+ indexstate->iss_ScanKeys, indexstate->iss_NumScanKeys,
indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys);
+ }
/*
* all done.
else if (n_array_keys != 0)
elog(ERROR, "ScalarArrayOpExpr index qual found where not allowed");
}
+
+/* ----------------------------------------------------------------
+ * Parallel Scan Support
+ * ----------------------------------------------------------------
+ */
+
+/* ----------------------------------------------------------------
+ * ExecIndexScanEstimate
+ *
+ * estimates the space required to serialize the indexscan node.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexScanEstimate(IndexScanState *node,
+ ParallelContext *pcxt)
+{
+ EState *estate = node->ss.ps.state;
+
+ node->iss_PscanLen = index_parallelscan_estimate(node->iss_RelationDesc,
+ estate->es_snapshot);
+ shm_toc_estimate_chunk(&pcxt->estimator, node->iss_PscanLen);
+ shm_toc_estimate_keys(&pcxt->estimator, 1);
+}
+
+/* ----------------------------------------------------------------
+ * ExecIndexScanInitializeDSM
+ *
+ * Set up a parallel index scan descriptor.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexScanInitializeDSM(IndexScanState *node,
+ ParallelContext *pcxt)
+{
+ EState *estate = node->ss.ps.state;
+ ParallelIndexScanDesc piscan;
+
+ piscan = shm_toc_allocate(pcxt->toc, node->iss_PscanLen);
+ index_parallelscan_initialize(node->ss.ss_currentRelation,
+ node->iss_RelationDesc,
+ estate->es_snapshot,
+ piscan);
+ shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, piscan);
+ node->iss_ScanDesc =
+ index_beginscan_parallel(node->ss.ss_currentRelation,
+ node->iss_RelationDesc,
+ node->iss_NumScanKeys,
+ node->iss_NumOrderByKeys,
+ piscan);
+
+ /*
+ * If no run-time keys to calculate, go ahead and pass the scankeys to the
+ * index AM.
+ */
+ if (node->iss_NumRuntimeKeys == 0)
+ index_rescan(node->iss_ScanDesc,
+ node->iss_ScanKeys, node->iss_NumScanKeys,
+ node->iss_OrderByKeys, node->iss_NumOrderByKeys);
+}
+
+/* ----------------------------------------------------------------
+ * ExecIndexScanInitializeWorker
+ *
+ * Copy relevant information from TOC into planstate.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexScanInitializeWorker(IndexScanState *node, shm_toc *toc)
+{
+ ParallelIndexScanDesc piscan;
+
+ piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ node->iss_ScanDesc =
+ index_beginscan_parallel(node->ss.ss_currentRelation,
+ node->iss_RelationDesc,
+ node->iss_NumScanKeys,
+ node->iss_NumOrderByKeys,
+ piscan);
+
+ /*
+ * If no run-time keys to calculate, go ahead and pass the scankeys to the
+ * index AM.
+ */
+ if (node->iss_NumRuntimeKeys == 0)
+ index_rescan(node->iss_ScanDesc,
+ node->iss_ScanKeys, node->iss_NumScanKeys,
+ node->iss_OrderByKeys, node->iss_NumOrderByKeys);
+}
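index_parallelscan_estimate, index_parallelscan_initialize, and index_beginscan_parallel, used above, dispatch to the access method's parallel-scan callbacks. A minimal sketch of such callbacks for a hypothetical AM that hands out one page at a time, loosely mirroring the btree approach; every "my"-prefixed name here is illustrative, not part of this patch:

#include "postgres.h"

#include "access/relscan.h"
#include "storage/spin.h"

/* AM-private state placed in the DSM, after ParallelIndexScanDescData */
typedef struct MyParallelScanDescData
{
	slock_t		mps_mutex;		/* protects the field below */
	BlockNumber mps_nextPage;	/* next page to hand to a backend */
} MyParallelScanDescData;

typedef MyParallelScanDescData *MyParallelScanDesc;

Size
myestimateparallelscan(void)
{
	return sizeof(MyParallelScanDescData);
}

void
myinitparallelscan(void *target)
{
	MyParallelScanDesc mps = (MyParallelScanDesc) target;

	SpinLockInit(&mps->mps_mutex);
	mps->mps_nextPage = InvalidBlockNumber;
}

void
myparallelrescan(IndexScanDesc scan)
{
	ParallelIndexScanDesc parallel_scan = scan->parallel_scan;
	MyParallelScanDesc mps;

	mps = (MyParallelScanDesc) OffsetToPointer((void *) parallel_scan,
											   parallel_scan->ps_offset);

	/* reset shared state so the rescan starts from scratch */
	SpinLockAcquire(&mps->mps_mutex);
	mps->mps_nextPage = InvalidBlockNumber;
	SpinLockRelease(&mps->mps_mutex);
}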
static void recurse_push_qual(Node *setOp, Query *topquery,
RangeTblEntry *rte, Index rti, Node *qual);
static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel);
-static int compute_parallel_worker(RelOptInfo *rel, BlockNumber heap_pages,
- BlockNumber index_pages);
/*
* "heap_pages" is the number of pages from the table that we expect to scan.
* "index_pages" is the number of pages from the index that we expect to scan.
*/
-static int
+int
compute_parallel_worker(RelOptInfo *rel, BlockNumber heap_pages,
BlockNumber index_pages)
{
* we have to fetch from the table, so they don't reduce the scan cost.
*/
void
-cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
+cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
+ bool partial_path)
{
IndexOptInfo *index = path->indexinfo;
RelOptInfo *baserel = index->rel;
List *qpquals;
Cost startup_cost = 0;
Cost run_cost = 0;
+ Cost cpu_run_cost = 0;
Cost indexStartupCost;
Cost indexTotalCost;
Selectivity indexSelectivity;
Cost cpu_per_tuple;
double tuples_fetched;
double pages_fetched;
+ double rand_heap_pages;
+ double index_pages;
/* Should only be applied to base relations */
Assert(IsA(baserel, RelOptInfo) &&
amcostestimate = (amcostestimate_function) index->amcostestimate;
amcostestimate(root, path, loop_count,
&indexStartupCost, &indexTotalCost,
- &indexSelectivity, &indexCorrelation);
+ &indexSelectivity, &indexCorrelation,
+ &index_pages);
/*
* Save amcostestimate's results for possible use in bitmap scan planning.
if (indexonly)
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+ rand_heap_pages = pages_fetched;
+
max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
/*
if (indexonly)
pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
+ rand_heap_pages = pages_fetched;
+
/* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
max_IO_cost = pages_fetched * spc_random_page_cost;
min_IO_cost = 0;
}
+ if (partial_path)
+ {
+ /*
+ * Estimate the number of parallel workers required to scan the index.
+ * Use the number of heap pages computed above, considering that heap
+ * fetches won't be sequential: for parallel scans, the pages are
+ * accessed in random order.
+ */
+ path->path.parallel_workers = compute_parallel_worker(baserel,
+ (BlockNumber) rand_heap_pages,
+ (BlockNumber) index_pages);
+
+ /*
+ * Fall out if workers can't be assigned for the parallel scan, because
+ * in such a case this path will be rejected and there is no benefit in
+ * doing the extra computation.
+ */
+ if (path->path.parallel_workers <= 0)
+ return;
+
+ path->path.parallel_aware = true;
+ }
+
/*
* Now interpolate based on estimated index order correlation to get total
* disk I/O cost for main table accesses.
startup_cost += qpqual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
- run_cost += cpu_per_tuple * tuples_fetched;
+ cpu_run_cost += cpu_per_tuple * tuples_fetched;
/* tlist eval costs are paid per output row, not per tuple scanned */
startup_cost += path->path.pathtarget->cost.startup;
- run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
+ cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
+
+ /* Adjust costing for parallelism, if used. */
+ if (path->path.parallel_workers > 0)
+ {
+ double parallel_divisor = get_parallel_divisor(&path->path);
+
+ path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
+
+ /* The CPU cost is divided among all the workers. */
+ cpu_run_cost /= parallel_divisor;
+ }
+
+ run_cost += cpu_run_cost;
path->path.startup_cost = startup_cost;
path->path.total_cost = startup_cost + run_cost;
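To make the adjustment above concrete: get_parallel_divisor (not shown in this hunk) is the same helper already used for parallel seqscan costing. A sketch of its logic, assuming the existing model in which the leader contributes less as the worker count grows:

/*
 * Sketch of get_parallel_divisor (costsize.c).  With 2 workers the
 * divisor is 2 + (1.0 - 0.3 * 2) = 2.4, so cpu_run_cost and the row
 * estimate are divided by 2.4 rather than 2, crediting the leader's
 * own contribution to executing the plan.
 */
static double
get_parallel_divisor(Path *path)
{
	double		parallel_divisor = path->parallel_workers;
	double		leader_contribution;

	/* the leader helps, but less so as it must service more workers */
	leader_contribution = 1.0 - (0.3 * path->parallel_workers);
	if (leader_contribution > 0)
		parallel_divisor += leader_contribution;

	return parallel_divisor;
}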
/*
* build_index_paths
* Given an index and a set of index clauses for it, construct zero
- * or more IndexPaths.
+ * or more IndexPaths. It also constructs zero or more partial IndexPaths.
*
* We return a list of paths because (1) this routine checks some cases
* that should cause us to not generate any IndexPath, and (2) in some
NoMovementScanDirection,
index_only_scan,
outer_relids,
- loop_count);
+ loop_count,
+ false);
result = lappend(result, ipath);
+
+ /*
+ * If appropriate, consider parallel index scan.  We don't allow
+ * parallel index scan for bitmap scans or index-only scans.
+ */
+ if (index->amcanparallel && !index_only_scan &&
+ rel->consider_parallel && outer_relids == NULL &&
+ scantype != ST_BITMAPSCAN)
+ {
+ ipath = create_index_path(root, index,
+ index_clauses,
+ clause_columns,
+ orderbyclauses,
+ orderbyclausecols,
+ useful_pathkeys,
+ index_is_ordered ?
+ ForwardScanDirection :
+ NoMovementScanDirection,
+ index_only_scan,
+ outer_relids,
+ loop_count,
+ true);
+
+ /*
+ * If, after costing the path, we find that it's not worth using
+ * parallel workers, just free it.
+ */
+ if (ipath->path.parallel_workers > 0)
+ add_partial_path(rel, (Path *) ipath);
+ else
+ pfree(ipath);
+ }
}
/*
BackwardScanDirection,
index_only_scan,
outer_relids,
- loop_count);
+ loop_count,
+ false);
result = lappend(result, ipath);
+
+ /* If appropriate, consider parallel index scan */
+ if (index->amcanparallel && !index_only_scan &&
+ rel->consider_parallel && outer_relids == NULL &&
+ scantype != ST_BITMAPSCAN)
+ {
+ ipath = create_index_path(root, index,
+ index_clauses,
+ clause_columns,
+ NIL,
+ NIL,
+ useful_pathkeys,
+ BackwardScanDirection,
+ index_only_scan,
+ outer_relids,
+ loop_count,
+ true);
+
+ /*
+ * If, after costing the path, we find that it's not worth using
+ * parallel workers, just free it.
+ */
+ if (ipath->path.parallel_workers > 0)
+ add_partial_path(rel, (Path *) ipath);
+ else
+ pfree(ipath);
+ }
}
}
indexScanPath = create_index_path(root, indexInfo,
NIL, NIL, NIL, NIL, NIL,
ForwardScanDirection, false,
- NULL, 1.0);
+ NULL, 1.0, false);
return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
}
* As with add_path, we pfree paths that are found to be dominated by
* another partial path; this requires that there be no other references to
* such paths yet. Hence, GatherPaths must not be created for a rel until
- * we're done creating all partial paths for it. We do not currently build
- * partial indexscan paths, so there is no need for an exception for
- * IndexPaths here; for safety, we instead Assert that a path to be freed
- * isn't an IndexPath.
+ * we're done creating all partial paths for it.  Unlike add_path, we don't
+ * need an exception for IndexPaths here, because partial index paths won't
+ * be referenced by partial BitmapHeapPaths.
*/
void
add_partial_path(RelOptInfo *parent_rel, Path *new_path)
{
parent_rel->partial_pathlist =
list_delete_cell(parent_rel->partial_pathlist, p1, p1_prev);
- /* we should not see IndexPaths here, so always safe to delete */
- Assert(!IsA(old_path, IndexPath));
pfree(old_path);
/* p1_prev does not advance */
}
}
else
{
- /* we should not see IndexPaths here, so always safe to delete */
- Assert(!IsA(new_path, IndexPath));
/* Reject and recycle the new path */
pfree(new_path);
}
* 'required_outer' is the set of outer relids for a parameterized path.
* 'loop_count' is the number of repetitions of the indexscan to factor into
* estimates of caching behavior.
+ * 'partial_path' is true if constructing a parallel index scan path.
*
* Returns the new path node.
*/
ScanDirection indexscandir,
bool indexonly,
Relids required_outer,
- double loop_count)
+ double loop_count,
+ bool partial_path)
{
IndexPath *pathnode = makeNode(IndexPath);
RelOptInfo *rel = index->rel;
pathnode->indexorderbycols = indexorderbycols;
pathnode->indexscandir = indexscandir;
- cost_index(pathnode, root, loop_count);
+ cost_index(pathnode, root, loop_count, partial_path);
return pathnode;
}
memcpy(newpath, ipath, sizeof(IndexPath));
newpath->path.param_info =
get_baserel_parampathinfo(root, rel, required_outer);
- cost_index(newpath, root, loop_count);
+ cost_index(newpath, root, loop_count, false);
return (Path *) newpath;
}
case T_BitmapHeapScan:
info->amoptionalkey = amroutine->amoptionalkey;
info->amsearcharray = amroutine->amsearcharray;
info->amsearchnulls = amroutine->amsearchnulls;
+ info->amcanparallel = amroutine->amcanparallel;
info->amhasgettuple = (amroutine->amgettuple != NULL);
info->amhasgetbitmap = (amroutine->amgetbitmap != NULL);
info->amcostestimate = amroutine->amcostestimate;
void
btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
- Selectivity *indexSelectivity, double *indexCorrelation)
+ Selectivity *indexSelectivity, double *indexCorrelation,
+ double *indexPages)
{
IndexOptInfo *index = path->indexinfo;
List *qinfos;
*indexTotalCost = costs.indexTotalCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
+ *indexPages = costs.numIndexPages;
}
void
hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
- Selectivity *indexSelectivity, double *indexCorrelation)
+ Selectivity *indexSelectivity, double *indexCorrelation,
+ double *indexPages)
{
List *qinfos;
GenericCosts costs;
*indexTotalCost = costs.indexTotalCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
+ *indexPages = costs.numIndexPages;
}
void
gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
- Selectivity *indexSelectivity, double *indexCorrelation)
+ Selectivity *indexSelectivity, double *indexCorrelation,
+ double *indexPages)
{
IndexOptInfo *index = path->indexinfo;
List *qinfos;
*indexTotalCost = costs.indexTotalCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
+ *indexPages = costs.numIndexPages;
}
void
spgcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
- Selectivity *indexSelectivity, double *indexCorrelation)
+ Selectivity *indexSelectivity, double *indexCorrelation,
+ double *indexPages)
{
IndexOptInfo *index = path->indexinfo;
List *qinfos;
*indexTotalCost = costs.indexTotalCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
+ *indexPages = costs.numIndexPages;
}
void
gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
- Selectivity *indexSelectivity, double *indexCorrelation)
+ Selectivity *indexSelectivity, double *indexCorrelation,
+ double *indexPages)
{
IndexOptInfo *index = path->indexinfo;
List *indexQuals = path->indexquals;
*indexStartupCost += qual_arg_cost;
*indexTotalCost += qual_arg_cost;
*indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
+ *indexPages = dataPagesFetched;
}
/*
void
brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
- Selectivity *indexSelectivity, double *indexCorrelation)
+ Selectivity *indexSelectivity, double *indexCorrelation,
+ double *indexPages)
{
IndexOptInfo *index = path->indexinfo;
List *indexQuals = path->indexquals;
*indexStartupCost += qual_arg_cost;
*indexTotalCost += qual_arg_cost;
*indexTotalCost += (numTuples * *indexSelectivity) * (cpu_index_tuple_cost + qual_op_cost);
+ *indexPages = index->pages;
/* XXX what about pages_per_range? */
}
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
/* parse index reloptions */
typedef bytea *(*amoptions_function) (Datum reloptions,
bool amclusterable;
/* does AM handle predicate locks? */
bool ampredlocks;
+ /* does AM support parallel scan? */
+ bool amcanparallel;
/* type of data stored in index, or InvalidOid if variable */
Oid amkeytype;
#ifndef NODEINDEXSCAN_H
#define NODEINDEXSCAN_H
+#include "access/parallel.h"
#include "nodes/execnodes.h"
extern IndexScanState *ExecInitIndexScan(IndexScan *node, EState *estate, int eflags);
extern void ExecIndexMarkPos(IndexScanState *node);
extern void ExecIndexRestrPos(IndexScanState *node);
extern void ExecReScanIndexScan(IndexScanState *node);
+extern void ExecIndexScanEstimate(IndexScanState *node, ParallelContext *pcxt);
+extern void ExecIndexScanInitializeDSM(IndexScanState *node, ParallelContext *pcxt);
+extern void ExecIndexScanInitializeWorker(IndexScanState *node, shm_toc *toc);
/*
* These routines are exported to share code with nodeIndexonlyscan.c and
* SortSupport for reordering ORDER BY exprs
* OrderByTypByVals is the datatype of order by expression pass-by-value?
* OrderByTypLens typlens of the datatypes of order by expressions
+ * pscan_len size of parallel index scan descriptor
* ----------------
*/
typedef struct IndexScanState
SortSupport iss_SortSupport;
bool *iss_OrderByTypByVals;
int16 *iss_OrderByTypLens;
+ Size iss_PscanLen;
} IndexScanState;
/* ----------------
bool amsearchnulls; /* can AM search for NULL/NOT NULL entries? */
bool amhasgettuple; /* does AM have amgettuple interface? */
bool amhasgetbitmap; /* does AM have amgetbitmap interface? */
+ bool amcanparallel; /* does AM support parallel scan? */
/* Rather than include amapi.h here, we declare amcostestimate like this */
void (*amcostestimate) (); /* AM's cost estimator */
} IndexOptInfo;
extern void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
ParamPathInfo *param_info);
extern void cost_index(IndexPath *path, PlannerInfo *root,
- double loop_count);
+ double loop_count, bool partial_path);
extern void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
ParamPathInfo *param_info,
Path *bitmapqual, double loop_count);
ScanDirection indexscandir,
bool indexonly,
Relids required_outer,
- double loop_count);
+ double loop_count,
+ bool partial_path);
extern BitmapHeapPath *create_bitmap_heap_path(PlannerInfo *root,
RelOptInfo *rel,
Path *bitmapqual,
List *initial_rels);
extern void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel);
+extern int compute_parallel_worker(RelOptInfo *rel, BlockNumber heap_pages,
+ BlockNumber index_pages);
#ifdef OPTIMIZER_DEBUG
extern void debug_print_rel(PlannerInfo *root, RelOptInfo *rel);
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
extern void btcostestimate(struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
extern void hashcostestimate(struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
extern void gistcostestimate(struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
extern void spgcostestimate(struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
extern void gincostestimate(struct PlannerInfo *root,
struct IndexPath *path,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
Selectivity *indexSelectivity,
- double *indexCorrelation);
+ double *indexCorrelation,
+ double *indexPages);
#endif /* INDEX_SELFUNCS_H */
(1 row)
alter table tenk2 reset (parallel_workers);
+-- test parallel index scans.
+set enable_seqscan to off;
+set enable_bitmapscan to off;
+explain (costs off)
+ select count((unique1)) from tenk1 where hundred > 1;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Finalize Aggregate
+ -> Gather
+ Workers Planned: 4
+ -> Partial Aggregate
+ -> Parallel Index Scan using tenk1_hundred on tenk1
+ Index Cond: (hundred > 1)
+(6 rows)
+
+select count((unique1)) from tenk1 where hundred > 1;
+ count
+-------
+ 9800
+(1 row)
+
+reset enable_seqscan;
+reset enable_bitmapscan;
set force_parallel_mode=1;
explain (costs off)
select stringu1::int2 from tenk1 where unique1 = 1;
(select hundred, thousand from tenk2 where thousand > 100);
alter table tenk2 reset (parallel_workers);
+-- test parallel index scans.
+set enable_seqscan to off;
+set enable_bitmapscan to off;
+
+explain (costs off)
+ select count((unique1)) from tenk1 where hundred > 1;
+select count((unique1)) from tenk1 where hundred > 1;
+
+reset enable_seqscan;
+reset enable_bitmapscan;
+
set force_parallel_mode=1;
explain (costs off)