bms_membership(root->all_query_rels) != BMS_SINGLETON) &&
!(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
{
- path = (Path *) create_material_path(rel, path);
+ path = (Path *) create_material_path(rel, path, true);
}
add_path(rel, path);
double spc_seq_page_cost;
QualCost qpqual_cost;
Cost cpu_per_tuple;
+ uint64 enable_mask = PGS_SEQSCAN;
/* Should only be applied to base relations */
Assert(baserel->relid > 0);
*/
path->rows = clamp_row_est(path->rows / parallel_divisor);
}
+ else
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
- path->disabled_nodes = enable_seqscan ? 0 : 1;
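+ /*
+ * The path counts as disabled unless every strategy bit in enable_mask is
+ * set in the rel's pgs_mask.
+ */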
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
}
spc_page_cost;
QualCost qpqual_cost;
Cost cpu_per_tuple;
+ uint64 enable_mask = 0;
/* Should only be applied to base relations with tablesample clauses */
Assert(baserel->relid > 0);
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
- path->disabled_nodes = 0;
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
startup_cost += parallel_setup_cost;
run_cost += parallel_tuple_cost * path->path.rows;
- path->path.disabled_nodes = path->subpath->disabled_nodes;
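+ /* Propagate the subpath's count, adding one more if Gather is disabled. */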
+ path->path.disabled_nodes = path->subpath->disabled_nodes
+ + ((rel->pgs_mask & PGS_GATHER) != 0 ? 0 : 1);
path->path.startup_cost = startup_cost;
path->path.total_cost = (startup_cost + run_cost);
}
startup_cost += parallel_setup_cost;
run_cost += parallel_tuple_cost * path->path.rows * 1.05;
- path->path.disabled_nodes = input_disabled_nodes
- + (enable_gathermerge ? 0 : 1);
+ path->path.disabled_nodes = path->subpath->disabled_nodes
+ + ((rel->pgs_mask & PGS_GATHER_MERGE) != 0 ? 0 : 1);
path->path.startup_cost = startup_cost + input_startup_cost;
path->path.total_cost = (startup_cost + run_cost + input_total_cost);
}
double pages_fetched;
double rand_heap_pages;
double index_pages;
+ uint64 enable_mask;
/* Should only be applied to base relations */
Assert(IsA(baserel, RelOptInfo) &&
path->indexclauses);
}
- /* we don't need to check enable_indexonlyscan; indxpath.c does that */
- path->path.disabled_nodes = enable_indexscan ? 0 : 1;
+ /* is this scan type disabled? */
+ enable_mask = (indexonly ? PGS_INDEXONLYSCAN : PGS_INDEXSCAN)
+ | (path->path.parallel_workers == 0 ? PGS_CONSIDER_NONPARTIAL : 0);
+ path->path.disabled_nodes =
+ (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
/*
* Call index-access-method-specific code to estimate the processing cost
double spc_seq_page_cost,
spc_random_page_cost;
double T;
+ uint64 enable_mask = PGS_BITMAPSCAN;
/* Should only be applied to base relations */
Assert(IsA(baserel, RelOptInfo));
path->rows = clamp_row_est(path->rows / parallel_divisor);
}
+ else
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
run_cost += cpu_run_cost;
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
- path->disabled_nodes = enable_bitmapscan ? 0 : 1;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
double ntuples;
ListCell *l;
double spc_random_page_cost;
+ uint64 enable_mask = 0;
/* Should only be applied to base relations */
Assert(baserel->relid > 0);
/*
* We must use a TID scan for CurrentOfExpr; in any other case, we
- * should be generating a TID scan only if enable_tidscan=true. Also,
- * if CurrentOfExpr is the qual, there should be only one.
+ * should be generating a TID scan only if TID scans are allowed.
+ * Also, if CurrentOfExpr is the qual, there should be only one.
*/
- Assert(enable_tidscan || IsA(qual, CurrentOfExpr));
+ Assert((baserel->pgs_mask & PGS_TIDSCAN) != 0 || IsA(qual, CurrentOfExpr));
Assert(list_length(tidquals) == 1 || !IsA(qual, CurrentOfExpr));
if (IsA(qual, ScalarArrayOpExpr))
/*
* There are assertions above verifying that we only reach this function
- * either when enable_tidscan=true or when the TID scan is the only legal
- * path, so it's safe to set disabled_nodes to zero here.
+ * either when baserel->pgs_mask includes PGS_TIDSCAN or when the TID scan
+ * is the only legal path, so we only need to consider the effects of
+ * PGS_CONSIDER_NONPARTIAL here.
*/
- path->disabled_nodes = 0;
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
double nseqpages;
double spc_random_page_cost;
double spc_seq_page_cost;
+ uint64 enable_mask = PGS_TIDSCAN;
/* Should only be applied to base relations */
Assert(baserel->relid > 0);
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
- /* we should not generate this path type when enable_tidscan=false */
- Assert(enable_tidscan);
+ /*
+ * We should not generate this path type when PGS_TIDSCAN is unset, but we
+ * might need to disable this path due to PGS_CONSIDER_NONPARTIAL.
+ */
+ Assert((baserel->pgs_mask & PGS_TIDSCAN) != 0);
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
- path->disabled_nodes = 0;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
List *qpquals;
QualCost qpqual_cost;
Cost cpu_per_tuple;
+ uint64 enable_mask = 0;
/* Should only be applied to base relations that are subqueries */
Assert(baserel->relid > 0);
* SubqueryScan node, plus cpu_tuple_cost to account for selection and
* projection overhead.
*/
- path->path.disabled_nodes = path->subpath->disabled_nodes;
+ if (path->path.parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->path.disabled_nodes = path->subpath->disabled_nodes
+ + (((baserel->pgs_mask & enable_mask) != enable_mask) ? 1 : 0);
path->path.startup_cost = path->subpath->startup_cost;
path->path.total_cost = path->subpath->total_cost;
Cost cpu_per_tuple;
RangeTblEntry *rte;
QualCost exprcost;
+ uint64 enable_mask = 0;
/* Should only be applied to base relations that are functions */
Assert(baserel->relid > 0);
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
- path->disabled_nodes = 0;
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
Cost cpu_per_tuple;
RangeTblEntry *rte;
QualCost exprcost;
+ uint64 enable_mask = 0;
/* Should only be applied to base relations that are functions */
Assert(baserel->relid > 0);
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
- path->disabled_nodes = 0;
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
Cost run_cost = 0;
QualCost qpqual_cost;
Cost cpu_per_tuple;
+ uint64 enable_mask = 0;
/* Should only be applied to base relations that are values lists */
Assert(baserel->relid > 0);
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
- path->disabled_nodes = 0;
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
Cost run_cost = 0;
QualCost qpqual_cost;
Cost cpu_per_tuple;
+ uint64 enable_mask = 0;
/* Should only be applied to base relations that are CTEs */
Assert(baserel->relid > 0);
startup_cost += path->pathtarget->cost.startup;
run_cost += path->pathtarget->cost.per_tuple * path->rows;
- path->disabled_nodes = 0;
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
Cost run_cost = 0;
QualCost qpqual_cost;
Cost cpu_per_tuple;
+ uint64 enable_mask = 0;
/* Should only be applied to base relations that are Tuplestores */
Assert(baserel->relid > 0);
cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
run_cost += cpu_per_tuple * baserel->tuples;
- path->disabled_nodes = 0;
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
Cost run_cost = 0;
QualCost qpqual_cost;
Cost cpu_per_tuple;
+ uint64 enable_mask = 0;
/* Should only be applied to RTE_RESULT base relations */
Assert(baserel->relid > 0);
cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
run_cost += cpu_per_tuple * baserel->tuples;
- path->disabled_nodes = 0;
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ path->disabled_nodes =
+ (baserel->pgs_mask & enable_mask) != enable_mask ? 1 : 0;
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
Cost startup_cost;
Cost total_cost;
double total_rows;
+ uint64 enable_mask = 0;
/* We probably have decent estimates for the non-recursive term */
startup_cost = nrterm->startup_cost;
*/
total_cost += cpu_tuple_cost * total_rows;
- runion->disabled_nodes = nrterm->disabled_nodes + rterm->disabled_nodes;
+ if (runion->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ runion->disabled_nodes = nrterm->disabled_nodes + rterm->disabled_nodes
+ + ((runion->parent->pgs_mask & enable_mask) != enable_mask ? 1 : 0);
runion->startup_cost = startup_cost;
runion->total_cost = total_cost;
runion->rows = total_rows;
path->rows = input_tuples;
- /* should not generate these paths when enable_incremental_sort=false */
+ /*
+ * We should not generate these paths when enable_incremental_sort=false.
+ * We can ignore PGS_CONSIDER_NONPARTIAL here, because if it's relevant,
+ * it will have already affected the input path.
+ */
Assert(enable_incremental_sort);
path->disabled_nodes = input_disabled_nodes;
startup_cost += input_cost;
+ /*
+ * We can ignore PGS_CONSIDER_NONPARTIAL here, because if it's relevant,
+ * it will have already affected the input path.
+ */
path->rows = tuples;
path->disabled_nodes = input_disabled_nodes + (enable_sort ? 0 : 1);
path->startup_cost = startup_cost;
void
cost_append(AppendPath *apath, PlannerInfo *root)
{
+ RelOptInfo *rel = apath->path.parent;
ListCell *l;
+ uint64 enable_mask = PGS_APPEND;
+
+ if (apath->path.parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
- apath->path.disabled_nodes = 0;
+ apath->path.disabled_nodes =
+ (rel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
apath->path.startup_cost = 0;
apath->path.total_cost = 0;
apath->path.rows = 0;
Cost input_startup_cost, Cost input_total_cost,
double tuples)
{
+ RelOptInfo *rel = path->parent;
Cost startup_cost = 0;
Cost run_cost = 0;
Cost comparison_cost;
double N;
double logN;
+ uint64 enable_mask = PGS_MERGE_APPEND;
+
+ if (path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
/*
* Avoid log(0)...
*/
run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
- path->disabled_nodes = input_disabled_nodes;
+ path->disabled_nodes =
+ (rel->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
+ path->disabled_nodes += input_disabled_nodes;
path->startup_cost = startup_cost + input_startup_cost;
path->total_cost = startup_cost + run_cost + input_total_cost;
}
*/
void
cost_material(Path *path,
- int input_disabled_nodes,
+ bool enabled, int input_disabled_nodes,
Cost input_startup_cost, Cost input_total_cost,
double tuples, int width)
{
double nbytes = relation_byte_size(tuples, width);
double work_mem_bytes = work_mem * (Size) 1024;
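+
+ /*
+ * Even if the caller says materialization is enabled, treat this node as
+ * disabled when it is non-partial and the rel disallows non-partial
+ * paths. (Some callers pass a path with no parent rel, hence the NULL
+ * test.)
+ */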
+ if (path->parallel_workers == 0 &&
+ path->parent != NULL &&
+ (path->parent->pgs_mask & PGS_CONSIDER_NONPARTIAL) == 0)
+ enabled = false;
+
path->rows = tuples;
/*
run_cost += seq_page_cost * npages;
}
- path->disabled_nodes = input_disabled_nodes + (enable_material ? 0 : 1);
+ path->disabled_nodes = input_disabled_nodes + (enabled ? 0 : 1);
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
}
*/
void
initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
- JoinType jointype,
+ JoinType jointype, uint64 enable_mask,
Path *outer_path, Path *inner_path,
JoinPathExtraData *extra)
{
Cost inner_rescan_run_cost;
/* Count up disabled nodes. */
- disabled_nodes = enable_nestloop ? 0 : 1;
+ disabled_nodes = (extra->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
disabled_nodes += inner_path->disabled_nodes;
disabled_nodes += outer_path->disabled_nodes;
Assert(outerstartsel <= outerendsel);
Assert(innerstartsel <= innerendsel);
- disabled_nodes = enable_mergejoin ? 0 : 1;
+ /*
+ * We don't decide whether to materialize the inner path until we get to
+ * final_cost_mergejoin(), so we don't know whether to check the pgs_mask
+ * against PGS_MERGEJOIN_PLAIN or PGS_MERGEJOIN_MATERIALIZE. Instead, we
+ * just account for any child nodes here and assume that this node is not
+ * itself disabled; we can sort out the details in final_cost_mergejoin().
+ *
+ * (We could be more precise here by setting disabled_nodes to 1 at this
+ * stage if both PGS_MERGEJOIN_PLAIN and PGS_MERGEJOIN_MATERIALIZE are
+ * disabled, but that seems to go against the idea of making this function
+ * produce a quick, optimistic approximation of the final cost.)
+ */
+ disabled_nodes = 0;
/* cost of source data */
double mergejointuples,
rescannedtuples;
double rescanratio;
-
- /* Set the number of disabled nodes. */
- path->jpath.path.disabled_nodes = workspace->disabled_nodes;
+ uint64 enable_mask = 0;
/* Protect some assumptions below that rowcounts aren't zero */
if (inner_path_rows <= 0)
path->materialize_inner = false;
/*
- * Prefer materializing if it looks cheaper, unless the user has asked to
- * suppress materialization.
+ * If merge joins with materialization are enabled, then choose
+ * materialization if either (a) it looks cheaper or (b) merge joins
+ * without materialization are disabled.
*/
- else if (enable_material && mat_inner_cost < bare_inner_cost)
+ else if ((extra->pgs_mask & PGS_MERGEJOIN_MATERIALIZE) != 0 &&
+ (mat_inner_cost < bare_inner_cost ||
+ (extra->pgs_mask & PGS_MERGEJOIN_PLAIN) == 0))
path->materialize_inner = true;
/*
- * Even if materializing doesn't look cheaper, we *must* do it if the
- * inner path is to be used directly (without sorting) and it doesn't
- * support mark/restore.
+ * Regardless of what plan shapes are enabled and what the costs seem to
+ * be, we *must* materialize it if the inner path is to be used directly
+ * (without sorting) and it doesn't support mark/restore. Planner failure
+ * is not an option!
*
* Since the inner side must be ordered, and only Sorts and IndexScans can
* create order to begin with, and they both support mark/restore, you
* merge joins can *preserve* the order of their inputs, so they can be
* selected as the input of a mergejoin, and they don't support
* mark/restore at present.
- *
- * We don't test the value of enable_material here, because
- * materialization is required for correctness in this case, and turning
- * it off does not entitle us to deliver an invalid plan.
*/
else if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path))
* though.
*
* Since materialization is a performance optimization in this case,
- * rather than necessary for correctness, we skip it if enable_material is
- * off.
+ * rather than necessary for correctness, we skip it if materialization is
+ * switched off.
*/
- else if (enable_material && innersortkeys != NIL &&
+ else if ((extra->pgs_mask & PGS_MERGEJOIN_MATERIALIZE) != 0 &&
+ innersortkeys != NIL &&
relation_byte_size(inner_path_rows,
inner_path->pathtarget->width) >
work_mem * (Size) 1024)
else
path->materialize_inner = false;
- /* Charge the right incremental cost for the chosen case */
+ /* Get the number of disabled nodes, not yet including this one. */
+ path->jpath.path.disabled_nodes = workspace->disabled_nodes;
+
+ /*
+ * Charge the right incremental cost for the chosen case, and update
+ * enable_mask as appropriate.
+ */
if (path->materialize_inner)
+ {
run_cost += mat_inner_cost;
+ enable_mask |= PGS_MERGEJOIN_MATERIALIZE;
+ }
else
+ {
run_cost += bare_inner_cost;
+ enable_mask |= PGS_MERGEJOIN_PLAIN;
+ }
+
+ /* Increment the count of disabled nodes if this node is itself disabled. */
+ if (path->jpath.path.parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
+ if ((extra->pgs_mask & enable_mask) != enable_mask)
+ ++path->jpath.path.disabled_nodes;
/* CPU costs */
int numbatches;
int num_skew_mcvs;
size_t space_allowed; /* unused */
+ uint64 enable_mask = PGS_HASHJOIN;
+
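+ /*
+ * The join path itself hasn't been built yet, so use the outer path's
+ * parallelism as a proxy: a partial hash join path has a partial outer
+ * side.
+ */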
+ if (outer_path->parallel_workers == 0)
+ enable_mask |= PGS_CONSIDER_NONPARTIAL;
/* Count up disabled nodes. */
- disabled_nodes = enable_hashjoin ? 0 : 1;
+ disabled_nodes = (extra->pgs_mask & enable_mask) == enable_mask ? 0 : 1;
disabled_nodes += inner_path->disabled_nodes;
disabled_nodes += outer_path->disabled_nodes;
ListCell *lc;
int i;
- /* Index-only scans must be enabled */
- if (!enable_indexonlyscan)
+ /* If we're not allowed to consider index-only scans, give up now */
+ if ((rel->pgs_mask & PGS_CONSIDER_INDEXONLY) == 0)
return false;
/*
#include "utils/lsyscache.h"
#include "utils/typcache.h"
-/* Hook for plugins to get control in add_paths_to_joinrel() */
+/* Hooks for plugins to get control in add_paths_to_joinrel() */
set_join_pathlist_hook_type set_join_pathlist_hook = NULL;
+join_path_setup_hook_type join_path_setup_hook = NULL;
/*
* Paths parameterized by a parent rel can be considered to be parameterized
extra.mergeclause_list = NIL;
extra.sjinfo = sjinfo;
extra.param_source_rels = NULL;
+ extra.pgs_mask = joinrel->pgs_mask;
/*
* See if the inner relation is provably unique for this outer rel.
if (jointype == JOIN_UNIQUE_OUTER || jointype == JOIN_UNIQUE_INNER)
jointype = JOIN_INNER;
+ /*
+ * Give extensions a chance to take control. In particular, an extension
+ * might want to modify extra.pgs_mask. It's possible to override pgs_mask
+ * on a query-wide basis using join_search_hook, or for a particular
+ * relation using joinrel_setup_hook, but extensions that want to provide
+ * different advice for the same joinrel based on the choice of innerrel
+ * and outerrel will need to use this hook.
+ *
+ * A very simple way for an extension to use this hook is to set
+ * extra.pgs_mask = 0, if it simply doesn't want any of the paths
+ * generated by this call to add_paths_to_joinrel() to be selected. An
+ * extension could use this technique to constrain the join order, since
+ * it could thereby arrange to reject all paths from join orders that it
+ * does not like. An extension can also selectively clear bits from
+ * extra.pgs_mask to rule out specific techniques for specific joins, or
+ * even replace the mask entirely.
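+ *
+ * For example, a hypothetical extension that wants this particular join
+ * to be implemented only as a hash join could have its hook do:
+ *
+ *		extra->pgs_mask &= ~PGS_JOIN_ANY;
+ *		extra->pgs_mask |= PGS_HASHJOIN;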
+ *
+ * NB: Below this point, this function should be careful to reference
+ * extra.pgs_mask rather than joinrel->pgs_mask to avoid disregarding any
+ * changes made by the hook we're about to call.
+ */
+ if (join_path_setup_hook)
+ join_path_setup_hook(root, joinrel, outerrel, innerrel,
+ jointype, &extra);
+
/*
* Find potential mergejoin clauses. We can skip this if we are not
* interested in doing a mergejoin. However, mergejoin may be our only
- * way of implementing a full outer join, so override enable_mergejoin if
- * it's a full join.
+ * way of implementing a full outer join, so in that case we don't care
+ * whether mergejoins are disabled.
*/
- if (enable_mergejoin || jointype == JOIN_FULL)
+ if ((extra.pgs_mask & PGS_MERGEJOIN_ANY) != 0 || jointype == JOIN_FULL)
extra.mergeclause_list = select_mergejoin_clauses(root,
joinrel,
outerrel,
/*
* 4. Consider paths where both outer and inner relations must be hashed
- * before being joined. As above, disregard enable_hashjoin for full
- * joins, because there may be no other alternative.
+ * before being joined. As above, when it's a full join, we must try this
+ * even when the path type is disabled, because it may be our only option.
*/
- if (enable_hashjoin || jointype == JOIN_FULL)
+ if ((extra.pgs_mask & PGS_HASHJOIN) != 0 || jointype == JOIN_FULL)
hash_inner_and_outer(root, joinrel, outerrel, innerrel,
jointype, &extra);
* to the same server and assigned to the same user to check access
* permissions as, give the FDW a chance to push down joins.
*/
- if (joinrel->fdwroutine &&
+ if ((extra.pgs_mask & PGS_FOREIGNJOIN) != 0 && joinrel->fdwroutine &&
joinrel->fdwroutine->GetForeignJoinPaths)
joinrel->fdwroutine->GetForeignJoinPaths(root, joinrel,
outerrel, innerrel,
/*
* 6. Finally, give extensions a chance to manipulate the path list. They
* could add new paths (such as CustomPaths) by calling add_path(), or
- * add_partial_path() if parallel aware. They could also delete or modify
- * paths added by the core code.
+ * add_partial_path() if parallel aware.
+ *
+ * In theory, extensions could also use this hook to delete or modify
+ * paths added by the core code, but in practice this is difficult to make
+ * work, since it's too late to get back any paths that have already been
+ * discarded by add_path() or add_partial_path(). If you're trying to
+ * suppress paths, consider using join_path_setup_hook instead.
*/
if (set_join_pathlist_hook)
set_join_pathlist_hook(root, joinrel, outerrel, innerrel,
List *ph_lateral_vars;
/* Obviously not if it's disabled */
- if (!enable_memoize)
+ if ((extra->pgs_mask & PGS_NESTLOOP_MEMOIZE) == 0)
return NULL;
/*
Path *inner_path,
List *pathkeys,
JoinType jointype,
+ uint64 nestloop_subtype,
JoinPathExtraData *extra)
{
Relids required_outer;
* methodology worthwhile.
*/
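+ /*
+ * Only non-partial nestloop paths are built here, so the rel must also
+ * allow non-partial paths for this one to count as enabled.
+ */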
initial_cost_nestloop(root, &workspace, jointype,
+ nestloop_subtype | PGS_CONSIDER_NONPARTIAL,
outer_path, inner_path, extra);
if (add_path_precheck(joinrel, workspace.disabled_nodes,
Path *inner_path,
List *pathkeys,
JoinType jointype,
+ uint64 nestloop_subtype,
JoinPathExtraData *extra)
{
JoinCostWorkspace workspace;
* Before creating a path, get a quick lower bound on what it is likely to
* cost. Bail out right away if it looks terrible.
*/
- initial_cost_nestloop(root, &workspace, jointype,
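+ /* This builds a partial path, so PGS_CONSIDER_NONPARTIAL is not required. */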
+ initial_cost_nestloop(root, &workspace, jointype, nestloop_subtype,
outer_path, inner_path, extra);
if (!add_partial_path_precheck(joinrel, workspace.disabled_nodes,
workspace.total_cost, pathkeys))
if (nestjoinOK)
{
/*
- * Consider materializing the cheapest inner path, unless
- * enable_material is off or the path in question materializes its
- * output anyway.
+ * Consider materializing the cheapest inner path, unless that is
+ * disabled or the path in question materializes its output anyway.
*/
- if (enable_material && inner_cheapest_total != NULL &&
+ if ((extra->pgs_mask & PGS_NESTLOOP_MATERIALIZE) != 0 &&
+ inner_cheapest_total != NULL &&
!ExecMaterializesOutput(inner_cheapest_total->pathtype))
matpath = (Path *)
- create_material_path(innerrel, inner_cheapest_total);
+ create_material_path(innerrel, inner_cheapest_total, true);
}
foreach(lc1, outerrel->pathlist)
innerpath,
merge_pathkeys,
jointype,
+ PGS_NESTLOOP_PLAIN,
extra);
/*
mpath,
merge_pathkeys,
jointype,
+ PGS_NESTLOOP_MEMOIZE,
extra);
}
matpath,
merge_pathkeys,
jointype,
+ PGS_NESTLOOP_MATERIALIZE,
extra);
}
/*
* Consider materializing the cheapest inner path, unless: 1)
- * enable_material is off, 2) the cheapest inner path is not
+ * materialization is disabled here, 2) the cheapest inner path is not
* parallel-safe, 3) the cheapest inner path is parameterized by the outer
* rel, or 4) the cheapest inner path materializes its output anyway.
*/
- if (enable_material && inner_cheapest_total->parallel_safe &&
+ if ((extra->pgs_mask & PGS_NESTLOOP_MATERIALIZE) != 0 &&
+ inner_cheapest_total->parallel_safe &&
!PATH_PARAM_BY_REL(inner_cheapest_total, outerrel) &&
!ExecMaterializesOutput(inner_cheapest_total->pathtype))
{
matpath = (Path *)
- create_material_path(innerrel, inner_cheapest_total);
+ create_material_path(innerrel, inner_cheapest_total, true);
Assert(matpath->parallel_safe);
}
continue;
try_partial_nestloop_path(root, joinrel, outerpath, innerpath,
- pathkeys, jointype, extra);
+ pathkeys, jointype,
+ PGS_NESTLOOP_PLAIN, extra);
/*
* Try generating a memoize path and see if that makes the nested
extra);
if (mpath != NULL)
try_partial_nestloop_path(root, joinrel, outerpath, mpath,
- pathkeys, jointype, extra);
+ pathkeys, jointype,
+ PGS_NESTLOOP_MEMOIZE, extra);
}
/* Also consider materialized form of the cheapest inner path */
if (matpath != NULL)
try_partial_nestloop_path(root, joinrel, outerpath, matpath,
- pathkeys, jointype, extra);
+ pathkeys, jointype,
+ PGS_NESTLOOP_MATERIALIZE, extra);
}
}
List *tidquals;
List *tidrangequals;
bool isCurrentOf;
+ bool enabled = (rel->pgs_mask & PGS_TIDSCAN) != 0;
/*
* If any suitable quals exist in the rel's baserestrict list, generate a
* plain (unparameterized) TidPath with them.
*
- * We skip this when enable_tidscan = false, except when the qual is
+ * We skip this when TID scans are disabled, except when the qual is
* CurrentOfExpr. In that case, a TID scan is the only correct path.
*/
tidquals = TidQualFromRestrictInfoList(root, rel->baserestrictinfo, rel,
&isCurrentOf);
- if (tidquals != NIL && (enable_tidscan || isCurrentOf))
+ if (tidquals != NIL && (enabled || isCurrentOf))
{
/*
* This path uses no join clauses, but it could still have required
}
/* Skip the rest if TID scans are disabled. */
- if (!enable_tidscan)
+ if (!enabled)
return false;
/*
/* Set cost data */
cost_material(&matpath,
+ enable_material,
subplan->disabled_nodes,
subplan->startup_cost,
subplan->total_cost,
tuple_fraction = 0.0;
}
+ /*
+ * Compute the initial path generation strategy mask.
+ *
+ * Some strategies, such as PGS_FOREIGNJOIN, have no corresponding enable_*
+ * GUC, and so the corresponding bits are always set in the default
+ * strategy mask.
+ *
+ * It may seem surprising that enable_indexscan sets both PGS_INDEXSCAN
+ * and PGS_INDEXONLYSCAN. However, the historical behavior of this GUC
+ * corresponds to this exactly: enable_indexscan=off disables both
+ * index-scan and index-only scan paths, whereas enable_indexonlyscan=off
+ * converts the index-only scan paths that we would have considered into
+ * index scan paths.
+ */
+ glob->default_pgs_mask = PGS_APPEND | PGS_MERGE_APPEND | PGS_FOREIGNJOIN |
+ PGS_GATHER | PGS_CONSIDER_NONPARTIAL;
+ if (enable_tidscan)
+ glob->default_pgs_mask |= PGS_TIDSCAN;
+ if (enable_seqscan)
+ glob->default_pgs_mask |= PGS_SEQSCAN;
+ if (enable_indexscan)
+ glob->default_pgs_mask |= PGS_INDEXSCAN | PGS_INDEXONLYSCAN;
+ if (enable_indexonlyscan)
+ glob->default_pgs_mask |= PGS_CONSIDER_INDEXONLY;
+ if (enable_bitmapscan)
+ glob->default_pgs_mask |= PGS_BITMAPSCAN;
+ if (enable_mergejoin)
+ {
+ glob->default_pgs_mask |= PGS_MERGEJOIN_PLAIN;
+ if (enable_material)
+ glob->default_pgs_mask |= PGS_MERGEJOIN_MATERIALIZE;
+ }
+ if (enable_nestloop)
+ {
+ glob->default_pgs_mask |= PGS_NESTLOOP_PLAIN;
+ if (enable_material)
+ glob->default_pgs_mask |= PGS_NESTLOOP_MATERIALIZE;
+ if (enable_memoize)
+ glob->default_pgs_mask |= PGS_NESTLOOP_MEMOIZE;
+ }
+ if (enable_hashjoin)
+ glob->default_pgs_mask |= PGS_HASHJOIN;
+ if (enable_gathermerge)
+ glob->default_pgs_mask |= PGS_GATHER_MERGE;
+ if (enable_partitionwise_join)
+ glob->default_pgs_mask |= PGS_CONSIDER_PARTITIONWISE;
+
/* Allow plugins to take control after we've initialized "glob" */
if (planner_setup_hook)
(*planner_setup_hook) (glob, parse, query_string, &tuple_fraction, es);
is_parallel_safe(root, (Node *) havingQual))
grouped_rel->consider_parallel = true;
+ /* Assume that the same path generation strategies are allowed */
+ grouped_rel->pgs_mask = input_rel->pgs_mask;
+
/*
* If the input rel belongs to a single FDW, so does the grouped rel.
*/
if (input_rel->consider_parallel && target_parallel_safe)
ordered_rel->consider_parallel = true;
+ /* Assume that the same path generation strategies are allowed. */
+ ordered_rel->pgs_mask = input_rel->pgs_mask;
+
/*
* If the input rel belongs to a single FDW, so does the ordered_rel.
*/
grouped_rel->relids);
partially_grouped_rel->consider_parallel =
grouped_rel->consider_parallel;
+ partially_grouped_rel->pgs_mask = grouped_rel->pgs_mask;
partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
partially_grouped_rel->serverid = grouped_rel->serverid;
partially_grouped_rel->userid = grouped_rel->userid;
* pathnode.
*/
MaterialPath *
-create_material_path(RelOptInfo *rel, Path *subpath)
+create_material_path(RelOptInfo *rel, Path *subpath, bool enabled)
{
MaterialPath *pathnode = makeNode(MaterialPath);
pathnode->subpath = subpath;
cost_material(&pathnode->path,
+ enabled,
subpath->disabled_nodes,
subpath->startup_cost,
subpath->total_cost,
pathnode->est_unique_keys = 0.0;
pathnode->est_hit_ratio = 0.0;
- /* we should not generate this path type when enable_memoize=false */
- Assert(enable_memoize);
+ /*
+ * We should not be asked to generate this path type when memoization is
+ * disabled, so set our count of disabled nodes equal to the subpath's
+ * count.
+ *
+ * It would be nice to also Assert that memoization is enabled, but the
+ * value of enable_memoize is not controlling: what we would need to check
+ * is that the JoinPathExtraData's pgs_mask included PGS_NESTLOOP_MEMOIZE.
+ */
pathnode->path.disabled_nodes = subpath->disabled_nodes;
/*
{
MaterialPath *mpath = (MaterialPath *) path;
Path *spath = mpath->subpath;
+ bool enabled;
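+
+ /*
+ * The original MaterialPath was "enabled" iff it added no disabled node
+ * of its own on top of its subpath; remember that before we replace the
+ * subpath.
+ */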
+ enabled =
+ (mpath->path.disabled_nodes <= spath->disabled_nodes);
spath = reparameterize_path(root, spath,
required_outer,
loop_count);
if (spath == NULL)
return NULL;
- return (Path *) create_material_path(rel, spath);
+ return (Path *) create_material_path(rel, spath, enabled);
}
case T_Memoize:
{
* Allow a plugin to editorialize on the info we obtained from the
* catalogs. Actions might include altering the assumed relation size,
* removing an index, or adding a hypothetical index to the indexlist.
+ *
+ * An extension can also modify rel->pgs_mask here to control path
+ * generation.
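+ *
+ * For example, a hypothetical extension could steer this rel toward index
+ * scans with something like:
+ *
+ *		rel->pgs_mask &= ~(PGS_SEQSCAN | PGS_BITMAPSCAN | PGS_TIDSCAN);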
*/
if (get_relation_info_hook)
(*get_relation_info_hook) (root, relationObjectId, inhparent, rel);
RelOptInfo *join_rel;
} JoinHashEntry;
+/* Hook for plugins to get control during joinrel setup */
+joinrel_setup_hook_type joinrel_setup_hook = NULL;
+
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *input_rel,
SpecialJoinInfo *sjinfo,
rel->consider_startup = (root->tuple_fraction > 0);
rel->consider_param_startup = false; /* might get changed later */
rel->consider_parallel = false; /* might get changed later */
+ rel->pgs_mask = root->glob->default_pgs_mask;
rel->reltarget = create_empty_pathtarget();
rel->pathlist = NIL;
rel->ppilist = NIL;
joinrel->consider_startup = (root->tuple_fraction > 0);
joinrel->consider_param_startup = false;
joinrel->consider_parallel = false;
+ joinrel->pgs_mask = root->glob->default_pgs_mask;
joinrel->reltarget = create_empty_pathtarget();
joinrel->pathlist = NIL;
joinrel->ppilist = NIL;
*/
joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel);
- /* Store the partition information. */
- build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
- restrictlist);
-
/*
* Set estimates of the joinrel's size.
*/
is_parallel_safe(root, (Node *) joinrel->reltarget->exprs))
joinrel->consider_parallel = true;
+ /*
+ * Allow a plugin to editorialize on the new joinrel's properties. Actions
+ * might include altering the size estimate or clearing consider_parallel.
+ */
+ if (joinrel_setup_hook)
+ (*joinrel_setup_hook) (root, joinrel, outer_rel, inner_rel, sjinfo,
+ restrictlist);
+
+ /* Store the partition information. */
+ build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
+ restrictlist);
+
/* Add the joinrel to the PlannerInfo. */
add_join_rel(root, joinrel);
joinrel->consider_startup = (root->tuple_fraction > 0);
joinrel->consider_param_startup = false;
joinrel->consider_parallel = false;
+ joinrel->pgs_mask = root->glob->default_pgs_mask;
joinrel->reltarget = create_empty_pathtarget();
joinrel->pathlist = NIL;
joinrel->ppilist = NIL;
*/
joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins;
- /* Is the join between partitions itself partitioned? */
- build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
- restrictlist);
-
/* Child joinrel is parallel safe if parent is parallel safe. */
joinrel->consider_parallel = parent_joinrel->consider_parallel;
set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
sjinfo, restrictlist);
+ /*
+ * Allow a plugin to editorialize on the new joinrel's properties. Actions
+ * might include altering the size estimate or clearing consider_parallel,
+ * although the latter would be better done in the parent joinrel rather
+ * than here.
+ */
+ if (joinrel_setup_hook)
+ (*joinrel_setup_hook) (root, joinrel, outer_rel, inner_rel, sjinfo,
+ restrictlist);
+
+ /* Is the join between partitions itself partitioned? */
+ build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
+ restrictlist);
+
/* We build the join only once. */
Assert(!find_join_rel(root, joinrel->relids));
upperrel = makeNode(RelOptInfo);
upperrel->reloptkind = RELOPT_UPPER_REL;
upperrel->relids = bms_copy(relids);
+ upperrel->pgs_mask = root->glob->default_pgs_mask;
/* cheap startup cost is interesting iff not all tuples to be retrieved */
upperrel->consider_startup = (root->tuple_fraction > 0);
PartitionScheme part_scheme;
/* Nothing to do if partitionwise join technique is disabled. */
- if (!enable_partitionwise_join)
+ if ((joinrel->pgs_mask & PGS_CONSIDER_PARTITIONWISE) == 0)
{
Assert(!IS_PARTITIONED_REL(joinrel));
return;
#include "nodes/parsenodes.h"
#include "storage/block.h"
+/*
+ * Path generation strategies.
+ *
+ * These constants are used to specify the set of strategies that the planner
+ * should use, either for the query as a whole or for a specific baserel or
+ * joinrel. The various planner-related enable_* GUCs are used to set the
+ * PlannerGlobal's default_pgs_mask, and that in turn is used to set each
+ * RelOptInfo's pgs_mask. In both cases, extensions can use hooks to modify the
+ * default value. Not every strategy listed here has a corresponding enable_*
+ * GUC; those that don't are always allowed unless disabled by an extension.
+ * Not all strategies are relevant for every RelOptInfo; e.g. PGS_SEQSCAN
+ * doesn't affect joinrels one way or the other.
+ *
+ * In most cases, disabling a path generation strategy merely means that any
+ * paths generated using that strategy are marked as disabled, but in some
+ * cases, path generation is skipped altogether. The latter strategy is only
+ * permissible when it can't result in planner failure -- for instance, we
+ * couldn't do this for sequential scans on a plain rel, because there might
+ * not be any other possible path. Nevertheless, the behaviors in each
+ * individual case are to some extent the result of historical accident,
+ * chosen to match the preexisting behaviors of the enable_* GUCs.
+ *
+ * In a few cases, we have more than one bit for the same strategy, controlling
+ * different aspects of the planner behavior. When PGS_CONSIDER_INDEXONLY is
+ * unset, we don't even consider index-only scans, and any such scans that
+ * would have been generated become index scans instead. On the other hand,
+ * unsetting PGS_INDEXSCAN or PGS_INDEXONLYSCAN causes generated paths of the
+ * corresponding types to be marked as disabled. Similarly, unsetting
+ * PGS_CONSIDER_PARTITIONWISE prevents any sort of thinking about partitionwise
+ * joins for the current rel, which incidentally will preclude higher-level
+ * joinrels from building partitionwise paths using paths taken from the
+ * current rel's children. On the other hand, unsetting PGS_APPEND or
+ * PGS_MERGE_APPEND will only arrange to disable paths of the corresponding
+ * types if they are generated at the level of the current rel.
+ *
+ * Finally, unsetting PGS_CONSIDER_NONPARTIAL disables all non-partial paths
+ * except those that use Gather or Gather Merge. In most other cases, a
+ * plugin can nudge the planner toward a particular strategy by disabling
+ * all of the others, but that doesn't work here: unsetting PGS_SEQSCAN,
+ * for instance, would disable both partial and non-partial sequential scans.
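+ *
+ * As a sketch of intended usage, an extension that never wants merge joins
+ * considered could clear the relevant bits from PlannerGlobal's
+ * default_pgs_mask in a planner_setup_hook:
+ *
+ *		glob->default_pgs_mask &= ~PGS_MERGEJOIN_ANY;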
+ */
+#define PGS_SEQSCAN 0x00000001
+#define PGS_INDEXSCAN 0x00000002
+#define PGS_INDEXONLYSCAN 0x00000004
+#define PGS_BITMAPSCAN 0x00000008
+#define PGS_TIDSCAN 0x00000010
+#define PGS_FOREIGNJOIN 0x00000020
+#define PGS_MERGEJOIN_PLAIN 0x00000040
+#define PGS_MERGEJOIN_MATERIALIZE 0x00000080
+#define PGS_NESTLOOP_PLAIN 0x00000100
+#define PGS_NESTLOOP_MATERIALIZE 0x00000200
+#define PGS_NESTLOOP_MEMOIZE 0x00000400
+#define PGS_HASHJOIN 0x00000800
+#define PGS_APPEND 0x00001000
+#define PGS_MERGE_APPEND 0x00002000
+#define PGS_GATHER 0x00004000
+#define PGS_GATHER_MERGE 0x00008000
+#define PGS_CONSIDER_INDEXONLY 0x00010000
+#define PGS_CONSIDER_PARTITIONWISE 0x00020000
+#define PGS_CONSIDER_NONPARTIAL 0x00040000
+
+/*
+ * Convenience macros for useful combinations of the bits defined above.
+ */
+#define PGS_SCAN_ANY \
+ (PGS_SEQSCAN | PGS_INDEXSCAN | PGS_INDEXONLYSCAN | PGS_BITMAPSCAN | \
+ PGS_TIDSCAN)
+#define PGS_MERGEJOIN_ANY \
+ (PGS_MERGEJOIN_PLAIN | PGS_MERGEJOIN_MATERIALIZE)
+#define PGS_NESTLOOP_ANY \
+ (PGS_NESTLOOP_PLAIN | PGS_NESTLOOP_MATERIALIZE | PGS_NESTLOOP_MEMOIZE)
+#define PGS_JOIN_ANY \
+ (PGS_FOREIGNJOIN | PGS_MERGEJOIN_ANY | PGS_NESTLOOP_ANY | PGS_HASHJOIN)
/*
* Relids
/* worst PROPARALLEL hazard level */
char maxParallelHazard;
+ /* mask of allowed path generation strategies */
+ uint64 default_pgs_mask;
+
/* partition descriptors */
PartitionDirectory partition_directory pg_node_attr(read_write_ignore);
Cardinality rows;
/*
- * per-relation planner control flags
+ * per-relation planner control
*/
/* keep cheap-startup-cost paths? */
bool consider_startup;
bool consider_param_startup;
/* consider parallel paths? */
bool consider_parallel;
+ /* path generation strategy mask */
+ uint64 pgs_mask;
/*
* default result targetlist for Paths scanning this relation; list of
* sjinfo is extra info about special joins for selectivity estimation
* semifactors is as shown above (only valid for SEMI/ANTI/inner_unique joins)
* param_source_rels are OK targets for parameterization of result paths
+ * pgs_mask is a bitmask of PGS_* constants limiting the join strategies considered
*/
typedef struct JoinPathExtraData
{
SpecialJoinInfo *sjinfo;
SemiAntiJoinFactors semifactors;
Relids param_source_rels;
+ uint64 pgs_mask;
} JoinPathExtraData;
/*
Cost input_startup_cost, Cost input_total_cost,
double tuples);
extern void cost_material(Path *path,
- int input_disabled_nodes,
+ bool enabled, int input_disabled_nodes,
Cost input_startup_cost, Cost input_total_cost,
double tuples, int width);
extern void cost_agg(Path *path, PlannerInfo *root,
double input_tuples);
extern void initial_cost_nestloop(PlannerInfo *root,
JoinCostWorkspace *workspace,
- JoinType jointype,
+ JoinType jointype, uint64 enable_mask,
Path *outer_path, Path *inner_path,
JoinPathExtraData *extra);
extern void final_cost_nestloop(PlannerInfo *root, NestPath *path,
#include "nodes/bitmapset.h"
#include "nodes/pathnodes.h"
+/* Hook for plugins to get control during joinrel setup */
+typedef void (*joinrel_setup_hook_type) (PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outer_rel,
+ RelOptInfo *inner_rel,
+ SpecialJoinInfo *sjinfo,
+ List *restrictlist);
+extern PGDLLIMPORT joinrel_setup_hook_type joinrel_setup_hook;
/*
* prototypes for pathnode.c
RelOptInfo *rel,
PathTarget *target,
List *havingqual);
-extern MaterialPath *create_material_path(RelOptInfo *rel, Path *subpath);
+extern MaterialPath *create_material_path(RelOptInfo *rel, Path *subpath,
+ bool enabled);
extern MemoizePath *create_memoize_path(PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
extern PGDLLIMPORT int min_parallel_index_scan_size;
extern PGDLLIMPORT bool enable_group_by_reordering;
-/* Hook for plugins to get control in set_rel_pathlist() */
+/* Hooks for plugins to get control in add_paths_to_joinrel() and set_rel_pathlist() */
+typedef void (*join_path_setup_hook_type) (PlannerInfo *root,
+ RelOptInfo *joinrel,
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ JoinType jointype,
+ JoinPathExtraData *extra);
+extern PGDLLIMPORT join_path_setup_hook_type join_path_setup_hook;
typedef void (*set_rel_pathlist_hook_type) (PlannerInfo *root,
RelOptInfo *rel,
Index rti,