Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/path/allpaths.c        |   43
-rw-r--r--  src/backend/optimizer/path/clausesel.c       |   36
-rw-r--r--  src/backend/optimizer/path/costsize.c        |  120
-rw-r--r--  src/backend/optimizer/path/equivclass.c      |   40
-rw-r--r--  src/backend/optimizer/path/indxpath.c        |   40
-rw-r--r--  src/backend/optimizer/path/joinpath.c        |   16
-rw-r--r--  src/backend/optimizer/path/joinrels.c        |   37
-rw-r--r--  src/backend/optimizer/path/orindxpath.c      |    6
-rw-r--r--  src/backend/optimizer/plan/createplan.c      |  102
-rw-r--r--  src/backend/optimizer/plan/initsplan.c       |   32
-rw-r--r--  src/backend/optimizer/plan/planagg.c         |   16
-rw-r--r--  src/backend/optimizer/plan/planmain.c        |    8
-rw-r--r--  src/backend/optimizer/plan/planner.c         |  213
-rw-r--r--  src/backend/optimizer/plan/setrefs.c         |   28
-rw-r--r--  src/backend/optimizer/plan/subselect.c       |  173
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c    |  174
-rw-r--r--  src/backend/optimizer/prep/prepunion.c       |   64
-rw-r--r--  src/backend/optimizer/util/clauses.c         |   95
-rw-r--r--  src/backend/optimizer/util/pathnode.c        |   53
-rw-r--r--  src/backend/optimizer/util/placeholder.c     |   18
-rw-r--r--  src/backend/optimizer/util/plancat.c         |   16
-rw-r--r--  src/backend/optimizer/util/predtest.c        |   19
-rw-r--r--  src/backend/optimizer/util/relnode.c         |    6
-rw-r--r--  src/backend/optimizer/util/restrictinfo.c    |   16
-rw-r--r--  src/backend/optimizer/util/var.c             |   18
25 files changed, 697 insertions, 692 deletions
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index fee9b8fac8..4a0a1012c0 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.182 2009/04/19 19:46:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.183 2009/06/11 14:48:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,9 +58,9 @@ static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
bool *differentTypes);
@@ -292,13 +292,13 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Initialize to compute size estimates for whole append relation.
*
- * We handle width estimates by weighting the widths of different
- * child rels proportionally to their number of rows. This is sensible
- * because the use of width estimates is mainly to compute the total
- * relation "footprint" if we have to sort or hash it. To do this,
- * we sum the total equivalent size (in "double" arithmetic) and then
- * divide by the total rowcount estimate. This is done separately for
- * the total rel width and each attribute.
+ * We handle width estimates by weighting the widths of different child
+ * rels proportionally to their number of rows. This is sensible because
+ * the use of width estimates is mainly to compute the total relation
+ * "footprint" if we have to sort or hash it. To do this, we sum the
+ * total equivalent size (in "double" arithmetic) and then divide by the
+ * total rowcount estimate. This is done separately for the total rel
+ * width and each attribute.
*
* Note: if you consider changing this logic, beware that child rels could
* have zero rows and/or width, if they were excluded by constraints.
@@ -377,11 +377,11 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * Note: we could compute appropriate attr_needed data for the
- * child's variables, by transforming the parent's attr_needed
- * through the translated_vars mapping. However, currently there's
- * no need because attr_needed is only examined for base relations
- * not otherrels. So we just leave the child's attr_needed empty.
+ * Note: we could compute appropriate attr_needed data for the child's
+ * variables, by transforming the parent's attr_needed through the
+ * translated_vars mapping. However, currently there's no need
+ * because attr_needed is only examined for base relations not
+ * otherrels. So we just leave the child's attr_needed empty.
*/
/*
@@ -438,7 +438,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
rel->rows = parent_rows;
if (parent_rows > 0)
{
- int i;
+ int i;
rel->width = rint(parent_size / parent_rows);
for (i = 0; i < nattrs; i++)
@@ -681,6 +681,7 @@ set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
if (!cteroot) /* shouldn't happen */
elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
}
+
/*
* Note: cte_plan_ids can be shorter than cteList, if we are still working
* on planning the CTEs (ie, this is a side-reference from another CTE).
@@ -726,8 +727,8 @@ set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
/*
* We need to find the non-recursive term's plan, which is in the plan
- * level that's processing the recursive UNION, which is one level
- * *below* where the CTE comes from.
+ * level that's processing the recursive UNION, which is one level *below*
+ * where the CTE comes from.
*/
levelsup = rte->ctelevelsup;
if (levelsup == 0) /* shouldn't happen */
@@ -1087,7 +1088,7 @@ compare_tlist_datatypes(List *tlist, List *colTypes,
* of rows returned. (This condition is vacuous for DISTINCT, because then
* there are no non-DISTINCT output columns, so we needn't check. But note
* we are assuming that the qual can't distinguish values that the DISTINCT
- * operator sees as equal. This is a bit shaky but we have no way to test
+ * operator sees as equal. This is a bit shaky but we have no way to test
* for the case, and it's unlikely enough that we shouldn't refuse the
* optimization just because it could theoretically happen.)
*
@@ -1113,8 +1114,8 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
return false;
/*
- * It would be unsafe to push down window function calls, but at least
- * for the moment we could never see any in a qual anyhow.
+ * It would be unsafe to push down window function calls, but at least for
+ * the moment we could never see any in a qual anyhow.
*/
Assert(!contain_window_function(qual));
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index ee02689d29..34407af607 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.97 2009/02/06 23:43:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.98 2009/06/11 14:48:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,8 +102,8 @@ clauselist_selectivity(PlannerInfo *root,
ListCell *l;
/*
- * If there's exactly one clause, then no use in trying to match up
- * pairs, so just go directly to clause_selectivity().
+ * If there's exactly one clause, then no use in trying to match up pairs,
+ * so just go directly to clause_selectivity().
*/
if (list_length(clauses) == 1)
return clause_selectivity(root, (Node *) linitial(clauses),
@@ -410,30 +410,30 @@ treat_as_join_clause(Node *clause, RestrictInfo *rinfo,
if (varRelid != 0)
{
/*
- * Caller is forcing restriction mode (eg, because we are examining
- * an inner indexscan qual).
+ * Caller is forcing restriction mode (eg, because we are examining an
+ * inner indexscan qual).
*/
return false;
}
else if (sjinfo == NULL)
{
/*
- * It must be a restriction clause, since it's being evaluated at
- * a scan node.
+ * It must be a restriction clause, since it's being evaluated at a
+ * scan node.
*/
return false;
}
else
{
/*
- * Otherwise, it's a join if there's more than one relation used.
- * We can optimize this calculation if an rinfo was passed.
+ * Otherwise, it's a join if there's more than one relation used. We
+ * can optimize this calculation if an rinfo was passed.
*
- * XXX Since we know the clause is being evaluated at a join,
- * the only way it could be single-relation is if it was delayed
- * by outer joins. Although we can make use of the restriction
- * qual estimators anyway, it seems likely that we ought to account
- * for the probability of injected nulls somehow.
+ * XXX Since we know the clause is being evaluated at a join, the
+ * only way it could be single-relation is if it was delayed by outer
+ * joins. Although we can make use of the restriction qual estimators
+ * anyway, it seems likely that we ought to account for the
+ * probability of injected nulls somehow.
*/
if (rinfo)
return (bms_membership(rinfo->clause_relids) == BMS_MULTIPLE);
@@ -467,7 +467,7 @@ treat_as_join_clause(Node *clause, RestrictInfo *rinfo,
* if the clause isn't a join clause.
*
* sjinfo is NULL for a non-join clause, otherwise it provides additional
- * context information about the join being performed. There are some
+ * context information about the join being performed. There are some
* special cases:
* 1. For a special (not INNER) join, sjinfo is always a member of
* root->join_info_list.
@@ -525,7 +525,7 @@ clause_selectivity(PlannerInfo *root,
* contains only vars of that relid --- otherwise varRelid will affect
* the result, so mustn't cache. Outer join quals might be examined
* with either their join's actual jointype or JOIN_INNER, so we need
- * two cache variables to remember both cases. Note: we assume the
+ * two cache variables to remember both cases. Note: we assume the
* result won't change if we are switching the input relations or
* considering a unique-ified case, so we only need one cache variable
* for all non-JOIN_INNER cases.
@@ -571,8 +571,8 @@ clause_selectivity(PlannerInfo *root,
{
/*
* A Var at the top of a clause must be a bool Var. This is
- * equivalent to the clause reln.attribute = 't', so we
- * compute the selectivity as if that is what we have.
+ * equivalent to the clause reln.attribute = 't', so we compute
+ * the selectivity as if that is what we have.
*/
s1 = restriction_selectivity(root,
BooleanEqualOperator,
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index eca0f80b8c..08f1d361ba 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -54,7 +54,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.208 2009/05/09 22:51:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.209 2009/06/11 14:48:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -126,7 +126,7 @@ static bool adjust_semi_join(PlannerInfo *root, JoinPath *path,
Selectivity *match_count,
bool *indexed_join_quals);
static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
- List *quals);
+ List *quals);
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
@@ -946,7 +946,7 @@ cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
*
* Note: this is used for both self-reference and regular CTEs; the
* possible cost differences are below the threshold of what we could
- * estimate accurately anyway. Note that the costs of evaluating the
+ * estimate accurately anyway. Note that the costs of evaluating the
* referenced CTE query are added into the final plan as initplan costs,
* and should NOT be counted here.
*/
@@ -998,9 +998,9 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
/*
* We arbitrarily assume that about 10 recursive iterations will be
- * needed, and that we've managed to get a good fix on the cost and
- * output size of each one of them. These are mighty shaky assumptions
- * but it's hard to see how to do better.
+ * needed, and that we've managed to get a good fix on the cost and output
+ * size of each one of them. These are mighty shaky assumptions but it's
+ * hard to see how to do better.
*/
total_cost += 10 * rterm->total_cost;
total_rows += 10 * rterm->plan_rows;
@@ -1406,8 +1406,8 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
double outer_path_rows = PATH_ROWS(outer_path);
double inner_path_rows = nestloop_inner_path_rows(inner_path);
double ntuples;
- Selectivity outer_match_frac;
- Selectivity match_count;
+ Selectivity outer_match_frac;
+ Selectivity match_count;
bool indexed_join_quals;
if (!enable_nestloop)
@@ -1446,7 +1446,7 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
&indexed_join_quals))
{
double outer_matched_rows;
- Selectivity inner_scan_frac;
+ Selectivity inner_scan_frac;
/*
* SEMI or ANTI join: executor will stop after first match.
@@ -1470,11 +1470,11 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
/*
* For unmatched outer-rel rows, there are two cases. If the inner
- * path is an indexscan using all the joinquals as indexquals, then
- * an unmatched row results in an indexscan returning no rows, which
- * is probably quite cheap. We estimate this case as the same cost
- * to return the first tuple of a nonempty scan. Otherwise, the
- * executor will have to scan the whole inner rel; not so cheap.
+ * path is an indexscan using all the joinquals as indexquals, then an
+ * unmatched row results in an indexscan returning no rows, which is
+ * probably quite cheap. We estimate this case as the same cost to
+ * return the first tuple of a nonempty scan. Otherwise, the executor
+ * will have to scan the whole inner rel; not so cheap.
*/
if (indexed_join_quals)
{
@@ -1569,7 +1569,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
/*
- * Get approx # tuples passing the mergequals. We use approx_tuple_count
+ * Get approx # tuples passing the mergequals. We use approx_tuple_count
* here because we need an estimate done with JOIN_INNER semantics.
*/
mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
@@ -1586,7 +1586,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* estimated approximately as size of merge join output minus size of
* inner relation. Assume that the distinct key values are 1, 2, ..., and
* denote the number of values of each key in the outer relation as m1,
- * m2, ...; in the inner relation, n1, n2, ... Then we have
+ * m2, ...; in the inner relation, n1, n2, ... Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
@@ -1620,11 +1620,11 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* (unless it's an outer join, in which case the outer side has to be
* scanned all the way anyway). Estimate fraction of the left and right
* inputs that will actually need to be scanned. Likewise, we can
- * estimate the number of rows that will be skipped before the first
- * join pair is found, which should be factored into startup cost.
- * We use only the first (most significant) merge clause for this purpose.
- * Since mergejoinscansel() is a fairly expensive computation, we cache
- * the results in the merge clause RestrictInfo.
+ * estimate the number of rows that will be skipped before the first join
+ * pair is found, which should be factored into startup cost. We use only
+ * the first (most significant) merge clause for this purpose. Since
+ * mergejoinscansel() is a fairly expensive computation, we cache the
+ * results in the merge clause RestrictInfo.
*/
if (mergeclauses && path->jpath.jointype != JOIN_FULL)
{
@@ -1795,8 +1795,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* clauses that are to be applied at the join. (This is pessimistic since
* not all of the quals may get evaluated at each tuple.)
*
- * Note: we could adjust for SEMI/ANTI joins skipping some qual evaluations
- * here, but it's probably not worth the trouble.
+ * Note: we could adjust for SEMI/ANTI joins skipping some qual
+ * evaluations here, but it's probably not worth the trouble.
*/
startup_cost += qp_qual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
@@ -1890,8 +1890,8 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
int num_skew_mcvs;
double virtualbuckets;
Selectivity innerbucketsize;
- Selectivity outer_match_frac;
- Selectivity match_count;
+ Selectivity outer_match_frac;
+ Selectivity match_count;
ListCell *hcl;
if (!enable_hashjoin)
@@ -1937,11 +1937,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
*/
ExecChooseHashTableSize(inner_path_rows,
inner_path->parent->width,
- true, /* useskew */
+ true, /* useskew */
&numbuckets,
&numbatches,
&num_skew_mcvs);
virtualbuckets = (double) numbuckets *(double) numbatches;
+
/* mark the path with estimated # of batches */
path->num_batches = numbatches;
@@ -2038,7 +2039,7 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
NULL))
{
double outer_matched_rows;
- Selectivity inner_scan_frac;
+ Selectivity inner_scan_frac;
/*
* SEMI or ANTI join: executor will stop after first match.
@@ -2064,12 +2065,12 @@ cost_hashjoin(HashPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
* preferentially hit heavily-populated buckets; instead assume they
* are uncorrelated with the inner distribution and so they see an
* average bucket size of inner_path_rows / virtualbuckets. In the
- * second place, it seems likely that they will have few if any
- * exact hash-code matches and so very few of the tuples in the
- * bucket will actually require eval of the hash quals. We don't
- * have any good way to estimate how many will, but for the moment
- * assume that the effective cost per bucket entry is one-tenth what
- * it is for matchable tuples.
+ * second place, it seems likely that they will have few if any exact
+ * hash-code matches and so very few of the tuples in the bucket will
+ * actually require eval of the hash quals. We don't have any good
+ * way to estimate how many will, but for the moment assume that the
+ * effective cost per bucket entry is one-tenth what it is for
+ * matchable tuples.
*/
run_cost += hash_qual_cost.per_tuple *
(outer_path_rows - outer_matched_rows) *
@@ -2151,18 +2152,17 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
/*
* The per-tuple costs include the cost of evaluating the lefthand
* expressions, plus the cost of probing the hashtable. We already
- * accounted for the lefthand expressions as part of the testexpr,
- * and will also have counted one cpu_operator_cost for each
- * comparison operator. That is probably too low for the probing
- * cost, but it's hard to make a better estimate, so live with it for
- * now.
+ * accounted for the lefthand expressions as part of the testexpr, and
+ * will also have counted one cpu_operator_cost for each comparison
+ * operator. That is probably too low for the probing cost, but it's
+ * hard to make a better estimate, so live with it for now.
*/
}
else
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output we will
+ * evaluation. We need to estimate how much of the output we will
* actually need to scan. NOTE: this logic should agree with the
* tuple_fraction estimates used by make_subplan() in
* plan/subselect.c.
@@ -2315,9 +2315,9 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
* Vars and Consts are charged zero, and so are boolean operators (AND,
* OR, NOT). Simplistic, but a lot better than no model at all.
*
- * Note that Aggref and WindowFunc nodes are (and should be) treated
- * like Vars --- whatever execution cost they have is absorbed into
- * plan-node-specific costing. As far as expression evaluation is
+ * Note that Aggref and WindowFunc nodes are (and should be) treated like
+ * Vars --- whatever execution cost they have is absorbed into
+ * plan-node-specific costing. As far as expression evaluation is
* concerned they're just like Vars.
*
* Should we try to account for the possibility of short-circuit
@@ -2425,10 +2425,10 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
else if (IsA(node, AlternativeSubPlan))
{
/*
- * Arbitrarily use the first alternative plan for costing. (We should
+ * Arbitrarily use the first alternative plan for costing. (We should
* certainly only include one alternative, and we don't yet have
- * enough information to know which one the executor is most likely
- * to use.)
+ * enough information to know which one the executor is most likely to
+ * use.)
*/
AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
@@ -2495,8 +2495,8 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
*/
/*
- * In an ANTI join, we must ignore clauses that are "pushed down",
- * since those won't affect the match logic. In a SEMI join, we do not
+ * In an ANTI join, we must ignore clauses that are "pushed down", since
+ * those won't affect the match logic. In a SEMI join, we do not
* distinguish joinquals from "pushed down" quals, so just use the whole
* restrictinfo list.
*/
@@ -2550,15 +2550,15 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
/*
* jselec can be interpreted as the fraction of outer-rel rows that have
- * any matches (this is true for both SEMI and ANTI cases). And nselec
- * is the fraction of the Cartesian product that matches. So, the
- * average number of matches for each outer-rel row that has at least
- * one match is nselec * inner_rows / jselec.
+ * any matches (this is true for both SEMI and ANTI cases). And nselec is
+ * the fraction of the Cartesian product that matches. So, the average
+ * number of matches for each outer-rel row that has at least one match is
+ * nselec * inner_rows / jselec.
*
* Note: it is correct to use the inner rel's "rows" count here, not
* PATH_ROWS(), even if the inner path under consideration is an inner
- * indexscan. This is because we have included all the join clauses
- * in the selectivity estimate, even ones used in an inner indexscan.
+ * indexscan. This is because we have included all the join clauses in
+ * the selectivity estimate, even ones used in an inner indexscan.
*/
if (jselec > 0) /* protect against zero divide */
{
@@ -2573,10 +2573,9 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
*match_count = avgmatch;
/*
- * If requested, check whether the inner path uses all the joinquals
- * as indexquals. (If that's true, we can assume that an unmatched
- * outer tuple is cheap to process, whereas otherwise it's probably
- * expensive.)
+ * If requested, check whether the inner path uses all the joinquals as
+ * indexquals. (If that's true, we can assume that an unmatched outer
+ * tuple is cheap to process, whereas otherwise it's probably expensive.)
*/
if (indexed_join_quals)
{
@@ -2906,8 +2905,8 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
if (rte->self_reference)
{
/*
- * In a self-reference, arbitrarily assume the average worktable
- * size is about 10 times the nonrecursive term's size.
+ * In a self-reference, arbitrarily assume the average worktable size
+ * is about 10 times the nonrecursive term's size.
*/
rel->tuples = 10 * cteplan->plan_rows;
}
@@ -2959,7 +2958,8 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
ndx = var->varattno - rel->min_attr;
/*
- * The width probably hasn't been cached yet, but may as well check
+ * The width probably hasn't been cached yet, but may as well
+ * check
*/
if (rel->attr_widths[ndx] > 0)
{
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 17c9539679..17d24e400f 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/equivclass.c,v 1.18 2009/04/19 19:46:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/equivclass.c,v 1.19 2009/06/11 14:48:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -438,9 +438,9 @@ get_eclass_for_sort_expr(PlannerInfo *root,
/*
* add_eq_member doesn't check for volatile functions, set-returning
- * functions, aggregates, or window functions, but such could appear
- * in sort expressions; so we have to check whether its const-marking
- * was correct.
+ * functions, aggregates, or window functions, but such could appear in
+ * sort expressions; so we have to check whether its const-marking was
+ * correct.
*/
if (newec->ec_has_const)
{
@@ -563,11 +563,11 @@ generate_base_implied_equalities_const(PlannerInfo *root,
ListCell *lc;
/*
- * In the trivial case where we just had one "var = const" clause,
- * push the original clause back into the main planner machinery. There
- * is nothing to be gained by doing it differently, and we save the
- * effort to re-build and re-analyze an equality clause that will be
- * exactly equivalent to the old one.
+ * In the trivial case where we just had one "var = const" clause, push
+ * the original clause back into the main planner machinery. There is
+ * nothing to be gained by doing it differently, and we save the effort to
+ * re-build and re-analyze an equality clause that will be exactly
+ * equivalent to the old one.
*/
if (list_length(ec->ec_members) == 2 &&
list_length(ec->ec_sources) == 1)
@@ -1166,7 +1166,7 @@ create_join_clause(PlannerInfo *root,
*
* Outer join clauses that are marked outerjoin_delayed are special: this
* condition means that one or both VARs might go to null due to a lower
- * outer join. We can still push a constant through the clause, but only
+ * outer join. We can still push a constant through the clause, but only
* if its operator is strict; and we *have to* throw the clause back into
* regular joinclause processing. By keeping the strict join clause,
* we ensure that any null-extended rows that are mistakenly generated due
@@ -1816,11 +1816,11 @@ have_relevant_eclass_joinclause(PlannerInfo *root,
* path to look through ec_sources. Checking the members anyway is OK
* as a possibly-overoptimistic heuristic.
*
- * We don't test ec_has_const either, even though a const eclass
- * won't generate real join clauses. This is because if we had
- * "WHERE a.x = b.y and a.x = 42", it is worth considering a join
- * between a and b, since the join result is likely to be small even
- * though it'll end up being an unqualified nestloop.
+ * We don't test ec_has_const either, even though a const eclass won't
+ * generate real join clauses. This is because if we had "WHERE a.x =
+ * b.y and a.x = 42", it is worth considering a join between a and b,
+ * since the join result is likely to be small even though it'll end
+ * up being an unqualified nestloop.
*/
/* Needn't scan if it couldn't contain members from each rel */
@@ -1890,11 +1890,11 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
* path to look through ec_sources. Checking the members anyway is OK
* as a possibly-overoptimistic heuristic.
*
- * We don't test ec_has_const either, even though a const eclass
- * won't generate real join clauses. This is because if we had
- * "WHERE a.x = b.y and a.x = 42", it is worth considering a join
- * between a and b, since the join result is likely to be small even
- * though it'll end up being an unqualified nestloop.
+ * We don't test ec_has_const either, even though a const eclass won't
+ * generate real join clauses. This is because if we had "WHERE a.x =
+ * b.y and a.x = 42", it is worth considering a join between a and b,
+ * since the join result is likely to be small even though it'll end
+ * up being an unqualified nestloop.
*/
/* Needn't scan if it couldn't contain members from each rel */
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index db271e4f1e..b3f96eb773 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.239 2009/04/16 20:42:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.240 2009/06/11 14:48:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -179,14 +179,14 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
true, NULL, SAOP_FORBID, ST_ANYSCAN);
/*
- * Submit all the ones that can form plain IndexScan plans to add_path.
- * (A plain IndexPath always represents a plain IndexScan plan; however
- * some of the indexes might support only bitmap scans, and those we
- * mustn't submit to add_path here.) Also, pick out the ones that might
- * be useful as bitmap scans. For that, we must discard indexes that
- * don't support bitmap scans, and we also are only interested in paths
- * that have some selectivity; we should discard anything that was
- * generated solely for ordering purposes.
+ * Submit all the ones that can form plain IndexScan plans to add_path. (A
+ * plain IndexPath always represents a plain IndexScan plan; however some
+ * of the indexes might support only bitmap scans, and those we mustn't
+ * submit to add_path here.) Also, pick out the ones that might be useful
+ * as bitmap scans. For that, we must discard indexes that don't support
+ * bitmap scans, and we also are only interested in paths that have some
+ * selectivity; we should discard anything that was generated solely for
+ * ordering purposes.
*/
bitindexpaths = NIL;
foreach(l, indexpaths)
@@ -1628,13 +1628,13 @@ eclass_matches_any_index(EquivalenceClass *ec, EquivalenceMember *em,
/*
* If it's a btree index, we can reject it if its opfamily isn't
- * compatible with the EC, since no clause generated from the
- * EC could be used with the index. For non-btree indexes,
- * we can't easily tell whether clauses generated from the EC
- * could be used with the index, so only check for expression
- * match. This might mean we return "true" for a useless index,
- * but that will just cause some wasted planner cycles; it's
- * better than ignoring useful indexes.
+ * compatible with the EC, since no clause generated from the EC
+ * could be used with the index. For non-btree indexes, we can't
+ * easily tell whether clauses generated from the EC could be used
+ * with the index, so only check for expression match. This might
+ * mean we return "true" for a useless index, but that will just
+ * cause some wasted planner cycles; it's better than ignoring
+ * useful indexes.
*/
if ((index->relam != BTREE_AM_OID ||
list_member_oid(ec->ec_opfamilies, curFamily)) &&
@@ -2223,9 +2223,9 @@ match_special_index_operator(Expr *clause, Oid opfamily,
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
- * Note: actually, in the Pattern_Prefix_Exact case, we only need "="
- * so a hash index would work. Currently it doesn't seem worth checking
- * for that, however.
+ * Note: actually, in the Pattern_Prefix_Exact case, we only need "=" so a
+ * hash index would work. Currently it doesn't seem worth checking for
+ * that, however.
*
* We insist on the opfamily being the specific one we expect, else we'd
* do the wrong thing if someone were to make a reverse-sort opfamily with
@@ -2460,7 +2460,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily)
/*
* LIKE and regex operators are not members of any btree index opfamily,
* but they can be members of opfamilies for more exotic index types such
- * as GIN. Therefore, we should only do expansion if the operator is
+ * as GIN. Therefore, we should only do expansion if the operator is
* actually not in the opfamily. But checking that requires a syscache
* lookup, so it's best to first see if the operator is one we are
* interested in.
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index e172c43c3c..bc0831933e 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.121 2009/02/05 01:24:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.122 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1022,14 +1022,14 @@ select_mergejoin_clauses(PlannerInfo *root,
* Note: it would be bad if this condition failed for an otherwise
* mergejoinable FULL JOIN clause, since that would result in
* undesirable planner failure. I believe that is not possible
- * however; a variable involved in a full join could only appear
- * in below_outer_join eclasses, which aren't considered redundant.
+ * however; a variable involved in a full join could only appear in
+ * below_outer_join eclasses, which aren't considered redundant.
*
- * This case *can* happen for left/right join clauses: the
- * outer-side variable could be equated to a constant. Because we
- * will propagate that constant across the join clause, the loss of
- * ability to do a mergejoin is not really all that big a deal, and
- * so it's not clear that improving this is important.
+ * This case *can* happen for left/right join clauses: the outer-side
+ * variable could be equated to a constant. Because we will propagate
+ * that constant across the join clause, the loss of ability to do a
+ * mergejoin is not really all that big a deal, and so it's not clear
+ * that improving this is important.
*/
cache_mergeclause_eclasses(root, restrictinfo);
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 7c38607db6..9e6f57f4c9 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.99 2009/02/27 22:41:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.100 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -353,7 +353,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
ListCell *l;
/*
- * Ensure output params are set on failure return. This is just to
+ * Ensure output params are set on failure return. This is just to
* suppress uninitialized-variable warnings from overly anal compilers.
*/
*sjinfo_p = NULL;
@@ -361,7 +361,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* If we have any special joins, the proposed join might be illegal; and
- * in any case we have to determine its join type. Scan the join info
+ * in any case we have to determine its join type. Scan the join info
* list for conflicts.
*/
match_sjinfo = NULL;
@@ -569,7 +569,7 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
/*
* If it's a plain inner join, then we won't have found anything in
- * join_info_list. Make up a SpecialJoinInfo so that selectivity
+ * join_info_list. Make up a SpecialJoinInfo so that selectivity
* estimation functions will know what's being joined.
*/
if (sjinfo == NULL)
@@ -595,8 +595,8 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
&restrictlist);
/*
- * If we've already proven this join is empty, we needn't consider
- * any more paths for it.
+ * If we've already proven this join is empty, we needn't consider any
+ * more paths for it.
*/
if (is_dummy_rel(joinrel))
{
@@ -605,19 +605,19 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
}
/*
- * Consider paths using each rel as both outer and inner. Depending
- * on the join type, a provably empty outer or inner rel might mean
- * the join is provably empty too; in which case throw away any
- * previously computed paths and mark the join as dummy. (We do it
- * this way since it's conceivable that dummy-ness of a multi-element
- * join might only be noticeable for certain construction paths.)
+ * Consider paths using each rel as both outer and inner. Depending on
+ * the join type, a provably empty outer or inner rel might mean the join
+ * is provably empty too; in which case throw away any previously computed
+ * paths and mark the join as dummy. (We do it this way since it's
+ * conceivable that dummy-ness of a multi-element join might only be
+ * noticeable for certain construction paths.)
*
* Also, a provably constant-false join restriction typically means that
- * we can skip evaluating one or both sides of the join. We do this
- * by marking the appropriate rel as dummy.
+ * we can skip evaluating one or both sides of the join. We do this by
+ * marking the appropriate rel as dummy.
*
- * We need only consider the jointypes that appear in join_info_list,
- * plus JOIN_INNER.
+ * We need only consider the jointypes that appear in join_info_list, plus
+ * JOIN_INNER.
*/
switch (sjinfo->jointype)
{
@@ -665,6 +665,7 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
restrictlist);
break;
case JOIN_SEMI:
+
/*
* We might have a normal semijoin, or a case where we don't have
* enough rels to do the semijoin but can unique-ify the RHS and
@@ -971,12 +972,12 @@ restriction_is_constant_false(List *restrictlist)
*/
foreach(lc, restrictlist)
{
- RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
Assert(IsA(rinfo, RestrictInfo));
if (rinfo->clause && IsA(rinfo->clause, Const))
{
- Const *con = (Const *) rinfo->clause;
+ Const *con = (Const *) rinfo->clause;
/* constant NULL is as good as constant FALSE for our purposes */
if (con->constisnull)
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index c84591fe0c..6a644f9a0f 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.89 2009/04/16 20:42:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.90 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -96,10 +96,10 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* enforced at the relation scan level.
*
* We must also ignore clauses that are marked !is_pushed_down (ie they
- * are themselves outer-join clauses). It would be safe to extract an
+ * are themselves outer-join clauses). It would be safe to extract an
* index condition from such a clause if we are within the nullable rather
* than the non-nullable side of its join, but we haven't got enough
- * context here to tell which applies. OR clauses in outer-join quals
+ * context here to tell which applies. OR clauses in outer-join quals
* aren't exactly common, so we'll let that case go unoptimized for now.
*/
foreach(i, rel->joininfo)
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 2aabd880aa..ab07a0dbea 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.259 2009/05/09 22:51:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.260 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,9 +63,9 @@ static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path
static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path,
Plan *outer_plan, Plan *inner_plan);
static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path,
@@ -98,9 +98,9 @@ static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
Index scanrelid, List *values_lists);
static CteScan *make_ctescan(List *qptlist, List *qpqual,
- Index scanrelid, int ctePlanId, int cteParam);
+ Index scanrelid, int ctePlanId, int cteParam);
static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
- Index scanrelid, int wtParam);
+ Index scanrelid, int wtParam);
static BitmapAnd *make_bitmap_and(List *bitmapplans);
static BitmapOr *make_bitmap_or(List *bitmapplans);
static NestLoop *make_nestloop(List *tlist,
@@ -113,10 +113,10 @@ static HashJoin *make_hashjoin(List *tlist,
Plan *lefttree, Plan *righttree,
JoinType jointype);
static Hash *make_hash(Plan *lefttree,
- Oid skewTable,
- AttrNumber skewColumn,
- Oid skewColType,
- int32 skewColTypmod);
+ Oid skewTable,
+ AttrNumber skewColumn,
+ Oid skewColType,
+ int32 skewColTypmod);
static MergeJoin *make_mergejoin(List *tlist,
List *joinclauses, List *otherclauses,
List *mergeclauses,
@@ -329,7 +329,7 @@ build_relation_tlist(RelOptInfo *rel)
foreach(v, rel->reltargetlist)
{
/* Do we really need to copy here? Not sure */
- Node *node = (Node *) copyObject(lfirst(v));
+ Node *node = (Node *) copyObject(lfirst(v));
tlist = lappend(tlist, makeTargetEntry((Expr *) node,
resno,
@@ -657,20 +657,20 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
return subplan;
/*
- * As constructed, the subplan has a "flat" tlist containing just the
- * Vars needed here and at upper levels. The values we are supposed
- * to unique-ify may be expressions in these variables. We have to
- * add any such expressions to the subplan's tlist.
+ * As constructed, the subplan has a "flat" tlist containing just the Vars
+ * needed here and at upper levels. The values we are supposed to
+ * unique-ify may be expressions in these variables. We have to add any
+ * such expressions to the subplan's tlist.
*
- * The subplan may have a "physical" tlist if it is a simple scan plan.
- * If we're going to sort, this should be reduced to the regular tlist,
- * so that we don't sort more data than we need to. For hashing, the
- * tlist should be left as-is if we don't need to add any expressions;
- * but if we do have to add expressions, then a projection step will be
- * needed at runtime anyway, so we may as well remove unneeded items.
- * Therefore newtlist starts from build_relation_tlist() not just a
- * copy of the subplan's tlist; and we don't install it into the subplan
- * unless we are sorting or stuff has to be added.
+ * The subplan may have a "physical" tlist if it is a simple scan plan. If
+ * we're going to sort, this should be reduced to the regular tlist, so
+ * that we don't sort more data than we need to. For hashing, the tlist
+ * should be left as-is if we don't need to add any expressions; but if we
+ * do have to add expressions, then a projection step will be needed at
+ * runtime anyway, so we may as well remove unneeded items. Therefore
+ * newtlist starts from build_relation_tlist() not just a copy of the
+ * subplan's tlist; and we don't install it into the subplan unless we are
+ * sorting or stuff has to be added.
*/
in_operators = best_path->in_operators;
uniq_exprs = best_path->uniq_exprs;
@@ -1063,10 +1063,10 @@ create_bitmap_scan_plan(PlannerInfo *root,
qpqual = order_qual_clauses(root, qpqual);
/*
- * When dealing with special operators, we will at this point
- * have duplicate clauses in qpqual and bitmapqualorig. We may as well
- * drop 'em from bitmapqualorig, since there's no point in making the
- * tests twice.
+ * When dealing with special operators, we will at this point have
+ * duplicate clauses in qpqual and bitmapqualorig. We may as well drop
+ * 'em from bitmapqualorig, since there's no point in making the tests
+ * twice.
*/
bitmapqualorig = list_difference_ptr(bitmapqualorig, qpqual);
@@ -1414,10 +1414,10 @@ static CteScan *
create_ctescan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses)
{
- CteScan *scan_plan;
+ CteScan *scan_plan;
Index scan_relid = best_path->parent->relid;
RangeTblEntry *rte;
- SubPlan *ctesplan = NULL;
+ SubPlan *ctesplan = NULL;
int plan_id;
int cte_param_id;
PlannerInfo *cteroot;
@@ -1441,6 +1441,7 @@ create_ctescan_plan(PlannerInfo *root, Path *best_path,
if (!cteroot) /* shouldn't happen */
elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
}
+
/*
* Note: cte_plan_ids can be shorter than cteList, if we are still working
* on planning the CTEs (ie, this is a side-reference from another CTE).
@@ -1471,8 +1472,8 @@ create_ctescan_plan(PlannerInfo *root, Path *best_path,
elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
/*
- * We need the CTE param ID, which is the sole member of the
- * SubPlan's setParam list.
+ * We need the CTE param ID, which is the sole member of the SubPlan's
+ * setParam list.
*/
cte_param_id = linitial_int(ctesplan->setParam);
@@ -1512,12 +1513,12 @@ create_worktablescan_plan(PlannerInfo *root, Path *best_path,
/*
* We need to find the worktable param ID, which is in the plan level
- * that's processing the recursive UNION, which is one level *below*
- * where the CTE comes from.
+ * that's processing the recursive UNION, which is one level *below* where
+ * the CTE comes from.
*/
levelsup = rte->ctelevelsup;
if (levelsup == 0) /* shouldn't happen */
- elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
+ elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
levelsup--;
cteroot = root;
while (levelsup-- > 0)
@@ -1526,7 +1527,7 @@ create_worktablescan_plan(PlannerInfo *root, Path *best_path,
if (!cteroot) /* shouldn't happen */
elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
}
- if (cteroot->wt_param_id < 0) /* shouldn't happen */
+ if (cteroot->wt_param_id < 0) /* shouldn't happen */
elog(ERROR, "could not find param ID for CTE \"%s\"", rte->ctename);
/* Sort clauses into best execution order */
@@ -1563,10 +1564,9 @@ create_nestloop_plan(PlannerInfo *root,
NestLoop *join_plan;
/*
- * If the inner path is a nestloop inner indexscan, it might be using
- * some of the join quals as index quals, in which case we don't have
- * to check them again at the join node. Remove any join quals that
- * are redundant.
+ * If the inner path is a nestloop inner indexscan, it might be using some
+ * of the join quals as index quals, in which case we don't have to check
+ * them again at the join node. Remove any join quals that are redundant.
*/
joinrestrictclauses =
select_nonredundant_join_clauses(root,
@@ -1869,12 +1869,12 @@ create_hashjoin_plan(PlannerInfo *root,
disuse_physical_tlist(outer_plan, best_path->jpath.outerjoinpath);
/*
- * If there is a single join clause and we can identify the outer
- * variable as a simple column reference, supply its identity for
- * possible use in skew optimization. (Note: in principle we could
- * do skew optimization with multiple join clauses, but we'd have to
- * be able to determine the most common combinations of outer values,
- * which we don't currently have enough stats for.)
+ * If there is a single join clause and we can identify the outer variable
+ * as a simple column reference, supply its identity for possible use in
+ * skew optimization. (Note: in principle we could do skew optimization
+ * with multiple join clauses, but we'd have to be able to determine the
+ * most common combinations of outer values, which we don't currently have
+ * enough stats for.)
*/
if (list_length(hashclauses) == 1)
{
@@ -1887,7 +1887,7 @@ create_hashjoin_plan(PlannerInfo *root,
node = (Node *) ((RelabelType *) node)->arg;
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
RangeTblEntry *rte;
rte = root->simple_rte_array[var->varno];
@@ -2029,8 +2029,8 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path)
/* Never need to commute... */
/*
- * Determine which index attribute this is and change the
- * indexkey operand as needed.
+ * Determine which index attribute this is and change the indexkey
+ * operand as needed.
*/
linitial(saop->args) = fix_indexqual_operand(linitial(saop->args),
index);
@@ -2506,7 +2506,7 @@ make_ctescan(List *qptlist,
int ctePlanId,
int cteParam)
{
- CteScan *node = makeNode(CteScan);
+ CteScan *node = makeNode(CteScan);
Plan *plan = &node->scan.plan;
/* cost should be inserted by caller */
@@ -3282,7 +3282,7 @@ make_windowagg(PlannerInfo *root, List *tlist,
{
WindowAgg *node = makeNode(WindowAgg);
Plan *plan = &node->plan;
- Path windowagg_path; /* dummy for result of cost_windowagg */
+ Path windowagg_path; /* dummy for result of cost_windowagg */
QualCost qual_cost;
node->winref = winref;
@@ -3294,7 +3294,7 @@ make_windowagg(PlannerInfo *root, List *tlist,
node->ordOperators = ordOperators;
node->frameOptions = frameOptions;
- copy_plan_costsize(plan, lefttree); /* only care about copying size */
+ copy_plan_costsize(plan, lefttree); /* only care about copying size */
cost_windowagg(&windowagg_path, root,
numWindowFuncs, partNumCols, ordNumCols,
lefttree->startup_cost,
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 56100ba6cc..8a189d4443 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.153 2009/05/07 20:13:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.154 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -332,7 +332,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
*/
foreach(l, (List *) f->quals)
{
- Node *qual = (Node *) lfirst(l);
+ Node *qual = (Node *) lfirst(l);
distribute_qual_to_rels(root, qual,
false, below_outer_join, JOIN_INNER,
@@ -430,8 +430,8 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
* we mustn't add it to join_info_list just yet, because we don't want
* distribute_qual_to_rels to think it is an outer join below us.
*
- * Semijoins are a bit of a hybrid: we build a SpecialJoinInfo,
- * but we want ojscope = NULL for distribute_qual_to_rels.
+ * Semijoins are a bit of a hybrid: we build a SpecialJoinInfo, but we
+ * want ojscope = NULL for distribute_qual_to_rels.
*/
if (j->jointype != JOIN_INNER)
{
@@ -455,7 +455,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
/* Process the qual clauses */
foreach(l, (List *) j->quals)
{
- Node *qual = (Node *) lfirst(l);
+ Node *qual = (Node *) lfirst(l);
distribute_qual_to_rels(root, qual,
false, below_outer_join, j->jointype,
@@ -629,9 +629,9 @@ make_outerjoininfo(PlannerInfo *root,
* min_lefthand. (We must use its full syntactic relset, not just its
* min_lefthand + min_righthand. This is because there might be other
* OJs below this one that this one can commute with, but we cannot
- * commute with them if we don't with this one.) Also, if the
- * current join is an antijoin, we must preserve ordering regardless
- * of strictness.
+ * commute with them if we don't with this one.) Also, if the current
+ * join is an antijoin, we must preserve ordering regardless of
+ * strictness.
*
* Note: I believe we have to insist on being strict for at least one
* rel in the lower OJ's min_righthand, not its whole syn_righthand.
@@ -882,7 +882,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* We can't use such a clause to deduce equivalence (the left and
* right sides might be unequal above the join because one of them has
* gone to NULL) ... but we might be able to use it for more limited
- * deductions, if it is mergejoinable. So consider adding it to the
+ * deductions, if it is mergejoinable. So consider adding it to the
* lists of set-aside outer-join clauses.
*/
is_pushed_down = false;
@@ -937,8 +937,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
/*
* It's possible that this is an IS NULL clause that's redundant
* with a lower antijoin; if so we can just discard it. We need
- * not test in any of the other cases, because this will only
- * be possible for pushed-down, delayed clauses.
+ * not test in any of the other cases, because this will only be
+ * possible for pushed-down, delayed clauses.
*/
if (check_redundant_nullability_qual(root, clause))
return;
@@ -1122,8 +1122,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
*/
static bool
check_outerjoin_delay(PlannerInfo *root,
- Relids *relids_p, /* in/out parameter */
- Relids *nullable_relids_p, /* output parameter */
+ Relids *relids_p, /* in/out parameter */
+ Relids *nullable_relids_p, /* output parameter */
bool is_pushed_down)
{
Relids relids;
@@ -1215,8 +1215,8 @@ check_redundant_nullability_qual(PlannerInfo *root, Node *clause)
forced_null_rel = forced_null_var->varno;
/*
- * If the Var comes from the nullable side of a lower antijoin, the
- * IS NULL condition is necessarily true.
+ * If the Var comes from the nullable side of a lower antijoin, the IS
+ * NULL condition is necessarily true.
*/
foreach(lc, root->join_info_list)
{
@@ -1393,7 +1393,7 @@ build_implied_join_equality(Oid opno,
true, /* is_pushed_down */
false, /* outerjoin_delayed */
false, /* pseudoconstant */
- qualscope, /* required_relids */
+ qualscope, /* required_relids */
NULL); /* nullable_relids */
/* Set mergejoinability info always, and hashjoinability if enabled */
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 0cdcb74dfe..6bdbd2a913 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.45 2009/01/01 17:23:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.46 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -96,8 +96,8 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
* Reject unoptimizable cases.
*
* We don't handle GROUP BY or windowing, because our current
- * implementations of grouping require looking at all the rows anyway,
- * and so there's not much point in optimizing MIN/MAX.
+ * implementations of grouping require looking at all the rows anyway, and
+ * so there's not much point in optimizing MIN/MAX.
*/
if (parse->groupClause || parse->hasWindowFuncs)
return NULL;
@@ -189,12 +189,12 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
&aggs_list);
/*
- * We have to replace Aggrefs with Params in equivalence classes too,
- * else ORDER BY or DISTINCT on an optimized aggregate will fail.
+ * We have to replace Aggrefs with Params in equivalence classes too, else
+ * ORDER BY or DISTINCT on an optimized aggregate will fail.
*
- * Note: at some point it might become necessary to mutate other
- * data structures too, such as the query's sortClause or distinctClause.
- * Right now, those won't be examined after this point.
+ * Note: at some point it might become necessary to mutate other data
+ * structures too, such as the query's sortClause or distinctClause. Right
+ * now, those won't be examined after this point.
*/
mutate_eclass_expressions(root,
replace_aggs_with_params_mutator,
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index c87da7a071..0b75d150ab 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.114 2009/01/01 17:23:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.115 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -289,13 +289,13 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* If both GROUP BY and ORDER BY are specified, we will need two
* levels of sort --- and, therefore, certainly need to read all the
- * tuples --- unless ORDER BY is a subset of GROUP BY. Likewise if
- * we have both DISTINCT and GROUP BY, or if we have a window
+ * tuples --- unless ORDER BY is a subset of GROUP BY. Likewise if we
+ * have both DISTINCT and GROUP BY, or if we have a window
* specification not compatible with the GROUP BY.
*/
if (!pathkeys_contained_in(root->sort_pathkeys, root->group_pathkeys) ||
!pathkeys_contained_in(root->distinct_pathkeys, root->group_pathkeys) ||
- !pathkeys_contained_in(root->window_pathkeys, root->group_pathkeys))
+ !pathkeys_contained_in(root->window_pathkeys, root->group_pathkeys))
tuple_fraction = 0.0;
}
else if (parse->hasAggs || root->hasHavingQual)
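[note] The three checks above all go through pathkeys_contained_in(), which for canonicalized pathkeys amounts to asking whether one key list is a prefix of the other (a plan sorted by the longer list already satisfies the shorter request). A minimal standalone sketch of that prefix test; SortKey and the function name are illustrative stand-ins, not the real PathKey machinery:

    #include <stdbool.h>
    #include <stddef.h>

    /* Toy stand-in for a canonical pathkey: canonical pathkeys are shared
     * objects, so equality is just pointer identity. */
    typedef struct SortKey { int dummy; } SortKey;

    /* True when keys1 is a prefix of keys2, i.e. output ordered by keys2
     * already satisfies an ordering request for keys1. */
    static bool
    sortkeys_prefix_of(SortKey *const *keys1, size_t n1,
                       SortKey *const *keys2, size_t n2)
    {
        if (n1 > n2)
            return false;
        for (size_t i = 0; i < n1; i++)
        {
            if (keys1[i] != keys2[i])
                return false;
        }
        return true;
    }

If neither the ORDER BY, DISTINCT, nor window keys form a prefix of the GROUP BY keys, a second sort is unavoidable, which is why the code above falls back to tuple_fraction = 0.0 (read everything).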
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 7ad15d9da2..3f344b3a14 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.255 2009/04/28 21:31:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.256 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,7 +43,7 @@
/* GUC parameter */
-double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
+double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
/* Hook for plugins to get control in planner() */
planner_hook_type planner_hook = NULL;
@@ -84,18 +84,18 @@ static void locate_grouping_columns(PlannerInfo *root,
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
static List *add_volatile_sort_exprs(List *window_tlist, List *tlist,
- List *activeWindows);
+ List *activeWindows);
static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
- List *tlist, bool canonicalize);
+ List *tlist, bool canonicalize);
static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
- List *tlist,
- int numSortCols, AttrNumber *sortColIdx,
- int *partNumCols,
- AttrNumber **partColIdx,
- Oid **partOperators,
- int *ordNumCols,
- AttrNumber **ordColIdx,
- Oid **ordOperators);
+ List *tlist,
+ int numSortCols, AttrNumber *sortColIdx,
+ int *partNumCols,
+ AttrNumber **partColIdx,
+ Oid **partOperators,
+ int *ordNumCols,
+ AttrNumber **ordColIdx,
+ Oid **ordOperators);
/*****************************************************************************
@@ -171,10 +171,9 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
tuple_fraction = cursor_tuple_fraction;
/*
- * We document cursor_tuple_fraction as simply being a fraction,
- * which means the edge cases 0 and 1 have to be treated specially
- * here. We convert 1 to 0 ("all the tuples") and 0 to a very small
- * fraction.
+ * We document cursor_tuple_fraction as simply being a fraction, which
+ * means the edge cases 0 and 1 have to be treated specially here. We
+ * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
*/
if (tuple_fraction >= 1.0)
tuple_fraction = 0.0;
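[note] As the comment says, the user-visible setting is documented as a plain fraction, so the two edge values are remapped before the planner uses them. A tiny sketch of that remapping; the exact tiny value substituted for 0 is not shown in this diff, so the 1e-10 below is an assumption for illustration only:

    /* Map the documented 0..1 cursor_tuple_fraction onto the planner's
     * internal convention: internally 0.0 means "fetch all tuples", so a
     * user-supplied 1.0 becomes 0.0, and a user-supplied 0.0 becomes a tiny
     * positive fraction (value assumed here). */
    static double
    remap_cursor_fraction(double fraction)
    {
        if (fraction >= 1.0)
            return 0.0;         /* all the tuples */
        if (fraction <= 0.0)
            return 1e-10;       /* assumed tiny value */
        return fraction;
    }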
@@ -297,8 +296,8 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
root->non_recursive_plan = NULL;
/*
- * If there is a WITH list, process each WITH query and build an
- * initplan SubPlan structure for it.
+ * If there is a WITH list, process each WITH query and build an initplan
+ * SubPlan structure for it.
*/
if (parse->cteList)
SS_process_ctes(root);
@@ -313,8 +312,8 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
pull_up_sublinks(root);
/*
- * Scan the rangetable for set-returning functions, and inline them
- * if possible (producing subqueries that might get pulled up next).
+ * Scan the rangetable for set-returning functions, and inline them if
+ * possible (producing subqueries that might get pulled up next).
* Recursion issues here are handled in the same way as for SubLinks.
*/
inline_set_returning_functions(root);
@@ -329,8 +328,8 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
/*
* Detect whether any rangetable entries are RTE_JOIN kind; if not, we can
* avoid the expense of doing flatten_join_alias_vars(). Also check for
- * outer joins --- if none, we can skip reduce_outer_joins().
- * This must be done after we have done pull_up_subqueries, of course.
+ * outer joins --- if none, we can skip reduce_outer_joins(). This must be
+ * done after we have done pull_up_subqueries, of course.
*/
root->hasJoinRTEs = false;
hasOuterJoins = false;
@@ -528,7 +527,7 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
* Simplify constant expressions.
*
* Note: one essential effect here is to insert the current actual values
- * of any default arguments for functions. To ensure that happens, we
+ * of any default arguments for functions. To ensure that happens, we
* *must* process all expressions here. Previous PG versions sometimes
* skipped const-simplification if it didn't seem worth the trouble, but
* we can't do that anymore.
@@ -797,8 +796,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* If there's a top-level ORDER BY, assume we have to fetch all the
* tuples. This might be too simplistic given all the hackery below
- * to possibly avoid the sort; but the odds of accurate estimates
- * here are pretty low anyway.
+ * to possibly avoid the sort; but the odds of accurate estimates here
+ * are pretty low anyway.
*/
if (parse->sortClause)
tuple_fraction = 0.0;
@@ -908,9 +907,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* Calculate pathkeys that represent grouping/ordering requirements.
* Stash them in PlannerInfo so that query_planner can canonicalize
- * them after EquivalenceClasses have been formed. The sortClause
- * is certainly sort-able, but GROUP BY and DISTINCT might not be,
- * in which case we just leave their pathkeys empty.
+ * them after EquivalenceClasses have been formed. The sortClause is
+ * certainly sort-able, but GROUP BY and DISTINCT might not be, in
+ * which case we just leave their pathkeys empty.
*/
if (parse->groupClause &&
grouping_is_sortable(parse->groupClause))
@@ -982,7 +981,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a
* superset of GROUP BY, it would be tempting to request sort by ORDER
* BY --- but that might just leave us failing to exploit an available
- * sort order at all. Needs more thought. The choice for DISTINCT
+ * sort order at all. Needs more thought. The choice for DISTINCT
* versus ORDER BY is much easier, since we know that the parser
* ensured that one is a superset of the other.
*/
@@ -1012,12 +1011,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*/
if (parse->groupClause)
{
- bool can_hash;
- bool can_sort;
+ bool can_hash;
+ bool can_sort;
/*
* Executor doesn't support hashed aggregation with DISTINCT
- * aggregates. (Doing so would imply storing *all* the input
+ * aggregates. (Doing so would imply storing *all* the input
* values in the hash table, which seems like a certain loser.)
*/
can_hash = (agg_counts.numDistinctAggs == 0 &&
@@ -1079,16 +1078,17 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Normal case --- create a plan according to query_planner's
* results.
*/
- bool need_sort_for_grouping = false;
+ bool need_sort_for_grouping = false;
result_plan = create_plan(root, best_path);
current_pathkeys = best_path->pathkeys;
/* Detect if we'll need an explicit sort for grouping */
if (parse->groupClause && !use_hashed_grouping &&
- !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
+ !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
{
need_sort_for_grouping = true;
+
/*
* Always override query_planner's tlist, so that we don't
* sort useless data from a "physical" tlist.
@@ -1275,9 +1275,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
} /* end of non-minmax-aggregate case */
/*
- * Since each window function could require a different sort order,
- * we stack up a WindowAgg node for each window, with sort steps
- * between them as needed.
+ * Since each window function could require a different sort order, we
+ * stack up a WindowAgg node for each window, with sort steps between
+ * them as needed.
*/
if (activeWindows)
{
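[note] A rough standalone model of the stacking described in the comment above: one WindowAgg per active window, with a Sort slipped underneath whenever the current output order does not already satisfy that window's PARTITION BY/ORDER BY keys. The ToyPlan type and helpers are illustrative only, not the real Plan nodes:

    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct ToyPlan ToyPlan;
    struct ToyPlan
    {
        const char *node;       /* e.g. "Sort" or "WindowAgg" */
        ToyPlan    *child;
    };

    static ToyPlan *
    make_toy_node(const char *name, ToyPlan *child)
    {
        ToyPlan *p = malloc(sizeof(ToyPlan));

        p->node = name;
        p->child = child;
        return p;
    }

    /* ordered_for(i) reports whether the current output order already
     * matches window i's sort requirements. */
    static ToyPlan *
    stack_windowaggs(ToyPlan *plan, int nwindows, bool (*ordered_for)(int))
    {
        for (int i = 0; i < nwindows; i++)
        {
            if (!ordered_for(i))
                plan = make_toy_node("Sort", plan);
            plan = make_toy_node("WindowAgg", plan);
        }
        return plan;
    }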
@@ -1286,12 +1286,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* If the top-level plan node is one that cannot do expression
- * evaluation, we must insert a Result node to project the
- * desired tlist. (In some cases this might not really be
- * required, but it's not worth trying to avoid it.) Note that
- * on second and subsequent passes through the following loop,
- * the top-level node will be a WindowAgg which we know can
- * project; so we only need to check once.
+ * evaluation, we must insert a Result node to project the desired
+ * tlist. (In some cases this might not really be required, but
+ * it's not worth trying to avoid it.) Note that on second and
+ * subsequent passes through the following loop, the top-level
+ * node will be a WindowAgg which we know can project; so we only
+ * need to check once.
*/
if (!is_projection_capable_plan(result_plan))
{
@@ -1302,21 +1302,20 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
}
/*
- * The "base" targetlist for all steps of the windowing process
- * is a flat tlist of all Vars and Aggs needed in the result.
- * (In some cases we wouldn't need to propagate all of these
- * all the way to the top, since they might only be needed as
- * inputs to WindowFuncs. It's probably not worth trying to
- * optimize that though.) We also need any volatile sort
- * expressions, because make_sort_from_pathkeys won't add those
- * on its own, and anyway we want them evaluated only once at
- * the bottom of the stack. As we climb up the stack, we add
- * outputs for the WindowFuncs computed at each level. Also,
- * each input tlist has to present all the columns needed to
- * sort the data for the next WindowAgg step. That's handled
- * internally by make_sort_from_pathkeys, but we need the
- * copyObject steps here to ensure that each plan node has
- * a separately modifiable tlist.
+ * The "base" targetlist for all steps of the windowing process is
+ * a flat tlist of all Vars and Aggs needed in the result. (In
+ * some cases we wouldn't need to propagate all of these all the
+ * way to the top, since they might only be needed as inputs to
+ * WindowFuncs. It's probably not worth trying to optimize that
+ * though.) We also need any volatile sort expressions, because
+ * make_sort_from_pathkeys won't add those on its own, and anyway
+ * we want them evaluated only once at the bottom of the stack.
+ * As we climb up the stack, we add outputs for the WindowFuncs
+ * computed at each level. Also, each input tlist has to present
+ * all the columns needed to sort the data for the next WindowAgg
+ * step. That's handled internally by make_sort_from_pathkeys,
+ * but we need the copyObject steps here to ensure that each plan
+ * node has a separately modifiable tlist.
*/
window_tlist = flatten_tlist(tlist);
if (parse->hasAggs)
@@ -1392,7 +1391,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
{
/* Add the current WindowFuncs to the running tlist */
window_tlist = add_to_flat_tlist(window_tlist,
- wflists->windowFuncs[wc->winref]);
+ wflists->windowFuncs[wc->winref]);
}
else
{
@@ -1404,7 +1403,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
result_plan = (Plan *)
make_windowagg(root,
(List *) copyObject(window_tlist),
- list_length(wflists->windowFuncs[wc->winref]),
+ list_length(wflists->windowFuncs[wc->winref]),
wc->winref,
partNumCols,
partColIdx,
@@ -1423,11 +1422,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*/
if (parse->distinctClause)
{
- double dNumDistinctRows;
- long numDistinctRows;
- bool use_hashed_distinct;
- bool can_sort;
- bool can_hash;
+ double dNumDistinctRows;
+ long numDistinctRows;
+ bool use_hashed_distinct;
+ bool can_sort;
+ bool can_hash;
/*
* If there was grouping or aggregation, use the current number of
@@ -1472,7 +1471,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("could not implement DISTINCT"),
errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
- use_hashed_distinct = false; /* keep compiler quiet */
+ use_hashed_distinct = false; /* keep compiler quiet */
}
}
@@ -1483,10 +1482,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
result_plan->targetlist,
NIL,
AGG_HASHED,
- list_length(parse->distinctClause),
- extract_grouping_cols(parse->distinctClause,
- result_plan->targetlist),
- extract_grouping_ops(parse->distinctClause),
+ list_length(parse->distinctClause),
+ extract_grouping_cols(parse->distinctClause,
+ result_plan->targetlist),
+ extract_grouping_ops(parse->distinctClause),
numDistinctRows,
0,
result_plan);
@@ -1502,11 +1501,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* rigorous of DISTINCT and ORDER BY, to avoid a second sort
* below. However, for regular DISTINCT, don't sort now if we
* don't have to --- sorting afterwards will likely be cheaper,
- * and also has the possibility of optimizing via LIMIT. But
- * for DISTINCT ON, we *must* force the final sort now, else
- * it won't have the desired behavior.
+ * and also has the possibility of optimizing via LIMIT. But for
+ * DISTINCT ON, we *must* force the final sort now, else it won't
+ * have the desired behavior.
*/
- List *needed_pathkeys;
+ List *needed_pathkeys;
if (parse->hasDistinctOn &&
list_length(root->distinct_pathkeys) <
@@ -1530,7 +1529,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
result_plan = (Plan *) make_sort_from_pathkeys(root,
result_plan,
- current_pathkeys,
+ current_pathkeys,
-1.0);
}
@@ -1551,7 +1550,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
{
result_plan = (Plan *) make_sort_from_pathkeys(root,
result_plan,
- root->sort_pathkeys,
+ root->sort_pathkeys,
limit_tuples);
current_pathkeys = root->sort_pathkeys;
}
@@ -1883,12 +1882,12 @@ preprocess_groupclause(PlannerInfo *root)
return;
/*
- * Add any remaining GROUP BY items to the new list, but only if we
- * were able to make a complete match. In other words, we only
- * rearrange the GROUP BY list if the result is that one list is a
- * prefix of the other --- otherwise there's no possibility of a
- * common sort. Also, give up if there are any non-sortable GROUP BY
- * items, since then there's no hope anyway.
+ * Add any remaining GROUP BY items to the new list, but only if we were
+ * able to make a complete match. In other words, we only rearrange the
+ * GROUP BY list if the result is that one list is a prefix of the other
+ * --- otherwise there's no possibility of a common sort. Also, give up
+ * if there are any non-sortable GROUP BY items, since then there's no
+ * hope anyway.
*/
foreach(gl, parse->groupClause)
{
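[note] The rearrangement described above only pays off when the result leaves one key list as a prefix of the other, so GROUP BY and ORDER BY can share a single sort. A simplified standalone sketch, using plain ints as stand-in sort columns and assuming neither list contains duplicates: it moves the ORDER BY columns to the front of GROUP BY, and gives up (changing nothing) unless every ORDER BY column appears in GROUP BY:

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_COLS 64             /* arbitrary cap to keep the sketch simple */

    static bool
    contains_col(const int *cols, size_t n, int col)
    {
        for (size_t i = 0; i < n; i++)
        {
            if (cols[i] == col)
                return true;
        }
        return false;
    }

    /* Reorder groupby[] so the orderby[] columns come first, in ORDER BY
     * order, followed by the remaining GROUP BY columns in their original
     * order.  Returns false without changing anything if no complete match
     * is possible, since then there is no common sort prefix to exploit. */
    static bool
    reorder_group_by(int *groupby, size_t ngroup,
                     const int *orderby, size_t norder)
    {
        int     newlist[MAX_COLS];
        size_t  n = 0;

        if (ngroup > MAX_COLS || norder > ngroup)
            return false;
        for (size_t i = 0; i < norder; i++)
        {
            if (!contains_col(groupby, ngroup, orderby[i]))
                return false;   /* incomplete match: leave GROUP BY alone */
            newlist[n++] = orderby[i];
        }
        for (size_t j = 0; j < ngroup; j++)
        {
            if (!contains_col(orderby, norder, groupby[j]))
                newlist[n++] = groupby[j];
        }
        for (size_t k = 0; k < ngroup; k++)
            groupby[k] = newlist[k];
        return true;
    }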
@@ -1962,11 +1961,10 @@ choose_hashed_grouping(PlannerInfo *root,
/*
* When we have both GROUP BY and DISTINCT, use the more-rigorous of
- * DISTINCT and ORDER BY as the assumed required output sort order.
- * This is an oversimplification because the DISTINCT might get
- * implemented via hashing, but it's not clear that the case is common
- * enough (or that our estimates are good enough) to justify trying to
- * solve it exactly.
+ * DISTINCT and ORDER BY as the assumed required output sort order. This
+ * is an oversimplification because the DISTINCT might get implemented via
+ * hashing, but it's not clear that the case is common enough (or that our
+ * estimates are good enough) to justify trying to solve it exactly.
*/
if (list_length(root->distinct_pathkeys) >
list_length(root->sort_pathkeys))
@@ -2056,7 +2054,7 @@ choose_hashed_grouping(PlannerInfo *root,
* differences that it doesn't seem worth trying to unify the two functions.
*
* But note that making the two choices independently is a bit bogus in
- * itself. If the two could be combined into a single choice operation
+ * itself. If the two could be combined into a single choice operation
* it'd probably be better, but that seems far too unwieldy to be practical,
* especially considering that the combination of GROUP BY and DISTINCT
* isn't very common in real queries. By separating them, we are giving
@@ -2098,8 +2096,8 @@ choose_hashed_distinct(PlannerInfo *root,
* comparison.
*
* We need to consider input_plan + hashagg [+ final sort] versus
- * input_plan [+ sort] + group [+ final sort] where brackets indicate
- * a step that may not be needed.
+ * input_plan [+ sort] + group [+ final sort] where brackets indicate a
+ * step that may not be needed.
*
* These path variables are dummies that just hold cost fields; we don't
* make actual Paths for these steps.
@@ -2108,16 +2106,17 @@ choose_hashed_distinct(PlannerInfo *root,
numDistinctCols, dNumDistinctRows,
input_plan->startup_cost, input_plan->total_cost,
input_plan->plan_rows);
+
/*
- * Result of hashed agg is always unsorted, so if ORDER BY is present
- * we need to charge for the final sort.
+ * Result of hashed agg is always unsorted, so if ORDER BY is present we
+ * need to charge for the final sort.
*/
if (root->parse->sortClause)
cost_sort(&hashed_p, root, root->sort_pathkeys, hashed_p.total_cost,
dNumDistinctRows, input_plan->plan_width, limit_tuples);
/*
- * Now for the GROUP case. See comments in grouping_planner about the
+ * Now for the GROUP case. See comments in grouping_planner about the
* sorting choices here --- this code should match that code.
*/
sorted_p.startup_cost = input_plan->startup_cost;
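[note] What follows in the function is essentially a two-way cost race between the pipelines named in the comment above (hashagg with an optional final sort, versus sort plus group with an optional final sort). A condensed sketch of the final comparison step with dummy cost holders; the fraction-weighted interpolation below is a simplification for illustration, not a copy of the real fractional-cost comparison in costsize.c:

    #include <stdbool.h>

    typedef struct
    {
        double startup_cost;
        double total_cost;
    } DummyCost;

    /* Choose hashing iff it looks cheaper for the expected fetch size.
     * When only a fraction of the output will be fetched, weight startup
     * against total cost by that fraction; otherwise compare totals. */
    static bool
    prefer_hashed(DummyCost hashed, DummyCost sorted, double tuple_fraction)
    {
        double hcost;
        double scost;

        if (tuple_fraction <= 0.0 || tuple_fraction >= 1.0)
        {
            hcost = hashed.total_cost;
            scost = sorted.total_cost;
        }
        else
        {
            hcost = hashed.startup_cost +
                tuple_fraction * (hashed.total_cost - hashed.startup_cost);
            scost = sorted.startup_cost +
                tuple_fraction * (sorted.total_cost - sorted.startup_cost);
        }
        return hcost < scost;
    }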
@@ -2398,10 +2397,10 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
* are otherwise distinct (eg, different names or framing clauses).
*
* There is room to be much smarter here, for example detecting whether
- * one window's sort keys are a prefix of another's (so that sorting
- * for the latter would do for the former), or putting windows first
- * that match a sort order available for the underlying query. For the
- * moment we are content with meeting the spec.
+ * one window's sort keys are a prefix of another's (so that sorting for
+ * the latter would do for the former), or putting windows first that
+ * match a sort order available for the underlying query. For the moment
+ * we are content with meeting the spec.
*/
result = NIL;
while (actives != NIL)
@@ -2469,12 +2468,12 @@ add_volatile_sort_exprs(List *window_tlist, List *tlist, List *activeWindows)
}
/*
- * Now scan the original tlist to find the referenced expressions.
- * Any that are volatile must be added to window_tlist.
+ * Now scan the original tlist to find the referenced expressions. Any
+ * that are volatile must be added to window_tlist.
*
- * Note: we know that the input window_tlist contains no items marked
- * with ressortgrouprefs, so we don't have to worry about collisions
- * of the reference numbers.
+ * Note: we know that the input window_tlist contains no items marked with
+ * ressortgrouprefs, so we don't have to worry about collisions of the
+ * reference numbers.
*/
foreach(lc, tlist)
{
@@ -2524,7 +2523,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("could not implement window ORDER BY"),
- errdetail("Window ordering columns must be of sortable datatypes.")));
+ errdetail("Window ordering columns must be of sortable datatypes.")));
/* Okay, make the combined pathkeys */
window_sortclauses = list_concat(list_copy(wc->partitionClause),
@@ -2545,7 +2544,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* This depends on the behavior of make_pathkeys_for_window()!
*
* We are given the target WindowClause and an array of the input column
- * numbers associated with the resulting pathkeys. In the easy case, there
+ * numbers associated with the resulting pathkeys. In the easy case, there
* are the same number of pathkey columns as partitioning + ordering columns
* and we just have to copy some data around. However, it's possible that
* some of the original partitioning + ordering columns were eliminated as
@@ -2553,11 +2552,11 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
* though the parser gets rid of obvious duplicates. A typical scenario is a
* window specification "PARTITION BY x ORDER BY y" coupled with a clause
* "WHERE x = y" that causes the two sort columns to be recognized as
- * redundant.) In that unusual case, we have to work a lot harder to
+ * redundant.) In that unusual case, we have to work a lot harder to
* determine which keys are significant.
*
* The method used here is a bit brute-force: add the sort columns to a list
- * one at a time and note when the resulting pathkey list gets longer. But
+ * one at a time and note when the resulting pathkey list gets longer. But
* it's a sufficiently uncommon case that a faster way doesn't seem worth
* the amount of code refactoring that'd be needed.
*----------
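[note] A standalone sketch of the brute-force idea described above: feed the partitioning and ordering columns into a growing key list one at a time, and treat a column as significant only when appending it actually lengthens the list. Redundancy is modelled here simply as "equal to an earlier column", a stand-in for the real equivalence-class reasoning:

    #include <stdbool.h>
    #include <stddef.h>

    /* cols[]: partitioning columns followed by ordering columns.
     * significant[]: output flags, one per input column.
     * Returns the number of distinct (significant) keys kept. */
    static size_t
    mark_significant_columns(const int *cols, size_t ncols, bool *significant)
    {
        size_t  nkept = 0;

        for (size_t i = 0; i < ncols; i++)
        {
            bool redundant = false;

            for (size_t j = 0; j < i; j++)
            {
                if (significant[j] && cols[j] == cols[i])
                {
                    redundant = true;
                    break;
                }
            }
            significant[i] = !redundant;
            if (!redundant)
                nkept++;
        }
        return nkept;
    }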
@@ -2659,7 +2658,7 @@ get_column_info_for_window(PlannerInfo *root, WindowClause *wc, List *tlist,
* Currently, we disallow sublinks in standalone expressions, so there's no
* real "planning" involved here. (That might not always be true though.)
* What we must do is run eval_const_expressions to ensure that any function
- * default arguments get inserted. The fact that constant subexpressions
+ * default arguments get inserted. The fact that constant subexpressions
* get simplified is a side-effect that is useful when the expression will
* get evaluated more than once. Also, we must fix operator function IDs.
*
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 17016d5f3b..11e14f96c5 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.149 2009/01/22 20:16:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.150 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -116,7 +116,7 @@ static Node *fix_upper_expr_mutator(Node *node,
fix_upper_expr_context *context);
static bool fix_opfuncids_walker(Node *node, void *context);
static bool extract_query_dependencies_walker(Node *node,
- PlannerGlobal *context);
+ PlannerGlobal *context);
/*****************************************************************************
@@ -349,7 +349,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
break;
case T_CteScan:
{
- CteScan *splan = (CteScan *) plan;
+ CteScan *splan = (CteScan *) plan;
splan->scan.scanrelid += rtoffset;
splan->scan.plan.targetlist =
@@ -713,13 +713,13 @@ fix_expr_common(PlannerGlobal *glob, Node *node)
{
set_sa_opfuncid((ScalarArrayOpExpr *) node);
record_plan_function_dependency(glob,
- ((ScalarArrayOpExpr *) node)->opfuncid);
+ ((ScalarArrayOpExpr *) node)->opfuncid);
}
else if (IsA(node, ArrayCoerceExpr))
{
if (OidIsValid(((ArrayCoerceExpr *) node)->elemfuncid))
record_plan_function_dependency(glob,
- ((ArrayCoerceExpr *) node)->elemfuncid);
+ ((ArrayCoerceExpr *) node)->elemfuncid);
}
else if (IsA(node, Const))
{
@@ -759,8 +759,8 @@ fix_scan_expr(PlannerGlobal *glob, Node *node, int rtoffset)
* If rtoffset == 0, we don't need to change any Vars, and if there
* are no placeholders anywhere we won't need to remove them. Then
* it's OK to just scribble on the input node tree instead of copying
- * (since the only change, filling in any unset opfuncid fields,
- * is harmless). This saves just enough cycles to be noticeable on
+ * (since the only change, filling in any unset opfuncid fields, is
+ * harmless). This saves just enough cycles to be noticeable on
* trivial queries.
*/
(void) fix_scan_expr_walker(node, &context);
@@ -1633,11 +1633,11 @@ set_returning_clause_references(PlannerGlobal *glob,
* entries, while leaving result-rel Vars as-is.
*
* PlaceHolderVars will also be sought in the targetlist, but no
- * more-complex expressions will be. Note that it is not possible for
- * a PlaceHolderVar to refer to the result relation, since the result
- * is never below an outer join. If that case could happen, we'd have
- * to be prepared to pick apart the PlaceHolderVar and evaluate its
- * contained expression instead.
+ * more-complex expressions will be. Note that it is not possible for a
+ * PlaceHolderVar to refer to the result relation, since the result is
+ * never below an outer join. If that case could happen, we'd have to be
+ * prepared to pick apart the PlaceHolderVar and evaluate its contained
+ * expression instead.
*/
itlist = build_tlist_index_other_vars(topplan->targetlist, resultRelation);
@@ -1734,8 +1734,8 @@ record_plan_function_dependency(PlannerGlobal *glob, Oid funcid)
* we just assume they'll never change (or at least not in ways that'd
* invalidate plans using them). For this purpose we can consider a
* built-in function to be one with OID less than FirstBootstrapObjectId.
- * Note that the OID generator guarantees never to generate such an
- * OID after startup, even at OID wraparound.
+ * Note that the OID generator guarantees never to generate such an OID
+ * after startup, even at OID wraparound.
*/
if (funcid >= (Oid) FirstBootstrapObjectId)
{
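[note] The guard above reduces to a single OID comparison: anything below the bootstrap boundary is treated as a built-in whose behaviour is assumed stable, so no dependency is recorded for it. A trivial sketch; the numeric boundary shown is an assumption for illustration, not taken from this diff:

    typedef unsigned int Oid;

    /* Assumed stand-in for FirstBootstrapObjectId; illustrative only. */
    #define TOY_FIRST_BOOTSTRAP_OBJECT_ID 10000

    /* Only non-built-in functions need an invalidation dependency. */
    static int
    needs_function_dependency(Oid funcid)
    {
        return funcid >= (Oid) TOY_FIRST_BOOTSTRAP_OBJECT_ID;
    }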
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 6839e5d99b..cdff123828 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.149 2009/04/25 16:44:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.150 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,9 +56,9 @@ static Node *build_subplan(PlannerInfo *root, Plan *plan, List *rtable,
SubLinkType subLinkType, Node *testexpr,
bool adjust_testexpr, bool unknownEqFalse);
static List *generate_subquery_params(PlannerInfo *root, List *tlist,
- List **paramIds);
+ List **paramIds);
static List *generate_subquery_vars(PlannerInfo *root, List *tlist,
- Index varno);
+ Index varno);
static Node *convert_testexpr(PlannerInfo *root,
Node *testexpr,
List *subst_nodes);
@@ -308,7 +308,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
* path/costsize.c.
*
* XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash
- * its output. In that case it would've been better to specify full
+ * its output. In that case it would've been better to specify full
* retrieval. At present, however, we can only check hashability after
* we've made the subplan :-(. (Determining whether it'll fit in work_mem
* is the really hard part.) Therefore, we don't want to be too
@@ -338,11 +338,11 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
/*
* If it's a correlated EXISTS with an unimportant targetlist, we might be
* able to transform it to the equivalent of an IN and then implement it
- * by hashing. We don't have enough information yet to tell which way
- * is likely to be better (it depends on the expected number of executions
- * of the EXISTS qual, and we are much too early in planning the outer
- * query to be able to guess that). So we generate both plans, if
- * possible, and leave it to the executor to decide which to use.
+ * by hashing. We don't have enough information yet to tell which way is
+ * likely to be better (it depends on the expected number of executions of
+ * the EXISTS qual, and we are much too early in planning the outer query
+ * to be able to guess that). So we generate both plans, if possible, and
+ * leave it to the executor to decide which to use.
*/
if (simple_exists && IsA(result, SubPlan))
{
@@ -368,7 +368,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType,
/* Now we can check if it'll fit in work_mem */
if (subplan_is_hashable(plan))
{
- SubPlan *hashplan;
+ SubPlan *hashplan;
AlternativeSubPlan *asplan;
/* OK, convert to SubPlan format. */
@@ -437,7 +437,7 @@ build_subplan(PlannerInfo *root, Plan *plan, List *rtable,
if (pitem->abslevel == root->query_level)
{
- Node *arg;
+ Node *arg;
/*
* The Var or Aggref has already been adjusted to have the correct
@@ -447,8 +447,8 @@ build_subplan(PlannerInfo *root, Plan *plan, List *rtable,
arg = copyObject(pitem->item);
/*
- * If it's an Aggref, its arguments might contain SubLinks,
- * which have not yet been processed. Do that now.
+ * If it's an Aggref, its arguments might contain SubLinks, which
+ * have not yet been processed. Do that now.
*/
if (IsA(arg, Aggref))
arg = SS_process_sublinks(root, arg, false);
@@ -714,7 +714,7 @@ generate_subquery_vars(PlannerInfo *root, List *tlist, Index varno)
/*
* convert_testexpr: convert the testexpr given by the parser into
* actually executable form. This entails replacing PARAM_SUBLINK Params
- * with Params or Vars representing the results of the sub-select. The
+ * with Params or Vars representing the results of the sub-select. The
* nodes to be substituted are passed in as the List result from
* generate_subquery_params or generate_subquery_vars.
*
@@ -794,8 +794,8 @@ static bool
testexpr_is_hashable(Node *testexpr)
{
/*
- * The testexpr must be a single OpExpr, or an AND-clause containing
- * only OpExprs.
+ * The testexpr must be a single OpExpr, or an AND-clause containing only
+ * OpExprs.
*
* The combining operators must be hashable and strict. The need for
* hashability is obvious, since we want to use hashing. Without
@@ -892,8 +892,8 @@ SS_process_ctes(PlannerInfo *root)
}
/*
- * Copy the source Query node. Probably not necessary, but let's
- * keep this similar to make_subplan.
+ * Copy the source Query node. Probably not necessary, but let's keep
+ * this similar to make_subplan.
*/
subquery = (Query *) copyObject(cte->ctequery);
@@ -907,7 +907,7 @@ SS_process_ctes(PlannerInfo *root)
&subroot);
/*
- * Make a SubPlan node for it. This is just enough unlike
+ * Make a SubPlan node for it. This is just enough unlike
* build_subplan that we can't share code.
*
* Note plan_id, plan_name, and cost fields are set further down.
@@ -925,8 +925,8 @@ SS_process_ctes(PlannerInfo *root)
/*
* Make parParam and args lists of param IDs and expressions that
- * current query level will pass to this child plan. Even though
- * this is an initplan, there could be side-references to earlier
+ * current query level will pass to this child plan. Even though this
+ * is an initplan, there could be side-references to earlier
* initplan's outputs, specifically their CTE output parameters.
*/
tmpset = bms_copy(plan->extParam);
@@ -948,8 +948,8 @@ SS_process_ctes(PlannerInfo *root)
bms_free(tmpset);
/*
- * Assign a param to represent the query output. We only really
- * care about reserving a parameter ID number.
+ * Assign a param to represent the query output. We only really care
+ * about reserving a parameter ID number.
*/
prm = generate_new_param(root, INTERNALOID, -1);
splan->setParam = list_make1_int(prm->paramid);
@@ -1028,9 +1028,9 @@ convert_ANY_sublink_to_join(PlannerInfo *root, SubLink *sublink,
return NULL;
/*
- * The test expression must contain some Vars of the parent query,
- * else it's not gonna be a join. (Note that it won't have Vars
- * referring to the subquery, rather Params.)
+ * The test expression must contain some Vars of the parent query, else
+ * it's not gonna be a join. (Note that it won't have Vars referring to
+ * the subquery, rather Params.)
*/
upper_varnos = pull_varnos(sublink->testexpr);
if (bms_is_empty(upper_varnos))
@@ -1126,10 +1126,10 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
subselect = (Query *) copyObject(subselect);
/*
- * See if the subquery can be simplified based on the knowledge that
- * it's being used in EXISTS(). If we aren't able to get rid of its
- * targetlist, we have to fail, because the pullup operation leaves
- * us with noplace to evaluate the targetlist.
+ * See if the subquery can be simplified based on the knowledge that it's
+ * being used in EXISTS(). If we aren't able to get rid of its
+ * targetlist, we have to fail, because the pullup operation leaves us
+ * with noplace to evaluate the targetlist.
*/
if (!simplify_EXISTS_query(subselect))
return NULL;
@@ -1175,13 +1175,13 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
* to the inner (necessarily true). Therefore this is a lot easier than
* what pull_up_subqueries has to go through.
*
- * In fact, it's even easier than what convert_ANY_sublink_to_join has
- * to do. The machinations of simplify_EXISTS_query ensured that there
- * is nothing interesting in the subquery except an rtable and jointree,
- * and even the jointree FromExpr no longer has quals. So we can just
- * append the rtable to our own and use the FromExpr in our jointree.
- * But first, adjust all level-zero varnos in the subquery to account
- * for the rtable merger.
+ * In fact, it's even easier than what convert_ANY_sublink_to_join has to
+ * do. The machinations of simplify_EXISTS_query ensured that there is
+ * nothing interesting in the subquery except an rtable and jointree, and
+ * even the jointree FromExpr no longer has quals. So we can just append
+ * the rtable to our own and use the FromExpr in our jointree. But first,
+ * adjust all level-zero varnos in the subquery to account for the rtable
+ * merger.
*/
rtoffset = list_length(parse->rtable);
OffsetVarNodes((Node *) subselect, rtoffset, 0);
@@ -1198,8 +1198,8 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
/*
* Now that the WHERE clause is adjusted to match the parent query
* environment, we can easily identify all the level-zero rels it uses.
- * The ones <= rtoffset belong to the upper query; the ones > rtoffset
- * do not.
+ * The ones <= rtoffset belong to the upper query; the ones > rtoffset do
+ * not.
*/
clause_varnos = pull_varnos(whereClause);
upper_varnos = NULL;
@@ -1212,8 +1212,8 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
Assert(!bms_is_empty(upper_varnos));
/*
- * Now that we've got the set of upper-level varnos, we can make the
- * last check: only available_rels can be referenced.
+ * Now that we've got the set of upper-level varnos, we can make the last
+ * check: only available_rels can be referenced.
*/
if (!bms_is_subset(upper_varnos, available_rels))
return NULL;
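[note] Both sublink-to-join conversions in this file gate themselves on two set tests over the referenced rangetable indexes: the pulled-up clause must mention at least one upper-level rel, and every upper rel it mentions must be among the rels we are allowed to join against. A small standalone sketch with a bitmask standing in for a Relids bitmapset (names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t ToyRelids;      /* bit i set => rangetable index i used */

    static bool
    relids_is_empty(ToyRelids a)
    {
        return a == 0;
    }

    static bool
    relids_is_subset(ToyRelids a, ToyRelids b)
    {
        return (a & ~b) == 0;
    }

    /* Mirror of the two guards: no parent Vars means the clause cannot
     * become a join; references outside available_rels mean we must not
     * pull it up at this point in the join tree. */
    static bool
    sublink_pullup_allowed(ToyRelids upper_varnos, ToyRelids available_rels)
    {
        if (relids_is_empty(upper_varnos))
            return false;
        if (!relids_is_subset(upper_varnos, available_rels))
            return false;
        return true;
    }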
@@ -1308,7 +1308,7 @@ simplify_EXISTS_query(Query *query)
*
* On success, the modified subselect is returned, and we store a suitable
* upper-level test expression at *testexpr, plus a list of the subselect's
- * output Params at *paramIds. (The test expression is already Param-ified
+ * output Params at *paramIds. (The test expression is already Param-ified
* and hence need not go through convert_testexpr, which is why we have to
* deal with the Param IDs specially.)
*
@@ -1365,32 +1365,32 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
/*
* Clean up the WHERE clause by doing const-simplification etc on it.
* Aside from simplifying the processing we're about to do, this is
- * important for being able to pull chunks of the WHERE clause up into
- * the parent query. Since we are invoked partway through the parent's
+ * important for being able to pull chunks of the WHERE clause up into the
+ * parent query. Since we are invoked partway through the parent's
* preprocess_expression() work, earlier steps of preprocess_expression()
- * wouldn't get applied to the pulled-up stuff unless we do them here.
- * For the parts of the WHERE clause that get put back into the child
- * query, this work is partially duplicative, but it shouldn't hurt.
+ * wouldn't get applied to the pulled-up stuff unless we do them here. For
+ * the parts of the WHERE clause that get put back into the child query,
+ * this work is partially duplicative, but it shouldn't hurt.
*
- * Note: we do not run flatten_join_alias_vars. This is OK because
- * any parent aliases were flattened already, and we're not going to
- * pull any child Vars (of any description) into the parent.
+ * Note: we do not run flatten_join_alias_vars. This is OK because any
+ * parent aliases were flattened already, and we're not going to pull any
+ * child Vars (of any description) into the parent.
*
- * Note: passing the parent's root to eval_const_expressions is technically
- * wrong, but we can get away with it since only the boundParams (if any)
- * are used, and those would be the same in a subroot.
+ * Note: passing the parent's root to eval_const_expressions is
+ * technically wrong, but we can get away with it since only the
+ * boundParams (if any) are used, and those would be the same in a
+ * subroot.
*/
whereClause = eval_const_expressions(root, whereClause);
whereClause = (Node *) canonicalize_qual((Expr *) whereClause);
whereClause = (Node *) make_ands_implicit((Expr *) whereClause);
/*
- * We now have a flattened implicit-AND list of clauses, which we
- * try to break apart into "outervar = innervar" hash clauses.
- * Anything that can't be broken apart just goes back into the
- * newWhere list. Note that we aren't trying hard yet to ensure
- * that we have only outer or only inner on each side; we'll check
- * that if we get to the end.
+ * We now have a flattened implicit-AND list of clauses, which we try to
+ * break apart into "outervar = innervar" hash clauses. Anything that
+ * can't be broken apart just goes back into the newWhere list. Note that
+ * we aren't trying hard yet to ensure that we have only outer or only
+ * inner on each side; we'll check that if we get to the end.
*/
leftargs = rightargs = opids = newWhere = NIL;
foreach(lc, (List *) whereClause)
@@ -1400,8 +1400,8 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
if (IsA(expr, OpExpr) &&
hash_ok_operator(expr))
{
- Node *leftarg = (Node *) linitial(expr->args);
- Node *rightarg = (Node *) lsecond(expr->args);
+ Node *leftarg = (Node *) linitial(expr->args);
+ Node *rightarg = (Node *) lsecond(expr->args);
if (contain_vars_of_level(leftarg, 1))
{
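[note] The loop that begins here sorts each conjunct into one of two buckets: equality clauses that can become "outervar = innervar" hash clauses (after swapping sides if necessary), and everything else, which stays behind as ordinary WHERE quals. A simplified model of the end-state classification; as the comment notes, the real code defers part of the outer/inner purity check until the end, and it inspects OpExpr arguments with contain_vars_of_level() rather than precomputed flags:

    #include <stdbool.h>

    /* Toy conjunct: flags say which query level each side of an equality
     * references (outer = the parent query, inner = the EXISTS subquery). */
    typedef struct
    {
        bool is_hashable_equality;
        bool left_uses_outer;
        bool left_uses_inner;
        bool right_uses_outer;
        bool right_uses_inner;
    } ToyClause;

    /* Returns true if the clause can be pulled up as outervar = innervar;
     * *swap_sides tells the caller to commute the operator first. */
    static bool
    is_pullable_hash_clause(const ToyClause *c, bool *swap_sides)
    {
        if (!c->is_hashable_equality)
            return false;
        if (c->left_uses_outer && !c->left_uses_inner &&
            c->right_uses_inner && !c->right_uses_outer)
        {
            *swap_sides = false;
            return true;
        }
        if (c->right_uses_outer && !c->right_uses_inner &&
            c->left_uses_inner && !c->left_uses_outer)
        {
            *swap_sides = true;
            return true;
        }
        return false;
    }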
@@ -1459,15 +1459,15 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
/*
* And there can't be any child Vars in the stuff we intend to pull up.
- * (Note: we'd need to check for child Aggs too, except we know the
- * child has no aggs at all because of simplify_EXISTS_query's check.
- * The same goes for window functions.)
+ * (Note: we'd need to check for child Aggs too, except we know the child
+ * has no aggs at all because of simplify_EXISTS_query's check. The same
+ * goes for window functions.)
*/
if (contain_vars_of_level((Node *) leftargs, 0))
return NULL;
/*
- * Also reject sublinks in the stuff we intend to pull up. (It might be
+ * Also reject sublinks in the stuff we intend to pull up. (It might be
* possible to support this, but doesn't seem worth the complication.)
*/
if (contain_subplans((Node *) leftargs))
@@ -1485,11 +1485,10 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
subselect->jointree->quals = (Node *) make_ands_explicit(newWhere);
/*
- * Build a new targetlist for the child that emits the expressions
- * we need. Concurrently, build a testexpr for the parent using
- * Params to reference the child outputs. (Since we generate Params
- * directly here, there will be no need to convert the testexpr in
- * build_subplan.)
+ * Build a new targetlist for the child that emits the expressions we
+ * need. Concurrently, build a testexpr for the parent using Params to
+ * reference the child outputs. (Since we generate Params directly here,
+ * there will be no need to convert the testexpr in build_subplan.)
*/
tlist = testlist = paramids = NIL;
resno = 1;
@@ -1625,10 +1624,10 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
}
/*
- * Don't recurse into the arguments of an outer aggregate here.
- * Any SubLinks in the arguments have to be dealt with at the outer
- * query level; they'll be handled when build_subplan collects the
- * Aggref into the arguments to be passed down to the current subplan.
+ * Don't recurse into the arguments of an outer aggregate here. Any
+ * SubLinks in the arguments have to be dealt with at the outer query
+ * level; they'll be handled when build_subplan collects the Aggref into
+ * the arguments to be passed down to the current subplan.
*/
if (IsA(node, Aggref))
{
@@ -1655,7 +1654,7 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
* is needed for a bare List.)
*
* Anywhere within the top-level AND/OR clause structure, we can tell
- * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
+ * make_subplan() that NULL and FALSE are interchangeable. So isTopQual
* propagates down in both cases. (Note that this is unlike the meaning
* of "top level qual" used in most other places in Postgres.)
*/
@@ -1702,8 +1701,8 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
}
/*
- * If we recurse down through anything other than an AND or OR node,
- * we are definitely not at top qual level anymore.
+ * If we recurse down through anything other than an AND or OR node, we
+ * are definitely not at top qual level anymore.
*/
locContext.isTopQual = false;
@@ -1759,8 +1758,8 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans)
/*
* Now determine the set of params that are validly referenceable in this
* query level; to wit, those available from outer query levels plus the
- * output parameters of any initPlans. (We do not include output
- * parameters of regular subplans. Those should only appear within the
+ * output parameters of any initPlans. (We do not include output
+ * parameters of regular subplans. Those should only appear within the
* testexpr of SubPlan nodes, and are taken care of locally within
* finalize_primnode.)
*
@@ -1809,7 +1808,7 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans)
if (attach_initplans)
{
plan->initPlan = root->init_plans;
- root->init_plans = NIL; /* make sure they're not attached twice */
+ root->init_plans = NIL; /* make sure they're not attached twice */
/* allParam must include all these params */
plan->allParam = bms_add_members(plan->allParam, initExtParam);
@@ -2043,9 +2042,9 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params)
/*
* Note: by definition, extParam and allParam should have the same value
- * in any plan node that doesn't have child initPlans. We set them
- * equal here, and later SS_finalize_plan will update them properly
- * in node(s) that it attaches initPlans to.
+ * in any plan node that doesn't have child initPlans. We set them equal
+ * here, and later SS_finalize_plan will update them properly in node(s)
+ * that it attaches initPlans to.
*
* For speed at execution time, make sure extParam/allParam are actually
* NULL if they are empty sets.
@@ -2095,7 +2094,7 @@ finalize_primnode(Node *node, finalize_primnode_context *context)
/*
* Remove any param IDs of output parameters of the subplan that were
- * referenced in the testexpr. These are not interesting for
+ * referenced in the testexpr. These are not interesting for
* parameter change signaling since we always re-evaluate the subplan.
* Note that this wouldn't work too well if there might be uses of the
* same param IDs elsewhere in the plan, but that can't happen because
@@ -2167,9 +2166,9 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan,
root->parse->rtable);
/*
- * Create a SubPlan node and add it to the outer list of InitPlans.
- * Note it has to appear after any other InitPlans it might depend on
- * (see comments in ExecReScan).
+ * Create a SubPlan node and add it to the outer list of InitPlans. Note
+ * it has to appear after any other InitPlans it might depend on (see
+ * comments in ExecReScan).
*/
node = makeNode(SubPlan);
node->subLinkType = EXPR_SUBLINK;
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 3df9d57c1d..fd451b338b 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.65 2009/04/28 21:31:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.66 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,16 +52,16 @@ static Node *pull_up_simple_subquery(PlannerInfo *root, Node *jtnode,
static Node *pull_up_simple_union_all(PlannerInfo *root, Node *jtnode,
RangeTblEntry *rte);
static void pull_up_union_leaf_queries(Node *setOp, PlannerInfo *root,
- int parentRTindex, Query *setOpQuery,
- int childRToffset);
+ int parentRTindex, Query *setOpQuery,
+ int childRToffset);
static void make_setop_translation_list(Query *query, Index newvarno,
- List **translated_vars);
+ List **translated_vars);
static bool is_simple_subquery(Query *subquery);
static bool is_simple_union_all(Query *subquery);
static bool is_simple_union_all_recurse(Node *setOp, Query *setOpQuery,
List *colTypes);
static List *insert_targetlist_placeholders(PlannerInfo *root, List *tlist,
- int varno, bool wrap_non_vars);
+ int varno, bool wrap_non_vars);
static bool is_safe_append_member(Query *subquery);
static void resolvenew_in_jointree(Node *jtnode, int varno, RangeTblEntry *rte,
List *subtlist, List *subtlist_with_phvs,
@@ -74,7 +74,7 @@ static void reduce_outer_joins_pass2(Node *jtnode,
List *nonnullable_vars,
List *forced_null_vars);
static void substitute_multiple_relids(Node *node,
- int varno, Relids subrelids);
+ int varno, Relids subrelids);
static void fix_append_rel_relids(List *append_rel_list, int varno,
Relids subrelids);
static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
@@ -87,7 +87,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
*
* A clause "foo op ANY (sub-SELECT)" can be processed by pulling the
* sub-SELECT up to become a rangetable entry and treating the implied
- * comparisons as quals of a semijoin. However, this optimization *only*
+ * comparisons as quals of a semijoin. However, this optimization *only*
* works at the top level of WHERE or a JOIN/ON clause, because we cannot
* distinguish whether the ANY ought to return FALSE or NULL in cases
* involving NULL inputs. Also, in an outer join's ON clause we can only
@@ -104,7 +104,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
* transformations if any are found.
*
* This routine has to run before preprocess_expression(), so the quals
- * clauses are not yet reduced to implicit-AND format. That means we need
+ * clauses are not yet reduced to implicit-AND format. That means we need
* to recursively search through explicit AND clauses, which are
* probably only binary ANDs. We stop as soon as we hit a non-AND item.
*/
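[note] Because the quals are still in explicit-AND form at this point, the search for pullable sublinks may only look through AND nodes; below an OR or NOT the FALSE-versus-NULL distinction matters, so recursion stops there. A toy standalone model of that traversal rule (the node type and flags are illustrative, not the real expression trees):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct ToyQual ToyQual;
    struct ToyQual
    {
        bool        is_and;             /* explicit AND of two children */
        ToyQual    *left;
        ToyQual    *right;
        bool        is_pullable_any;    /* leaf: "x op ANY (sub-SELECT)" */
    };

    /* Count sublinks reachable without passing through anything but ANDs;
     * only those are candidates for the semijoin transformation. */
    static int
    count_pullable_sublinks(const ToyQual *q)
    {
        if (q == NULL)
            return 0;
        if (q->is_and)
            return count_pullable_sublinks(q->left) +
                   count_pullable_sublinks(q->right);
        return q->is_pullable_any ? 1 : 0;
    }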
@@ -162,8 +162,8 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
/* First, recurse to process children and collect their relids */
foreach(l, f->fromlist)
{
- Node *newchild;
- Relids childrelids;
+ Node *newchild;
+ Relids childrelids;
newchild = pull_up_sublinks_jointree_recurse(root,
lfirst(l),
@@ -181,8 +181,8 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
/*
* Note that the result will be either newf, or a stack of JoinExprs
- * with newf at the base. We rely on subsequent optimization steps
- * to flatten this and rearrange the joins as needed.
+ * with newf at the base. We rely on subsequent optimization steps to
+ * flatten this and rearrange the joins as needed.
*
* Although we could include the pulled-up subqueries in the returned
* relids, there's no need since upper quals couldn't refer to their
@@ -199,8 +199,8 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
Node *jtlink;
/*
- * Make a modifiable copy of join node, but don't bother copying
- * its subnodes (yet).
+ * Make a modifiable copy of join node, but don't bother copying its
+ * subnodes (yet).
*/
j = (JoinExpr *) palloc(sizeof(JoinExpr));
memcpy(j, jtnode, sizeof(JoinExpr));
@@ -214,19 +214,19 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
/*
* Now process qual, showing appropriate child relids as available,
- * and attach any pulled-up jointree items at the right place.
- * In the inner-join case we put new JoinExprs above the existing one
- * (much as for a FromExpr-style join). In outer-join cases the
- * new JoinExprs must go into the nullable side of the outer join.
- * The point of the available_rels machinations is to ensure that we
- * only pull up quals for which that's okay.
+ * and attach any pulled-up jointree items at the right place. In the
+ * inner-join case we put new JoinExprs above the existing one (much
+ * as for a FromExpr-style join). In outer-join cases the new
+ * JoinExprs must go into the nullable side of the outer join. The
+ * point of the available_rels machinations is to ensure that we only
+ * pull up quals for which that's okay.
*
* XXX for the moment, we refrain from pulling up IN/EXISTS clauses
- * appearing in LEFT or RIGHT join conditions. Although it is
+ * appearing in LEFT or RIGHT join conditions. Although it is
* semantically valid to do so under the above conditions, we end up
* with a query in which the semijoin or antijoin must be evaluated
- * below the outer join, which could perform far worse than leaving
- * it as a sublink that is executed only for row pairs that meet the
+ * below the outer join, which could perform far worse than leaving it
+ * as a sublink that is executed only for row pairs that meet the
* other join conditions. Fixing this seems to require considerable
* restructuring of the executor, but maybe someday it can happen.
*
@@ -238,7 +238,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
case JOIN_INNER:
j->quals = pull_up_sublinks_qual_recurse(root, j->quals,
bms_union(leftrelids,
- rightrelids),
+ rightrelids),
&jtlink);
break;
case JOIN_LEFT:
@@ -267,7 +267,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
/*
* Although we could include the pulled-up subqueries in the returned
* relids, there's no need since upper quals couldn't refer to their
- * outputs anyway. But we *do* need to include the join's own rtindex
+ * outputs anyway. But we *do* need to include the join's own rtindex
* because we haven't yet collapsed join alias variables, so upper
* levels would mistakenly think they couldn't use references to this
* join.
@@ -416,7 +416,7 @@ inline_set_returning_functions(PlannerInfo *root)
if (rte->rtekind == RTE_FUNCTION)
{
- Query *funcquery;
+ Query *funcquery;
/* Check safety of expansion, and expand if possible */
funcquery = inline_set_returning_function(root, rte);
@@ -495,10 +495,10 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode,
* Alternatively, is it a simple UNION ALL subquery? If so, flatten
* into an "append relation".
*
- * It's safe to do this regardless of whether this query is
- * itself an appendrel member. (If you're thinking we should try to
- * flatten the two levels of appendrel together, you're right; but we
- * handle that in set_append_rel_pathlist, not here.)
+ * It's safe to do this regardless of whether this query is itself an
+ * appendrel member. (If you're thinking we should try to flatten the
+ * two levels of appendrel together, you're right; but we handle that
+ * in set_append_rel_pathlist, not here.)
*/
if (rte->rtekind == RTE_SUBQUERY &&
is_simple_union_all(rte->subquery))
@@ -637,10 +637,10 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* pull_up_subqueries' processing is complete for its jointree and
* rangetable.
*
- * Note: we should pass NULL for containing-join info even if we are within
- * an outer join in the upper query; the lower query starts with a clean
- * slate for outer-join semantics. Likewise, we say we aren't handling an
- * appendrel member.
+ * Note: we should pass NULL for containing-join info even if we are
+ * within an outer join in the upper query; the lower query starts with a
+ * clean slate for outer-join semantics. Likewise, we say we aren't
+ * handling an appendrel member.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subroot, (Node *) subquery->jointree, NULL, NULL);
@@ -673,8 +673,8 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
/*
* Adjust level-0 varnos in subquery so that we can append its rangetable
- * to upper query's. We have to fix the subquery's append_rel_list
- * as well.
+ * to upper query's. We have to fix the subquery's append_rel_list as
+ * well.
*/
rtoffset = list_length(parse->rtable);
OffsetVarNodes((Node *) subquery, rtoffset, 0);
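[note] Pulling the subquery up means its rangetable gets appended to the parent's, so every level-zero table reference inside the subquery must be shifted by the parent rangetable's length. A minimal sketch of that adjustment, with plain ints standing in for Var varnos:

    #include <stddef.h>

    /* Shift the subquery's level-zero varnos so they remain valid once the
     * two rangetables are concatenated. */
    static void
    offset_varnos(int *varnos, size_t nvarnos, int rtoffset)
    {
        for (size_t i = 0; i < nvarnos; i++)
            varnos[i] += rtoffset;
    }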
@@ -691,15 +691,15 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* The subquery's targetlist items are now in the appropriate form to
* insert into the top query, but if we are under an outer join then
* non-nullable items may have to be turned into PlaceHolderVars. If we
- * are dealing with an appendrel member then anything that's not a
- * simple Var has to be turned into a PlaceHolderVar.
+ * are dealing with an appendrel member then anything that's not a simple
+ * Var has to be turned into a PlaceHolderVar.
*/
subtlist = subquery->targetList;
if (lowest_outer_join != NULL || containing_appendrel != NULL)
subtlist_with_phvs = insert_targetlist_placeholders(root,
subtlist,
varno,
- containing_appendrel != NULL);
+ containing_appendrel != NULL);
else
subtlist_with_phvs = subtlist;
@@ -709,7 +709,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* replace any of the jointree structure. (This'd be a lot cleaner if we
* could use query_tree_mutator.) We have to use PHVs in the targetList,
* returningList, and havingQual, since those are certainly above any
- * outer join. resolvenew_in_jointree tracks its location in the jointree
+ * outer join. resolvenew_in_jointree tracks its location in the jointree
* and uses PHVs or not appropriately.
*/
parse->targetList = (List *)
@@ -730,11 +730,11 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
subtlist_with_phvs, CMD_SELECT, 0);
/*
- * Replace references in the translated_vars lists of appendrels.
- * When pulling up an appendrel member, we do not need PHVs in the list
- * of the parent appendrel --- there isn't any outer join between.
- * Elsewhere, use PHVs for safety. (This analysis could be made tighter
- * but it seems unlikely to be worth much trouble.)
+ * Replace references in the translated_vars lists of appendrels. When
+ * pulling up an appendrel member, we do not need PHVs in the list of the
+ * parent appendrel --- there isn't any outer join between. Elsewhere, use
+ * PHVs for safety. (This analysis could be made tighter but it seems
+ * unlikely to be worth much trouble.)
*/
foreach(lc, root->append_rel_list)
{
@@ -753,9 +753,9 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
*
* You might think that we could avoid using PHVs for alias vars of joins
* below lowest_outer_join, but that doesn't work because the alias vars
- * could be referenced above that join; we need the PHVs to be present
- * in such references after the alias vars get flattened. (It might be
- * worth trying to be smarter here, someday.)
+ * could be referenced above that join; we need the PHVs to be present in
+ * such references after the alias vars get flattened. (It might be worth
+ * trying to be smarter here, someday.)
*/
foreach(lc, parse->rtable)
{
@@ -789,9 +789,9 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* will be adjusted, so having created them with the subquery's varno is
* correct.
*
- * Likewise, relids appearing in AppendRelInfo nodes have to be fixed.
- * We already checked that this won't require introducing multiple
- * subrelids into the single-slot AppendRelInfo structs.
+ * Likewise, relids appearing in AppendRelInfo nodes have to be fixed. We
+ * already checked that this won't require introducing multiple subrelids
+ * into the single-slot AppendRelInfo structs.
*/
if (parse->hasSubLinks || root->glob->lastPHId != 0 ||
root->append_rel_list)
@@ -822,9 +822,10 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Miscellaneous housekeeping.
*/
parse->hasSubLinks |= subquery->hasSubLinks;
+
/*
- * subquery won't be pulled up if it hasAggs or hasWindowFuncs, so no
- * work needed on those flags
+ * subquery won't be pulled up if it hasAggs or hasWindowFuncs, so no work
+ * needed on those flags
*/
/*
@@ -859,10 +860,10 @@ pull_up_simple_union_all(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte)
/*
* Append child RTEs to parent rtable.
*
- * Upper-level vars in subquery are now one level closer to their
- * parent than before. We don't have to worry about offsetting
- * varnos, though, because any such vars must refer to stuff above the
- * level of the query we are pulling into.
+ * Upper-level vars in subquery are now one level closer to their parent
+ * than before. We don't have to worry about offsetting varnos, though,
+ * because any such vars must refer to stuff above the level of the query
+ * we are pulling into.
*/
rtable = copyObject(subquery->rtable);
IncrementVarSublevelsUp_rtable(rtable, -1, 1);
@@ -1049,11 +1050,11 @@ is_simple_subquery(Query *subquery)
* query_planner() will correctly generate a Result plan for a jointree
* that's totally empty, but I don't think the right things happen if an
* empty FromExpr appears lower down in a jointree. It would pose a
- * problem for the PlaceHolderVar mechanism too, since we'd have no
- * way to identify where to evaluate a PHV coming out of the subquery.
- * Not worth working hard on this, just to collapse SubqueryScan/Result
- * into Result; especially since the SubqueryScan can often be optimized
- * away by setrefs.c anyway.
+ * problem for the PlaceHolderVar mechanism too, since we'd have no way to
+ * identify where to evaluate a PHV coming out of the subquery. Not worth
+ * working hard on this, just to collapse SubqueryScan/Result into Result;
+ * especially since the SubqueryScan can often be optimized away by
+ * setrefs.c anyway.
*/
if (subquery->jointree->fromlist == NIL)
return false;
@@ -1167,8 +1168,8 @@ insert_targetlist_placeholders(PlannerInfo *root, List *tlist,
}
/*
- * Simple Vars always escape being wrapped. This is common enough
- * to deserve a fast path even if we aren't doing wrap_non_vars.
+ * Simple Vars always escape being wrapped. This is common enough to
+ * deserve a fast path even if we aren't doing wrap_non_vars.
*/
if (tle->expr && IsA(tle->expr, Var) &&
((Var *) tle->expr)->varlevelsup == 0)
@@ -1180,8 +1181,8 @@ insert_targetlist_placeholders(PlannerInfo *root, List *tlist,
if (!wrap_non_vars)
{
/*
- * If it contains a Var of current level, and does not contain
- * any non-strict constructs, then it's certainly nullable and we
+ * If it contains a Var of current level, and does not contain any
+ * non-strict constructs, then it's certainly nullable and we
* don't need to insert a PlaceHolderVar. (Note: in future maybe
* we should insert PlaceHolderVars anyway, when a tlist item is
* expensive to evaluate?
@@ -1248,7 +1249,7 @@ is_safe_append_member(Query *subquery)
* but there's no other way...
*
* If we are above lowest_outer_join then use subtlist_with_phvs; at or
- * below it, use subtlist. (When no outer joins are in the picture,
+ * below it, use subtlist. (When no outer joins are in the picture,
* these will be the same list.)
*/
static void
@@ -1328,7 +1329,7 @@ resolvenew_in_jointree(Node *jtnode, int varno, RangeTblEntry *rte,
* SELECT ... FROM a LEFT JOIN b ON (a.x = b.y) WHERE b.y IS NULL;
* If the join clause is strict for b.y, then only null-extended rows could
* pass the upper WHERE, and we can conclude that what the query is really
- * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
+ * specifying is an anti-semijoin. We change the join type from JOIN_LEFT
* to JOIN_ANTI. The IS NULL clause then becomes redundant, and must be
* removed to prevent bogus selectivity calculations, but we leave it to
* distribute_qual_to_rels to get rid of such clauses.
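The comment above sets up the JOIN_LEFT-to-JOIN_ANTI reduction that the following hunks implement. A minimal standalone C sketch of the equivalence it relies on (illustrative only, not part of this diff; the tables, values, and helper names are invented): when the join clause is strict, the only rows that can survive an upper "b.y IS NULL" filter are the null-extended ones, which is exactly the anti-join result.

#include <stdio.h>
#include <stdbool.h>

#define NULLVAL (-1)                    /* stands in for SQL NULL in this toy */

static const int a_x[] = {1, 2, 3, 4};
static const int b_y[] = {2, 4, NULLVAL};

/* Strict join clause: a NULL input can never make it true. */
static bool
clause_matches(int ax, int by)
{
    if (ax == NULLVAL || by == NULLVAL)
        return false;
    return ax == by;
}

int
main(void)
{
    int         i, j;

    /* LEFT JOIN a/b on the clause, then keep rows WHERE b.y IS NULL */
    printf("LEFT JOIN + IS NULL filter:");
    for (i = 0; i < 4; i++)
    {
        bool        matched = false;

        for (j = 0; j < 3; j++)
        {
            /* a matched pair has non-null b.y and would fail the filter */
            if (clause_matches(a_x[i], b_y[j]))
                matched = true;
        }
        if (!matched)
            printf(" (%d, NULL)", a_x[i]);      /* null-extended row passes */
    }
    printf("\n");

    /* JOIN_ANTI: emit an "a" row exactly when it has no match in "b" */
    printf("anti-join:                 ");
    for (i = 0; i < 4; i++)
    {
        bool        matched = false;

        for (j = 0; j < 3; j++)
        {
            if (clause_matches(a_x[i], b_y[j]))
                matched = true;
        }
        if (!matched)
            printf(" (%d, NULL)", a_x[i]);
    }
    printf("\n");
    return 0;                   /* both lines print (1, NULL) (3, NULL) */
}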
@@ -1533,6 +1534,7 @@ reduce_outer_joins_pass2(Node *jtnode,
break;
case JOIN_SEMI:
case JOIN_ANTI:
+
/*
* These could only have been introduced by pull_up_sublinks,
* so there's no way that upper quals could refer to their
@@ -1565,14 +1567,14 @@ reduce_outer_joins_pass2(Node *jtnode,
}
/*
- * See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case
- * if the join's own quals are strict for any var that was forced
- * null by higher qual levels. NOTE: there are other ways that we
- * could detect an anti-join, in particular if we were to check
- * whether Vars coming from the RHS must be non-null because of
- * table constraints. That seems complicated and expensive though
- * (in particular, one would have to be wary of lower outer joins).
- * For the moment this seems sufficient.
+ * See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case if
+ * the join's own quals are strict for any var that was forced null by
+ * higher qual levels. NOTE: there are other ways that we could
+ * detect an anti-join, in particular if we were to check whether Vars
+ * coming from the RHS must be non-null because of table constraints.
+ * That seems complicated and expensive though (in particular, one
+ * would have to be wary of lower outer joins). For the moment this
+ * seems sufficient.
*/
if (jointype == JOIN_LEFT)
{
@@ -1582,8 +1584,8 @@ reduce_outer_joins_pass2(Node *jtnode,
computed_local_nonnullable_vars = true;
/*
- * It's not sufficient to check whether local_nonnullable_vars
- * and forced_null_vars overlap: we need to know if the overlap
+ * It's not sufficient to check whether local_nonnullable_vars and
+ * forced_null_vars overlap: we need to know if the overlap
* includes any RHS variables.
*/
overlap = list_intersection(local_nonnullable_vars,
@@ -1621,11 +1623,11 @@ reduce_outer_joins_pass2(Node *jtnode,
* side, because an outer join never eliminates any rows from its
* non-nullable side. Also, there is no point in passing upper
* constraints into the nullable side, since if there were any
- * we'd have been able to reduce the join. (In the case of
- * upper forced-null constraints, we *must not* pass them into
- * the nullable side --- they either applied here, or not.)
- * The upshot is that we pass either the local or the upper
- * constraints, never both, to the children of an outer join.
+ * we'd have been able to reduce the join. (In the case of upper
+ * forced-null constraints, we *must not* pass them into the
+ * nullable side --- they either applied here, or not.) The upshot
+ * is that we pass either the local or the upper constraints,
+ * never both, to the children of an outer join.
*
* At a FULL join we just punt and pass nothing down --- is it
* possible to be smarter?
@@ -1640,7 +1642,7 @@ reduce_outer_joins_pass2(Node *jtnode,
{
/* OK to merge upper and local constraints */
local_nonnullable_rels = bms_add_members(local_nonnullable_rels,
- nonnullable_rels);
+ nonnullable_rels);
local_nonnullable_vars = list_concat(local_nonnullable_vars,
nonnullable_vars);
local_forced_null_vars = list_concat(local_forced_null_vars,
@@ -1663,7 +1665,7 @@ reduce_outer_joins_pass2(Node *jtnode,
pass_nonnullable_vars = local_nonnullable_vars;
pass_forced_null_vars = local_forced_null_vars;
}
- else if (jointype != JOIN_FULL) /* ie, LEFT/SEMI/ANTI */
+ else if (jointype != JOIN_FULL) /* ie, LEFT/SEMI/ANTI */
{
/* can't pass local constraints to non-nullable side */
pass_nonnullable_rels = nonnullable_rels;
@@ -1722,7 +1724,7 @@ reduce_outer_joins_pass2(Node *jtnode,
* top query could (yet) contain such a reference.
*
* NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place. This should be OK since the tree was copied by ResolveNew
+ * nodes in-place. This should be OK since the tree was copied by ResolveNew
* earlier. Avoid scribbling on the original values of the bitmapsets, though,
* because expression_tree_mutator doesn't copy those.
*/
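The reduce_outer_joins_pass2 hunks above stress that an overlap between the join's strict (non-nullable) vars and the vars forced null by upper quals is only decisive if the overlap includes a right-hand-side variable. A toy standalone check of that rule (illustrative only; the struct, arrays, and relid numbering are invented, whereas the real code works with Var nodes, list_intersection, and Relids bitmapsets):

#include <stdio.h>
#include <stdbool.h>

/* A variable reference is identified here by (relid, attno); both made up. */
typedef struct ToyVar
{
    int         relid;
    int         attno;
} ToyVar;

static bool
var_equal(ToyVar a, ToyVar b)
{
    return a.relid == b.relid && a.attno == b.attno;
}

static bool
relid_is_rhs(int relid, const int *rhs, int nrhs)
{
    int         i;

    for (i = 0; i < nrhs; i++)
        if (rhs[i] == relid)
            return true;
    return false;
}

int
main(void)
{
    /* vars the join's own quals are strict for */
    ToyVar      nonnullable[] = {{1, 1}, {2, 1}};
    /* vars forced NULL by quals above this join */
    ToyVar      forced_null[] = {{1, 1}, {2, 1}};
    /* relids making up the nullable (right-hand) side of the left join */
    int         rhs_relids[] = {2};
    bool        reduce = false;
    int         i, j;

    /*
     * The overlap alone proves nothing; only an overlapping var that comes
     * from the RHS shows that every non-null-extended row is filtered out,
     * which is what justifies switching JOIN_LEFT to JOIN_ANTI.
     */
    for (i = 0; i < 2; i++)
        for (j = 0; j < 2; j++)
            if (var_equal(nonnullable[i], forced_null[j]) &&
                relid_is_rhs(nonnullable[i].relid, rhs_relids, 1))
                reduce = true;

    printf("reduce JOIN_LEFT to JOIN_ANTI: %s\n", reduce ? "yes" : "no");
    return 0;
}

Changing rhs_relids to {3} makes the program answer "no" even though the two var lists still overlap, which is the distinction the comment is making.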
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 09acdaca65..b5e10a9180 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.170 2009/05/12 03:11:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.171 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,7 +97,7 @@ static void make_inh_translation_list(Relation oldrelation,
Index newvarno,
List **translated_vars);
static Bitmapset *translate_col_privs(const Bitmapset *parent_privs,
- List *translated_vars);
+ List *translated_vars);
static Node *adjust_appendrel_attrs_mutator(Node *node,
AppendRelInfo *context);
static Relids adjust_relid_set(Relids relids, Index oldrelid, Index newrelid);
@@ -220,9 +220,9 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
&subroot);
/*
- * Estimate number of groups if caller wants it. If the subquery
- * used grouping or aggregation, its output is probably mostly
- * unique anyway; otherwise do statistical estimation.
+ * Estimate number of groups if caller wants it. If the subquery used
+ * grouping or aggregation, its output is probably mostly unique
+ * anyway; otherwise do statistical estimation.
*/
if (pNumGroups)
{
@@ -231,7 +231,7 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
*pNumGroups = subplan->plan_rows;
else
*pNumGroups = estimate_num_groups(subroot,
- get_tlist_exprs(subquery->targetList, false),
+ get_tlist_exprs(subquery->targetList, false),
subplan->plan_rows);
}
@@ -361,7 +361,7 @@ generate_recursion_plan(SetOperationStmt *setOp, PlannerInfo *root,
}
else
{
- double dNumGroups;
+ double dNumGroups;
/* Identify the grouping semantics */
groupList = generate_setop_grouplist(setOp, tlist);
@@ -374,8 +374,8 @@ generate_recursion_plan(SetOperationStmt *setOp, PlannerInfo *root,
errdetail("All column datatypes must be hashable.")));
/*
- * For the moment, take the number of distinct groups as equal to
- * the total input size, ie, the worst case.
+ * For the moment, take the number of distinct groups as equal to the
+ * total input size, ie, the worst case.
*/
dNumGroups = lplan->plan_rows + rplan->plan_rows * 10;
@@ -460,9 +460,9 @@ generate_union_plan(SetOperationStmt *op, PlannerInfo *root,
plan = make_union_unique(op, plan, root, tuple_fraction, sortClauses);
/*
- * Estimate number of groups if caller wants it. For now we just
- * assume the output is unique --- this is certainly true for the
- * UNION case, and we want worst-case estimates anyway.
+ * Estimate number of groups if caller wants it. For now we just assume
+ * the output is unique --- this is certainly true for the UNION case, and
+ * we want worst-case estimates anyway.
*/
if (pNumGroups)
*pNumGroups = plan->plan_rows;
@@ -555,8 +555,8 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
* Estimate number of distinct groups that we'll need hashtable entries
* for; this is the size of the left-hand input for EXCEPT, or the smaller
* input for INTERSECT. Also estimate the number of eventual output rows.
- * In non-ALL cases, we estimate each group produces one output row;
- * in ALL cases use the relevant relation size. These are worst-case
+ * In non-ALL cases, we estimate each group produces one output row; in
+ * ALL cases use the relevant relation size. These are worst-case
* estimates, of course, but we need to be conservative.
*/
if (op->op == SETOP_EXCEPT)
@@ -578,7 +578,7 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root,
*/
use_hash = choose_hashed_setop(root, groupList, plan,
dNumGroups, dNumOutputRows, tuple_fraction,
- (op->op == SETOP_INTERSECT) ? "INTERSECT" : "EXCEPT");
+ (op->op == SETOP_INTERSECT) ? "INTERSECT" : "EXCEPT");
if (!use_hash)
plan = (Plan *) make_sort_from_sortclauses(root, groupList, plan);
@@ -687,12 +687,12 @@ make_union_unique(SetOperationStmt *op, Plan *plan,
}
/*
- * XXX for the moment, take the number of distinct groups as equal to
- * the total input size, ie, the worst case. This is too conservative,
- * but we don't want to risk having the hashtable overrun memory; also,
- * it's not clear how to get a decent estimate of the true size. One
- * should note as well the propensity of novices to write UNION rather
- * than UNION ALL even when they don't expect any duplicates...
+ * XXX for the moment, take the number of distinct groups as equal to the
+ * total input size, ie, the worst case. This is too conservative, but we
+ * don't want to risk having the hashtable overrun memory; also, it's not
+ * clear how to get a decent estimate of the true size. One should note
+ * as well the propensity of novices to write UNION rather than UNION ALL
+ * even when they don't expect any duplicates...
*/
dNumGroups = plan->plan_rows;
@@ -763,7 +763,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is UNION, INTERSECT, or EXCEPT */
+ /* translator: %s is UNION, INTERSECT, or EXCEPT */
errmsg("could not implement %s", construct),
errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
@@ -1260,16 +1260,16 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
appinfos = lappend(appinfos, appinfo);
/*
- * Translate the column permissions bitmaps to the child's attnums
- * (we have to build the translated_vars list before we can do this).
- * But if this is the parent table, leave copyObject's result alone.
+ * Translate the column permissions bitmaps to the child's attnums (we
+ * have to build the translated_vars list before we can do this). But
+ * if this is the parent table, leave copyObject's result alone.
*/
if (childOID != parentOID)
{
childrte->selectedCols = translate_col_privs(rte->selectedCols,
- appinfo->translated_vars);
+ appinfo->translated_vars);
childrte->modifiedCols = translate_col_privs(rte->modifiedCols,
- appinfo->translated_vars);
+ appinfo->translated_vars);
}
/*
@@ -1420,7 +1420,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation,
* parent rel's attribute numbering to the child's.
*
* The only surprise here is that we don't translate a parent whole-row
- * reference into a child whole-row reference. That would mean requiring
+ * reference into a child whole-row reference. That would mean requiring
* permissions on all child columns, which is overly strict, since the
* query is really only going to reference the inherited columns. Instead
* we set the per-column bits for all inherited columns.
@@ -1435,12 +1435,12 @@ translate_col_privs(const Bitmapset *parent_privs,
ListCell *lc;
/* System attributes have the same numbers in all tables */
- for (attno = FirstLowInvalidHeapAttributeNumber+1; attno < 0; attno++)
+ for (attno = FirstLowInvalidHeapAttributeNumber + 1; attno < 0; attno++)
{
if (bms_is_member(attno - FirstLowInvalidHeapAttributeNumber,
parent_privs))
child_privs = bms_add_member(child_privs,
- attno - FirstLowInvalidHeapAttributeNumber);
+ attno - FirstLowInvalidHeapAttributeNumber);
}
/* Check if parent has whole-row reference */
@@ -1451,7 +1451,7 @@ translate_col_privs(const Bitmapset *parent_privs,
attno = InvalidAttrNumber;
foreach(lc, translated_vars)
{
- Var *var = (Var *) lfirst(lc);
+ Var *var = (Var *) lfirst(lc);
attno++;
if (var == NULL) /* ignore dropped columns */
@@ -1461,7 +1461,7 @@ translate_col_privs(const Bitmapset *parent_privs,
bms_is_member(attno - FirstLowInvalidHeapAttributeNumber,
parent_privs))
child_privs = bms_add_member(child_privs,
- var->varattno - FirstLowInvalidHeapAttributeNumber);
+ var->varattno - FirstLowInvalidHeapAttributeNumber);
}
return child_privs;
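translate_col_privs, shown above, maps a parent rel's per-column privilege bits onto the child's attribute numbers and expands a parent whole-row reference into per-column bits on the inherited columns. A stripped-down sketch of that translation (illustrative only; the bit layout and the parent-to-child mapping array are invented, while the real code offsets attnos by FirstLowInvalidHeapAttributeNumber and stores them in Bitmapsets):

#include <stdio.h>

/*
 * Toy privilege bitmaps: bit 0 stands for a whole-row reference, bit N for
 * user column N.
 */
#define WHOLE_ROW_BIT   (1u << 0)
#define COL_BIT(attno)  (1u << (attno))

int
main(void)
{
    /* parent attno -> child attno; index 0 unused, value 0 means "dropped" */
    int         parent_to_child[] = {0, 1, 3, 2};   /* parent cols 1..3 */
    unsigned    parent_privs = WHOLE_ROW_BIT | COL_BIT(2);
    unsigned    child_privs = 0;
    int         parent_attno;

    for (parent_attno = 1; parent_attno <= 3; parent_attno++)
    {
        int         child_attno = parent_to_child[parent_attno];

        if (child_attno == 0)
            continue;           /* dropped column, nothing to translate */

        /*
         * A whole-row reference on the parent becomes per-column bits on
         * every inherited child column; otherwise translate bit-by-bit.
         */
        if ((parent_privs & WHOLE_ROW_BIT) ||
            (parent_privs & COL_BIT(parent_attno)))
            child_privs |= COL_BIT(child_attno);
    }

    printf("child privilege bitmap: 0x%x\n", child_privs);  /* 0xe: cols 1..3 */
    return 0;
}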
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index c9c7270d2b..75c5d0c94d 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.276 2009/02/25 03:30:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.277 2009/06/11 14:48:59 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -98,8 +98,8 @@ static Expr *simplify_function(Oid funcid,
bool allow_inline,
eval_const_expressions_context *context);
static List *add_function_defaults(List *args, Oid result_type,
- HeapTuple func_tuple,
- eval_const_expressions_context *context);
+ HeapTuple func_tuple,
+ eval_const_expressions_context *context);
static Expr *evaluate_function(Oid funcid,
Oid result_type, int32 result_typmod, List *args,
HeapTuple func_tuple,
@@ -114,9 +114,9 @@ static Node *substitute_actual_parameters_mutator(Node *node,
static void sql_inline_error_callback(void *arg);
static Expr *evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod);
static Query *substitute_actual_srf_parameters(Query *expr,
- int nargs, List *args);
+ int nargs, List *args);
static Node *substitute_actual_srf_parameters_mutator(Node *node,
- substitute_actual_srf_parameters_context *context);
+ substitute_actual_srf_parameters_context *context);
static bool tlist_matches_coltypelist(List *tlist, List *coltypelist);
@@ -612,7 +612,8 @@ find_window_functions_walker(Node *node, WindowFuncLists *lists)
lists->numWindowFuncs++;
/*
- * Complain if the window function's arguments contain window functions
+ * Complain if the window function's arguments contain window
+ * functions
*/
if (contain_window_function((Node *) wfunc->args))
ereport(ERROR,
@@ -1557,8 +1558,8 @@ find_forced_null_vars(Node *node)
/*
* We don't bother considering the OR case, because it's fairly
- * unlikely anyone would write "v1 IS NULL OR v1 IS NULL".
- * Likewise, the NOT case isn't worth expending code on.
+ * unlikely anyone would write "v1 IS NULL OR v1 IS NULL". Likewise,
+ * the NOT case isn't worth expending code on.
*/
if (expr->boolop == AND_EXPR)
{
@@ -1594,7 +1595,7 @@ find_forced_null_var(Node *node)
if (expr->nulltesttype == IS_NULL)
{
- Var *var = (Var *) expr->arg;
+ Var *var = (Var *) expr->arg;
if (var && IsA(var, Var) &&
var->varlevelsup == 0)
@@ -1608,7 +1609,7 @@ find_forced_null_var(Node *node)
if (expr->booltesttype == IS_UNKNOWN)
{
- Var *var = (Var *) expr->arg;
+ Var *var = (Var *) expr->arg;
if (var && IsA(var, Var) &&
var->varlevelsup == 0)
@@ -2013,7 +2014,7 @@ eval_const_expressions(PlannerInfo *root, Node *node)
if (root)
{
context.boundParams = root->glob->boundParams; /* bound Params */
- context.glob = root->glob; /* for inlined-function dependencies */
+ context.glob = root->glob; /* for inlined-function dependencies */
}
else
{
@@ -2453,9 +2454,9 @@ eval_const_expressions_mutator(Node *node,
/*
* CoerceViaIO represents calling the source type's output function
- * then the result type's input function. So, try to simplify it
- * as though it were a stack of two such function calls. First we
- * need to know what the functions are.
+ * then the result type's input function. So, try to simplify it as
+ * though it were a stack of two such function calls. First we need
+ * to know what the functions are.
*/
getTypeOutputInfo(exprType((Node *) arg), &outfunc, &outtypisvarlena);
getTypeInputInfo(expr->resulttype, &infunc, &intypioparam);
@@ -2505,8 +2506,8 @@ eval_const_expressions_mutator(Node *node,
ArrayCoerceExpr *newexpr;
/*
- * Reduce constants in the ArrayCoerceExpr's argument, then build
- * a new ArrayCoerceExpr.
+ * Reduce constants in the ArrayCoerceExpr's argument, then build a
+ * new ArrayCoerceExpr.
*/
arg = (Expr *) eval_const_expressions_mutator((Node *) expr->arg,
context);
@@ -2925,7 +2926,7 @@ eval_const_expressions_mutator(Node *node,
newbtest->booltesttype = btest->booltesttype;
return (Node *) newbtest;
}
- if (IsA(node, PlaceHolderVar) && context->estimate)
+ if (IsA(node, PlaceHolderVar) &&context->estimate)
{
/*
* In estimation mode, just strip the PlaceHolderVar node altogether;
@@ -3266,7 +3267,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
*
* It is possible for some of the defaulted arguments to be polymorphic;
* therefore we can't assume that the default expressions have the correct
- * data types already. We have to re-resolve polymorphics and do coercion
+ * data types already. We have to re-resolve polymorphics and do coercion
* just like the parser did.
*/
static List *
@@ -3594,7 +3595,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* a RelabelType if needed to make the tlist expression match the declared
* type of the function.
*
@@ -3695,8 +3696,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
MemoryContextDelete(mycxt);
/*
- * Since there is now no trace of the function in the plan tree, we
- * must explicitly record the plan's dependency on the function.
+ * Since there is now no trace of the function in the plan tree, we must
+ * explicitly record the plan's dependency on the function.
*/
if (context->glob)
record_plan_function_dependency(context->glob, funcid);
@@ -3825,7 +3826,7 @@ evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod)
fix_opfuncids((Node *) expr);
/*
- * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
+ * Prepare expr for execution. (Note: we can't use ExecPrepareExpr
* because it'd result in recursively invoking eval_const_expressions.)
*/
exprstate = ExecInitExpr(expr, NULL);
@@ -3908,10 +3909,10 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
Assert(rte->rtekind == RTE_FUNCTION);
/*
- * It doesn't make a lot of sense for a SQL SRF to refer to itself
- * in its own FROM clause, since that must cause infinite recursion
- * at runtime. It will cause this code to recurse too, so check
- * for stack overflow. (There's no need to do more.)
+ * It doesn't make a lot of sense for a SQL SRF to refer to itself in its
+ * own FROM clause, since that must cause infinite recursion at runtime.
+ * It will cause this code to recurse too, so check for stack overflow.
+ * (There's no need to do more.)
*/
check_stack_depth();
@@ -3922,8 +3923,8 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* The function must be declared to return a set, else inlining would
- * change the results if the contained SELECT didn't return exactly
- * one row.
+ * change the results if the contained SELECT didn't return exactly one
+ * row.
*/
if (!fexpr->funcretset)
return NULL;
@@ -3932,7 +3933,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
* Refuse to inline if the arguments contain any volatile functions or
* sub-selects. Volatile functions are rejected because inlining may
* result in the arguments being evaluated multiple times, risking a
- * change in behavior. Sub-selects are rejected partly for implementation
+ * change in behavior. Sub-selects are rejected partly for implementation
* reasons (pushing them down another level might change their behavior)
* and partly because they're likely to be expensive and so multiple
* evaluation would be bad.
@@ -3957,7 +3958,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Forget it if the function is not SQL-language or has other showstopper
- * properties. In particular it mustn't be declared STRICT, since we
+ * properties. In particular it mustn't be declared STRICT, since we
* couldn't enforce that. It also mustn't be VOLATILE, because that is
* supposed to cause it to be executed with its own snapshot, rather than
* sharing the snapshot of the calling query. (The nargs check is just
@@ -4017,16 +4018,16 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
src = TextDatumGetCString(tmp);
/*
- * Parse, analyze, and rewrite (unlike inline_function(), we can't
- * skip rewriting here). We can fail as soon as we find more than
- * one query, though.
+ * Parse, analyze, and rewrite (unlike inline_function(), we can't skip
+ * rewriting here). We can fail as soon as we find more than one query,
+ * though.
*/
raw_parsetree_list = pg_parse_query(src);
if (list_length(raw_parsetree_list) != 1)
goto fail;
querytree_list = pg_analyze_and_rewrite(linitial(raw_parsetree_list), src,
- argtypes, funcform->pronargs);
+ argtypes, funcform->pronargs);
if (list_length(querytree_list) != 1)
goto fail;
querytree = linitial(querytree_list);
@@ -4043,13 +4044,13 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
/*
* Make sure the function (still) returns what it's declared to. This
* will raise an error if wrong, but that's okay since the function would
- * fail at runtime anyway. Note that check_sql_fn_retval will also insert
+ * fail at runtime anyway. Note that check_sql_fn_retval will also insert
* RelabelType(s) if needed to make the tlist expression(s) match the
* declared type of the function.
*
- * If the function returns a composite type, don't inline unless the
- * check shows it's returning a whole tuple result; otherwise what
- * it's returning is a single composite column which is not what we need.
+ * If the function returns a composite type, don't inline unless the check
+ * shows it's returning a whole tuple result; otherwise what it's
+ * returning is a single composite column which is not what we need.
*/
if (!check_sql_fn_retval(fexpr->funcid, fexpr->funcresulttype,
querytree_list,
@@ -4076,8 +4077,8 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
fexpr->args);
/*
- * Copy the modified query out of the temporary memory context,
- * and clean up.
+ * Copy the modified query out of the temporary memory context, and clean
+ * up.
*/
MemoryContextSwitchTo(oldcxt);
@@ -4088,8 +4089,8 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
ReleaseSysCache(func_tuple);
/*
- * Since there is now no trace of the function in the plan tree, we
- * must explicitly record the plan's dependency on the function.
+ * Since there is now no trace of the function in the plan tree, we must
+ * explicitly record the plan's dependency on the function.
*/
record_plan_function_dependency(root->glob, fexpr->funcid);
@@ -4128,9 +4129,9 @@ substitute_actual_srf_parameters(Query *expr, int nargs, List *args)
static Node *
substitute_actual_srf_parameters_mutator(Node *node,
- substitute_actual_srf_parameters_context *context)
+ substitute_actual_srf_parameters_context *context)
{
- Node *result;
+ Node *result;
if (node == NULL)
return NULL;
@@ -4138,7 +4139,7 @@ substitute_actual_srf_parameters_mutator(Node *node,
{
context->sublevels_up++;
result = (Node *) query_tree_mutator((Query *) node,
- substitute_actual_srf_parameters_mutator,
+ substitute_actual_srf_parameters_mutator,
(void *) context,
0);
context->sublevels_up--;
@@ -4154,8 +4155,8 @@ substitute_actual_srf_parameters_mutator(Node *node,
elog(ERROR, "invalid paramid: %d", param->paramid);
/*
- * Since the parameter is being inserted into a subquery,
- * we must adjust levels.
+ * Since the parameter is being inserted into a subquery, we must
+ * adjust levels.
*/
result = copyObject(list_nth(context->args, param->paramid - 1));
IncrementVarSublevelsUp(result, context->sublevels_up, 0);
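Earlier in this file, find_forced_null_vars() is described as collecting variables forced NULL by top-level "var IS NULL" conjuncts while declining to analyze OR and NOT branches. A self-contained toy walker showing that shape (illustrative only; the node layout and names are invented, and memory is deliberately not freed):

#include <stdio.h>
#include <stdlib.h>

typedef enum { NODE_AND, NODE_OR, NODE_IS_NULL } NodeTag;

typedef struct ToyNode
{
    NodeTag     tag;
    int         varno;          /* used only by NODE_IS_NULL */
    struct ToyNode *left;
    struct ToyNode *right;
} ToyNode;

static ToyNode *
mk(NodeTag tag, int varno, ToyNode *l, ToyNode *r)
{
    ToyNode    *n = malloc(sizeof(ToyNode));

    n->tag = tag;
    n->varno = varno;
    n->left = l;
    n->right = r;
    return n;
}

static void
find_forced_null(ToyNode *node)
{
    if (node == NULL)
        return;
    switch (node->tag)
    {
        case NODE_IS_NULL:
            printf("var %d is forced null\n", node->varno);
            break;
        case NODE_AND:
            /* every conjunct must hold, so recurse into both sides */
            find_forced_null(node->left);
            find_forced_null(node->right);
            break;
        case NODE_OR:
            /* an OR doesn't force anything; not worth analyzing */
            break;
    }
}

int
main(void)
{
    /* (v1 IS NULL) AND ((v2 IS NULL) OR (v3 IS NULL)) */
    ToyNode    *qual = mk(NODE_AND, 0,
                          mk(NODE_IS_NULL, 1, NULL, NULL),
                          mk(NODE_OR, 0,
                             mk(NODE_IS_NULL, 2, NULL, NULL),
                             mk(NODE_IS_NULL, 3, NULL, NULL)));

    find_forced_null(qual);     /* reports only var 1 */
    return 0;
}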
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 5ba413bb1a..b0358cb112 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.151 2009/03/26 17:15:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.152 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -797,7 +797,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
in_operators = NIL;
uniq_exprs = NIL;
all_btree = true;
- all_hash = enable_hashagg; /* don't consider hash if not enabled */
+ all_hash = enable_hashagg; /* don't consider hash if not enabled */
foreach(lc, sjinfo->join_quals)
{
OpExpr *op = (OpExpr *) lfirst(lc);
@@ -904,8 +904,8 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
goto no_unique_path;
/*
- * If we get here, we can unique-ify using at least one of sorting
- * and hashing. Start building the result Path object.
+ * If we get here, we can unique-ify using at least one of sorting and
+ * hashing. Start building the result Path object.
*/
pathnode = makeNode(UniquePath);
@@ -972,8 +972,8 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
-1.0);
/*
- * Charge one cpu_operator_cost per comparison per input tuple.
- * We assume all columns get compared at most of the tuples. (XXX
+ * Charge one cpu_operator_cost per comparison per input tuple. We
+ * assume all columns get compared at most of the tuples. (XXX
* probably this is an overestimate.) This should agree with
* make_unique.
*/
@@ -1030,7 +1030,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
return pathnode;
-no_unique_path: /* failure exit */
+no_unique_path: /* failure exit */
/* Mark the SpecialJoinInfo as not unique-able */
sjinfo->join_quals = NIL;
@@ -1404,27 +1404,27 @@ create_mergejoin_path(PlannerInfo *root,
* selected as the input of a mergejoin, and they don't support
* mark/restore at present.
*
- * Note: Sort supports mark/restore, so no materialize is really needed
- * in that case; but one may be desirable anyway to optimize the sort.
- * However, since we aren't representing the sort step separately in
- * the Path tree, we can't explicitly represent the materialize either.
- * So that case is not handled here. Instead, cost_mergejoin has to
- * factor in the cost and create_mergejoin_plan has to add the plan node.
+ * Note: Sort supports mark/restore, so no materialize is really needed in
+ * that case; but one may be desirable anyway to optimize the sort.
+ * However, since we aren't representing the sort step separately in the
+ * Path tree, we can't explicitly represent the materialize either. So
+ * that case is not handled here. Instead, cost_mergejoin has to factor
+ * in the cost and create_mergejoin_plan has to add the plan node.
*/
if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path->pathtype))
{
- Path *mpath;
+ Path *mpath;
mpath = (Path *) create_material_path(inner_path->parent, inner_path);
/*
- * We expect the materialize won't spill to disk (it could only do
- * so if there were a whole lot of duplicate tuples, which is a case
- * cost_mergejoin will avoid choosing anyway). Therefore
- * cost_material's cost estimate is bogus and we should charge
- * just cpu_tuple_cost per tuple. (Keep this estimate in sync with
- * similar ones in cost_mergejoin and create_mergejoin_plan.)
+ * We expect the materialize won't spill to disk (it could only do so
+ * if there were a whole lot of duplicate tuples, which is a case
+ * cost_mergejoin will avoid choosing anyway). Therefore
+ * cost_material's cost estimate is bogus and we should charge just
+ * cpu_tuple_cost per tuple. (Keep this estimate in sync with similar
+ * ones in cost_mergejoin and create_mergejoin_plan.)
*/
mpath->startup_cost = inner_path->startup_cost;
mpath->total_cost = inner_path->total_cost;
@@ -1480,16 +1480,17 @@ create_hashjoin_path(PlannerInfo *root,
pathnode->jpath.outerjoinpath = outer_path;
pathnode->jpath.innerjoinpath = inner_path;
pathnode->jpath.joinrestrictinfo = restrict_clauses;
+
/*
* A hashjoin never has pathkeys, since its output ordering is
- * unpredictable due to possible batching. XXX If the inner relation is
+ * unpredictable due to possible batching. XXX If the inner relation is
* small enough, we could instruct the executor that it must not batch,
* and then we could assume that the output inherits the outer relation's
- * ordering, which might save a sort step. However there is considerable
- * downside if our estimate of the inner relation size is badly off.
- * For the moment we don't risk it. (Note also that if we wanted to take
- * this seriously, joinpath.c would have to consider many more paths for
- * the outer rel than it does now.)
+ * ordering, which might save a sort step. However there is considerable
+ * downside if our estimate of the inner relation size is badly off. For
+ * the moment we don't risk it. (Note also that if we wanted to take this
+ * seriously, joinpath.c would have to consider many more paths for the
+ * outer rel than it does now.)
*/
pathnode->jpath.path.pathkeys = NIL;
pathnode->path_hashclauses = hashclauses;
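Two of the pathnode.c hunks above describe back-of-the-envelope costing: create_unique_path charges one cpu_operator_cost per column comparison per input tuple when unique-ifying by sort, and create_mergejoin_path's added materialize node is charged just cpu_tuple_cost per tuple on the assumption it never spills. A small sketch of that arithmetic (illustrative only; the row counts and input costs are invented, and the cost parameters are the stock defaults):

#include <stdio.h>

/* Planner cost parameters at their default settings. */
static const double cpu_operator_cost = 0.0025;
static const double cpu_tuple_cost = 0.01;

int
main(void)
{
    /* made-up inner path: 10000 rows, costs already computed by the planner */
    double      inner_rows = 10000.0;
    double      inner_startup_cost = 0.0;
    double      inner_total_cost = 170.0;
    int         num_unique_cols = 2;

    /*
     * Sort-based unique-ification: one comparison per column per input
     * tuple, cpu_operator_cost apiece (an acknowledged overestimate).
     */
    double      uniquify_cpu = cpu_operator_cost * inner_rows * num_unique_cols;

    /*
     * "Cheap materialize" for a mergejoin inner path: keep the input costs
     * and add cpu_tuple_cost per tuple, assuming no spill to disk.
     */
    double      mat_startup_cost = inner_startup_cost;
    double      mat_total_cost = inner_total_cost + cpu_tuple_cost * inner_rows;

    printf("extra cost to unique-ify by sorting: %.2f\n", uniquify_cpu);
    printf("materialized inner path: startup %.2f, total %.2f\n",
           mat_startup_cost, mat_total_cost);
    return 0;
}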
diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c
index 019352158d..b06c48c1e4 100644
--- a/src/backend/optimizer/util/placeholder.c
+++ b/src/backend/optimizer/util/placeholder.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/placeholder.c,v 1.4 2009/04/19 19:46:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/placeholder.c,v 1.5 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@ find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv)
phinfo->ph_var = copyObject(phv);
phinfo->ph_eval_at = pull_varnos((Node *) phv);
/* ph_eval_at may change later, see fix_placeholder_eval_levels */
- phinfo->ph_needed = NULL; /* initially it's unused */
+ phinfo->ph_needed = NULL; /* initially it's unused */
/* for the moment, estimate width using just the datatype info */
phinfo->ph_width = get_typavgwidth(exprType((Node *) phv->phexpr),
exprTypmod((Node *) phv->phexpr));
@@ -88,7 +88,7 @@ find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv)
*
* The initial eval_at level set by find_placeholder_info was the set of
* rels used in the placeholder's expression (or the whole subselect if
- * the expr is variable-free). If the subselect contains any outer joins
+ * the expr is variable-free). If the subselect contains any outer joins
* that can null any of those rels, we must delay evaluation to above those
* joins.
*
@@ -153,11 +153,11 @@ fix_placeholder_eval_levels(PlannerInfo *root)
/*
* Now that we know where to evaluate the placeholder, make sure that
* any vars or placeholders it uses will be available at that join
- * level. NOTE: this could cause more PlaceHolderInfos to be added
- * to placeholder_list. That is okay because we'll process them
- * before falling out of the foreach loop. Also, it could cause
- * the ph_needed sets of existing list entries to expand, which
- * is also okay because this loop doesn't examine those.
+ * level. NOTE: this could cause more PlaceHolderInfos to be added to
+ * placeholder_list. That is okay because we'll process them before
+ * falling out of the foreach loop. Also, it could cause the
+ * ph_needed sets of existing list entries to expand, which is also
+ * okay because this loop doesn't examine those.
*/
if (bms_membership(eval_at) == BMS_MULTIPLE)
{
@@ -173,7 +173,7 @@ fix_placeholder_eval_levels(PlannerInfo *root)
* Now, if any placeholder can be computed at a base rel and is needed
* above it, add it to that rel's targetlist. (This is essentially the
* same logic as in add_placeholders_to_joinrel, but we can't do that part
- * until joinrels are formed.) We have to do this as a separate step
+ * until joinrels are formed.) We have to do this as a separate step
* because the ph_needed values aren't stable until the previous loop
* finishes.
*/
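The header comment above explains that a placeholder's initial evaluation level is the set of rels its expression uses, and that evaluation must be delayed above any outer join that can null one of those rels. A toy fixed-point version of that widening step (illustrative only; relid sets are plain bitmasks here rather than Relids, and the join data is invented):

#include <stdio.h>
#include <stdbool.h>

typedef struct ToyOuterJoin
{
    unsigned    all_relids;         /* every rel at or below this join */
    unsigned    nullable_relids;    /* rels this join can null-extend */
} ToyOuterJoin;

int
main(void)
{
    /* rel 1 = bit 0, rel 2 = bit 1, rel 3 = bit 2 */
    ToyOuterJoin joins[] = {
        {0x3, 0x2},             /* 1 LEFT JOIN 2: rel 2 is nullable */
        {0x7, 0x4},             /* (1 2) LEFT JOIN 3: rel 3 is nullable */
    };
    unsigned    eval_at = 0x2;  /* the expression uses only rel 2 */
    bool        changed = true;

    /* keep widening until no outer join can null a member of eval_at */
    while (changed)
    {
        int         i;

        changed = false;
        for (i = 0; i < 2; i++)
        {
            if ((eval_at & joins[i].nullable_relids) &&
                (eval_at | joins[i].all_relids) != eval_at)
            {
                eval_at |= joins[i].all_relids;
                changed = true;
            }
        }
    }

    printf("evaluate placeholder at relid set 0x%x\n", eval_at);    /* 0x3 */
    return 0;
}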
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 0479e93ec8..4f07cade68 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.157 2009/05/12 00:56:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.158 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -525,7 +525,7 @@ get_relation_constraints(PlannerInfo *root,
/* Add NOT NULL constraints in expression form, if requested */
if (include_notnull && constr->has_not_null)
{
- int natts = relation->rd_att->natts;
+ int natts = relation->rd_att->natts;
for (i = 1; i <= natts; i++)
{
@@ -533,7 +533,7 @@ get_relation_constraints(PlannerInfo *root,
if (att->attnotnull && !att->attisdropped)
{
- NullTest *ntest = makeNode(NullTest);
+ NullTest *ntest = makeNode(NullTest);
ntest->arg = (Expr *) makeVar(varno,
i,
@@ -604,7 +604,7 @@ relation_excluded_by_constraints(PlannerInfo *root,
return false;
/*
- * OK to fetch the constraint expressions. Include "col IS NOT NULL"
+ * OK to fetch the constraint expressions. Include "col IS NOT NULL"
* expressions for attnotnull columns, in case we can refute those.
*/
constraint_pred = get_relation_constraints(root, rte->relid, rel, true);
@@ -865,10 +865,10 @@ has_unique_index(RelOptInfo *rel, AttrNumber attno)
/*
* Note: ignore partial indexes, since they don't allow us to conclude
* that all attr values are distinct, *unless* they are marked predOK
- * which means we know the index's predicate is satisfied by the query.
- * We don't take any interest in expressional indexes either. Also, a
- * multicolumn unique index doesn't allow us to conclude that just the
- * specified attr is unique.
+ * which means we know the index's predicate is satisfied by the
+ * query. We don't take any interest in expressional indexes either.
+ * Also, a multicolumn unique index doesn't allow us to conclude that
+ * just the specified attr is unique.
*/
if (index->unique &&
index->ncolumns == 1 &&
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 3249b2726e..2b9f7727d8 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.26 2009/05/11 17:56:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.27 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,7 +98,7 @@ static Node *extract_not_arg(Node *clause);
static bool list_member_strip(List *list, Expr *datum);
static bool btree_predicate_proof(Expr *predicate, Node *clause,
bool refute_it);
-static Oid get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it);
+static Oid get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it);
static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, ItemPointer tuplePtr);
@@ -134,7 +134,7 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -192,7 +192,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list)
/*
* If either input is a single-element list, replace it with its lone
- * member; this avoids one useless level of AND-recursion. We only need
+ * member; this avoids one useless level of AND-recursion. We only need
* to worry about this at top level, since eval_const_expressions should
* have gotten rid of any trivial ANDs or ORs below that.
*/
@@ -652,13 +652,14 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
case CLASS_ATOM:
#ifdef NOT_USED
+
/*
* If A is a NOT-clause, A R=> B if B => A's arg
*
* Unfortunately not: this would only prove that B is not-TRUE,
* not that it's not NULL either. Keep this code as a comment
- * because it would be useful if we ever had a need for the
- * weak form of refutation.
+ * because it would be useful if we ever had a need for the weak
+ * form of refutation.
*/
not_arg = extract_not_arg(clause);
if (not_arg &&
@@ -738,7 +739,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
* This function also implements enforcement of MAX_SAOP_ARRAY_SIZE: if a
* ScalarArrayOpExpr's array has too many elements, we just classify it as an
* atom. (This will result in its being passed as-is to the simple_clause
- * functions, which will fail to prove anything about it.) Note that we
+ * functions, which will fail to prove anything about it.) Note that we
* cannot just stop after considering MAX_SAOP_ARRAY_SIZE elements; in general
* that would result in wrong proofs, rather than failing to prove anything.
*/
@@ -1484,8 +1485,8 @@ typedef struct OprProofCacheEntry
bool have_implic; /* do we know the implication result? */
bool have_refute; /* do we know the refutation result? */
- Oid implic_test_op; /* OID of the operator, or 0 if none */
- Oid refute_test_op; /* OID of the operator, or 0 if none */
+ Oid implic_test_op; /* OID of the operator, or 0 if none */
+ Oid refute_test_op; /* OID of the operator, or 0 if none */
} OprProofCacheEntry;
static HTAB *OprProofCacheHash = NULL;
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 2d289cae71..4ca3eeaaf2 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.93 2009/01/01 17:23:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.94 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -430,8 +430,8 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
int ndx;
/*
- * Ignore PlaceHolderVars in the input tlists; we'll make our
- * own decisions about whether to copy them.
+ * Ignore PlaceHolderVars in the input tlists; we'll make our own
+ * decisions about whether to copy them.
*/
if (IsA(origvar, PlaceHolderVar))
continue;
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 5b75d2de3b..47086a4bfc 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.59 2009/05/09 22:51:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.60 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -593,9 +593,9 @@ select_nonredundant_join_clauses(PlannerInfo *root,
* OK because we're only trying to prove we can dispense with some
* join quals; failing to prove that doesn't result in an incorrect
* plan. It's quite unlikely that a join qual could be proven
- * redundant by an index predicate anyway. (Also, if we did manage
- * to prove it, we'd have to have a special case for update targets;
- * see notes about EvalPlanQual testing in create_indexscan_plan().)
+ * redundant by an index predicate anyway. (Also, if we did manage to
+ * prove it, we'd have to have a special case for update targets; see
+ * notes about EvalPlanQual testing in create_indexscan_plan().)
*/
BitmapHeapPath *innerpath = (BitmapHeapPath *) inner_path;
@@ -614,10 +614,10 @@ select_nonredundant_join_clauses(PlannerInfo *root,
}
/*
- * XXX the inner path of a nestloop could also be an append relation
- * whose elements use join quals. However, they might each use different
- * quals; we could only remove join quals that are enforced by all the
- * appendrel members. For the moment we don't bother to try.
+ * XXX the inner path of a nestloop could also be an append relation whose
+ * elements use join quals. However, they might each use different quals;
+ * we could only remove join quals that are enforced by all the appendrel
+ * members. For the moment we don't bother to try.
*/
return restrictinfo_list;
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index cd88c337f1..deb9ef8ebd 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.85 2009/04/19 19:46:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.86 2009/06/11 14:48:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,9 +71,9 @@ static bool pull_varattnos_walker(Node *node, Bitmapset **varattnos);
static bool contain_var_clause_walker(Node *node, void *context);
static bool contain_vars_of_level_walker(Node *node, int *sublevels_up);
static bool locate_var_of_level_walker(Node *node,
- locate_var_of_level_context *context);
+ locate_var_of_level_context *context);
static bool locate_var_of_relation_walker(Node *node,
- locate_var_of_relation_context *context);
+ locate_var_of_relation_context *context);
static bool find_minimum_var_level_walker(Node *node,
find_minimum_var_level_context *context);
static bool pull_var_clause_walker(Node *node,
@@ -318,7 +318,7 @@ contain_vars_of_level_walker(Node *node, int *sublevels_up)
* Find the parse location of any Var of the specified query level.
*
* Returns -1 if no such Var is in the querytree, or if they all have
- * unknown parse location. (The former case is probably caller error,
+ * unknown parse location. (The former case is probably caller error,
* but we don't bother to distinguish it from the latter case.)
*
* Will recurse into sublinks. Also, may be invoked directly on a Query.
@@ -333,7 +333,7 @@ locate_var_of_level(Node *node, int levelsup)
{
locate_var_of_level_context context;
- context.var_location = -1; /* in case we find nothing */
+ context.var_location = -1; /* in case we find nothing */
context.sublevels_up = levelsup;
(void) query_or_expression_tree_walker(node,
@@ -352,7 +352,7 @@ locate_var_of_level_walker(Node *node,
return false;
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
if (var->varlevelsup == context->sublevels_up &&
var->location >= 0)
@@ -401,7 +401,7 @@ locate_var_of_relation(Node *node, int relid, int levelsup)
{
locate_var_of_relation_context context;
- context.var_location = -1; /* in case we find nothing */
+ context.var_location = -1; /* in case we find nothing */
context.relid = relid;
context.sublevels_up = levelsup;
@@ -421,7 +421,7 @@ locate_var_of_relation_walker(Node *node,
return false;
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
if (var->varno == context->relid &&
var->varlevelsup == context->sublevels_up &&
@@ -625,7 +625,7 @@ find_minimum_var_level_walker(Node *node,
* Upper-level vars (with varlevelsup > 0) are not included.
* (These probably represent errors too, but we don't complain.)
*
- * Returns list of nodes found. Note the nodes themselves are not
+ * Returns list of nodes found. Note the nodes themselves are not
* copied, only referenced.
*
* Does not examine subqueries, therefore must only be used after reduction