summaryrefslogtreecommitdiff
path: root/src/backend
diff options
context:
space:
mode:
authorBruce Momjian <bruce@momjian.us>2006-10-04 00:30:14 +0000
committerBruce Momjian <bruce@momjian.us>2006-10-04 00:30:14 +0000
commitf99a569a2ee3763b4ae174e81250c95ca0fdcbb6 (patch)
tree76e6371fe8b347c73d7020c0bc54b9fba519dc10 /src/backend
parent451e419e9852cdf9d7e7cefc09d5355abb3405e9 (diff)
downloadpostgresql-f99a569a2ee3763b4ae174e81250c95ca0fdcbb6.tar.gz
pgindent run for 8.2.
Diffstat (limited to 'src/backend')
-rw-r--r--src/backend/access/common/heaptuple.c6
-rw-r--r--src/backend/access/common/printtup.c6
-rw-r--r--src/backend/access/common/reloptions.c64
-rw-r--r--src/backend/access/gin/ginarrayproc.c67
-rw-r--r--src/backend/access/gin/ginbtree.c315
-rw-r--r--src/backend/access/gin/ginbulk.c240
-rw-r--r--src/backend/access/gin/gindatapage.c506
-rw-r--r--src/backend/access/gin/ginentrypage.c419
-rw-r--r--src/backend/access/gin/ginget.c448
-rw-r--r--src/backend/access/gin/gininsert.c249
-rw-r--r--src/backend/access/gin/ginscan.c235
-rw-r--r--src/backend/access/gin/ginutil.c160
-rw-r--r--src/backend/access/gin/ginvacuum.c591
-rw-r--r--src/backend/access/gin/ginxlog.c453
-rw-r--r--src/backend/access/gist/gist.c151
-rw-r--r--src/backend/access/gist/gistget.c19
-rw-r--r--src/backend/access/gist/gistproc.c117
-rw-r--r--src/backend/access/gist/gistscan.c6
-rw-r--r--src/backend/access/gist/gistsplit.c482
-rw-r--r--src/backend/access/gist/gistutil.c189
-rw-r--r--src/backend/access/gist/gistvacuum.c194
-rw-r--r--src/backend/access/gist/gistxlog.c150
-rw-r--r--src/backend/access/hash/hashfunc.c8
-rw-r--r--src/backend/access/hash/hashpage.c4
-rw-r--r--src/backend/access/heap/heapam.c86
-rw-r--r--src/backend/access/heap/tuptoaster.c4
-rw-r--r--src/backend/access/index/genam.c4
-rw-r--r--src/backend/access/index/indexam.c8
-rw-r--r--src/backend/access/nbtree/nbtinsert.c50
-rw-r--r--src/backend/access/nbtree/nbtpage.c22
-rw-r--r--src/backend/access/nbtree/nbtree.c152
-rw-r--r--src/backend/access/nbtree/nbtsearch.c53
-rw-r--r--src/backend/access/nbtree/nbtsort.c18
-rw-r--r--src/backend/access/nbtree/nbtutils.c73
-rw-r--r--src/backend/access/nbtree/nbtxlog.c44
-rw-r--r--src/backend/access/transam/clog.c4
-rw-r--r--src/backend/access/transam/multixact.c12
-rw-r--r--src/backend/access/transam/slru.c38
-rw-r--r--src/backend/access/transam/twophase.c6
-rw-r--r--src/backend/access/transam/varsup.c14
-rw-r--r--src/backend/access/transam/xact.c32
-rw-r--r--src/backend/access/transam/xlog.c244
-rw-r--r--src/backend/access/transam/xlogutils.c8
-rw-r--r--src/backend/bootstrap/bootstrap.c9
-rw-r--r--src/backend/catalog/aclchk.c109
-rw-r--r--src/backend/catalog/catalog.c4
-rw-r--r--src/backend/catalog/dependency.c21
-rw-r--r--src/backend/catalog/heap.c36
-rw-r--r--src/backend/catalog/index.c123
-rw-r--r--src/backend/catalog/namespace.c12
-rw-r--r--src/backend/catalog/pg_aggregate.c16
-rw-r--r--src/backend/catalog/pg_depend.c4
-rw-r--r--src/backend/catalog/pg_shdepend.c17
-rw-r--r--src/backend/catalog/pg_type.c42
-rw-r--r--src/backend/catalog/toasting.c4
-rw-r--r--src/backend/commands/aggregatecmds.c24
-rw-r--r--src/backend/commands/analyze.c17
-rw-r--r--src/backend/commands/cluster.c6
-rw-r--r--src/backend/commands/comment.c57
-rw-r--r--src/backend/commands/copy.c181
-rw-r--r--src/backend/commands/dbcommands.c76
-rw-r--r--src/backend/commands/define.c9
-rw-r--r--src/backend/commands/explain.c10
-rw-r--r--src/backend/commands/functioncmds.c8
-rw-r--r--src/backend/commands/indexcmds.c56
-rw-r--r--src/backend/commands/opclasscmds.c20
-rw-r--r--src/backend/commands/operatorcmds.c18
-rw-r--r--src/backend/commands/portalcmds.c6
-rw-r--r--src/backend/commands/prepare.c40
-rw-r--r--src/backend/commands/proclang.c10
-rw-r--r--src/backend/commands/sequence.c16
-rw-r--r--src/backend/commands/tablecmds.c170
-rw-r--r--src/backend/commands/tablespace.c10
-rw-r--r--src/backend/commands/trigger.c67
-rw-r--r--src/backend/commands/typecmds.c18
-rw-r--r--src/backend/commands/user.c32
-rw-r--r--src/backend/commands/vacuum.c108
-rw-r--r--src/backend/commands/vacuumlazy.c31
-rw-r--r--src/backend/commands/variable.c4
-rw-r--r--src/backend/commands/view.c4
-rw-r--r--src/backend/executor/execMain.c86
-rw-r--r--src/backend/executor/execProcnode.c4
-rw-r--r--src/backend/executor/execQual.c38
-rw-r--r--src/backend/executor/execTuples.c8
-rw-r--r--src/backend/executor/execUtils.c6
-rw-r--r--src/backend/executor/functions.c8
-rw-r--r--src/backend/executor/nodeAgg.c90
-rw-r--r--src/backend/executor/nodeAppend.c8
-rw-r--r--src/backend/executor/nodeBitmapHeapscan.c33
-rw-r--r--src/backend/executor/nodeBitmapIndexscan.c18
-rw-r--r--src/backend/executor/nodeHashjoin.c28
-rw-r--r--src/backend/executor/nodeIndexscan.c47
-rw-r--r--src/backend/executor/nodeMaterial.c24
-rw-r--r--src/backend/executor/nodeMergejoin.c14
-rw-r--r--src/backend/executor/nodeNestloop.c10
-rw-r--r--src/backend/executor/nodeSeqscan.c4
-rw-r--r--src/backend/executor/nodeSort.c13
-rw-r--r--src/backend/executor/nodeSubplan.c16
-rw-r--r--src/backend/executor/nodeSubqueryscan.c4
-rw-r--r--src/backend/executor/nodeTidscan.c29
-rw-r--r--src/backend/executor/nodeValuesscan.c56
-rw-r--r--src/backend/executor/spi.c18
-rw-r--r--src/backend/libpq/auth.c263
-rw-r--r--src/backend/libpq/be-secure.c21
-rw-r--r--src/backend/libpq/hba.c6
-rw-r--r--src/backend/libpq/pqcomm.c9
-rw-r--r--src/backend/main/main.c54
-rw-r--r--src/backend/nodes/copyfuncs.c8
-rw-r--r--src/backend/nodes/equalfuncs.c6
-rw-r--r--src/backend/nodes/makefuncs.c4
-rw-r--r--src/backend/nodes/params.c8
-rw-r--r--src/backend/optimizer/path/allpaths.c30
-rw-r--r--src/backend/optimizer/path/clausesel.c20
-rw-r--r--src/backend/optimizer/path/costsize.c53
-rw-r--r--src/backend/optimizer/path/indxpath.c119
-rw-r--r--src/backend/optimizer/path/joinpath.c26
-rw-r--r--src/backend/optimizer/path/joinrels.c28
-rw-r--r--src/backend/optimizer/path/orindxpath.c6
-rw-r--r--src/backend/optimizer/path/pathkeys.c8
-rw-r--r--src/backend/optimizer/path/tidpath.c6
-rw-r--r--src/backend/optimizer/plan/createplan.c69
-rw-r--r--src/backend/optimizer/plan/initsplan.c89
-rw-r--r--src/backend/optimizer/plan/planagg.c12
-rw-r--r--src/backend/optimizer/plan/planmain.c22
-rw-r--r--src/backend/optimizer/plan/planner.c26
-rw-r--r--src/backend/optimizer/plan/setrefs.c18
-rw-r--r--src/backend/optimizer/plan/subselect.c31
-rw-r--r--src/backend/optimizer/prep/prepjointree.c173
-rw-r--r--src/backend/optimizer/prep/prepqual.c10
-rw-r--r--src/backend/optimizer/prep/preptlist.c10
-rw-r--r--src/backend/optimizer/prep/prepunion.c83
-rw-r--r--src/backend/optimizer/util/clauses.c55
-rw-r--r--src/backend/optimizer/util/pathnode.c14
-rw-r--r--src/backend/optimizer/util/plancat.c32
-rw-r--r--src/backend/optimizer/util/predtest.c47
-rw-r--r--src/backend/optimizer/util/relnode.c11
-rw-r--r--src/backend/optimizer/util/restrictinfo.c9
-rw-r--r--src/backend/parser/analyze.c240
-rw-r--r--src/backend/parser/parse_clause.c43
-rw-r--r--src/backend/parser/parse_coerce.c28
-rw-r--r--src/backend/parser/parse_expr.c127
-rw-r--r--src/backend/parser/parse_func.c10
-rw-r--r--src/backend/parser/parse_node.c8
-rw-r--r--src/backend/parser/parse_oper.c21
-rw-r--r--src/backend/parser/parse_relation.c39
-rw-r--r--src/backend/parser/parse_target.c28
-rw-r--r--src/backend/parser/parse_type.c6
-rw-r--r--src/backend/parser/parser.c17
-rw-r--r--src/backend/port/win32/socket.c4
-rw-r--r--src/backend/port/win32/timer.c6
-rw-r--r--src/backend/port/win32_sema.c26
-rw-r--r--src/backend/postmaster/autovacuum.c42
-rw-r--r--src/backend/postmaster/bgwriter.c30
-rw-r--r--src/backend/postmaster/pgstat.c126
-rw-r--r--src/backend/postmaster/postmaster.c67
-rw-r--r--src/backend/rewrite/rewriteDefine.c31
-rw-r--r--src/backend/rewrite/rewriteHandler.c64
-rw-r--r--src/backend/rewrite/rewriteManip.c7
-rw-r--r--src/backend/rewrite/rewriteRemove.c6
-rw-r--r--src/backend/storage/buffer/bufmgr.c47
-rw-r--r--src/backend/storage/buffer/freelist.c14
-rw-r--r--src/backend/storage/file/fd.c10
-rw-r--r--src/backend/storage/freespace/freespace.c15
-rw-r--r--src/backend/storage/ipc/ipci.c10
-rw-r--r--src/backend/storage/ipc/procarray.c25
-rw-r--r--src/backend/storage/ipc/shmem.c62
-rw-r--r--src/backend/storage/lmgr/lmgr.c32
-rw-r--r--src/backend/storage/lmgr/lock.c61
-rw-r--r--src/backend/storage/lmgr/lwlock.c26
-rw-r--r--src/backend/storage/lmgr/proc.c74
-rw-r--r--src/backend/storage/lmgr/s_lock.c5
-rw-r--r--src/backend/storage/page/itemptr.c4
-rw-r--r--src/backend/storage/smgr/md.c6
-rw-r--r--src/backend/storage/smgr/smgr.c18
-rw-r--r--src/backend/tcop/fastpath.c16
-rw-r--r--src/backend/tcop/postgres.c112
-rw-r--r--src/backend/tcop/pquery.c35
-rw-r--r--src/backend/tcop/utility.c33
-rw-r--r--src/backend/utils/adt/arrayfuncs.c36
-rw-r--r--src/backend/utils/adt/datetime.c106
-rw-r--r--src/backend/utils/adt/domains.c30
-rw-r--r--src/backend/utils/adt/float.c88
-rw-r--r--src/backend/utils/adt/formatting.c20
-rw-r--r--src/backend/utils/adt/int.c20
-rw-r--r--src/backend/utils/adt/int8.c4
-rw-r--r--src/backend/utils/adt/like.c14
-rw-r--r--src/backend/utils/adt/lockfuncs.c10
-rw-r--r--src/backend/utils/adt/misc.c20
-rw-r--r--src/backend/utils/adt/network.c98
-rw-r--r--src/backend/utils/adt/numeric.c59
-rw-r--r--src/backend/utils/adt/oid.c10
-rw-r--r--src/backend/utils/adt/pg_locale.c25
-rw-r--r--src/backend/utils/adt/pgstatfuncs.c10
-rw-r--r--src/backend/utils/adt/regexp.c4
-rw-r--r--src/backend/utils/adt/ri_triggers.c18
-rw-r--r--src/backend/utils/adt/ruleutils.c140
-rw-r--r--src/backend/utils/adt/selfuncs.c191
-rw-r--r--src/backend/utils/adt/tid.c18
-rw-r--r--src/backend/utils/adt/timestamp.c75
-rw-r--r--src/backend/utils/adt/varchar.c10
-rw-r--r--src/backend/utils/adt/varlena.c16
-rw-r--r--src/backend/utils/cache/catcache.c6
-rw-r--r--src/backend/utils/cache/inval.c12
-rw-r--r--src/backend/utils/cache/lsyscache.c23
-rw-r--r--src/backend/utils/cache/relcache.c72
-rw-r--r--src/backend/utils/cache/syscache.c5
-rw-r--r--src/backend/utils/cache/typcache.c16
-rw-r--r--src/backend/utils/fmgr/dfmgr.c68
-rw-r--r--src/backend/utils/fmgr/fmgr.c4
-rw-r--r--src/backend/utils/hash/dynahash.c46
-rw-r--r--src/backend/utils/hash/hashfn.c6
-rw-r--r--src/backend/utils/init/globals.c4
-rw-r--r--src/backend/utils/init/miscinit.c8
-rw-r--r--src/backend/utils/init/postinit.c69
-rw-r--r--src/backend/utils/mb/conv.c6
-rw-r--r--src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c8
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c3
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c14
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c4
-rw-r--r--src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c32
-rw-r--r--src/backend/utils/mb/mbutils.c29
-rw-r--r--src/backend/utils/mb/wchar.c210
-rw-r--r--src/backend/utils/misc/guc.c145
-rw-r--r--src/backend/utils/misc/ps_status.c11
-rw-r--r--src/backend/utils/misc/tzparser.c109
-rw-r--r--src/backend/utils/mmgr/aset.c4
-rw-r--r--src/backend/utils/mmgr/portalmem.c28
-rw-r--r--src/backend/utils/sort/logtape.c24
-rw-r--r--src/backend/utils/sort/tuplesort.c161
-rw-r--r--src/backend/utils/sort/tuplestore.c3
-rw-r--r--src/backend/utils/time/tqual.c10
232 files changed, 6933 insertions, 6108 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index d3b06d8ae6..d0f4b3d382 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.110 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.111 2006/10/04 00:29:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1294,7 +1294,7 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract system attribute from virtual tuple");
- if (slot->tts_mintuple) /* internal error */
+ if (slot->tts_mintuple) /* internal error */
elog(ERROR, "cannot extract system attribute from minimal tuple");
return heap_getsysattr(tuple, attnum, tupleDesc, isnull);
}
@@ -1480,7 +1480,7 @@ slot_attisnull(TupleTableSlot *slot, int attnum)
{
if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract system attribute from virtual tuple");
- if (slot->tts_mintuple) /* internal error */
+ if (slot->tts_mintuple) /* internal error */
elog(ERROR, "cannot extract system attribute from minimal tuple");
return heap_attisnull(tuple, attnum);
}
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 82117e5fe6..ab2d8b4771 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.98 2006/08/12 02:52:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.99 2006/10/04 00:29:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -127,8 +127,8 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
}
/*
- * If we are supposed to emit row descriptions,
- * then send the tuple descriptor of the tuples.
+ * If we are supposed to emit row descriptions, then send the tuple
+ * descriptor of the tuples.
*/
if (myState->sendDescrip)
SendRowDescriptionMessage(typeinfo,
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 8506070f10..0bcda36c06 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.1 2006/07/03 22:45:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.2 2006/10/04 00:29:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,7 +59,7 @@ transformRelOptions(Datum oldOptions, List *defList,
/* Copy any oldOptions that aren't to be replaced */
if (oldOptions != (Datum) 0)
{
- ArrayType *array = DatumGetArrayTypeP(oldOptions);
+ ArrayType *array = DatumGetArrayTypeP(oldOptions);
Datum *oldoptions;
int noldoptions;
int i;
@@ -71,15 +71,15 @@ transformRelOptions(Datum oldOptions, List *defList,
for (i = 0; i < noldoptions; i++)
{
- text *oldoption = DatumGetTextP(oldoptions[i]);
- char *text_str = (char *) VARATT_DATA(oldoption);
- int text_len = VARATT_SIZE(oldoption) - VARHDRSZ;
+ text *oldoption = DatumGetTextP(oldoptions[i]);
+ char *text_str = (char *) VARATT_DATA(oldoption);
+ int text_len = VARATT_SIZE(oldoption) - VARHDRSZ;
/* Search for a match in defList */
foreach(cell, defList)
{
- DefElem *def = lfirst(cell);
- int kw_len = strlen(def->defname);
+ DefElem *def = lfirst(cell);
+ int kw_len = strlen(def->defname);
if (text_len > kw_len && text_str[kw_len] == '=' &&
pg_strncasecmp(text_str, def->defname, kw_len) == 0)
@@ -96,33 +96,33 @@ transformRelOptions(Datum oldOptions, List *defList,
}
/*
- * If CREATE/SET, add new options to array; if RESET, just check that
- * the user didn't say RESET (option=val). (Must do this because the
- * grammar doesn't enforce it.)
+ * If CREATE/SET, add new options to array; if RESET, just check that the
+ * user didn't say RESET (option=val). (Must do this because the grammar
+ * doesn't enforce it.)
*/
foreach(cell, defList)
{
- DefElem *def = lfirst(cell);
+ DefElem *def = lfirst(cell);
if (isReset)
{
if (def->arg != NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("RESET must not include values for parameters")));
+ errmsg("RESET must not include values for parameters")));
}
else
{
- text *t;
+ text *t;
const char *value;
- Size len;
+ Size len;
if (ignoreOids && pg_strcasecmp(def->defname, "oids") == 0)
continue;
/*
- * Flatten the DefElem into a text string like "name=arg".
- * If we have just "name", assume "name=true" is meant.
+ * Flatten the DefElem into a text string like "name=arg". If we
+ * have just "name", assume "name=true" is meant.
*/
if (def->arg != NULL)
value = defGetString(def);
@@ -163,10 +163,10 @@ transformRelOptions(Datum oldOptions, List *defList,
* containing the corresponding value, or NULL if the keyword does not appear.
*/
void
-parseRelOptions(Datum options, int numkeywords, const char * const *keywords,
+parseRelOptions(Datum options, int numkeywords, const char *const * keywords,
char **values, bool validate)
{
- ArrayType *array;
+ ArrayType *array;
Datum *optiondatums;
int noptions;
int i;
@@ -187,21 +187,21 @@ parseRelOptions(Datum options, int numkeywords, const char * const *keywords,
for (i = 0; i < noptions; i++)
{
- text *optiontext = DatumGetTextP(optiondatums[i]);
- char *text_str = (char *) VARATT_DATA(optiontext);
- int text_len = VARATT_SIZE(optiontext) - VARHDRSZ;
- int j;
+ text *optiontext = DatumGetTextP(optiondatums[i]);
+ char *text_str = (char *) VARATT_DATA(optiontext);
+ int text_len = VARATT_SIZE(optiontext) - VARHDRSZ;
+ int j;
/* Search for a match in keywords */
for (j = 0; j < numkeywords; j++)
{
- int kw_len = strlen(keywords[j]);
+ int kw_len = strlen(keywords[j]);
if (text_len > kw_len && text_str[kw_len] == '=' &&
pg_strncasecmp(text_str, keywords[j], kw_len) == 0)
{
- char *value;
- int value_len;
+ char *value;
+ int value_len;
if (values[j] && validate)
ereport(ERROR,
@@ -218,8 +218,8 @@ parseRelOptions(Datum options, int numkeywords, const char * const *keywords,
}
if (j >= numkeywords && validate)
{
- char *s;
- char *p;
+ char *s;
+ char *p;
s = DatumGetCString(DirectFunctionCall1(textout, optiondatums[i]));
p = strchr(s, '=');
@@ -240,17 +240,17 @@ bytea *
default_reloptions(Datum reloptions, bool validate,
int minFillfactor, int defaultFillfactor)
{
- static const char * const default_keywords[1] = { "fillfactor" };
- char *values[1];
- int32 fillfactor;
+ static const char *const default_keywords[1] = {"fillfactor"};
+ char *values[1];
+ int32 fillfactor;
StdRdOptions *result;
parseRelOptions(reloptions, 1, default_keywords, values, validate);
/*
* If no options, we can just return NULL rather than doing anything.
- * (defaultFillfactor is thus not used, but we require callers to pass
- * it anyway since we would need it if more options were added.)
+ * (defaultFillfactor is thus not used, but we require callers to pass it
+ * anyway since we would need it if more options were added.)
*/
if (values[0] == NULL)
return NULL;
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index 911cf62983..33a8b44a14 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* ginarrayproc.c
- * support functions for GIN's indexing of any array
+ * support functions for GIN's indexing of any array
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.5 2006/09/10 20:14:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.6 2006/10/04 00:29:47 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -23,64 +23,73 @@
#define GinContainedStrategy 3
#define GinEqualStrategy 4
-#define ARRAYCHECK(x) do { \
+#define ARRAYCHECK(x) do { \
if ( ARR_HASNULL(x) ) \
- ereport(ERROR, \
- (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), \
- errmsg("array must not contain nulls"))); \
-} while(0)
+ ereport(ERROR, \
+ (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), \
+ errmsg("array must not contain nulls"))); \
+} while(0)
/*
* Function used as extractValue and extractQuery both
*/
Datum
-ginarrayextract(PG_FUNCTION_ARGS) {
- ArrayType *array;
- uint32 *nentries = (uint32*)PG_GETARG_POINTER(1);
- Datum *entries = NULL;
- int16 elmlen;
- bool elmbyval;
- char elmalign;
+ginarrayextract(PG_FUNCTION_ARGS)
+{
+ ArrayType *array;
+ uint32 *nentries = (uint32 *) PG_GETARG_POINTER(1);
+ Datum *entries = NULL;
+ int16 elmlen;
+ bool elmbyval;
+ char elmalign;
- /* we should guarantee that array will not be destroyed during all operation */
+ /*
+ * we should guarantee that array will not be destroyed during all
+ * operation
+ */
array = PG_GETARG_ARRAYTYPE_P_COPY(0);
ARRAYCHECK(array);
get_typlenbyvalalign(ARR_ELEMTYPE(array),
- &elmlen, &elmbyval, &elmalign);
+ &elmlen, &elmbyval, &elmalign);
deconstruct_array(array,
- ARR_ELEMTYPE(array),
- elmlen, elmbyval, elmalign,
- &entries, NULL, (int*)nentries);
+ ARR_ELEMTYPE(array),
+ elmlen, elmbyval, elmalign,
+ &entries, NULL, (int *) nentries);
/* we should not free array, entries[i] points into it */
PG_RETURN_POINTER(entries);
}
Datum
-ginarrayconsistent(PG_FUNCTION_ARGS) {
- bool *check = (bool*)PG_GETARG_POINTER(0);
- StrategyNumber strategy = PG_GETARG_UINT16(1);
- ArrayType *query = PG_GETARG_ARRAYTYPE_P(2);
- int res, i, nentries;
+ginarrayconsistent(PG_FUNCTION_ARGS)
+{
+ bool *check = (bool *) PG_GETARG_POINTER(0);
+ StrategyNumber strategy = PG_GETARG_UINT16(1);
+ ArrayType *query = PG_GETARG_ARRAYTYPE_P(2);
+ int res,
+ i,
+ nentries;
/* ARRAYCHECK was already done by previous ginarrayextract call */
- switch( strategy ) {
+ switch (strategy)
+ {
case GinOverlapStrategy:
case GinContainedStrategy:
- /* at least one element in check[] is true, so result = true */
+ /* at least one element in check[] is true, so result = true */
res = TRUE;
break;
case GinContainsStrategy:
case GinEqualStrategy:
- nentries=ArrayGetNItems(ARR_NDIM(query), ARR_DIMS(query));
+ nentries = ArrayGetNItems(ARR_NDIM(query), ARR_DIMS(query));
res = TRUE;
- for(i=0;i<nentries;i++)
- if ( !check[i] ) {
+ for (i = 0; i < nentries; i++)
+ if (!check[i])
+ {
res = FALSE;
break;
}
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index bc3e02973f..fc44a5a0c7 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* ginbtree.c
- * page utilities routines for the postgres inverted index access method.
+ * page utilities routines for the postgres inverted index access method.
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginbtree.c,v 1.4 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginbtree.c,v 1.5 2006/10/04 00:29:47 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -20,24 +20,29 @@
* Locks buffer by needed method for search.
*/
static int
-ginTraverseLock(Buffer buffer, bool searchMode) {
- Page page;
- int access=GIN_SHARE;
+ginTraverseLock(Buffer buffer, bool searchMode)
+{
+ Page page;
+ int access = GIN_SHARE;
LockBuffer(buffer, GIN_SHARE);
- page = BufferGetPage( buffer );
- if ( GinPageIsLeaf(page) ) {
- if ( searchMode == FALSE ) {
+ page = BufferGetPage(buffer);
+ if (GinPageIsLeaf(page))
+ {
+ if (searchMode == FALSE)
+ {
/* we should relock our page */
LockBuffer(buffer, GIN_UNLOCK);
LockBuffer(buffer, GIN_EXCLUSIVE);
/* But root can become non-leaf during relock */
- if ( !GinPageIsLeaf(page) ) {
- /* resore old lock type (very rare) */
+ if (!GinPageIsLeaf(page))
+ {
+ /* resore old lock type (very rare) */
LockBuffer(buffer, GIN_UNLOCK);
LockBuffer(buffer, GIN_SHARE);
- } else
+ }
+ else
access = GIN_EXCLUSIVE;
}
}
@@ -45,9 +50,10 @@ ginTraverseLock(Buffer buffer, bool searchMode) {
return access;
}
-GinBtreeStack*
-ginPrepareFindLeafPage(GinBtree btree, BlockNumber blkno) {
- GinBtreeStack *stack = (GinBtreeStack*)palloc(sizeof(GinBtreeStack));
+GinBtreeStack *
+ginPrepareFindLeafPage(GinBtree btree, BlockNumber blkno)
+{
+ GinBtreeStack *stack = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
stack->blkno = blkno;
stack->buffer = ReadBuffer(btree->index, stack->blkno);
@@ -62,63 +68,73 @@ ginPrepareFindLeafPage(GinBtree btree, BlockNumber blkno) {
/*
* Locates leaf page contained tuple
*/
-GinBtreeStack*
-ginFindLeafPage(GinBtree btree, GinBtreeStack *stack) {
- bool isfirst=TRUE;
+GinBtreeStack *
+ginFindLeafPage(GinBtree btree, GinBtreeStack *stack)
+{
+ bool isfirst = TRUE;
BlockNumber rootBlkno;
- if ( !stack )
+ if (!stack)
stack = ginPrepareFindLeafPage(btree, GIN_ROOT_BLKNO);
rootBlkno = stack->blkno;
- for(;;) {
- Page page;
+ for (;;)
+ {
+ Page page;
BlockNumber child;
- int access=GIN_SHARE;
+ int access = GIN_SHARE;
stack->off = InvalidOffsetNumber;
-
- page = BufferGetPage( stack->buffer );
- if ( isfirst ) {
- if ( GinPageIsLeaf(page) && !btree->searchMode )
+ page = BufferGetPage(stack->buffer);
+
+ if (isfirst)
+ {
+ if (GinPageIsLeaf(page) && !btree->searchMode)
access = GIN_EXCLUSIVE;
isfirst = FALSE;
- } else
+ }
+ else
access = ginTraverseLock(stack->buffer, btree->searchMode);
- /* ok, page is correctly locked, we should check to move right ..,
- root never has a right link, so small optimization */
- while( btree->fullScan==FALSE && stack->blkno != rootBlkno && btree->isMoveRight(btree, page) ) {
+ /*
+ * ok, page is correctly locked, we should check to move right ..,
+ * root never has a right link, so small optimization
+ */
+ while (btree->fullScan == FALSE && stack->blkno != rootBlkno && btree->isMoveRight(btree, page))
+ {
BlockNumber rightlink = GinPageGetOpaque(page)->rightlink;
- if ( rightlink==InvalidBlockNumber )
+ if (rightlink == InvalidBlockNumber)
/* rightmost page */
break;
stack->blkno = rightlink;
LockBuffer(stack->buffer, GIN_UNLOCK);
stack->buffer = ReleaseAndReadBuffer(stack->buffer, btree->index, stack->blkno);
- LockBuffer(stack->buffer, access);
- page = BufferGetPage( stack->buffer );
+ LockBuffer(stack->buffer, access);
+ page = BufferGetPage(stack->buffer);
}
- if ( GinPageIsLeaf(page) ) /* we found, return locked page */
+ if (GinPageIsLeaf(page)) /* we found, return locked page */
return stack;
/* now we have correct buffer, try to find child */
child = btree->findChildPage(btree, stack);
LockBuffer(stack->buffer, GIN_UNLOCK);
- Assert( child != InvalidBlockNumber );
- Assert( stack->blkno != child );
+ Assert(child != InvalidBlockNumber);
+ Assert(stack->blkno != child);
- if ( btree->searchMode ) {
+ if (btree->searchMode)
+ {
/* in search mode we may forget path to leaf */
stack->blkno = child;
- stack->buffer = ReleaseAndReadBuffer( stack->buffer, btree->index, stack->blkno );
- } else {
- GinBtreeStack *ptr = (GinBtreeStack*)palloc(sizeof(GinBtreeStack));
+ stack->buffer = ReleaseAndReadBuffer(stack->buffer, btree->index, stack->blkno);
+ }
+ else
+ {
+ GinBtreeStack *ptr = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
ptr->parent = stack;
stack = ptr;
@@ -133,93 +149,110 @@ ginFindLeafPage(GinBtree btree, GinBtreeStack *stack) {
}
void
-freeGinBtreeStack( GinBtreeStack *stack ) {
- while(stack) {
- GinBtreeStack *tmp = stack->parent;
- if ( stack->buffer != InvalidBuffer )
+freeGinBtreeStack(GinBtreeStack *stack)
+{
+ while (stack)
+ {
+ GinBtreeStack *tmp = stack->parent;
+
+ if (stack->buffer != InvalidBuffer)
ReleaseBuffer(stack->buffer);
- pfree( stack );
+ pfree(stack);
stack = tmp;
}
}
/*
- * Try to find parent for current stack position, returns correct
+ * Try to find parent for current stack position, returns correct
* parent and child's offset in stack->parent.
* Function should never release root page to prevent conflicts
* with vacuum process
*/
void
-findParents( GinBtree btree, GinBtreeStack *stack,
- BlockNumber rootBlkno) {
-
- Page page;
- Buffer buffer;
- BlockNumber blkno, leftmostBlkno;
+findParents(GinBtree btree, GinBtreeStack *stack,
+ BlockNumber rootBlkno)
+{
+
+ Page page;
+ Buffer buffer;
+ BlockNumber blkno,
+ leftmostBlkno;
OffsetNumber offset;
- GinBtreeStack *root = stack->parent;
- GinBtreeStack *ptr;
+ GinBtreeStack *root = stack->parent;
+ GinBtreeStack *ptr;
- if ( !root ) {
+ if (!root)
+ {
/* XLog mode... */
- root = (GinBtreeStack*)palloc(sizeof(GinBtreeStack));
+ root = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
root->blkno = rootBlkno;
root->buffer = ReadBuffer(btree->index, rootBlkno);
LockBuffer(root->buffer, GIN_EXCLUSIVE);
root->parent = NULL;
- } else {
- /* find root, we should not release root page until update is finished!! */
- while( root->parent ) {
- ReleaseBuffer( root->buffer );
+ }
+ else
+ {
+ /*
+ * find root, we should not release root page until update is
+ * finished!!
+ */
+ while (root->parent)
+ {
+ ReleaseBuffer(root->buffer);
root = root->parent;
}
- Assert( root->blkno == rootBlkno );
- Assert( BufferGetBlockNumber(root->buffer) == rootBlkno );
+ Assert(root->blkno == rootBlkno);
+ Assert(BufferGetBlockNumber(root->buffer) == rootBlkno);
LockBuffer(root->buffer, GIN_EXCLUSIVE);
}
root->off = InvalidOffsetNumber;
page = BufferGetPage(root->buffer);
- Assert( !GinPageIsLeaf(page) );
+ Assert(!GinPageIsLeaf(page));
/* check trivial case */
- if ( (root->off = btree->findChildPtr(btree, page, stack->blkno, InvalidOffsetNumber)) != InvalidOffsetNumber ) {
+ if ((root->off = btree->findChildPtr(btree, page, stack->blkno, InvalidOffsetNumber)) != InvalidOffsetNumber)
+ {
stack->parent = root;
return;
}
leftmostBlkno = blkno = btree->getLeftMostPage(btree, page);
- LockBuffer(root->buffer, GIN_UNLOCK );
- Assert( blkno!=InvalidBlockNumber );
+ LockBuffer(root->buffer, GIN_UNLOCK);
+ Assert(blkno != InvalidBlockNumber);
- for(;;) {
+ for (;;)
+ {
buffer = ReadBuffer(btree->index, blkno);
LockBuffer(buffer, GIN_EXCLUSIVE);
page = BufferGetPage(buffer);
- if ( GinPageIsLeaf(page) )
+ if (GinPageIsLeaf(page))
elog(ERROR, "Lost path");
leftmostBlkno = btree->getLeftMostPage(btree, page);
- while( (offset = btree->findChildPtr(btree, page, stack->blkno, InvalidOffsetNumber))==InvalidOffsetNumber ) {
+ while ((offset = btree->findChildPtr(btree, page, stack->blkno, InvalidOffsetNumber)) == InvalidOffsetNumber)
+ {
blkno = GinPageGetOpaque(page)->rightlink;
- LockBuffer(buffer,GIN_UNLOCK);
+ LockBuffer(buffer, GIN_UNLOCK);
ReleaseBuffer(buffer);
- if ( blkno == InvalidBlockNumber )
+ if (blkno == InvalidBlockNumber)
break;
buffer = ReadBuffer(btree->index, blkno);
LockBuffer(buffer, GIN_EXCLUSIVE);
page = BufferGetPage(buffer);
}
- if ( blkno != InvalidBlockNumber ) {
- ptr = (GinBtreeStack*)palloc(sizeof(GinBtreeStack));
+ if (blkno != InvalidBlockNumber)
+ {
+ ptr = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
ptr->blkno = blkno;
ptr->buffer = buffer;
- ptr->parent = root; /* it's may be wrong, but in next call we will correct */
+ ptr->parent = root; /* it's may be wrong, but in next call we will
+ * correct */
ptr->off = offset;
stack->parent = ptr;
return;
@@ -233,79 +266,94 @@ findParents( GinBtree btree, GinBtreeStack *stack,
* Insert value (stored in GinBtree) to tree descibed by stack
*/
void
-ginInsertValue(GinBtree btree, GinBtreeStack *stack) {
- GinBtreeStack *parent = stack;
- BlockNumber rootBlkno = InvalidBuffer;
- Page page, rpage, lpage;
+ginInsertValue(GinBtree btree, GinBtreeStack *stack)
+{
+ GinBtreeStack *parent = stack;
+ BlockNumber rootBlkno = InvalidBuffer;
+ Page page,
+ rpage,
+ lpage;
/* remember root BlockNumber */
- while( parent ) {
+ while (parent)
+ {
rootBlkno = parent->blkno;
parent = parent->parent;
}
- while( stack ) {
+ while (stack)
+ {
XLogRecData *rdata;
- BlockNumber savedRightLink;
+ BlockNumber savedRightLink;
- page = BufferGetPage( stack->buffer );
+ page = BufferGetPage(stack->buffer);
savedRightLink = GinPageGetOpaque(page)->rightlink;
- if ( btree->isEnoughSpace( btree, stack->buffer, stack->off ) ) {
+ if (btree->isEnoughSpace(btree, stack->buffer, stack->off))
+ {
START_CRIT_SECTION();
- btree->placeToPage( btree, stack->buffer, stack->off, &rdata );
+ btree->placeToPage(btree, stack->buffer, stack->off, &rdata);
- if (!btree->index->rd_istemp) {
- XLogRecPtr recptr;
+ if (!btree->index->rd_istemp)
+ {
+ XLogRecPtr recptr;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_INSERT, rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
- }
+ }
- MarkBufferDirty( stack->buffer );
+ MarkBufferDirty(stack->buffer);
UnlockReleaseBuffer(stack->buffer);
END_CRIT_SECTION();
freeGinBtreeStack(stack->parent);
return;
- } else {
- Buffer rbuffer = GinNewBuffer(btree->index);
- Page newlpage;
+ }
+ else
+ {
+ Buffer rbuffer = GinNewBuffer(btree->index);
+ Page newlpage;
- /* newlpage is a pointer to memory page, it does'nt assosiates with buffer,
- stack->buffer shoud be untouched */
- newlpage = btree->splitPage( btree, stack->buffer, rbuffer, stack->off, &rdata );
+ /*
+ * newlpage is a pointer to memory page, it does'nt assosiates
+ * with buffer, stack->buffer shoud be untouched
+ */
+ newlpage = btree->splitPage(btree, stack->buffer, rbuffer, stack->off, &rdata);
- ((ginxlogSplit*)(rdata->data))->rootBlkno = rootBlkno;
+ ((ginxlogSplit *) (rdata->data))->rootBlkno = rootBlkno;
parent = stack->parent;
- if ( parent == NULL ) {
- /* split root, so we need to allocate new left page and
- place pointer on root to left and right page */
- Buffer lbuffer = GinNewBuffer(btree->index);
+ if (parent == NULL)
+ {
+ /*
+ * split root, so we need to allocate new left page and place
+ * pointer on root to left and right page
+ */
+ Buffer lbuffer = GinNewBuffer(btree->index);
- ((ginxlogSplit*)(rdata->data))->isRootSplit = TRUE;
- ((ginxlogSplit*)(rdata->data))->rrlink = InvalidBlockNumber;
+ ((ginxlogSplit *) (rdata->data))->isRootSplit = TRUE;
+ ((ginxlogSplit *) (rdata->data))->rrlink = InvalidBlockNumber;
- page = BufferGetPage( stack->buffer );
- lpage = BufferGetPage( lbuffer );
- rpage = BufferGetPage( rbuffer );
+ page = BufferGetPage(stack->buffer);
+ lpage = BufferGetPage(lbuffer);
+ rpage = BufferGetPage(rbuffer);
GinPageGetOpaque(rpage)->rightlink = InvalidBlockNumber;
GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
- ((ginxlogSplit*)(rdata->data))->lblkno = BufferGetBlockNumber(lbuffer);
+ ((ginxlogSplit *) (rdata->data))->lblkno = BufferGetBlockNumber(lbuffer);
START_CRIT_SECTION();
- GinInitBuffer( stack->buffer, GinPageGetOpaque(newlpage)->flags & ~GIN_LEAF );
- PageRestoreTempPage( newlpage, lpage );
- btree->fillRoot( btree, stack->buffer, lbuffer, rbuffer );
- if (!btree->index->rd_istemp) {
- XLogRecPtr recptr;
+ GinInitBuffer(stack->buffer, GinPageGetOpaque(newlpage)->flags & ~GIN_LEAF);
+ PageRestoreTempPage(newlpage, lpage);
+ btree->fillRoot(btree, stack->buffer, lbuffer, rbuffer);
+ if (!btree->index->rd_istemp)
+ {
+ XLogRecPtr recptr;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_SPLIT, rdata);
PageSetLSN(page, recptr);
@@ -324,23 +372,26 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack) {
UnlockReleaseBuffer(stack->buffer);
END_CRIT_SECTION();
-
+
return;
- } else {
+ }
+ else
+ {
/* split non-root page */
- ((ginxlogSplit*)(rdata->data))->isRootSplit = FALSE;
- ((ginxlogSplit*)(rdata->data))->rrlink = savedRightLink;
+ ((ginxlogSplit *) (rdata->data))->isRootSplit = FALSE;
+ ((ginxlogSplit *) (rdata->data))->rrlink = savedRightLink;
- lpage = BufferGetPage( stack->buffer );
- rpage = BufferGetPage( rbuffer );
+ lpage = BufferGetPage(stack->buffer);
+ rpage = BufferGetPage(rbuffer);
GinPageGetOpaque(rpage)->rightlink = savedRightLink;
GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer);
START_CRIT_SECTION();
- PageRestoreTempPage( newlpage, lpage );
- if (!btree->index->rd_istemp) {
- XLogRecPtr recptr;
+ PageRestoreTempPage(newlpage, lpage);
+ if (!btree->index->rd_istemp)
+ {
+ XLogRecPtr recptr;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_SPLIT, rdata);
PageSetLSN(lpage, recptr);
@@ -350,7 +401,7 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack) {
}
MarkBufferDirty(rbuffer);
UnlockReleaseBuffer(rbuffer);
- MarkBufferDirty( stack->buffer );
+ MarkBufferDirty(stack->buffer);
END_CRIT_SECTION();
}
}
@@ -361,31 +412,33 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack) {
LockBuffer(parent->buffer, GIN_EXCLUSIVE);
/* move right if it's needed */
- page = BufferGetPage( parent->buffer );
- while( (parent->off=btree->findChildPtr(btree, page, stack->blkno, parent->off)) == InvalidOffsetNumber ) {
+ page = BufferGetPage(parent->buffer);
+ while ((parent->off = btree->findChildPtr(btree, page, stack->blkno, parent->off)) == InvalidOffsetNumber)
+ {
BlockNumber rightlink = GinPageGetOpaque(page)->rightlink;
LockBuffer(parent->buffer, GIN_UNLOCK);
- if ( rightlink==InvalidBlockNumber ) {
- /* rightmost page, but we don't find parent, we should
- use plain search... */
+ if (rightlink == InvalidBlockNumber)
+ {
+ /*
+ * rightmost page, but we don't find parent, we should use
+ * plain search...
+ */
findParents(btree, stack, rootBlkno);
- parent=stack->parent;
- page = BufferGetPage( parent->buffer );
+ parent = stack->parent;
+ page = BufferGetPage(parent->buffer);
break;
}
parent->blkno = rightlink;
parent->buffer = ReleaseAndReadBuffer(parent->buffer, btree->index, parent->blkno);
- LockBuffer(parent->buffer, GIN_EXCLUSIVE);
- page = BufferGetPage( parent->buffer );
+ LockBuffer(parent->buffer, GIN_EXCLUSIVE);
+ page = BufferGetPage(parent->buffer);
}
UnlockReleaseBuffer(stack->buffer);
- pfree( stack );
+ pfree(stack);
stack = parent;
}
}
-
-
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index 5bcd91af14..3db9e332a7 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* ginbulk.c
- * routines for fast build of inverted index
+ * routines for fast build of inverted index
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginbulk.c,v 1.5 2006/08/29 14:05:44 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginbulk.c,v 1.6 2006/10/04 00:29:47 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,8 @@
#define DEF_NPTR 4
void
-ginInitBA(BuildAccumulator *accum) {
+ginInitBA(BuildAccumulator *accum)
+{
accum->maxdepth = 1;
accum->stackpos = 0;
accum->entries = NULL;
@@ -31,11 +32,13 @@ ginInitBA(BuildAccumulator *accum) {
accum->entryallocator = NULL;
}
-static EntryAccumulator*
-EAAllocate( BuildAccumulator *accum ) {
- if ( accum->entryallocator == NULL || accum->length>=DEF_NENTRY ) {
- accum->entryallocator = palloc(sizeof(EntryAccumulator)*DEF_NENTRY);
- accum->allocatedMemory += sizeof(EntryAccumulator)*DEF_NENTRY;
+static EntryAccumulator *
+EAAllocate(BuildAccumulator *accum)
+{
+ if (accum->entryallocator == NULL || accum->length >= DEF_NENTRY)
+ {
+ accum->entryallocator = palloc(sizeof(EntryAccumulator) * DEF_NENTRY);
+ accum->allocatedMemory += sizeof(EntryAccumulator) * DEF_NENTRY;
accum->length = 0;
}
@@ -48,24 +51,27 @@ EAAllocate( BuildAccumulator *accum ) {
* item pointer are ordered
*/
static void
-ginInsertData(BuildAccumulator *accum, EntryAccumulator *entry, ItemPointer heapptr) {
- if ( entry->number >= entry->length ) {
+ginInsertData(BuildAccumulator *accum, EntryAccumulator *entry, ItemPointer heapptr)
+{
+ if (entry->number >= entry->length)
+ {
accum->allocatedMemory += sizeof(ItemPointerData) * entry->length;
entry->length *= 2;
- entry->list = (ItemPointerData*)repalloc(entry->list,
- sizeof(ItemPointerData)*entry->length);
+ entry->list = (ItemPointerData *) repalloc(entry->list,
+ sizeof(ItemPointerData) * entry->length);
}
- if ( entry->shouldSort==FALSE ) {
- int res = compareItemPointers( entry->list + entry->number - 1, heapptr );
+ if (entry->shouldSort == FALSE)
+ {
+ int res = compareItemPointers(entry->list + entry->number - 1, heapptr);
- Assert( res != 0 );
+ Assert(res != 0);
- if ( res > 0 )
- entry->shouldSort=TRUE;
+ if (res > 0)
+ entry->shouldSort = TRUE;
}
- entry->list[ entry->number ] = *heapptr;
+ entry->list[entry->number] = *heapptr;
entry->number++;
}
@@ -74,7 +80,8 @@ ginInsertData(BuildAccumulator *accum, EntryAccumulator *entry, ItemPointer heap
* to avoid computing the datum size twice.
*/
static Datum
-getDatumCopy(BuildAccumulator *accum, Datum value) {
+getDatumCopy(BuildAccumulator *accum, Datum value)
+{
Form_pg_attribute *att = accum->ginstate->tupdesc->attrs;
Datum res;
@@ -100,51 +107,58 @@ getDatumCopy(BuildAccumulator *accum, Datum value) {
* Find/store one entry from indexed value.
*/
static void
-ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, Datum entry) {
- EntryAccumulator *ea = accum->entries, *pea = NULL;
- int res = 0;
- uint32 depth = 1;
-
- while( ea ) {
+ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, Datum entry)
+{
+ EntryAccumulator *ea = accum->entries,
+ *pea = NULL;
+ int res = 0;
+ uint32 depth = 1;
+
+ while (ea)
+ {
res = compareEntries(accum->ginstate, entry, ea->value);
- if ( res == 0 )
- break; /* found */
- else {
+ if (res == 0)
+ break; /* found */
+ else
+ {
pea = ea;
- if ( res < 0 )
+ if (res < 0)
ea = ea->left;
else
ea = ea->right;
}
depth++;
}
-
- if ( depth > accum->maxdepth )
+
+ if (depth > accum->maxdepth)
accum->maxdepth = depth;
- if ( ea == NULL ) {
+ if (ea == NULL)
+ {
ea = EAAllocate(accum);
ea->left = ea->right = NULL;
- ea->value = getDatumCopy(accum, entry);
+ ea->value = getDatumCopy(accum, entry);
ea->length = DEF_NPTR;
ea->number = 1;
ea->shouldSort = FALSE;
- ea->list = (ItemPointerData*)palloc(sizeof(ItemPointerData)*DEF_NPTR);
+ ea->list = (ItemPointerData *) palloc(sizeof(ItemPointerData) * DEF_NPTR);
ea->list[0] = *heapptr;
- accum->allocatedMemory += sizeof(ItemPointerData)*DEF_NPTR;
+ accum->allocatedMemory += sizeof(ItemPointerData) * DEF_NPTR;
- if ( pea == NULL )
+ if (pea == NULL)
accum->entries = ea;
- else {
- Assert( res != 0 );
- if ( res < 0 )
+ else
+ {
+ Assert(res != 0);
+ if (res < 0)
pea->left = ea;
else
pea->right = ea;
}
- } else
- ginInsertData( accum, ea, heapptr );
+ }
+ else
+ ginInsertData(accum, ea, heapptr);
}
/*
@@ -152,22 +166,23 @@ ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, Datum entry) {
* then calls itself for each parts
*/
static void
-ginChooseElem(BuildAccumulator *accum, ItemPointer heapptr, Datum *entries, uint32 nentry,
- uint32 low, uint32 high, uint32 offset) {
- uint32 pos;
- uint32 middle = (low+high)>>1;
-
- pos = (low+middle)>>1;
- if ( low!=middle && pos>=offset && pos-offset < nentry )
- ginInsertEntry( accum, heapptr, entries[ pos-offset ]);
- pos = (high+middle+1)>>1;
- if ( middle+1 != high && pos>=offset && pos-offset < nentry )
- ginInsertEntry( accum, heapptr, entries[ pos-offset ]);
-
- if ( low!=middle )
- ginChooseElem(accum, heapptr, entries, nentry, low, middle, offset );
- if ( high!=middle+1 )
- ginChooseElem(accum, heapptr, entries, nentry, middle+1, high, offset );
+ginChooseElem(BuildAccumulator *accum, ItemPointer heapptr, Datum *entries, uint32 nentry,
+ uint32 low, uint32 high, uint32 offset)
+{
+ uint32 pos;
+ uint32 middle = (low + high) >> 1;
+
+ pos = (low + middle) >> 1;
+ if (low != middle && pos >= offset && pos - offset < nentry)
+ ginInsertEntry(accum, heapptr, entries[pos - offset]);
+ pos = (high + middle + 1) >> 1;
+ if (middle + 1 != high && pos >= offset && pos - offset < nentry)
+ ginInsertEntry(accum, heapptr, entries[pos - offset]);
+
+ if (low != middle)
+ ginChooseElem(accum, heapptr, entries, nentry, low, middle, offset);
+ if (high != middle + 1)
+ ginChooseElem(accum, heapptr, entries, nentry, middle + 1, high, offset);
}
/*
@@ -176,56 +191,71 @@ ginChooseElem(BuildAccumulator *accum, ItemPointer heapptr, Datum *entries, uint
* next middle on left part and middle of right part.
*/
void
-ginInsertRecordBA( BuildAccumulator *accum, ItemPointer heapptr, Datum *entries, uint32 nentry ) {
- uint32 i, nbit=0, offset;
+ginInsertRecordBA(BuildAccumulator *accum, ItemPointer heapptr, Datum *entries, uint32 nentry)
+{
+ uint32 i,
+ nbit = 0,
+ offset;
- if (nentry==0)
+ if (nentry == 0)
return;
- i=nentry-1;
- for(;i>0;i>>=1) nbit++;
+ i = nentry - 1;
+ for (; i > 0; i >>= 1)
+ nbit++;
- nbit = 1<<nbit;
- offset = (nbit-nentry)/2;
+ nbit = 1 << nbit;
+ offset = (nbit - nentry) / 2;
- ginInsertEntry( accum, heapptr, entries[ (nbit>>1)-offset ]);
+ ginInsertEntry(accum, heapptr, entries[(nbit >> 1) - offset]);
ginChooseElem(accum, heapptr, entries, nentry, 0, nbit, offset);
}
-static int
-qsortCompareItemPointers( const void *a, const void *b ) {
- int res = compareItemPointers( (ItemPointer)a, (ItemPointer)b );
- Assert( res!=0 );
+static int
+qsortCompareItemPointers(const void *a, const void *b)
+{
+ int res = compareItemPointers((ItemPointer) a, (ItemPointer) b);
+
+ Assert(res != 0);
return res;
}
/*
- * walk on binary tree and returns ordered nodes
- */
-static EntryAccumulator*
-walkTree( BuildAccumulator *accum ) {
- EntryAccumulator *entry = accum->stack[ accum->stackpos ];
+ * walk on binary tree and returns ordered nodes
+ */
+static EntryAccumulator *
+walkTree(BuildAccumulator *accum)
+{
+ EntryAccumulator *entry = accum->stack[accum->stackpos];
- if ( entry->list != NULL ) {
+ if (entry->list != NULL)
+ {
/* return entry itself: we already was at left sublink */
return entry;
- } else if ( entry->right && entry->right != accum->stack[ accum->stackpos+1 ] ) {
+ }
+ else if (entry->right && entry->right != accum->stack[accum->stackpos + 1])
+ {
/* go on right sublink */
accum->stackpos++;
entry = entry->right;
/* find most-left value */
- for(;;) {
- accum->stack[ accum->stackpos ] = entry;
- if ( entry->left ) {
+ for (;;)
+ {
+ accum->stack[accum->stackpos] = entry;
+ if (entry->left)
+ {
accum->stackpos++;
entry = entry->left;
- } else
+ }
+ else
break;
}
- } else {
+ }
+ else
+ {
/* we already return all left subtree, itself and right subtree */
- if ( accum->stackpos == 0 )
+ if (accum->stackpos == 0)
return 0;
accum->stackpos--;
return walkTree(accum);
@@ -234,47 +264,53 @@ walkTree( BuildAccumulator *accum ) {
return entry;
}
-ItemPointerData*
-ginGetEntry(BuildAccumulator *accum, Datum *value, uint32 *n) {
- EntryAccumulator *entry;
+ItemPointerData *
+ginGetEntry(BuildAccumulator *accum, Datum *value, uint32 *n)
+{
+ EntryAccumulator *entry;
ItemPointerData *list;
- if ( accum->stack == NULL ) {
+ if (accum->stack == NULL)
+ {
/* first call */
- accum->stack = palloc0(sizeof(EntryAccumulator*)*(accum->maxdepth+1));
+ accum->stack = palloc0(sizeof(EntryAccumulator *) * (accum->maxdepth + 1));
entry = accum->entries;
- if ( entry == NULL )
+ if (entry == NULL)
return NULL;
/* find most-left value */
- for(;;) {
- accum->stack[ accum->stackpos ] = entry;
- if ( entry->left ) {
+ for (;;)
+ {
+ accum->stack[accum->stackpos] = entry;
+ if (entry->left)
+ {
accum->stackpos++;
entry = entry->left;
- } else
+ }
+ else
break;
}
- } else {
- pfree( accum->stack[ accum->stackpos ]->list );
- accum->stack[ accum->stackpos ]->list = NULL;
- entry = walkTree( accum );
+ }
+ else
+ {
+ pfree(accum->stack[accum->stackpos]->list);
+ accum->stack[accum->stackpos]->list = NULL;
+ entry = walkTree(accum);
}
- if ( entry == NULL )
+ if (entry == NULL)
return NULL;
- *n = entry->number;
- *value = entry->value;
- list = entry->list;
+ *n = entry->number;
+ *value = entry->value;
+ list = entry->list;
Assert(list != NULL);
- if ( entry->shouldSort && entry->number > 1 )
+ if (entry->shouldSort && entry->number > 1)
qsort(list, *n, sizeof(ItemPointerData), qsortCompareItemPointers);
return list;
}
-
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 5789dc18f9..94b07f3ed9 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* gindatapage.c
- * page utilities routines for the postgres inverted index access method.
+ * page utilities routines for the postgres inverted index access method.
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.3 2006/07/16 00:52:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.4 2006/10/04 00:29:47 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -16,50 +16,56 @@
#include "access/gin.h"
int
-compareItemPointers( ItemPointer a, ItemPointer b ) {
- if ( GinItemPointerGetBlockNumber(a) == GinItemPointerGetBlockNumber(b) ) {
- if ( GinItemPointerGetOffsetNumber(a) == GinItemPointerGetOffsetNumber(b) )
+compareItemPointers(ItemPointer a, ItemPointer b)
+{
+ if (GinItemPointerGetBlockNumber(a) == GinItemPointerGetBlockNumber(b))
+ {
+ if (GinItemPointerGetOffsetNumber(a) == GinItemPointerGetOffsetNumber(b))
return 0;
- return ( GinItemPointerGetOffsetNumber(a) > GinItemPointerGetOffsetNumber(b) ) ? 1 : -1;
- }
+ return (GinItemPointerGetOffsetNumber(a) > GinItemPointerGetOffsetNumber(b)) ? 1 : -1;
+ }
- return ( GinItemPointerGetBlockNumber(a) > GinItemPointerGetBlockNumber(b) ) ? 1 : -1;
+ return (GinItemPointerGetBlockNumber(a) > GinItemPointerGetBlockNumber(b)) ? 1 : -1;
}
/*
* Merge two ordered array of itempointer
*/
-void
-MergeItemPointers(ItemPointerData *dst, ItemPointerData *a, uint32 na, ItemPointerData *b, uint32 nb) {
+void
+MergeItemPointers(ItemPointerData *dst, ItemPointerData *a, uint32 na, ItemPointerData *b, uint32 nb)
+{
ItemPointerData *dptr = dst;
- ItemPointerData *aptr = a, *bptr = b;
+ ItemPointerData *aptr = a,
+ *bptr = b;
- while( aptr - a < na && bptr - b < nb ) {
- if ( compareItemPointers(aptr, bptr) > 0 )
+ while (aptr - a < na && bptr - b < nb)
+ {
+ if (compareItemPointers(aptr, bptr) > 0)
*dptr++ = *bptr++;
else
*dptr++ = *aptr++;
}
- while( aptr - a < na )
+ while (aptr - a < na)
*dptr++ = *aptr++;
- while( bptr - b < nb )
+ while (bptr - b < nb)
*dptr++ = *bptr++;
}
/*
- * Checks, should we move to right link...
+ * Checks, should we move to right link...
* Compares inserting itemp pointer with right bound of current page
*/
static bool
-dataIsMoveRight(GinBtree btree, Page page) {
- ItemPointer iptr = GinDataPageGetRightBound(page);
+dataIsMoveRight(GinBtree btree, Page page)
+{
+ ItemPointer iptr = GinDataPageGetRightBound(page);
- if ( GinPageRightMost(page) )
- return FALSE;
+ if (GinPageRightMost(page))
+ return FALSE;
- return ( compareItemPointers( btree->items + btree->curitem, iptr ) > 0 ) ? TRUE : FALSE;
+ return (compareItemPointers(btree->items + btree->curitem, iptr) > 0) ? TRUE : FALSE;
}
/*
@@ -67,94 +73,113 @@ dataIsMoveRight(GinBtree btree, Page page) {
* page correctly choosen and searching value SHOULD be on page
*/
static BlockNumber
-dataLocateItem(GinBtree btree, GinBtreeStack *stack) {
- OffsetNumber low, high, maxoff;
- PostingItem *pitem=NULL;
- int result;
- Page page = BufferGetPage( stack->buffer );
-
- Assert( !GinPageIsLeaf(page) );
- Assert( GinPageIsData(page) );
-
- if ( btree->fullScan ) {
+dataLocateItem(GinBtree btree, GinBtreeStack *stack)
+{
+ OffsetNumber low,
+ high,
+ maxoff;
+ PostingItem *pitem = NULL;
+ int result;
+ Page page = BufferGetPage(stack->buffer);
+
+ Assert(!GinPageIsLeaf(page));
+ Assert(GinPageIsData(page));
+
+ if (btree->fullScan)
+ {
stack->off = FirstOffsetNumber;
stack->predictNumber *= GinPageGetOpaque(page)->maxoff;
return btree->getLeftMostPage(btree, page);
}
low = FirstOffsetNumber;
- maxoff = high = GinPageGetOpaque(page)->maxoff;
- Assert( high >= low );
+ maxoff = high = GinPageGetOpaque(page)->maxoff;
+ Assert(high >= low);
high++;
- while (high > low) {
+ while (high > low)
+ {
OffsetNumber mid = low + ((high - low) / 2);
- pitem = (PostingItem*)GinDataPageGetItem(page,mid);
- if ( mid == maxoff )
- /* Right infinity, page already correctly choosen
- with a help of dataIsMoveRight */
+ pitem = (PostingItem *) GinDataPageGetItem(page, mid);
+
+ if (mid == maxoff)
+
+ /*
+ * Right infinity, page already correctly choosen with a help of
+ * dataIsMoveRight
+ */
result = -1;
- else {
- pitem = (PostingItem*)GinDataPageGetItem(page,mid);
- result = compareItemPointers( btree->items + btree->curitem, &( pitem->key ) );
+ else
+ {
+ pitem = (PostingItem *) GinDataPageGetItem(page, mid);
+ result = compareItemPointers(btree->items + btree->curitem, &(pitem->key));
}
- if ( result == 0 ) {
+ if (result == 0)
+ {
stack->off = mid;
return PostingItemGetBlockNumber(pitem);
- } else if ( result > 0 )
+ }
+ else if (result > 0)
low = mid + 1;
else
high = mid;
}
- Assert( high>=FirstOffsetNumber && high <= maxoff );
+ Assert(high >= FirstOffsetNumber && high <= maxoff);
stack->off = high;
- pitem = (PostingItem*)GinDataPageGetItem(page,high);
+ pitem = (PostingItem *) GinDataPageGetItem(page, high);
return PostingItemGetBlockNumber(pitem);
}
-/*
+/*
* Searches correct position for value on leaf page.
- * Page should be corrrectly choosen.
+ * Page should be corrrectly choosen.
* Returns true if value found on page.
*/
static bool
-dataLocateLeafItem(GinBtree btree, GinBtreeStack *stack) {
- Page page = BufferGetPage( stack->buffer );
- OffsetNumber low, high;
- int result;
-
- Assert( GinPageIsLeaf(page) );
- Assert( GinPageIsData(page) );
-
- if ( btree->fullScan ) {
+dataLocateLeafItem(GinBtree btree, GinBtreeStack *stack)
+{
+ Page page = BufferGetPage(stack->buffer);
+ OffsetNumber low,
+ high;
+ int result;
+
+ Assert(GinPageIsLeaf(page));
+ Assert(GinPageIsData(page));
+
+ if (btree->fullScan)
+ {
stack->off = FirstOffsetNumber;
return TRUE;
}
- low=FirstOffsetNumber;
+ low = FirstOffsetNumber;
high = GinPageGetOpaque(page)->maxoff;
- if ( high < low ) {
+ if (high < low)
+ {
stack->off = FirstOffsetNumber;
return false;
}
high++;
- while (high > low) {
+ while (high > low)
+ {
OffsetNumber mid = low + ((high - low) / 2);
- result = compareItemPointers( btree->items + btree->curitem, (ItemPointer)GinDataPageGetItem(page,mid) );
+ result = compareItemPointers(btree->items + btree->curitem, (ItemPointer) GinDataPageGetItem(page, mid));
- if ( result == 0 ) {
+ if (result == 0)
+ {
stack->off = mid;
return true;
- } else if ( result > 0 )
+ }
+ else if (result > 0)
low = mid + 1;
else
high = mid;
@@ -169,34 +194,41 @@ dataLocateLeafItem(GinBtree btree, GinBtreeStack *stack) {
* offset of PostingItem
*/
static OffsetNumber
-dataFindChildPtr(GinBtree btree, Page page, BlockNumber blkno, OffsetNumber storedOff) {
- OffsetNumber i, maxoff = GinPageGetOpaque(page)->maxoff;
+dataFindChildPtr(GinBtree btree, Page page, BlockNumber blkno, OffsetNumber storedOff)
+{
+ OffsetNumber i,
+ maxoff = GinPageGetOpaque(page)->maxoff;
PostingItem *pitem;
- Assert( !GinPageIsLeaf(page) );
- Assert( GinPageIsData(page) );
+ Assert(!GinPageIsLeaf(page));
+ Assert(GinPageIsData(page));
/* if page isn't changed, we returns storedOff */
- if ( storedOff>= FirstOffsetNumber && storedOff<=maxoff) {
- pitem = (PostingItem*)GinDataPageGetItem(page, storedOff);
- if ( PostingItemGetBlockNumber(pitem) == blkno )
+ if (storedOff >= FirstOffsetNumber && storedOff <= maxoff)
+ {
+ pitem = (PostingItem *) GinDataPageGetItem(page, storedOff);
+ if (PostingItemGetBlockNumber(pitem) == blkno)
return storedOff;
- /* we hope, that needed pointer goes to right. It's true
- if there wasn't a deletion */
- for( i=storedOff+1 ; i <= maxoff ; i++ ) {
- pitem = (PostingItem*)GinDataPageGetItem(page, i);
- if ( PostingItemGetBlockNumber(pitem) == blkno )
+ /*
+ * we hope, that needed pointer goes to right. It's true if there
+ * wasn't a deletion
+ */
+ for (i = storedOff + 1; i <= maxoff; i++)
+ {
+ pitem = (PostingItem *) GinDataPageGetItem(page, i);
+ if (PostingItemGetBlockNumber(pitem) == blkno)
return i;
}
- maxoff = storedOff-1;
+ maxoff = storedOff - 1;
}
/* last chance */
- for( i=FirstOffsetNumber; i <= maxoff ; i++ ) {
- pitem = (PostingItem*)GinDataPageGetItem(page, i);
- if ( PostingItemGetBlockNumber(pitem) == blkno )
+ for (i = FirstOffsetNumber; i <= maxoff; i++)
+ {
+ pitem = (PostingItem *) GinDataPageGetItem(page, i);
+ if (PostingItemGetBlockNumber(pitem) == blkno)
return i;
}
@@ -207,14 +239,15 @@ dataFindChildPtr(GinBtree btree, Page page, BlockNumber blkno, OffsetNumber stor
* retunrs blkno of lefmost child
*/
static BlockNumber
-dataGetLeftMostPage(GinBtree btree, Page page) {
+dataGetLeftMostPage(GinBtree btree, Page page)
+{
PostingItem *pitem;
- Assert( !GinPageIsLeaf(page) );
- Assert( GinPageIsData(page) );
- Assert( GinPageGetOpaque(page)->maxoff >= FirstOffsetNumber );
+ Assert(!GinPageIsLeaf(page));
+ Assert(GinPageIsData(page));
+ Assert(GinPageGetOpaque(page)->maxoff >= FirstOffsetNumber);
- pitem = (PostingItem*)GinDataPageGetItem(page, FirstOffsetNumber);
+ pitem = (PostingItem *) GinDataPageGetItem(page, FirstOffsetNumber);
return PostingItemGetBlockNumber(pitem);
}
@@ -223,18 +256,22 @@ dataGetLeftMostPage(GinBtree btree, Page page) {
* correct value! depending on leaf or non-leaf page
*/
void
-GinDataPageAddItem( Page page, void *data, OffsetNumber offset ) {
+GinDataPageAddItem(Page page, void *data, OffsetNumber offset)
+{
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
- char *ptr;
-
- if ( offset == InvalidOffsetNumber ) {
- ptr = GinDataPageGetItem(page,maxoff+1);
- } else {
- ptr = GinDataPageGetItem(page,offset);
- if ( maxoff+1-offset != 0 )
- memmove( ptr+GinSizeOfItem(page), ptr, (maxoff-offset+1) * GinSizeOfItem(page) );
+ char *ptr;
+
+ if (offset == InvalidOffsetNumber)
+ {
+ ptr = GinDataPageGetItem(page, maxoff + 1);
}
- memcpy( ptr, data, GinSizeOfItem(page) );
+ else
+ {
+ ptr = GinDataPageGetItem(page, offset);
+ if (maxoff + 1 - offset != 0)
+ memmove(ptr + GinSizeOfItem(page), ptr, (maxoff - offset + 1) * GinSizeOfItem(page));
+ }
+ memcpy(ptr, data, GinSizeOfItem(page));
GinPageGetOpaque(page)->maxoff++;
}
@@ -243,15 +280,16 @@ GinDataPageAddItem( Page page, void *data, OffsetNumber offset ) {
* Deletes posting item from non-leaf page
*/
void
-PageDeletePostingItem(Page page, OffsetNumber offset) {
- OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
+PageDeletePostingItem(Page page, OffsetNumber offset)
+{
+ OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
- Assert( !GinPageIsLeaf(page) );
- Assert( offset>=FirstOffsetNumber && offset <= maxoff );
+ Assert(!GinPageIsLeaf(page));
+ Assert(offset >= FirstOffsetNumber && offset <= maxoff);
- if ( offset != maxoff )
- memmove( GinDataPageGetItem(page,offset), GinDataPageGetItem(page,offset+1),
- sizeof(PostingItem) * (maxoff-offset) );
+ if (offset != maxoff)
+ memmove(GinDataPageGetItem(page, offset), GinDataPageGetItem(page, offset + 1),
+ sizeof(PostingItem) * (maxoff - offset));
GinPageGetOpaque(page)->maxoff--;
}
@@ -261,19 +299,24 @@ PageDeletePostingItem(Page page, OffsetNumber offset) {
* item pointer never deletes!
*/
static bool
-dataIsEnoughSpace( GinBtree btree, Buffer buf, OffsetNumber off ) {
- Page page = BufferGetPage(buf);
-
- Assert( GinPageIsData(page) );
- Assert( !btree->isDelete );
-
- if ( GinPageIsLeaf(page) ) {
- if ( GinPageRightMost(page) && off > GinPageGetOpaque(page)->maxoff ) {
- if ( (btree->nitem - btree->curitem) * sizeof(ItemPointerData) <= GinDataPageGetFreeSpace(page) )
+dataIsEnoughSpace(GinBtree btree, Buffer buf, OffsetNumber off)
+{
+ Page page = BufferGetPage(buf);
+
+ Assert(GinPageIsData(page));
+ Assert(!btree->isDelete);
+
+ if (GinPageIsLeaf(page))
+ {
+ if (GinPageRightMost(page) && off > GinPageGetOpaque(page)->maxoff)
+ {
+ if ((btree->nitem - btree->curitem) * sizeof(ItemPointerData) <= GinDataPageGetFreeSpace(page))
return true;
- } else if ( sizeof(ItemPointerData) <= GinDataPageGetFreeSpace(page) )
+ }
+ else if (sizeof(ItemPointerData) <= GinDataPageGetFreeSpace(page))
return true;
- } else if ( sizeof(PostingItem) <= GinDataPageGetFreeSpace(page) )
+ }
+ else if (sizeof(PostingItem) <= GinDataPageGetFreeSpace(page))
return true;
return false;
@@ -285,14 +328,17 @@ dataIsEnoughSpace( GinBtree btree, Buffer buf, OffsetNumber off ) {
* item pointer never deletes!
*/
static BlockNumber
-dataPrepareData( GinBtree btree, Page page, OffsetNumber off) {
+dataPrepareData(GinBtree btree, Page page, OffsetNumber off)
+{
BlockNumber ret = InvalidBlockNumber;
- Assert( GinPageIsData(page) );
+ Assert(GinPageIsData(page));
- if ( !GinPageIsLeaf(page) && btree->rightblkno != InvalidBlockNumber ) {
- PostingItem *pitem = (PostingItem*)GinDataPageGetItem(page,off);
- PostingItemSetBlockNumber( pitem, btree->rightblkno );
+ if (!GinPageIsLeaf(page) && btree->rightblkno != InvalidBlockNumber)
+ {
+ PostingItem *pitem = (PostingItem *) GinDataPageGetItem(page, off);
+
+ PostingItemSetBlockNumber(pitem, btree->rightblkno);
ret = btree->rightblkno;
}
@@ -301,24 +347,25 @@ dataPrepareData( GinBtree btree, Page page, OffsetNumber off) {
return ret;
}
-/*
+/*
* Places keys to page and fills WAL record. In case leaf page and
* build mode puts all ItemPointers to page.
*/
static void
-dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prdata) {
- Page page = BufferGetPage(buf);
+dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prdata)
+{
+ Page page = BufferGetPage(buf);
static XLogRecData rdata[3];
- int sizeofitem = GinSizeOfItem(page);
- static ginxlogInsert data;
+ int sizeofitem = GinSizeOfItem(page);
+ static ginxlogInsert data;
*prdata = rdata;
- Assert( GinPageIsData(page) );
+ Assert(GinPageIsData(page));
- data.updateBlkno = dataPrepareData( btree, page, off );
+ data.updateBlkno = dataPrepareData(btree, page, off);
data.node = btree->index->rd_node;
- data.blkno = BufferGetBlockNumber( buf );
+ data.blkno = BufferGetBlockNumber(buf);
data.offset = off;
data.nitem = 1;
data.isDelete = FALSE;
@@ -337,109 +384,124 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
rdata[1].next = &rdata[2];
rdata[2].buffer = InvalidBuffer;
- rdata[2].data = (GinPageIsLeaf(page)) ? ((char*)(btree->items+btree->curitem)) : ((char*)&(btree->pitem));
+ rdata[2].data = (GinPageIsLeaf(page)) ? ((char *) (btree->items + btree->curitem)) : ((char *) &(btree->pitem));
rdata[2].len = sizeofitem;
rdata[2].next = NULL;
- if ( GinPageIsLeaf(page) ) {
- if ( GinPageRightMost(page) && off > GinPageGetOpaque(page)->maxoff ) {
+ if (GinPageIsLeaf(page))
+ {
+ if (GinPageRightMost(page) && off > GinPageGetOpaque(page)->maxoff)
+ {
/* usually, create index... */
- uint32 savedPos = btree->curitem;
+ uint32 savedPos = btree->curitem;
- while( btree->curitem < btree->nitem ) {
- GinDataPageAddItem(page, btree->items+btree->curitem, off);
+ while (btree->curitem < btree->nitem)
+ {
+ GinDataPageAddItem(page, btree->items + btree->curitem, off);
off++;
btree->curitem++;
}
- data.nitem = btree->curitem-savedPos;
+ data.nitem = btree->curitem - savedPos;
rdata[2].len = sizeofitem * data.nitem;
- } else {
- GinDataPageAddItem(page, btree->items+btree->curitem, off);
+ }
+ else
+ {
+ GinDataPageAddItem(page, btree->items + btree->curitem, off);
btree->curitem++;
}
- } else
- GinDataPageAddItem(page, &(btree->pitem), off);
+ }
+ else
+ GinDataPageAddItem(page, &(btree->pitem), off);
}
/*
* split page and fills WAL record. original buffer(lbuf) leaves untouched,
- * returns shadow page of lbuf filled new data. In leaf page and build mode puts all
+ * returns shadow page of lbuf filled new data. In leaf page and build mode puts all
* ItemPointers to pages. Also, in build mode splits data by way to full fulled
* left page
*/
static Page
-dataSplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRecData **prdata) {
+dataSplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRecData **prdata)
+{
static ginxlogSplit data;
static XLogRecData rdata[4];
- static char vector[2*BLCKSZ];
- char *ptr;
+ static char vector[2 * BLCKSZ];
+ char *ptr;
OffsetNumber separator;
- ItemPointer bound;
- Page lpage = GinPageGetCopyPage( BufferGetPage( lbuf ) );
- ItemPointerData oldbound = *GinDataPageGetRightBound(lpage);
- int sizeofitem = GinSizeOfItem(lpage);
+ ItemPointer bound;
+ Page lpage = GinPageGetCopyPage(BufferGetPage(lbuf));
+ ItemPointerData oldbound = *GinDataPageGetRightBound(lpage);
+ int sizeofitem = GinSizeOfItem(lpage);
OffsetNumber maxoff = GinPageGetOpaque(lpage)->maxoff;
- Page rpage = BufferGetPage( rbuf );
- Size pageSize = PageGetPageSize( lpage );
- Size freeSpace;
- uint32 nCopied = 1;
+ Page rpage = BufferGetPage(rbuf);
+ Size pageSize = PageGetPageSize(lpage);
+ Size freeSpace;
+ uint32 nCopied = 1;
- GinInitPage( rpage, GinPageGetOpaque(lpage)->flags, pageSize );
+ GinInitPage(rpage, GinPageGetOpaque(lpage)->flags, pageSize);
freeSpace = GinDataPageGetFreeSpace(rpage);
*prdata = rdata;
- data.leftChildBlkno = ( GinPageIsLeaf(lpage) ) ?
- InvalidOffsetNumber : PostingItemGetBlockNumber( &(btree->pitem) );
- data.updateBlkno = dataPrepareData( btree, lpage, off );
+ data.leftChildBlkno = (GinPageIsLeaf(lpage)) ?
+ InvalidOffsetNumber : PostingItemGetBlockNumber(&(btree->pitem));
+ data.updateBlkno = dataPrepareData(btree, lpage, off);
- memcpy(vector, GinDataPageGetItem(lpage, FirstOffsetNumber),
- maxoff*sizeofitem);
+ memcpy(vector, GinDataPageGetItem(lpage, FirstOffsetNumber),
+ maxoff * sizeofitem);
- if ( GinPageIsLeaf(lpage) && GinPageRightMost(lpage) && off > GinPageGetOpaque(lpage)->maxoff ) {
+ if (GinPageIsLeaf(lpage) && GinPageRightMost(lpage) && off > GinPageGetOpaque(lpage)->maxoff)
+ {
nCopied = 0;
- while( btree->curitem < btree->nitem && maxoff*sizeof(ItemPointerData) < 2*(freeSpace - sizeof(ItemPointerData)) ) {
- memcpy( vector + maxoff*sizeof(ItemPointerData), btree->items+btree->curitem,
- sizeof(ItemPointerData) );
+ while (btree->curitem < btree->nitem && maxoff * sizeof(ItemPointerData) < 2 * (freeSpace - sizeof(ItemPointerData)))
+ {
+ memcpy(vector + maxoff * sizeof(ItemPointerData), btree->items + btree->curitem,
+ sizeof(ItemPointerData));
maxoff++;
nCopied++;
btree->curitem++;
}
- } else {
- ptr = vector + (off-1)*sizeofitem;
- if ( maxoff+1-off != 0 )
- memmove( ptr+sizeofitem, ptr, (maxoff-off+1) * sizeofitem );
- if ( GinPageIsLeaf(lpage) ) {
- memcpy(ptr, btree->items+btree->curitem, sizeofitem );
+ }
+ else
+ {
+ ptr = vector + (off - 1) * sizeofitem;
+ if (maxoff + 1 - off != 0)
+ memmove(ptr + sizeofitem, ptr, (maxoff - off + 1) * sizeofitem);
+ if (GinPageIsLeaf(lpage))
+ {
+ memcpy(ptr, btree->items + btree->curitem, sizeofitem);
btree->curitem++;
- } else
- memcpy(ptr, &(btree->pitem), sizeofitem );
-
+ }
+ else
+ memcpy(ptr, &(btree->pitem), sizeofitem);
+
maxoff++;
}
- /* we suppose that during index creation table scaned from
- begin to end, so ItemPointers are monotonically increased.. */
- if ( btree->isBuild && GinPageRightMost(lpage) )
- separator=freeSpace/sizeofitem;
+ /*
+ * we suppose that during index creation table scaned from begin to end,
+ * so ItemPointers are monotonically increased..
+ */
+ if (btree->isBuild && GinPageRightMost(lpage))
+ separator = freeSpace / sizeofitem;
else
- separator=maxoff/2;
+ separator = maxoff / 2;
- GinInitPage( rpage, GinPageGetOpaque(lpage)->flags, pageSize );
- GinInitPage( lpage, GinPageGetOpaque(rpage)->flags, pageSize );
+ GinInitPage(rpage, GinPageGetOpaque(lpage)->flags, pageSize);
+ GinInitPage(lpage, GinPageGetOpaque(rpage)->flags, pageSize);
- memcpy( GinDataPageGetItem(lpage, FirstOffsetNumber), vector, separator * sizeofitem );
+ memcpy(GinDataPageGetItem(lpage, FirstOffsetNumber), vector, separator * sizeofitem);
GinPageGetOpaque(lpage)->maxoff = separator;
- memcpy( GinDataPageGetItem(rpage, FirstOffsetNumber),
- vector + separator * sizeofitem, (maxoff-separator) * sizeofitem );
- GinPageGetOpaque(rpage)->maxoff = maxoff-separator;
-
- PostingItemSetBlockNumber( &(btree->pitem), BufferGetBlockNumber(lbuf) );
- if ( GinPageIsLeaf(lpage) )
- btree->pitem.key = *(ItemPointerData*)GinDataPageGetItem(lpage,
- GinPageGetOpaque(lpage)->maxoff);
- else
- btree->pitem.key = ((PostingItem*)GinDataPageGetItem(lpage,
- GinPageGetOpaque(lpage)->maxoff))->key;
+ memcpy(GinDataPageGetItem(rpage, FirstOffsetNumber),
+ vector + separator * sizeofitem, (maxoff - separator) * sizeofitem);
+ GinPageGetOpaque(rpage)->maxoff = maxoff - separator;
+
+ PostingItemSetBlockNumber(&(btree->pitem), BufferGetBlockNumber(lbuf));
+ if (GinPageIsLeaf(lpage))
+ btree->pitem.key = *(ItemPointerData *) GinDataPageGetItem(lpage,
+ GinPageGetOpaque(lpage)->maxoff);
+ else
+ btree->pitem.key = ((PostingItem *) GinDataPageGetItem(lpage,
+ GinPageGetOpaque(lpage)->maxoff))->key;
btree->rightblkno = BufferGetBlockNumber(rbuf);
/* set up right bound for left page */
@@ -452,8 +514,8 @@ dataSplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRe
data.node = btree->index->rd_node;
data.rootBlkno = InvalidBlockNumber;
- data.lblkno = BufferGetBlockNumber( lbuf );
- data.rblkno = BufferGetBlockNumber( rbuf );
+ data.lblkno = BufferGetBlockNumber(lbuf);
+ data.rblkno = BufferGetBlockNumber(rbuf);
data.separator = separator;
data.nitem = maxoff;
data.isData = TRUE;
@@ -468,34 +530,37 @@ dataSplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRe
rdata[1].buffer = InvalidBuffer;
rdata[1].data = vector;
- rdata[1].len = MAXALIGN( maxoff * sizeofitem );
+ rdata[1].len = MAXALIGN(maxoff * sizeofitem);
rdata[1].next = NULL;
return lpage;
}
/*
- * Fills new root by right bound values from child.
+ * Fills new root by right bound values from child.
* Also called from ginxlog, should not use btree
*/
void
-dataFillRoot(GinBtree btree, Buffer root, Buffer lbuf, Buffer rbuf) {
- Page page = BufferGetPage(root),
- lpage = BufferGetPage(lbuf),
- rpage = BufferGetPage(rbuf);
- PostingItem li, ri;
+dataFillRoot(GinBtree btree, Buffer root, Buffer lbuf, Buffer rbuf)
+{
+ Page page = BufferGetPage(root),
+ lpage = BufferGetPage(lbuf),
+ rpage = BufferGetPage(rbuf);
+ PostingItem li,
+ ri;
li.key = *GinDataPageGetRightBound(lpage);
- PostingItemSetBlockNumber( &li, BufferGetBlockNumber(lbuf) );
- GinDataPageAddItem(page, &li, InvalidOffsetNumber );
+ PostingItemSetBlockNumber(&li, BufferGetBlockNumber(lbuf));
+ GinDataPageAddItem(page, &li, InvalidOffsetNumber);
ri.key = *GinDataPageGetRightBound(rpage);
- PostingItemSetBlockNumber( &ri, BufferGetBlockNumber(rbuf) );
- GinDataPageAddItem(page, &ri, InvalidOffsetNumber );
+ PostingItemSetBlockNumber(&ri, BufferGetBlockNumber(rbuf));
+ GinDataPageAddItem(page, &ri, InvalidOffsetNumber);
}
void
-prepareDataScan( GinBtree btree, Relation index) {
+prepareDataScan(GinBtree btree, Relation index)
+{
memset(btree, 0, sizeof(GinBtreeData));
btree->index = index;
btree->isMoveRight = dataIsMoveRight;
@@ -509,21 +574,22 @@ prepareDataScan( GinBtree btree, Relation index) {
btree->fillRoot = dataFillRoot;
btree->searchMode = FALSE;
- btree->isDelete = FALSE;
+ btree->isDelete = FALSE;
btree->fullScan = FALSE;
- btree->isBuild= FALSE;
+ btree->isBuild = FALSE;
}
-GinPostingTreeScan*
-prepareScanPostingTree( Relation index, BlockNumber rootBlkno, bool searchMode) {
- GinPostingTreeScan *gdi = (GinPostingTreeScan*)palloc0( sizeof(GinPostingTreeScan) );
+GinPostingTreeScan *
+prepareScanPostingTree(Relation index, BlockNumber rootBlkno, bool searchMode)
+{
+ GinPostingTreeScan *gdi = (GinPostingTreeScan *) palloc0(sizeof(GinPostingTreeScan));
+
+ prepareDataScan(&gdi->btree, index);
- prepareDataScan( &gdi->btree, index );
-
gdi->btree.searchMode = searchMode;
gdi->btree.fullScan = searchMode;
- gdi->stack = ginPrepareFindLeafPage( &gdi->btree, rootBlkno );
+ gdi->stack = ginPrepareFindLeafPage(&gdi->btree, rootBlkno);
return gdi;
}
@@ -532,33 +598,35 @@ prepareScanPostingTree( Relation index, BlockNumber rootBlkno, bool searchMode)
* Inserts array of item pointers, may execute several tree scan (very rare)
*/
void
-insertItemPointer(GinPostingTreeScan *gdi, ItemPointerData *items, uint32 nitem) {
+insertItemPointer(GinPostingTreeScan *gdi, ItemPointerData *items, uint32 nitem)
+{
BlockNumber rootBlkno = gdi->stack->blkno;
gdi->btree.items = items;
gdi->btree.nitem = nitem;
gdi->btree.curitem = 0;
- while( gdi->btree.curitem < gdi->btree.nitem ) {
+ while (gdi->btree.curitem < gdi->btree.nitem)
+ {
if (!gdi->stack)
- gdi->stack = ginPrepareFindLeafPage( &gdi->btree, rootBlkno );
+ gdi->stack = ginPrepareFindLeafPage(&gdi->btree, rootBlkno);
- gdi->stack = ginFindLeafPage( &gdi->btree, gdi->stack );
+ gdi->stack = ginFindLeafPage(&gdi->btree, gdi->stack);
- if ( gdi->btree.findItem( &(gdi->btree), gdi->stack ) )
- elog(ERROR,"item pointer (%u,%d) already exists",
- ItemPointerGetBlockNumber(gdi->btree.items + gdi->btree.curitem),
- ItemPointerGetOffsetNumber(gdi->btree.items + gdi->btree.curitem));
+ if (gdi->btree.findItem(&(gdi->btree), gdi->stack))
+ elog(ERROR, "item pointer (%u,%d) already exists",
+ ItemPointerGetBlockNumber(gdi->btree.items + gdi->btree.curitem),
+ ItemPointerGetOffsetNumber(gdi->btree.items + gdi->btree.curitem));
ginInsertValue(&(gdi->btree), gdi->stack);
- gdi->stack=NULL;
+ gdi->stack = NULL;
}
}
Buffer
-scanBeginPostingTree( GinPostingTreeScan *gdi ) {
- gdi->stack = ginFindLeafPage( &gdi->btree, gdi->stack );
+scanBeginPostingTree(GinPostingTreeScan *gdi)
+{
+ gdi->stack = ginFindLeafPage(&gdi->btree, gdi->stack);
return gdi->stack->buffer;
}
-
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 6e3cc75ce0..129c955096 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* ginentrypage.c
- * page utilities routines for the postgres inverted index access method.
+ * page utilities routines for the postgres inverted index access method.
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.3 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.4 2006/10/04 00:29:47 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -23,48 +23,52 @@
* 1) Posting list
* - itup->t_info & INDEX_SIZE_MASK contains size of tuple as usial
* - ItemPointerGetBlockNumber(&itup->t_tid) contains original
- * size of tuple (without posting list).
+ * size of tuple (without posting list).
* Macroses: GinGetOrigSizePosting(itup) / GinSetOrigSizePosting(itup,n)
* - ItemPointerGetOffsetNumber(&itup->t_tid) contains number
* of elements in posting list (number of heap itempointer)
* Macroses: GinGetNPosting(itup) / GinSetNPosting(itup,n)
- * - After usial part of tuple there is a posting list
+ * - After usial part of tuple there is a posting list
* Macros: GinGetPosting(itup)
* 2) Posting tree
* - itup->t_info & INDEX_SIZE_MASK contains size of tuple as usial
- * - ItemPointerGetBlockNumber(&itup->t_tid) contains block number of
+ * - ItemPointerGetBlockNumber(&itup->t_tid) contains block number of
* root of posting tree
* - ItemPointerGetOffsetNumber(&itup->t_tid) contains magick number GIN_TREE_POSTING
*/
IndexTuple
-GinFormTuple(GinState *ginstate, Datum key, ItemPointerData *ipd, uint32 nipd) {
- bool isnull=FALSE;
+GinFormTuple(GinState *ginstate, Datum key, ItemPointerData *ipd, uint32 nipd)
+{
+ bool isnull = FALSE;
IndexTuple itup;
- itup = index_form_tuple(ginstate->tupdesc, &key, &isnull);
+ itup = index_form_tuple(ginstate->tupdesc, &key, &isnull);
- GinSetOrigSizePosting( itup, IndexTupleSize(itup) );
+ GinSetOrigSizePosting(itup, IndexTupleSize(itup));
- if ( nipd > 0 ) {
- uint32 newsize = MAXALIGN(SHORTALIGN(IndexTupleSize(itup)) + sizeof(ItemPointerData)*nipd);
+ if (nipd > 0)
+ {
+ uint32 newsize = MAXALIGN(SHORTALIGN(IndexTupleSize(itup)) + sizeof(ItemPointerData) * nipd);
- if ( newsize >= INDEX_SIZE_MASK )
+ if (newsize >= INDEX_SIZE_MASK)
return NULL;
- if ( newsize > TOAST_INDEX_TARGET && nipd > 1 )
+ if (newsize > TOAST_INDEX_TARGET && nipd > 1)
return NULL;
- itup = repalloc( itup, newsize );
+ itup = repalloc(itup, newsize);
/* set new size */
- itup->t_info &= ~INDEX_SIZE_MASK;
+ itup->t_info &= ~INDEX_SIZE_MASK;
itup->t_info |= newsize;
- if ( ipd )
- memcpy( GinGetPosting(itup), ipd, sizeof(ItemPointerData)*nipd );
- GinSetNPosting(itup, nipd);
- } else {
- GinSetNPosting(itup, 0);
+ if (ipd)
+ memcpy(GinGetPosting(itup), ipd, sizeof(ItemPointerData) * nipd);
+ GinSetNPosting(itup, nipd);
+ }
+ else
+ {
+ GinSetNPosting(itup, 0);
}
return itup;
}
@@ -74,31 +78,35 @@ GinFormTuple(GinState *ginstate, Datum key, ItemPointerData *ipd, uint32 nipd) {
* so we don't use right bound, we use rightest key instead.
*/
static IndexTuple
-getRightMostTuple(Page page) {
+getRightMostTuple(Page page)
+{
OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
+
return (IndexTuple) PageGetItem(page, PageGetItemId(page, maxoff));
}
Datum
-ginGetHighKey(GinState *ginstate, Page page) {
- IndexTuple itup;
- bool isnull;
+ginGetHighKey(GinState *ginstate, Page page)
+{
+ IndexTuple itup;
+ bool isnull;
itup = getRightMostTuple(page);
- return index_getattr(itup, FirstOffsetNumber, ginstate->tupdesc, &isnull);
+ return index_getattr(itup, FirstOffsetNumber, ginstate->tupdesc, &isnull);
}
-static bool
-entryIsMoveRight(GinBtree btree, Page page) {
- Datum highkey;
+static bool
+entryIsMoveRight(GinBtree btree, Page page)
+{
+ Datum highkey;
- if ( GinPageRightMost(page) )
+ if (GinPageRightMost(page))
return FALSE;
highkey = ginGetHighKey(btree->ginstate, page);
- if ( compareEntries(btree->ginstate, btree->entryValue, highkey) > 0 )
+ if (compareEntries(btree->ginstate, btree->entryValue, highkey) > 0)
return TRUE;
return FALSE;
@@ -109,16 +117,20 @@ entryIsMoveRight(GinBtree btree, Page page) {
* page correctly choosen and searching value SHOULD be on page
*/
static BlockNumber
-entryLocateEntry(GinBtree btree, GinBtreeStack *stack) {
- OffsetNumber low, high, maxoff;
- IndexTuple itup = NULL;
- int result;
- Page page = BufferGetPage( stack->buffer );
-
- Assert( !GinPageIsLeaf(page) );
- Assert( !GinPageIsData(page) );
-
- if ( btree->fullScan ) {
+entryLocateEntry(GinBtree btree, GinBtreeStack *stack)
+{
+ OffsetNumber low,
+ high,
+ maxoff;
+ IndexTuple itup = NULL;
+ int result;
+ Page page = BufferGetPage(stack->buffer);
+
+ Assert(!GinPageIsLeaf(page));
+ Assert(!GinPageIsData(page));
+
+ if (btree->fullScan)
+ {
stack->off = FirstOffsetNumber;
stack->predictNumber *= PageGetMaxOffsetNumber(page);
return btree->getLeftMostPage(btree, page);
@@ -126,39 +138,43 @@ entryLocateEntry(GinBtree btree, GinBtreeStack *stack) {
low = FirstOffsetNumber;
maxoff = high = PageGetMaxOffsetNumber(page);
- Assert( high >= low );
+ Assert(high >= low);
high++;
- while (high > low) {
+ while (high > low)
+ {
OffsetNumber mid = low + ((high - low) / 2);
- if ( mid == maxoff && GinPageRightMost(page) )
+ if (mid == maxoff && GinPageRightMost(page))
/* Right infinity */
result = -1;
- else {
- bool isnull;
+ else
+ {
+ bool isnull;
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, mid));
- result = compareEntries(btree->ginstate, btree->entryValue,
- index_getattr(itup, FirstOffsetNumber, btree->ginstate->tupdesc, &isnull) );
+ result = compareEntries(btree->ginstate, btree->entryValue,
+ index_getattr(itup, FirstOffsetNumber, btree->ginstate->tupdesc, &isnull));
}
- if ( result == 0 ) {
+ if (result == 0)
+ {
stack->off = mid;
- Assert( GinItemPointerGetBlockNumber(&(itup)->t_tid) != GIN_ROOT_BLKNO );
+ Assert(GinItemPointerGetBlockNumber(&(itup)->t_tid) != GIN_ROOT_BLKNO);
return GinItemPointerGetBlockNumber(&(itup)->t_tid);
- } else if ( result > 0 )
+ }
+ else if (result > 0)
low = mid + 1;
else
high = mid;
}
- Assert( high>=FirstOffsetNumber && high <= maxoff );
+ Assert(high >= FirstOffsetNumber && high <= maxoff);
stack->off = high;
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, high));
- Assert( GinItemPointerGetBlockNumber(&(itup)->t_tid) != GIN_ROOT_BLKNO );
+ Assert(GinItemPointerGetBlockNumber(&(itup)->t_tid) != GIN_ROOT_BLKNO);
return GinItemPointerGetBlockNumber(&(itup)->t_tid);
}
@@ -168,15 +184,18 @@ entryLocateEntry(GinBtree btree, GinBtreeStack *stack) {
* Returns true if value found on page.
*/
static bool
-entryLocateLeafEntry(GinBtree btree, GinBtreeStack *stack) {
- Page page = BufferGetPage( stack->buffer );
- OffsetNumber low, high;
- IndexTuple itup;
+entryLocateLeafEntry(GinBtree btree, GinBtreeStack *stack)
+{
+ Page page = BufferGetPage(stack->buffer);
+ OffsetNumber low,
+ high;
+ IndexTuple itup;
- Assert( GinPageIsLeaf(page) );
- Assert( !GinPageIsData(page) );
+ Assert(GinPageIsLeaf(page));
+ Assert(!GinPageIsData(page));
- if ( btree->fullScan ) {
+ if (btree->fullScan)
+ {
stack->off = FirstOffsetNumber;
return TRUE;
}
@@ -184,26 +203,30 @@ entryLocateLeafEntry(GinBtree btree, GinBtreeStack *stack) {
low = FirstOffsetNumber;
high = PageGetMaxOffsetNumber(page);
- if ( high < low ) {
+ if (high < low)
+ {
stack->off = FirstOffsetNumber;
return false;
}
high++;
- while (high > low) {
+ while (high > low)
+ {
OffsetNumber mid = low + ((high - low) / 2);
- bool isnull;
- int result;
+ bool isnull;
+ int result;
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, mid));
result = compareEntries(btree->ginstate, btree->entryValue,
- index_getattr(itup, FirstOffsetNumber, btree->ginstate->tupdesc, &isnull) );
+ index_getattr(itup, FirstOffsetNumber, btree->ginstate->tupdesc, &isnull));
- if ( result == 0 ) {
+ if (result == 0)
+ {
stack->off = mid;
return true;
- } else if ( result > 0 )
+ }
+ else if (result > 0)
low = mid + 1;
else
high = mid;
@@ -214,33 +237,40 @@ entryLocateLeafEntry(GinBtree btree, GinBtreeStack *stack) {
}
static OffsetNumber
-entryFindChildPtr(GinBtree btree, Page page, BlockNumber blkno, OffsetNumber storedOff) {
- OffsetNumber i, maxoff = PageGetMaxOffsetNumber(page);
- IndexTuple itup;
+entryFindChildPtr(GinBtree btree, Page page, BlockNumber blkno, OffsetNumber storedOff)
+{
+ OffsetNumber i,
+ maxoff = PageGetMaxOffsetNumber(page);
+ IndexTuple itup;
- Assert( !GinPageIsLeaf(page) );
- Assert( !GinPageIsData(page) );
+ Assert(!GinPageIsLeaf(page));
+ Assert(!GinPageIsData(page));
/* if page isn't changed, we returns storedOff */
- if ( storedOff>= FirstOffsetNumber && storedOff<=maxoff) {
+ if (storedOff >= FirstOffsetNumber && storedOff <= maxoff)
+ {
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, storedOff));
- if ( GinItemPointerGetBlockNumber(&(itup)->t_tid) == blkno )
+ if (GinItemPointerGetBlockNumber(&(itup)->t_tid) == blkno)
return storedOff;
- /* we hope, that needed pointer goes to right. It's true
- if there wasn't a deletion */
- for( i=storedOff+1 ; i <= maxoff ; i++ ) {
+ /*
+ * we hope, that needed pointer goes to right. It's true if there
+ * wasn't a deletion
+ */
+ for (i = storedOff + 1; i <= maxoff; i++)
+ {
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
- if ( GinItemPointerGetBlockNumber(&(itup)->t_tid) == blkno )
+ if (GinItemPointerGetBlockNumber(&(itup)->t_tid) == blkno)
return i;
}
- maxoff = storedOff-1;
+ maxoff = storedOff - 1;
}
/* last chance */
- for( i=FirstOffsetNumber; i <= maxoff ; i++ ) {
+ for (i = FirstOffsetNumber; i <= maxoff; i++)
+ {
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
- if ( GinItemPointerGetBlockNumber(&(itup)->t_tid) == blkno )
+ if (GinItemPointerGetBlockNumber(&(itup)->t_tid) == blkno)
return i;
}
@@ -248,31 +278,35 @@ entryFindChildPtr(GinBtree btree, Page page, BlockNumber blkno, OffsetNumber sto
}
static BlockNumber
-entryGetLeftMostPage(GinBtree btree, Page page) {
- IndexTuple itup;
+entryGetLeftMostPage(GinBtree btree, Page page)
+{
+ IndexTuple itup;
- Assert( !GinPageIsLeaf(page) );
- Assert( !GinPageIsData(page) );
- Assert( PageGetMaxOffsetNumber(page) >= FirstOffsetNumber );
+ Assert(!GinPageIsLeaf(page));
+ Assert(!GinPageIsData(page));
+ Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber));
- return GinItemPointerGetBlockNumber(&(itup)->t_tid);
+ return GinItemPointerGetBlockNumber(&(itup)->t_tid);
}
static bool
-entryIsEnoughSpace( GinBtree btree, Buffer buf, OffsetNumber off ) {
- Size itupsz = 0;
- Page page = BufferGetPage(buf);
+entryIsEnoughSpace(GinBtree btree, Buffer buf, OffsetNumber off)
+{
+ Size itupsz = 0;
+ Page page = BufferGetPage(buf);
+
+ Assert(btree->entry);
+ Assert(!GinPageIsData(page));
- Assert( btree->entry );
- Assert( !GinPageIsData(page) );
+ if (btree->isDelete)
+ {
+ IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));
- if ( btree->isDelete ) {
- IndexTuple itup = (IndexTuple)PageGetItem(page, PageGetItemId(page, off));
- itupsz = MAXALIGN( IndexTupleSize( itup ) ) + sizeof(ItemIdData);
+ itupsz = MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData);
}
- if ( PageGetFreeSpace(page) + itupsz >= MAXALIGN(IndexTupleSize(btree->entry)) + sizeof(ItemIdData) )
+ if (PageGetFreeSpace(page) + itupsz >= MAXALIGN(IndexTupleSize(btree->entry)) + sizeof(ItemIdData))
return true;
return false;
@@ -284,19 +318,23 @@ entryIsEnoughSpace( GinBtree btree, Buffer buf, OffsetNumber off ) {
* if child split is occured
*/
static BlockNumber
-entryPreparePage( GinBtree btree, Page page, OffsetNumber off) {
+entryPreparePage(GinBtree btree, Page page, OffsetNumber off)
+{
BlockNumber ret = InvalidBlockNumber;
- Assert( btree->entry );
- Assert( !GinPageIsData(page) );
+ Assert(btree->entry);
+ Assert(!GinPageIsData(page));
- if ( btree->isDelete ) {
- Assert( GinPageIsLeaf(page) );
+ if (btree->isDelete)
+ {
+ Assert(GinPageIsLeaf(page));
PageIndexTupleDelete(page, off);
}
- if ( !GinPageIsLeaf(page) && btree->rightblkno != InvalidBlockNumber ) {
- IndexTuple itup = (IndexTuple)PageGetItem(page, PageGetItemId(page, off));
+ if (!GinPageIsLeaf(page) && btree->rightblkno != InvalidBlockNumber)
+ {
+ IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));
+
ItemPointerSet(&itup->t_tid, btree->rightblkno, InvalidOffsetNumber);
ret = btree->rightblkno;
}
@@ -310,22 +348,23 @@ entryPreparePage( GinBtree btree, Page page, OffsetNumber off) {
* Place tuple on page and fills WAL record
*/
static void
-entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prdata) {
- Page page = BufferGetPage(buf);
+entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prdata)
+{
+ Page page = BufferGetPage(buf);
static XLogRecData rdata[3];
- OffsetNumber placed;
- static ginxlogInsert data;
+ OffsetNumber placed;
+ static ginxlogInsert data;
*prdata = rdata;
- data.updateBlkno = entryPreparePage( btree, page, off );
+ data.updateBlkno = entryPreparePage(btree, page, off);
- placed = PageAddItem( page, (Item)btree->entry, IndexTupleSize(btree->entry), off, LP_USED);
- if ( placed != off )
+ placed = PageAddItem(page, (Item) btree->entry, IndexTupleSize(btree->entry), off, LP_USED);
+ if (placed != off)
elog(ERROR, "failed to add item to index page in \"%s\"",
- RelationGetRelationName(btree->index));
+ RelationGetRelationName(btree->index));
data.node = btree->index->rd_node;
- data.blkno = BufferGetBlockNumber( buf );
+ data.blkno = BufferGetBlockNumber(buf);
data.offset = off;
data.nitem = 1;
data.isDelete = btree->isDelete;
@@ -358,87 +397,99 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
* an equal number!
*/
static Page
-entrySplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRecData **prdata) {
+entrySplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRecData **prdata)
+{
static XLogRecData rdata[2];
- OffsetNumber i, maxoff, separator=InvalidOffsetNumber;
- Size totalsize=0;
- Size lsize = 0, size;
- static char tupstore[ 2*BLCKSZ ];
- char *ptr;
- IndexTuple itup, leftrightmost=NULL;
- static ginxlogSplit data;
- Datum value;
- bool isnull;
- Page page;
- Page lpage = GinPageGetCopyPage( BufferGetPage( lbuf ) );
- Page rpage = BufferGetPage( rbuf );
- Size pageSize = PageGetPageSize( lpage );
+ OffsetNumber i,
+ maxoff,
+ separator = InvalidOffsetNumber;
+ Size totalsize = 0;
+ Size lsize = 0,
+ size;
+ static char tupstore[2 * BLCKSZ];
+ char *ptr;
+ IndexTuple itup,
+ leftrightmost = NULL;
+ static ginxlogSplit data;
+ Datum value;
+ bool isnull;
+ Page page;
+ Page lpage = GinPageGetCopyPage(BufferGetPage(lbuf));
+ Page rpage = BufferGetPage(rbuf);
+ Size pageSize = PageGetPageSize(lpage);
*prdata = rdata;
- data.leftChildBlkno = ( GinPageIsLeaf(lpage) ) ?
- InvalidOffsetNumber : GinItemPointerGetBlockNumber( &(btree->entry->t_tid) );
- data.updateBlkno = entryPreparePage( btree, lpage, off );
+ data.leftChildBlkno = (GinPageIsLeaf(lpage)) ?
+ InvalidOffsetNumber : GinItemPointerGetBlockNumber(&(btree->entry->t_tid));
+ data.updateBlkno = entryPreparePage(btree, lpage, off);
maxoff = PageGetMaxOffsetNumber(lpage);
- ptr = tupstore;
+ ptr = tupstore;
- for(i=FirstOffsetNumber; i<=maxoff; i++) {
- if ( i==off ) {
- size = MAXALIGN( IndexTupleSize(btree->entry) );
+ for (i = FirstOffsetNumber; i <= maxoff; i++)
+ {
+ if (i == off)
+ {
+ size = MAXALIGN(IndexTupleSize(btree->entry));
memcpy(ptr, btree->entry, size);
- ptr+=size;
+ ptr += size;
totalsize += size + sizeof(ItemIdData);
}
- itup = (IndexTuple)PageGetItem(lpage, PageGetItemId(lpage, i));
- size = MAXALIGN( IndexTupleSize(itup) );
+ itup = (IndexTuple) PageGetItem(lpage, PageGetItemId(lpage, i));
+ size = MAXALIGN(IndexTupleSize(itup));
memcpy(ptr, itup, size);
- ptr+=size;
+ ptr += size;
totalsize += size + sizeof(ItemIdData);
}
- if ( off==maxoff+1 ) {
- size = MAXALIGN( IndexTupleSize(btree->entry) );
+ if (off == maxoff + 1)
+ {
+ size = MAXALIGN(IndexTupleSize(btree->entry));
memcpy(ptr, btree->entry, size);
- ptr+=size;
+ ptr += size;
totalsize += size + sizeof(ItemIdData);
}
- GinInitPage( rpage, GinPageGetOpaque(lpage)->flags, pageSize );
- GinInitPage( lpage, GinPageGetOpaque(rpage)->flags, pageSize );
+ GinInitPage(rpage, GinPageGetOpaque(lpage)->flags, pageSize);
+ GinInitPage(lpage, GinPageGetOpaque(rpage)->flags, pageSize);
ptr = tupstore;
- maxoff++;
+ maxoff++;
lsize = 0;
page = lpage;
- for(i=FirstOffsetNumber; i<=maxoff; i++) {
- itup = (IndexTuple)ptr;
-
- if ( lsize > totalsize/2 ) {
- if ( separator==InvalidOffsetNumber )
- separator = i-1;
+ for (i = FirstOffsetNumber; i <= maxoff; i++)
+ {
+ itup = (IndexTuple) ptr;
+
+ if (lsize > totalsize / 2)
+ {
+ if (separator == InvalidOffsetNumber)
+ separator = i - 1;
page = rpage;
- } else {
+ }
+ else
+ {
leftrightmost = itup;
- lsize += MAXALIGN( IndexTupleSize(itup) ) + sizeof(ItemIdData);
+ lsize += MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData);
}
- if ( PageAddItem( page, (Item)itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber )
+ if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"",
- RelationGetRelationName(btree->index));
- ptr += MAXALIGN( IndexTupleSize(itup) );
+ RelationGetRelationName(btree->index));
+ ptr += MAXALIGN(IndexTupleSize(itup));
}
-
+
value = index_getattr(leftrightmost, FirstOffsetNumber, btree->ginstate->tupdesc, &isnull);
- btree->entry = GinFormTuple( btree->ginstate, value, NULL, 0);
- ItemPointerSet(&(btree->entry)->t_tid, BufferGetBlockNumber( lbuf ), InvalidOffsetNumber);
- btree->rightblkno = BufferGetBlockNumber( rbuf );
-
+ btree->entry = GinFormTuple(btree->ginstate, value, NULL, 0);
+ ItemPointerSet(&(btree->entry)->t_tid, BufferGetBlockNumber(lbuf), InvalidOffsetNumber);
+ btree->rightblkno = BufferGetBlockNumber(rbuf);
+
data.node = btree->index->rd_node;
data.rootBlkno = InvalidBlockNumber;
- data.lblkno = BufferGetBlockNumber( lbuf );
- data.rblkno = BufferGetBlockNumber( rbuf );
+ data.lblkno = BufferGetBlockNumber(lbuf);
+ data.rblkno = BufferGetBlockNumber(rbuf);
data.separator = separator;
data.nitem = maxoff;
data.isData = FALSE;
@@ -458,23 +509,28 @@ entrySplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogR
return lpage;
}
-/*
+/*
* return newly allocate rightmost tuple
*/
IndexTuple
-ginPageGetLinkItup(Buffer buf) {
- IndexTuple itup, nitup;
- Page page = BufferGetPage(buf);
-
- itup = getRightMostTuple( page );
- if ( GinPageIsLeaf(page) && !GinIsPostingTree(itup) ) {
- nitup = (IndexTuple)palloc( MAXALIGN(GinGetOrigSizePosting(itup)) );
- memcpy( nitup, itup, GinGetOrigSizePosting(itup) );
+ginPageGetLinkItup(Buffer buf)
+{
+ IndexTuple itup,
+ nitup;
+ Page page = BufferGetPage(buf);
+
+ itup = getRightMostTuple(page);
+ if (GinPageIsLeaf(page) && !GinIsPostingTree(itup))
+ {
+ nitup = (IndexTuple) palloc(MAXALIGN(GinGetOrigSizePosting(itup)));
+ memcpy(nitup, itup, GinGetOrigSizePosting(itup));
nitup->t_info &= ~INDEX_SIZE_MASK;
nitup->t_info |= GinGetOrigSizePosting(itup);
- } else {
- nitup = (IndexTuple)palloc( MAXALIGN(IndexTupleSize(itup)) );
- memcpy( nitup, itup, IndexTupleSize(itup) );
+ }
+ else
+ {
+ nitup = (IndexTuple) palloc(MAXALIGN(IndexTupleSize(itup)));
+ memcpy(nitup, itup, IndexTupleSize(itup));
}
ItemPointerSet(&nitup->t_tid, BufferGetBlockNumber(buf), InvalidOffsetNumber);
@@ -486,23 +542,25 @@ ginPageGetLinkItup(Buffer buf) {
* Also called from ginxlog, should not use btree
*/
void
-entryFillRoot(GinBtree btree, Buffer root, Buffer lbuf, Buffer rbuf) {
- Page page;
- IndexTuple itup;
+entryFillRoot(GinBtree btree, Buffer root, Buffer lbuf, Buffer rbuf)
+{
+ Page page;
+ IndexTuple itup;
page = BufferGetPage(root);
- itup = ginPageGetLinkItup( lbuf );
- if ( PageAddItem( page, (Item)itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber )
+ itup = ginPageGetLinkItup(lbuf);
+ if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index root page");
- itup = ginPageGetLinkItup( rbuf );
- if ( PageAddItem( page, (Item)itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber )
+ itup = ginPageGetLinkItup(rbuf);
+ if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index root page");
}
void
-prepareEntryScan( GinBtree btree, Relation index, Datum value, GinState *ginstate) {
+prepareEntryScan(GinBtree btree, Relation index, Datum value, GinState *ginstate)
+{
memset(btree, 0, sizeof(GinBtreeData));
btree->isMoveRight = entryIsMoveRight;
@@ -524,4 +582,3 @@ prepareEntryScan( GinBtree btree, Relation index, Datum value, GinState *ginstat
btree->fullScan = FALSE;
btree->isBuild = FALSE;
}
-
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index d40612f3ad..090bbe4f25 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* ginget.c
- * fetch tuples from a GIN scan.
+ * fetch tuples from a GIN scan.
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.2 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.3 2006/10/04 00:29:47 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -18,15 +18,17 @@
#include "utils/memutils.h"
static OffsetNumber
-findItemInPage( Page page, ItemPointer item, OffsetNumber off ) {
+findItemInPage(Page page, ItemPointer item, OffsetNumber off)
+{
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
- int res;
+ int res;
- for(; off<=maxoff; off++) {
- res = compareItemPointers( item, (ItemPointer)GinDataPageGetItem(page, off) );
- Assert( res>= 0 );
+ for (; off <= maxoff; off++)
+ {
+ res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, off));
+ Assert(res >= 0);
- if ( res == 0 )
+ if (res == 0)
return off;
}
@@ -38,24 +40,29 @@ findItemInPage( Page page, ItemPointer item, OffsetNumber off ) {
* Stop* functions unlock buffer (but don't release!)
*/
static void
-startScanEntry( Relation index, GinState *ginstate, GinScanEntry entry, bool firstCall ) {
- if ( entry->master != NULL ) {
+startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry, bool firstCall)
+{
+ if (entry->master != NULL)
+ {
entry->isFinished = entry->master->isFinished;
return;
}
- if ( firstCall ) {
- /* at first call we should find entry, and
- begin scan of posting tree or just store posting list in memory */
+ if (firstCall)
+ {
+ /*
+ * at first call we should find entry, and begin scan of posting tree
+ * or just store posting list in memory
+ */
GinBtreeData btreeEntry;
- GinBtreeStack *stackEntry;
- Page page;
- bool needUnlock = TRUE;
+ GinBtreeStack *stackEntry;
+ Page page;
+ bool needUnlock = TRUE;
- prepareEntryScan( &btreeEntry, index, entry->entry, ginstate );
+ prepareEntryScan(&btreeEntry, index, entry->entry, ginstate);
btreeEntry.searchMode = TRUE;
stackEntry = ginFindLeafPage(&btreeEntry, NULL);
- page = BufferGetPage( stackEntry->buffer );
+ page = BufferGetPage(stackEntry->buffer);
entry->isFinished = TRUE;
entry->buffer = InvalidBuffer;
@@ -65,103 +72,115 @@ startScanEntry( Relation index, GinState *ginstate, GinScanEntry entry, bool fir
entry->reduceResult = FALSE;
entry->predictNumberResult = 0;
- if ( btreeEntry.findItem( &btreeEntry, stackEntry ) ) {
- IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off));
+ if (btreeEntry.findItem(&btreeEntry, stackEntry))
+ {
+ IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off));
- if ( GinIsPostingTree(itup) ) {
+ if (GinIsPostingTree(itup))
+ {
BlockNumber rootPostingTree = GinGetPostingTree(itup);
GinPostingTreeScan *gdi;
- Page page;
+ Page page;
LockBuffer(stackEntry->buffer, GIN_UNLOCK);
- needUnlock = FALSE;
- gdi = prepareScanPostingTree( index, rootPostingTree, TRUE );
+ needUnlock = FALSE;
+ gdi = prepareScanPostingTree(index, rootPostingTree, TRUE);
- entry->buffer = scanBeginPostingTree( gdi );
- IncrBufferRefCount( entry->buffer );
+ entry->buffer = scanBeginPostingTree(gdi);
+ IncrBufferRefCount(entry->buffer);
- page = BufferGetPage( entry->buffer );
- entry->predictNumberResult = gdi->stack->predictNumber * GinPageGetOpaque(page)->maxoff;
+ page = BufferGetPage(entry->buffer);
+ entry->predictNumberResult = gdi->stack->predictNumber * GinPageGetOpaque(page)->maxoff;
- freeGinBtreeStack( gdi->stack );
- pfree( gdi );
+ freeGinBtreeStack(gdi->stack);
+ pfree(gdi);
entry->isFinished = FALSE;
- } else if ( GinGetNPosting(itup) > 0 ) {
+ }
+ else if (GinGetNPosting(itup) > 0)
+ {
entry->nlist = GinGetNPosting(itup);
- entry->list = (ItemPointerData*)palloc( sizeof(ItemPointerData) * entry->nlist );
- memcpy( entry->list, GinGetPosting(itup), sizeof(ItemPointerData) * entry->nlist );
+ entry->list = (ItemPointerData *) palloc(sizeof(ItemPointerData) * entry->nlist);
+ memcpy(entry->list, GinGetPosting(itup), sizeof(ItemPointerData) * entry->nlist);
entry->isFinished = FALSE;
}
}
- if ( needUnlock )
+ if (needUnlock)
LockBuffer(stackEntry->buffer, GIN_UNLOCK);
- freeGinBtreeStack( stackEntry );
- } else if ( entry->buffer != InvalidBuffer ) {
+ freeGinBtreeStack(stackEntry);
+ }
+ else if (entry->buffer != InvalidBuffer)
+ {
/* we should find place were we was stopped */
BlockNumber blkno;
- Page page;
+ Page page;
- LockBuffer( entry->buffer, GIN_SHARE );
+ LockBuffer(entry->buffer, GIN_SHARE);
- if ( !ItemPointerIsValid( &entry->curItem ) )
+ if (!ItemPointerIsValid(&entry->curItem))
/* start position */
return;
- Assert( entry->offset!=InvalidOffsetNumber );
+ Assert(entry->offset != InvalidOffsetNumber);
- page = BufferGetPage( entry->buffer );
+ page = BufferGetPage(entry->buffer);
/* try to find curItem in current buffer */
- if ( (entry->offset=findItemInPage(page , &entry->curItem, entry->offset))!=InvalidOffsetNumber )
+ if ((entry->offset = findItemInPage(page, &entry->curItem, entry->offset)) != InvalidOffsetNumber)
return;
/* walk to right */
- while( (blkno = GinPageGetOpaque( page )->rightlink)!=InvalidBlockNumber ) {
- LockBuffer( entry->buffer, GIN_UNLOCK );
- entry->buffer = ReleaseAndReadBuffer( entry->buffer, index, blkno );
- LockBuffer( entry->buffer, GIN_SHARE );
- page = BufferGetPage( entry->buffer );
-
- if ( (entry->offset=findItemInPage(page , &entry->curItem, FirstOffsetNumber))!=InvalidOffsetNumber )
+ while ((blkno = GinPageGetOpaque(page)->rightlink) != InvalidBlockNumber)
+ {
+ LockBuffer(entry->buffer, GIN_UNLOCK);
+ entry->buffer = ReleaseAndReadBuffer(entry->buffer, index, blkno);
+ LockBuffer(entry->buffer, GIN_SHARE);
+ page = BufferGetPage(entry->buffer);
+
+ if ((entry->offset = findItemInPage(page, &entry->curItem, FirstOffsetNumber)) != InvalidOffsetNumber)
return;
}
- elog(ERROR,"Logic error: lost previously founded ItemId");
+ elog(ERROR, "Logic error: lost previously founded ItemId");
}
}
static void
-stopScanEntry( GinScanEntry entry ) {
- if ( entry->buffer != InvalidBuffer )
- LockBuffer( entry->buffer, GIN_UNLOCK );
+stopScanEntry(GinScanEntry entry)
+{
+ if (entry->buffer != InvalidBuffer)
+ LockBuffer(entry->buffer, GIN_UNLOCK);
}
static void
-startScanKey( Relation index, GinState *ginstate, GinScanKey key ) {
- uint32 i;
-
- for(i=0;i<key->nentries;i++)
- startScanEntry( index, ginstate, key->scanEntry+i, key->firstCall );
-
- if ( key->firstCall ) {
- memset( key->entryRes, TRUE, sizeof(bool) * key->nentries );
+startScanKey(Relation index, GinState *ginstate, GinScanKey key)
+{
+ uint32 i;
+
+ for (i = 0; i < key->nentries; i++)
+ startScanEntry(index, ginstate, key->scanEntry + i, key->firstCall);
+
+ if (key->firstCall)
+ {
+ memset(key->entryRes, TRUE, sizeof(bool) * key->nentries);
key->isFinished = FALSE;
key->firstCall = FALSE;
- if ( GinFuzzySearchLimit > 0 ) {
+ if (GinFuzzySearchLimit > 0)
+ {
/*
- * If all of keys more than treshold we will try to reduce
- * result, we hope (and only hope, for intersection operation of array
- * our supposition isn't true), that total result will not more
- * than minimal predictNumberResult.
+ * If all of keys more than treshold we will try to reduce result,
+ * we hope (and only hope, for intersection operation of array our
+ * supposition isn't true), that total result will not more than
+ * minimal predictNumberResult.
*/
- for(i=0;i<key->nentries;i++)
- if ( key->scanEntry[i].predictNumberResult <= key->nentries * GinFuzzySearchLimit )
- return;
-
- for(i=0;i<key->nentries;i++)
- if ( key->scanEntry[i].predictNumberResult > key->nentries * GinFuzzySearchLimit ) {
+ for (i = 0; i < key->nentries; i++)
+ if (key->scanEntry[i].predictNumberResult <= key->nentries * GinFuzzySearchLimit)
+ return;
+
+ for (i = 0; i < key->nentries; i++)
+ if (key->scanEntry[i].predictNumberResult > key->nentries * GinFuzzySearchLimit)
+ {
key->scanEntry[i].predictNumberResult /= key->nentries;
key->scanEntry[i].reduceResult = TRUE;
}
@@ -170,50 +189,60 @@ startScanKey( Relation index, GinState *ginstate, GinScanKey key ) {
}
static void
-stopScanKey( GinScanKey key ) {
- uint32 i;
+stopScanKey(GinScanKey key)
+{
+ uint32 i;
- for(i=0;i<key->nentries;i++)
- stopScanEntry( key->scanEntry+i );
+ for (i = 0; i < key->nentries; i++)
+ stopScanEntry(key->scanEntry + i);
}
static void
-startScan( IndexScanDesc scan ) {
- uint32 i;
- GinScanOpaque so = (GinScanOpaque) scan->opaque;
+startScan(IndexScanDesc scan)
+{
+ uint32 i;
+ GinScanOpaque so = (GinScanOpaque) scan->opaque;
- for(i=0; i<so->nkeys; i++)
- startScanKey( scan->indexRelation, &so->ginstate, so->keys + i );
+ for (i = 0; i < so->nkeys; i++)
+ startScanKey(scan->indexRelation, &so->ginstate, so->keys + i);
}
static void
-stopScan( IndexScanDesc scan ) {
- uint32 i;
- GinScanOpaque so = (GinScanOpaque) scan->opaque;
+stopScan(IndexScanDesc scan)
+{
+ uint32 i;
+ GinScanOpaque so = (GinScanOpaque) scan->opaque;
- for(i=0; i<so->nkeys; i++)
- stopScanKey( so->keys + i );
+ for (i = 0; i < so->nkeys; i++)
+ stopScanKey(so->keys + i);
}
static void
-entryGetNextItem( Relation index, GinScanEntry entry ) {
- Page page = BufferGetPage( entry->buffer );
+entryGetNextItem(Relation index, GinScanEntry entry)
+{
+ Page page = BufferGetPage(entry->buffer);
entry->offset++;
- if ( entry->offset <= GinPageGetOpaque( page )->maxoff && GinPageGetOpaque( page )->maxoff >= FirstOffsetNumber ) {
- entry->curItem = *(ItemPointerData*)GinDataPageGetItem(page, entry->offset);
- } else {
- BlockNumber blkno = GinPageGetOpaque( page )->rightlink;
-
- LockBuffer( entry->buffer, GIN_UNLOCK );
- if ( blkno == InvalidBlockNumber ) {
- ReleaseBuffer( entry->buffer );
+ if (entry->offset <= GinPageGetOpaque(page)->maxoff && GinPageGetOpaque(page)->maxoff >= FirstOffsetNumber)
+ {
+ entry->curItem = *(ItemPointerData *) GinDataPageGetItem(page, entry->offset);
+ }
+ else
+ {
+ BlockNumber blkno = GinPageGetOpaque(page)->rightlink;
+
+ LockBuffer(entry->buffer, GIN_UNLOCK);
+ if (blkno == InvalidBlockNumber)
+ {
+ ReleaseBuffer(entry->buffer);
entry->buffer = InvalidBuffer;
entry->isFinished = TRUE;
- } else {
- entry->buffer = ReleaseAndReadBuffer( entry->buffer, index, blkno );
- LockBuffer( entry->buffer, GIN_SHARE );
+ }
+ else
+ {
+ entry->buffer = ReleaseAndReadBuffer(entry->buffer, index, blkno);
+ LockBuffer(entry->buffer, GIN_SHARE);
entry->offset = InvalidOffsetNumber;
entryGetNextItem(index, entry);
}
@@ -221,29 +250,37 @@ entryGetNextItem( Relation index, GinScanEntry entry ) {
}
#define gin_rand() (((double) random()) / ((double) MAX_RANDOM_VALUE))
-#define dropItem(e) ( gin_rand() > ((double)GinFuzzySearchLimit)/((double)((e)->predictNumberResult)) )
+#define dropItem(e) ( gin_rand() > ((double)GinFuzzySearchLimit)/((double)((e)->predictNumberResult)) )
/*
- * Sets entry->curItem to new found heap item pointer for one
+ * Sets entry->curItem to new found heap item pointer for one
* entry of one scan key
*/
static bool
-entryGetItem( Relation index, GinScanEntry entry ) {
- if ( entry->master ) {
+entryGetItem(Relation index, GinScanEntry entry)
+{
+ if (entry->master)
+ {
entry->isFinished = entry->master->isFinished;
entry->curItem = entry->master->curItem;
- } else if ( entry->list ) {
+ }
+ else if (entry->list)
+ {
entry->offset++;
- if ( entry->offset <= entry->nlist )
- entry->curItem = entry->list[ entry->offset - 1 ];
- else {
- ItemPointerSet( &entry->curItem, InvalidBlockNumber, InvalidOffsetNumber );
+ if (entry->offset <= entry->nlist)
+ entry->curItem = entry->list[entry->offset - 1];
+ else
+ {
+ ItemPointerSet(&entry->curItem, InvalidBlockNumber, InvalidOffsetNumber);
entry->isFinished = TRUE;
}
- } else {
- do {
+ }
+ else
+ {
+ do
+ {
entryGetNextItem(index, entry);
- } while ( entry->isFinished == FALSE && entry->reduceResult == TRUE && dropItem(entry) );
+ } while (entry->isFinished == FALSE && entry->reduceResult == TRUE && dropItem(entry));
}
return entry->isFinished;
@@ -254,155 +291,180 @@ entryGetItem( Relation index, GinScanEntry entry ) {
* returns isFinished!
*/
static bool
-keyGetItem( Relation index, GinState *ginstate, MemoryContext tempCtx, GinScanKey key ) {
- uint32 i;
- GinScanEntry entry;
- bool res;
- MemoryContext oldCtx;
-
- if ( key->isFinished )
+keyGetItem(Relation index, GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
+{
+ uint32 i;
+ GinScanEntry entry;
+ bool res;
+ MemoryContext oldCtx;
+
+ if (key->isFinished)
return TRUE;
- do {
- /* move forward from previously value and set new curItem,
- which is minimal from entries->curItems */
- ItemPointerSetMax( &key->curItem );
- for(i=0;i<key->nentries;i++) {
- entry = key->scanEntry+i;
-
- if ( key->entryRes[i] ) {
- if ( entry->isFinished == FALSE && entryGetItem(index, entry) == FALSE ) {
- if (compareItemPointers( &entry->curItem, &key->curItem ) < 0)
+ do
+ {
+ /*
+ * move forward from previously value and set new curItem, which is
+ * minimal from entries->curItems
+ */
+ ItemPointerSetMax(&key->curItem);
+ for (i = 0; i < key->nentries; i++)
+ {
+ entry = key->scanEntry + i;
+
+ if (key->entryRes[i])
+ {
+ if (entry->isFinished == FALSE && entryGetItem(index, entry) == FALSE)
+ {
+ if (compareItemPointers(&entry->curItem, &key->curItem) < 0)
key->curItem = entry->curItem;
- } else
+ }
+ else
key->entryRes[i] = FALSE;
- } else if ( entry->isFinished == FALSE ) {
- if (compareItemPointers( &entry->curItem, &key->curItem ) < 0)
+ }
+ else if (entry->isFinished == FALSE)
+ {
+ if (compareItemPointers(&entry->curItem, &key->curItem) < 0)
key->curItem = entry->curItem;
- }
+ }
}
- if ( ItemPointerIsMax( &key->curItem ) ) {
+ if (ItemPointerIsMax(&key->curItem))
+ {
/* all entries are finished */
key->isFinished = TRUE;
return TRUE;
}
-
- if ( key->nentries == 1 ) {
+
+ if (key->nentries == 1)
+ {
/* we can do not call consistentFn !! */
key->entryRes[0] = TRUE;
return FALSE;
}
/* setting up array for consistentFn */
- for(i=0;i<key->nentries;i++) {
- entry = key->scanEntry+i;
-
- if ( entry->isFinished == FALSE && compareItemPointers( &entry->curItem, &key->curItem )==0 )
+ for (i = 0; i < key->nentries; i++)
+ {
+ entry = key->scanEntry + i;
+
+ if (entry->isFinished == FALSE && compareItemPointers(&entry->curItem, &key->curItem) == 0)
key->entryRes[i] = TRUE;
else
key->entryRes[i] = FALSE;
}
oldCtx = MemoryContextSwitchTo(tempCtx);
- res = DatumGetBool( FunctionCall3(
- &ginstate->consistentFn,
- PointerGetDatum( key->entryRes ),
- UInt16GetDatum( key->strategy ),
- key->query
- ));
+ res = DatumGetBool(FunctionCall3(
+ &ginstate->consistentFn,
+ PointerGetDatum(key->entryRes),
+ UInt16GetDatum(key->strategy),
+ key->query
+ ));
MemoryContextSwitchTo(oldCtx);
MemoryContextReset(tempCtx);
- } while( !res );
-
+ } while (!res);
+
return FALSE;
}
/*
- * Get heap item pointer from scan
- * returns true if found
+ * Get heap item pointer from scan
+ * returns true if found
*/
static bool
-scanGetItem( IndexScanDesc scan, ItemPointerData *item ) {
- uint32 i;
- GinScanOpaque so = (GinScanOpaque) scan->opaque;
-
- ItemPointerSetMin( item );
- for(i=0;i<so->nkeys;i++) {
- GinScanKey key = so->keys+i;
-
- if ( keyGetItem( scan->indexRelation, &so->ginstate, so->tempCtx, key )==FALSE ) {
- if ( compareItemPointers( item, &key->curItem ) < 0 )
+scanGetItem(IndexScanDesc scan, ItemPointerData *item)
+{
+ uint32 i;
+ GinScanOpaque so = (GinScanOpaque) scan->opaque;
+
+ ItemPointerSetMin(item);
+ for (i = 0; i < so->nkeys; i++)
+ {
+ GinScanKey key = so->keys + i;
+
+ if (keyGetItem(scan->indexRelation, &so->ginstate, so->tempCtx, key) == FALSE)
+ {
+ if (compareItemPointers(item, &key->curItem) < 0)
*item = key->curItem;
- } else
- return FALSE; /* finshed one of keys */
+ }
+ else
+ return FALSE; /* finshed one of keys */
}
-
- for(i=1;i<=so->nkeys;i++) {
- GinScanKey key = so->keys+i-1;
- for(;;) {
- int cmp = compareItemPointers( item, &key->curItem );
+ for (i = 1; i <= so->nkeys; i++)
+ {
+ GinScanKey key = so->keys + i - 1;
+
+ for (;;)
+ {
+ int cmp = compareItemPointers(item, &key->curItem);
- if ( cmp == 0 )
+ if (cmp == 0)
break;
- else if ( cmp > 0 ) {
- if ( keyGetItem( scan->indexRelation, &so->ginstate, so->tempCtx, key )==TRUE )
- return FALSE; /* finshed one of keys */
- } else { /* returns to begin */
+ else if (cmp > 0)
+ {
+ if (keyGetItem(scan->indexRelation, &so->ginstate, so->tempCtx, key) == TRUE)
+ return FALSE; /* finshed one of keys */
+ }
+ else
+ { /* returns to begin */
*item = key->curItem;
- i=0;
+ i = 0;
break;
}
}
}
- return TRUE;
+ return TRUE;
}
#define GinIsNewKey(s) ( ((GinScanOpaque) scan->opaque)->keys == NULL )
-Datum
-gingetmulti(PG_FUNCTION_ARGS) {
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+Datum
+gingetmulti(PG_FUNCTION_ARGS)
+{
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
- int32 max_tids = PG_GETARG_INT32(2);
- int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3);
+ int32 max_tids = PG_GETARG_INT32(2);
+ int32 *returned_tids = (int32 *) PG_GETARG_POINTER(3);
- if ( GinIsNewKey(scan) )
- newScanKey( scan );
+ if (GinIsNewKey(scan))
+ newScanKey(scan);
- startScan( scan );
+ startScan(scan);
*returned_tids = 0;
- do {
- if ( scanGetItem( scan, tids + *returned_tids ) )
+ do
+ {
+ if (scanGetItem(scan, tids + *returned_tids))
(*returned_tids)++;
else
break;
- } while ( *returned_tids < max_tids );
+ } while (*returned_tids < max_tids);
- stopScan( scan );
+ stopScan(scan);
PG_RETURN_BOOL(*returned_tids == max_tids);
}
Datum
-gingettuple(PG_FUNCTION_ARGS) {
+gingettuple(PG_FUNCTION_ARGS)
+{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
- bool res;
+ bool res;
- if ( dir != ForwardScanDirection )
+ if (dir != ForwardScanDirection)
elog(ERROR, "Gin doesn't support other scan directions than forward");
-
- if ( GinIsNewKey(scan) )
- newScanKey( scan );
- startScan( scan );
+ if (GinIsNewKey(scan))
+ newScanKey(scan);
+
+ startScan(scan);
res = scanGetItem(scan, &scan->xs_ctup.t_self);
- stopScan( scan );
+ stopScan(scan);
PG_RETURN_BOOL(res);
}
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 60107a5770..e4f87e720d 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* gininsert.c
- * insert routines for the postgres inverted index access method.
+ * insert routines for the postgres inverted index access method.
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/gininsert.c,v 1.4 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/gininsert.c,v 1.5 2006/10/04 00:29:47 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -19,12 +19,13 @@
#include "miscadmin.h"
#include "utils/memutils.h"
-typedef struct {
- GinState ginstate;
- double indtuples;
- MemoryContext tmpCtx;
- MemoryContext funcCtx;
- BuildAccumulator accum;
+typedef struct
+{
+ GinState ginstate;
+ double indtuples;
+ MemoryContext tmpCtx;
+ MemoryContext funcCtx;
+ BuildAccumulator accum;
} GinBuildState;
/*
@@ -32,24 +33,26 @@ typedef struct {
* suppose that items[] fits to page
*/
static BlockNumber
-createPostingTree( Relation index, ItemPointerData *items, uint32 nitems ) {
+createPostingTree(Relation index, ItemPointerData *items, uint32 nitems)
+{
BlockNumber blkno;
- Buffer buffer = GinNewBuffer(index);
- Page page;
+ Buffer buffer = GinNewBuffer(index);
+ Page page;
START_CRIT_SECTION();
- GinInitBuffer( buffer, GIN_DATA|GIN_LEAF );
+ GinInitBuffer(buffer, GIN_DATA | GIN_LEAF);
page = BufferGetPage(buffer);
blkno = BufferGetBlockNumber(buffer);
- memcpy( GinDataPageGetData(page), items, sizeof(ItemPointerData) * nitems );
+ memcpy(GinDataPageGetData(page), items, sizeof(ItemPointerData) * nitems);
GinPageGetOpaque(page)->maxoff = nitems;
- if (!index->rd_istemp) {
- XLogRecPtr recptr;
+ if (!index->rd_istemp)
+ {
+ XLogRecPtr recptr;
XLogRecData rdata[2];
- ginxlogCreatePostingTree data;
+ ginxlogCreatePostingTree data;
data.node = index->rd_node;
data.blkno = blkno;
@@ -71,7 +74,7 @@ createPostingTree( Relation index, ItemPointerData *items, uint32 nitems ) {
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
- }
+ }
MarkBufferDirty(buffer);
UnlockReleaseBuffer(buffer);
@@ -89,21 +92,25 @@ createPostingTree( Relation index, ItemPointerData *items, uint32 nitems ) {
* GinFormTuple().
*/
static IndexTuple
-addItemPointersToTuple(Relation index, GinState *ginstate, GinBtreeStack *stack,
- IndexTuple old, ItemPointerData *items, uint32 nitem, bool isBuild) {
- bool isnull;
- Datum key = index_getattr(old, FirstOffsetNumber, ginstate->tupdesc, &isnull);
- IndexTuple res = GinFormTuple(ginstate, key, NULL, nitem + GinGetNPosting(old));
-
- if ( res ) {
+addItemPointersToTuple(Relation index, GinState *ginstate, GinBtreeStack *stack,
+ IndexTuple old, ItemPointerData *items, uint32 nitem, bool isBuild)
+{
+ bool isnull;
+ Datum key = index_getattr(old, FirstOffsetNumber, ginstate->tupdesc, &isnull);
+ IndexTuple res = GinFormTuple(ginstate, key, NULL, nitem + GinGetNPosting(old));
+
+ if (res)
+ {
/* good, small enough */
- MergeItemPointers( GinGetPosting(res),
- GinGetPosting(old), GinGetNPosting(old),
- items, nitem
- );
-
+ MergeItemPointers(GinGetPosting(res),
+ GinGetPosting(old), GinGetNPosting(old),
+ items, nitem
+ );
+
GinSetNPosting(res, nitem + GinGetNPosting(old));
- } else {
+ }
+ else
+ {
BlockNumber postingRoot;
GinPostingTreeScan *gdi;
@@ -112,7 +119,7 @@ addItemPointersToTuple(Relation index, GinState *ginstate, GinBtreeStack *stack,
postingRoot = createPostingTree(index, GinGetPosting(old), GinGetNPosting(old));
GinSetPostingTree(res, postingRoot);
- gdi = prepareScanPostingTree(index, postingRoot, FALSE);
+ gdi = prepareScanPostingTree(index, postingRoot, FALSE);
gdi->btree.isBuild = isBuild;
insertItemPointer(gdi, items, nitem);
@@ -124,36 +131,39 @@ addItemPointersToTuple(Relation index, GinState *ginstate, GinBtreeStack *stack,
}
/*
- * Inserts only one entry to the index, but it can adds more that 1
- * ItemPointer.
+ * Inserts only one entry to the index, but it can adds more that 1
+ * ItemPointer.
*/
static void
-ginEntryInsert( Relation index, GinState *ginstate, Datum value, ItemPointerData *items, uint32 nitem, bool isBuild) {
- GinBtreeData btree;
+ginEntryInsert(Relation index, GinState *ginstate, Datum value, ItemPointerData *items, uint32 nitem, bool isBuild)
+{
+ GinBtreeData btree;
GinBtreeStack *stack;
- IndexTuple itup;
- Page page;
+ IndexTuple itup;
+ Page page;
- prepareEntryScan( &btree, index, value, ginstate );
+ prepareEntryScan(&btree, index, value, ginstate);
stack = ginFindLeafPage(&btree, NULL);
- page = BufferGetPage( stack->buffer );
+ page = BufferGetPage(stack->buffer);
- if ( btree.findItem( &btree, stack ) ) {
+ if (btree.findItem(&btree, stack))
+ {
/* found entry */
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stack->off));
- if ( GinIsPostingTree(itup) ) {
+ if (GinIsPostingTree(itup))
+ {
/* lock root of posting tree */
GinPostingTreeScan *gdi;
- BlockNumber rootPostingTree = GinGetPostingTree(itup);
+ BlockNumber rootPostingTree = GinGetPostingTree(itup);
/* release all stack */
LockBuffer(stack->buffer, GIN_UNLOCK);
- freeGinBtreeStack( stack );
+ freeGinBtreeStack(stack);
/* insert into posting tree */
- gdi = prepareScanPostingTree( index, rootPostingTree, FALSE );
+ gdi = prepareScanPostingTree(index, rootPostingTree, FALSE);
gdi->btree.isBuild = isBuild;
insertItemPointer(gdi, items, nitem);
@@ -163,23 +173,26 @@ ginEntryInsert( Relation index, GinState *ginstate, Datum value, ItemPointerData
itup = addItemPointersToTuple(index, ginstate, stack, itup, items, nitem, isBuild);
btree.isDelete = TRUE;
- } else {
+ }
+ else
+ {
/* We suppose, that tuple can store at list one itempointer */
- itup = GinFormTuple( ginstate, value, items, 1);
- if ( itup==NULL || IndexTupleSize(itup) >= GinMaxItemSize )
+ itup = GinFormTuple(ginstate, value, items, 1);
+ if (itup == NULL || IndexTupleSize(itup) >= GinMaxItemSize)
elog(ERROR, "huge tuple");
- if ( nitem>1 ) {
+ if (nitem > 1)
+ {
IndexTuple previtup = itup;
- itup = addItemPointersToTuple(index, ginstate, stack, previtup, items+1, nitem-1, isBuild);
+ itup = addItemPointersToTuple(index, ginstate, stack, previtup, items + 1, nitem - 1, isBuild);
pfree(previtup);
}
}
btree.entry = itup;
ginInsertValue(&btree, stack);
- pfree( itup );
+ pfree(itup);
}
/*
@@ -187,48 +200,53 @@ ginEntryInsert( Relation index, GinState *ginstate, Datum value, ItemPointerData
* Function isnt use during normal insert
*/
static uint32
-ginHeapTupleBulkInsert(GinBuildState *buildstate, Datum value, ItemPointer heapptr) {
- Datum *entries;
- uint32 nentries;
+ginHeapTupleBulkInsert(GinBuildState *buildstate, Datum value, ItemPointer heapptr)
+{
+ Datum *entries;
+ uint32 nentries;
MemoryContext oldCtx;
oldCtx = MemoryContextSwitchTo(buildstate->funcCtx);
- entries = extractEntriesSU( buildstate->accum.ginstate, value, &nentries);
+ entries = extractEntriesSU(buildstate->accum.ginstate, value, &nentries);
MemoryContextSwitchTo(oldCtx);
- if ( nentries==0 )
+ if (nentries == 0)
/* nothing to insert */
return 0;
- ginInsertRecordBA( &buildstate->accum, heapptr, entries, nentries);
+ ginInsertRecordBA(&buildstate->accum, heapptr, entries, nentries);
MemoryContextReset(buildstate->funcCtx);
return nentries;
}
-static void
+static void
ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
- bool *isnull, bool tupleIsAlive, void *state) {
+ bool *isnull, bool tupleIsAlive, void *state)
+{
- GinBuildState *buildstate = (GinBuildState*)state;
+ GinBuildState *buildstate = (GinBuildState *) state;
MemoryContext oldCtx;
- if ( *isnull )
+ if (*isnull)
return;
oldCtx = MemoryContextSwitchTo(buildstate->tmpCtx);
buildstate->indtuples += ginHeapTupleBulkInsert(buildstate, *values, &htup->t_self);
- /* we use only half maintenance_work_mem, because there is some leaks
- during insertion and extract values */
- if ( buildstate->accum.allocatedMemory >= maintenance_work_mem*1024L/2L ) {
- ItemPointerData *list;
- Datum entry;
- uint32 nlist;
+ /*
+ * we use only half maintenance_work_mem, because there is some leaks
+ * during insertion and extract values
+ */
+ if (buildstate->accum.allocatedMemory >= maintenance_work_mem * 1024L / 2L)
+ {
+ ItemPointerData *list;
+ Datum entry;
+ uint32 nlist;
- while( (list=ginGetEntry(&buildstate->accum, &entry, &nlist)) != NULL )
+ while ((list = ginGetEntry(&buildstate->accum, &entry, &nlist)) != NULL)
ginEntryInsert(index, &buildstate->ginstate, entry, list, nlist, TRUE);
MemoryContextReset(buildstate->tmpCtx);
@@ -239,22 +257,23 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
}
Datum
-ginbuild(PG_FUNCTION_ARGS) {
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
+ginbuild(PG_FUNCTION_ARGS)
+{
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
- double reltuples;
- GinBuildState buildstate;
+ double reltuples;
+ GinBuildState buildstate;
Buffer buffer;
- ItemPointerData *list;
- Datum entry;
- uint32 nlist;
+ ItemPointerData *list;
+ Datum entry;
+ uint32 nlist;
MemoryContext oldCtx;
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
- RelationGetRelationName(index));
+ RelationGetRelationName(index));
initGinState(&buildstate.ginstate, index);
@@ -262,10 +281,11 @@ ginbuild(PG_FUNCTION_ARGS) {
buffer = GinNewBuffer(index);
START_CRIT_SECTION();
GinInitBuffer(buffer, GIN_LEAF);
- if (!index->rd_istemp) {
- XLogRecPtr recptr;
+ if (!index->rd_istemp)
+ {
+ XLogRecPtr recptr;
XLogRecData rdata;
- Page page;
+ Page page;
rdata.buffer = InvalidBuffer;
rdata.data = (char *) &(index->rd_node);
@@ -279,7 +299,7 @@ ginbuild(PG_FUNCTION_ARGS) {
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
- }
+ }
MarkBufferDirty(buffer);
UnlockReleaseBuffer(buffer);
@@ -293,26 +313,26 @@ ginbuild(PG_FUNCTION_ARGS) {
* inserted into the index
*/
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
- "Gin build temporary context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "Gin build temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
buildstate.funcCtx = AllocSetContextCreate(buildstate.tmpCtx,
- "Gin build temporary context for user-defined function",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "Gin build temporary context for user-defined function",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
buildstate.accum.ginstate = &buildstate.ginstate;
- ginInitBA( &buildstate.accum );
+ ginInitBA(&buildstate.accum);
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo,
- ginBuildCallback, (void *) &buildstate);
+ ginBuildCallback, (void *) &buildstate);
oldCtx = MemoryContextSwitchTo(buildstate.tmpCtx);
- while( (list=ginGetEntry(&buildstate.accum, &entry, &nlist)) != NULL )
+ while ((list = ginGetEntry(&buildstate.accum, &entry, &nlist)) != NULL)
ginEntryInsert(index, &buildstate.ginstate, entry, list, nlist, TRUE);
MemoryContextSwitchTo(oldCtx);
@@ -333,55 +353,58 @@ ginbuild(PG_FUNCTION_ARGS) {
* Inserts value during normal insertion
*/
static uint32
-ginHeapTupleInsert( Relation index, GinState *ginstate, Datum value, ItemPointer item) {
- Datum *entries;
- uint32 i,nentries;
+ginHeapTupleInsert(Relation index, GinState *ginstate, Datum value, ItemPointer item)
+{
+ Datum *entries;
+ uint32 i,
+ nentries;
- entries = extractEntriesSU( ginstate, value, &nentries);
+ entries = extractEntriesSU(ginstate, value, &nentries);
- if ( nentries==0 )
+ if (nentries == 0)
/* nothing to insert */
return 0;
- for(i=0;i<nentries;i++)
+ for (i = 0; i < nentries; i++)
ginEntryInsert(index, ginstate, entries[i], item, 1, FALSE);
return nentries;
}
Datum
-gininsert(PG_FUNCTION_ARGS) {
- Relation index = (Relation) PG_GETARG_POINTER(0);
- Datum *values = (Datum *) PG_GETARG_POINTER(1);
- bool *isnull = (bool *) PG_GETARG_POINTER(2);
+gininsert(PG_FUNCTION_ARGS)
+{
+ Relation index = (Relation) PG_GETARG_POINTER(0);
+ Datum *values = (Datum *) PG_GETARG_POINTER(1);
+ bool *isnull = (bool *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
- bool checkUnique = PG_GETARG_BOOL(5);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ bool checkUnique = PG_GETARG_BOOL(5);
#endif
- GinState ginstate;
+ GinState ginstate;
MemoryContext oldCtx;
MemoryContext insertCtx;
- uint32 res;
+ uint32 res;
- if ( *isnull )
+ if (*isnull)
PG_RETURN_BOOL(false);
insertCtx = AllocSetContextCreate(CurrentMemoryContext,
- "Gin insert temporary context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "Gin insert temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
oldCtx = MemoryContextSwitchTo(insertCtx);
initGinState(&ginstate, index);
- res = ginHeapTupleInsert(index, &ginstate, *values, ht_ctid);
+ res = ginHeapTupleInsert(index, &ginstate, *values, ht_ctid);
MemoryContextSwitchTo(oldCtx);
MemoryContextDelete(insertCtx);
- PG_RETURN_BOOL(res>0);
+ PG_RETURN_BOOL(res > 0);
}
-
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index 2093f1ce8a..b69f409e1c 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* ginscan.c
- * routines to manage scans inverted index relations
+ * routines to manage scans inverted index relations
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.5 2006/09/14 11:26:49 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.6 2006/10/04 00:29:48 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -19,11 +19,12 @@
#include "utils/memutils.h"
-Datum
-ginbeginscan(PG_FUNCTION_ARGS) {
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- int keysz = PG_GETARG_INT32(1);
- ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
+Datum
+ginbeginscan(PG_FUNCTION_ARGS)
+{
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ int keysz = PG_GETARG_INT32(1);
+ ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
IndexScanDesc scan;
scan = RelationGetIndexScan(rel, keysz, scankey);
@@ -32,22 +33,25 @@ ginbeginscan(PG_FUNCTION_ARGS) {
}
static void
-fillScanKey( GinState *ginstate, GinScanKey key, Datum query,
- Datum *entryValues, uint32 nEntryValues, StrategyNumber strategy ) {
- uint32 i,j;
+fillScanKey(GinState *ginstate, GinScanKey key, Datum query,
+ Datum *entryValues, uint32 nEntryValues, StrategyNumber strategy)
+{
+ uint32 i,
+ j;
key->nentries = nEntryValues;
- key->entryRes = (bool*)palloc0( sizeof(bool) * nEntryValues );
- key->scanEntry = (GinScanEntry) palloc( sizeof(GinScanEntryData) * nEntryValues );
+ key->entryRes = (bool *) palloc0(sizeof(bool) * nEntryValues);
+ key->scanEntry = (GinScanEntry) palloc(sizeof(GinScanEntryData) * nEntryValues);
key->strategy = strategy;
key->query = query;
- key->firstCall= TRUE;
- ItemPointerSet( &(key->curItem), InvalidBlockNumber, InvalidOffsetNumber );
+ key->firstCall = TRUE;
+ ItemPointerSet(&(key->curItem), InvalidBlockNumber, InvalidOffsetNumber);
- for(i=0; i<nEntryValues; i++) {
+ for (i = 0; i < nEntryValues; i++)
+ {
key->scanEntry[i].pval = key->entryRes + i;
key->scanEntry[i].entry = entryValues[i];
- ItemPointerSet( &(key->scanEntry[i].curItem), InvalidBlockNumber, InvalidOffsetNumber );
+ ItemPointerSet(&(key->scanEntry[i].curItem), InvalidBlockNumber, InvalidOffsetNumber);
key->scanEntry[i].offset = InvalidOffsetNumber;
key->scanEntry[i].buffer = InvalidBuffer;
key->scanEntry[i].list = NULL;
@@ -55,8 +59,9 @@ fillScanKey( GinState *ginstate, GinScanKey key, Datum query,
/* link to the equals entry in current scan key */
key->scanEntry[i].master = NULL;
- for( j=0; j<i; j++)
- if ( compareEntries( ginstate, entryValues[i], entryValues[j] ) == 0 ) {
+ for (j = 0; j < i; j++)
+ if (compareEntries(ginstate, entryValues[i], entryValues[j]) == 0)
+ {
key->scanEntry[i].master = key->scanEntry + j;
break;
}
@@ -66,23 +71,27 @@ fillScanKey( GinState *ginstate, GinScanKey key, Datum query,
#ifdef NOT_USED
static void
-resetScanKeys(GinScanKey keys, uint32 nkeys) {
- uint32 i, j;
+resetScanKeys(GinScanKey keys, uint32 nkeys)
+{
+ uint32 i,
+ j;
- if ( keys == NULL )
+ if (keys == NULL)
return;
- for(i=0;i<nkeys;i++) {
- GinScanKey key = keys + i;
+ for (i = 0; i < nkeys; i++)
+ {
+ GinScanKey key = keys + i;
key->firstCall = TRUE;
- ItemPointerSet( &(key->curItem), InvalidBlockNumber, InvalidOffsetNumber );
+ ItemPointerSet(&(key->curItem), InvalidBlockNumber, InvalidOffsetNumber);
- for(j=0;j<key->nentries;j++) {
- if ( key->scanEntry[j].buffer != InvalidBuffer )
- ReleaseBuffer( key->scanEntry[i].buffer );
+ for (j = 0; j < key->nentries; j++)
+ {
+ if (key->scanEntry[j].buffer != InvalidBuffer)
+ ReleaseBuffer(key->scanEntry[i].buffer);
- ItemPointerSet( &(key->scanEntry[j].curItem), InvalidBlockNumber, InvalidOffsetNumber );
+ ItemPointerSet(&(key->scanEntry[j].curItem), InvalidBlockNumber, InvalidOffsetNumber);
key->scanEntry[j].offset = InvalidOffsetNumber;
key->scanEntry[j].buffer = InvalidBuffer;
key->scanEntry[j].list = NULL;
@@ -90,111 +99,121 @@ resetScanKeys(GinScanKey keys, uint32 nkeys) {
}
}
}
-
#endif
static void
-freeScanKeys(GinScanKey keys, uint32 nkeys, bool removeRes) {
- uint32 i, j;
+freeScanKeys(GinScanKey keys, uint32 nkeys, bool removeRes)
+{
+ uint32 i,
+ j;
- if ( keys == NULL )
+ if (keys == NULL)
return;
- for(i=0;i<nkeys;i++) {
- GinScanKey key = keys + i;
+ for (i = 0; i < nkeys; i++)
+ {
+ GinScanKey key = keys + i;
- for(j=0;j<key->nentries;j++) {
- if ( key->scanEntry[j].buffer != InvalidBuffer )
- ReleaseBuffer( key->scanEntry[j].buffer );
- if ( removeRes && key->scanEntry[j].list )
+ for (j = 0; j < key->nentries; j++)
+ {
+ if (key->scanEntry[j].buffer != InvalidBuffer)
+ ReleaseBuffer(key->scanEntry[j].buffer);
+ if (removeRes && key->scanEntry[j].list)
pfree(key->scanEntry[j].list);
}
- if ( removeRes )
+ if (removeRes)
pfree(key->entryRes);
pfree(key->scanEntry);
}
-
+
pfree(keys);
}
void
-newScanKey( IndexScanDesc scan ) {
- ScanKey scankey = scan->keyData;
+newScanKey(IndexScanDesc scan)
+{
+ ScanKey scankey = scan->keyData;
GinScanOpaque so = (GinScanOpaque) scan->opaque;
- int i;
- uint32 nkeys = 0;
+ int i;
+ uint32 nkeys = 0;
- so->keys = (GinScanKey) palloc( scan->numberOfKeys * sizeof(GinScanKeyData) );
+ so->keys = (GinScanKey) palloc(scan->numberOfKeys * sizeof(GinScanKeyData));
if (scan->numberOfKeys < 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("GIN indexes do not support whole-index scans")));
+ errmsg("GIN indexes do not support whole-index scans")));
- for(i=0; i<scan->numberOfKeys; i++) {
- Datum* entryValues;
- uint32 nEntryValues;
+ for (i = 0; i < scan->numberOfKeys; i++)
+ {
+ Datum *entryValues;
+ uint32 nEntryValues;
- if ( scankey[i].sk_flags & SK_ISNULL )
+ if (scankey[i].sk_flags & SK_ISNULL)
elog(ERROR, "Gin doesn't support NULL as scan key");
- Assert( scankey[i].sk_attno == 1 );
-
- entryValues = (Datum*)DatumGetPointer(
- FunctionCall3(
- &so->ginstate.extractQueryFn,
- scankey[i].sk_argument,
- PointerGetDatum( &nEntryValues ),
- UInt16GetDatum(scankey[i].sk_strategy)
- )
- );
- if ( entryValues==NULL || nEntryValues == 0 )
+ Assert(scankey[i].sk_attno == 1);
+
+ entryValues = (Datum *) DatumGetPointer(
+ FunctionCall3(
+ &so->ginstate.extractQueryFn,
+ scankey[i].sk_argument,
+ PointerGetDatum(&nEntryValues),
+ UInt16GetDatum(scankey[i].sk_strategy)
+ )
+ );
+ if (entryValues == NULL || nEntryValues == 0)
/* full scan... */
continue;
- fillScanKey( &so->ginstate, &(so->keys[nkeys]), scankey[i].sk_argument,
- entryValues, nEntryValues, scankey[i].sk_strategy );
+ fillScanKey(&so->ginstate, &(so->keys[nkeys]), scankey[i].sk_argument,
+ entryValues, nEntryValues, scankey[i].sk_strategy);
nkeys++;
}
so->nkeys = nkeys;
- if ( so->nkeys == 0 )
+ if (so->nkeys == 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("GIN index doesn't support search with void query")));
+ errmsg("GIN index doesn't support search with void query")));
pgstat_count_index_scan(&scan->xs_pgstat_info);
}
Datum
-ginrescan(PG_FUNCTION_ARGS) {
+ginrescan(PG_FUNCTION_ARGS)
+{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1);
- GinScanOpaque so;
+ ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1);
+ GinScanOpaque so;
so = (GinScanOpaque) scan->opaque;
- if ( so == NULL ) {
+ if (so == NULL)
+ {
/* if called from ginbeginscan */
- so = (GinScanOpaque)palloc( sizeof(GinScanOpaqueData) );
+ so = (GinScanOpaque) palloc(sizeof(GinScanOpaqueData));
so->tempCtx = AllocSetContextCreate(CurrentMemoryContext,
- "Gin scan temporary context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "Gin scan temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
initGinState(&so->ginstate, scan->indexRelation);
scan->opaque = so;
- } else {
+ }
+ else
+ {
freeScanKeys(so->keys, so->nkeys, TRUE);
freeScanKeys(so->markPos, so->nkeys, FALSE);
}
- so->markPos=so->keys=NULL;
+ so->markPos = so->keys = NULL;
- if ( scankey && scan->numberOfKeys > 0 ) {
+ if (scankey && scan->numberOfKeys > 0)
+ {
memmove(scan->keyData, scankey,
- scan->numberOfKeys * sizeof(ScanKeyData));
+ scan->numberOfKeys * sizeof(ScanKeyData));
}
PG_RETURN_VOID();
@@ -202,13 +221,15 @@ ginrescan(PG_FUNCTION_ARGS) {
Datum
-ginendscan(PG_FUNCTION_ARGS) {
+ginendscan(PG_FUNCTION_ARGS)
+{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- GinScanOpaque so = (GinScanOpaque) scan->opaque;
+ GinScanOpaque so = (GinScanOpaque) scan->opaque;
- if ( so != NULL ) {
- freeScanKeys(so->keys, so->nkeys, TRUE);
- freeScanKeys(so->markPos, so->nkeys, FALSE);
+ if (so != NULL)
+ {
+ freeScanKeys(so->keys, so->nkeys, TRUE);
+ freeScanKeys(so->markPos, so->nkeys, FALSE);
MemoryContextDelete(so->tempCtx);
@@ -219,22 +240,28 @@ ginendscan(PG_FUNCTION_ARGS) {
}
static GinScanKey
-copyScanKeys( GinScanKey keys, uint32 nkeys ) {
+copyScanKeys(GinScanKey keys, uint32 nkeys)
+{
GinScanKey newkeys;
- uint32 i, j;
+ uint32 i,
+ j;
+
+ newkeys = (GinScanKey) palloc(sizeof(GinScanKeyData) * nkeys);
+ memcpy(newkeys, keys, sizeof(GinScanKeyData) * nkeys);
- newkeys = (GinScanKey)palloc( sizeof(GinScanKeyData) * nkeys );
- memcpy( newkeys, keys, sizeof(GinScanKeyData) * nkeys );
+ for (i = 0; i < nkeys; i++)
+ {
+ newkeys[i].scanEntry = (GinScanEntry) palloc(sizeof(GinScanEntryData) * keys[i].nentries);
+ memcpy(newkeys[i].scanEntry, keys[i].scanEntry, sizeof(GinScanEntryData) * keys[i].nentries);
- for(i=0;i<nkeys;i++) {
- newkeys[i].scanEntry = (GinScanEntry)palloc(sizeof(GinScanEntryData) * keys[i].nentries );
- memcpy( newkeys[i].scanEntry, keys[i].scanEntry, sizeof(GinScanEntryData) * keys[i].nentries );
+ for (j = 0; j < keys[i].nentries; j++)
+ {
+ if (keys[i].scanEntry[j].buffer != InvalidBuffer)
+ IncrBufferRefCount(keys[i].scanEntry[j].buffer);
+ if (keys[i].scanEntry[j].master)
+ {
+ int masterN = keys[i].scanEntry[j].master - keys[i].scanEntry;
- for(j=0;j<keys[i].nentries; j++) {
- if ( keys[i].scanEntry[j].buffer != InvalidBuffer )
- IncrBufferRefCount( keys[i].scanEntry[j].buffer );
- if ( keys[i].scanEntry[j].master ) {
- int masterN = keys[i].scanEntry[j].master - keys[i].scanEntry;
newkeys[i].scanEntry[j].master = newkeys[i].scanEntry + masterN;
}
}
@@ -243,24 +270,26 @@ copyScanKeys( GinScanKey keys, uint32 nkeys ) {
return newkeys;
}
-Datum
-ginmarkpos(PG_FUNCTION_ARGS) {
+Datum
+ginmarkpos(PG_FUNCTION_ARGS)
+{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- GinScanOpaque so = (GinScanOpaque) scan->opaque;
+ GinScanOpaque so = (GinScanOpaque) scan->opaque;
freeScanKeys(so->markPos, so->nkeys, FALSE);
- so->markPos = copyScanKeys( so->keys, so->nkeys );
+ so->markPos = copyScanKeys(so->keys, so->nkeys);
PG_RETURN_VOID();
}
-Datum
-ginrestrpos(PG_FUNCTION_ARGS) {
+Datum
+ginrestrpos(PG_FUNCTION_ARGS)
+{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- GinScanOpaque so = (GinScanOpaque) scan->opaque;
+ GinScanOpaque so = (GinScanOpaque) scan->opaque;
freeScanKeys(so->keys, so->nkeys, FALSE);
- so->keys = copyScanKeys( so->markPos, so->nkeys );
+ so->keys = copyScanKeys(so->markPos, so->nkeys);
PG_RETURN_VOID();
}
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 17eca3d239..e64137a106 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* ginutil.c
- * utilities routines for the postgres inverted index access method.
+ * utilities routines for the postgres inverted index access method.
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.6 2006/09/05 18:25:10 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.7 2006/10/04 00:29:48 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -19,26 +19,27 @@
#include "access/reloptions.h"
#include "storage/freespace.h"
-void
-initGinState( GinState *state, Relation index ) {
- if ( index->rd_att->natts != 1 )
- elog(ERROR, "numberOfAttributes %d != 1",
- index->rd_att->natts);
-
+void
+initGinState(GinState *state, Relation index)
+{
+ if (index->rd_att->natts != 1)
+ elog(ERROR, "numberOfAttributes %d != 1",
+ index->rd_att->natts);
+
state->tupdesc = index->rd_att;
fmgr_info_copy(&(state->compareFn),
- index_getprocinfo(index, 1, GIN_COMPARE_PROC),
- CurrentMemoryContext);
+ index_getprocinfo(index, 1, GIN_COMPARE_PROC),
+ CurrentMemoryContext);
fmgr_info_copy(&(state->extractValueFn),
- index_getprocinfo(index, 1, GIN_EXTRACTVALUE_PROC),
- CurrentMemoryContext);
+ index_getprocinfo(index, 1, GIN_EXTRACTVALUE_PROC),
+ CurrentMemoryContext);
fmgr_info_copy(&(state->extractQueryFn),
- index_getprocinfo(index, 1, GIN_EXTRACTQUERY_PROC),
- CurrentMemoryContext);
+ index_getprocinfo(index, 1, GIN_EXTRACTQUERY_PROC),
+ CurrentMemoryContext);
fmgr_info_copy(&(state->consistentFn),
- index_getprocinfo(index, 1, GIN_CONSISTENT_PROC),
- CurrentMemoryContext);
+ index_getprocinfo(index, 1, GIN_CONSISTENT_PROC),
+ CurrentMemoryContext);
}
/*
@@ -48,13 +49,16 @@ initGinState( GinState *state, Relation index ) {
*/
Buffer
-GinNewBuffer(Relation index) {
- Buffer buffer;
- bool needLock;
+GinNewBuffer(Relation index)
+{
+ Buffer buffer;
+ bool needLock;
/* First, try to get a page from FSM */
- for(;;) {
+ for (;;)
+ {
BlockNumber blkno = GetFreeIndexPage(&index->rd_node);
+
if (blkno == InvalidBlockNumber)
break;
@@ -64,14 +68,15 @@ GinNewBuffer(Relation index) {
* We have to guard against the possibility that someone else already
* recycled this page; the buffer may be locked if so.
*/
- if (ConditionalLockBuffer(buffer)) {
- Page page = BufferGetPage(buffer);
+ if (ConditionalLockBuffer(buffer))
+ {
+ Page page = BufferGetPage(buffer);
if (PageIsNew(page))
- return buffer; /* OK to use, if never initialized */
+ return buffer; /* OK to use, if never initialized */
if (GinPageIsDeleted(page))
- return buffer; /* OK to use */
+ return buffer; /* OK to use */
LockBuffer(buffer, GIN_UNLOCK);
}
@@ -95,36 +100,39 @@ GinNewBuffer(Relation index) {
}
void
-GinInitPage(Page page, uint32 f, Size pageSize) {
+GinInitPage(Page page, uint32 f, Size pageSize)
+{
GinPageOpaque opaque;
PageInit(page, pageSize, sizeof(GinPageOpaqueData));
opaque = GinPageGetOpaque(page);
- memset( opaque, 0, sizeof(GinPageOpaqueData) );
- opaque->flags = f;
+ memset(opaque, 0, sizeof(GinPageOpaqueData));
+ opaque->flags = f;
opaque->rightlink = InvalidBlockNumber;
}
void
-GinInitBuffer(Buffer b, uint32 f) {
- GinInitPage( BufferGetPage(b), f, BufferGetPageSize(b) );
+GinInitBuffer(Buffer b, uint32 f)
+{
+ GinInitPage(BufferGetPage(b), f, BufferGetPageSize(b));
}
int
-compareEntries(GinState *ginstate, Datum a, Datum b) {
+compareEntries(GinState *ginstate, Datum a, Datum b)
+{
return DatumGetInt32(
- FunctionCall2(
- &ginstate->compareFn,
- a, b
- )
+ FunctionCall2(
+ &ginstate->compareFn,
+ a, b
+ )
);
}
-static FmgrInfo* cmpDatumPtr=NULL;
+static FmgrInfo *cmpDatumPtr = NULL;
-#if defined(__INTEL_COMPILER) && (defined(__ia64__) || defined(__ia64))
-/*
+#if defined(__INTEL_COMPILER) && (defined(__ia64__) || defined(__ia64))
+/*
* Intel Compiler on Intel Itanium with -O2 has a bug around
* change static variable by user function called from
* libc func: it doesn't change. So mark it as volatile.
@@ -132,7 +140,7 @@ static FmgrInfo* cmpDatumPtr=NULL;
* It's a pity, but it's impossible to define optimization
* level here.
*/
-#define VOLATILE volatile
+#define VOLATILE volatile
#else
#define VOLATILE
#endif
@@ -140,57 +148,64 @@ static FmgrInfo* cmpDatumPtr=NULL;
static bool VOLATILE needUnique = FALSE;
static int
-cmpEntries(const void * a, const void * b) {
- int res = DatumGetInt32(
- FunctionCall2(
- cmpDatumPtr,
- *(Datum*)a,
- *(Datum*)b
- )
+cmpEntries(const void *a, const void *b)
+{
+ int res = DatumGetInt32(
+ FunctionCall2(
+ cmpDatumPtr,
+ *(Datum *) a,
+ *(Datum *) b
+ )
);
- if ( res == 0 )
+ if (res == 0)
needUnique = TRUE;
return res;
}
-Datum*
-extractEntriesS(GinState *ginstate, Datum value, uint32 *nentries) {
- Datum *entries;
+Datum *
+extractEntriesS(GinState *ginstate, Datum value, uint32 *nentries)
+{
+ Datum *entries;
- entries = (Datum*)DatumGetPointer(
- FunctionCall2(
- &ginstate->extractValueFn,
- value,
- PointerGetDatum( nentries )
- )
- );
+ entries = (Datum *) DatumGetPointer(
+ FunctionCall2(
+ &ginstate->extractValueFn,
+ value,
+ PointerGetDatum(nentries)
+ )
+ );
- if ( entries == NULL )
+ if (entries == NULL)
*nentries = 0;
- if ( *nentries > 1 ) {
+ if (*nentries > 1)
+ {
cmpDatumPtr = &ginstate->compareFn;
needUnique = FALSE;
- qsort(entries, *nentries, sizeof(Datum), cmpEntries);
+ qsort(entries, *nentries, sizeof(Datum), cmpEntries);
}
return entries;
}
-Datum*
-extractEntriesSU(GinState *ginstate, Datum value, uint32 *nentries) {
- Datum *entries = extractEntriesS(ginstate, value, nentries);
+Datum *
+extractEntriesSU(GinState *ginstate, Datum value, uint32 *nentries)
+{
+ Datum *entries = extractEntriesS(ginstate, value, nentries);
- if ( *nentries>1 && needUnique ) {
- Datum *ptr, *res;
+ if (*nentries > 1 && needUnique)
+ {
+ Datum *ptr,
+ *res;
ptr = res = entries;
- while( ptr - entries < *nentries ) {
- if ( compareEntries(ginstate, *ptr, *res ) != 0 )
+ while (ptr - entries < *nentries)
+ {
+ if (compareEntries(ginstate, *ptr, *res) != 0)
*(++res) = *ptr++;
else
ptr++;
@@ -206,13 +221,14 @@ extractEntriesSU(GinState *ginstate, Datum value, uint32 *nentries) {
* It's analog of PageGetTempPage(), but copies whole page
*/
Page
-GinPageGetCopyPage( Page page ) {
- Size pageSize = PageGetPageSize( page );
- Page tmppage;
+GinPageGetCopyPage(Page page)
+{
+ Size pageSize = PageGetPageSize(page);
+ Page tmppage;
+
+ tmppage = (Page) palloc(pageSize);
+ memcpy(tmppage, page, pageSize);
- tmppage=(Page)palloc( pageSize );
- memcpy( tmppage, page, pageSize );
-
return tmppage;
}
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 31e5f647f0..e0718862c5 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* ginvacuum.c
- * delete & vacuum routines for the postgres GIN
+ * delete & vacuum routines for the postgres GIN
*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.6 2006/09/21 20:31:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.7 2006/10/04 00:29:48 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -21,42 +21,50 @@
#include "storage/freespace.h"
#include "commands/vacuum.h"
-typedef struct {
- Relation index;
- IndexBulkDeleteResult *result;
- IndexBulkDeleteCallback callback;
- void *callback_state;
- GinState ginstate;
+typedef struct
+{
+ Relation index;
+ IndexBulkDeleteResult *result;
+ IndexBulkDeleteCallback callback;
+ void *callback_state;
+ GinState ginstate;
} GinVacuumState;
/*
* Cleans array of ItemPointer (removes dead pointers)
* Results are always stored in *cleaned, which will be allocated
- * if its needed. In case of *cleaned!=NULL caller is resposible to
+ * if its needed. In case of *cleaned!=NULL caller is resposible to
* enough space. *cleaned and items may point to the same
* memory addres.
*/
static uint32
-ginVacuumPostingList( GinVacuumState *gvs, ItemPointerData *items, uint32 nitem, ItemPointerData **cleaned ) {
- uint32 i,j=0;
+ginVacuumPostingList(GinVacuumState *gvs, ItemPointerData *items, uint32 nitem, ItemPointerData **cleaned)
+{
+ uint32 i,
+ j = 0;
/*
* just scan over ItemPointer array
*/
- for(i=0;i<nitem;i++) {
- if ( gvs->callback(items+i, gvs->callback_state) ) {
+ for (i = 0; i < nitem; i++)
+ {
+ if (gvs->callback(items + i, gvs->callback_state))
+ {
gvs->result->tuples_removed += 1;
- if ( !*cleaned ) {
- *cleaned = (ItemPointerData*)palloc(sizeof(ItemPointerData)*nitem);
- if ( i!=0 )
- memcpy( *cleaned, items, sizeof(ItemPointerData)*i);
+ if (!*cleaned)
+ {
+ *cleaned = (ItemPointerData *) palloc(sizeof(ItemPointerData) * nitem);
+ if (i != 0)
+ memcpy(*cleaned, items, sizeof(ItemPointerData) * i);
}
- } else {
+ }
+ else
+ {
gvs->result->num_index_tuples += 1;
- if (i!=j)
+ if (i != j)
(*cleaned)[j] = items[i];
j++;
}
@@ -69,56 +77,65 @@ ginVacuumPostingList( GinVacuumState *gvs, ItemPointerData *items, uint32 nitem,
* fills WAL record for vacuum leaf page
*/
static void
-xlogVacuumPage(Relation index, Buffer buffer) {
- Page page = BufferGetPage( buffer );
- XLogRecPtr recptr;
+xlogVacuumPage(Relation index, Buffer buffer)
+{
+ Page page = BufferGetPage(buffer);
+ XLogRecPtr recptr;
XLogRecData rdata[3];
- ginxlogVacuumPage data;
- char *backup;
- char itups[BLCKSZ];
- uint32 len=0;
+ ginxlogVacuumPage data;
+ char *backup;
+ char itups[BLCKSZ];
+ uint32 len = 0;
- Assert( GinPageIsLeaf( page ) );
+ Assert(GinPageIsLeaf(page));
if (index->rd_istemp)
- return;
+ return;
data.node = index->rd_node;
data.blkno = BufferGetBlockNumber(buffer);
- if ( GinPageIsData( page ) ) {
- backup = GinDataPageGetData( page );
- data.nitem = GinPageGetOpaque( page )->maxoff;
- if ( data.nitem )
- len = MAXALIGN( sizeof(ItemPointerData)*data.nitem );
- } else {
- char *ptr;
+ if (GinPageIsData(page))
+ {
+ backup = GinDataPageGetData(page);
+ data.nitem = GinPageGetOpaque(page)->maxoff;
+ if (data.nitem)
+ len = MAXALIGN(sizeof(ItemPointerData) * data.nitem);
+ }
+ else
+ {
+ char *ptr;
OffsetNumber i;
ptr = backup = itups;
- for(i=FirstOffsetNumber;i<=PageGetMaxOffsetNumber(page);i++) {
- IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
- memcpy( ptr, itup, IndexTupleSize( itup ) );
- ptr += MAXALIGN( IndexTupleSize( itup ) );
+ for (i = FirstOffsetNumber; i <= PageGetMaxOffsetNumber(page); i++)
+ {
+ IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
+
+ memcpy(ptr, itup, IndexTupleSize(itup));
+ ptr += MAXALIGN(IndexTupleSize(itup));
}
data.nitem = PageGetMaxOffsetNumber(page);
- len = ptr-backup;
+ len = ptr - backup;
}
rdata[0].buffer = buffer;
- rdata[0].buffer_std = ( GinPageIsData( page ) ) ? FALSE : TRUE;
+ rdata[0].buffer_std = (GinPageIsData(page)) ? FALSE : TRUE;
rdata[0].len = 0;
rdata[0].data = NULL;
rdata[0].next = rdata + 1;
rdata[1].buffer = InvalidBuffer;
rdata[1].len = sizeof(ginxlogVacuumPage);
- rdata[1].data = (char*)&data;
+ rdata[1].data = (char *) &data;
- if ( len == 0 ) {
+ if (len == 0)
+ {
rdata[1].next = NULL;
- } else {
+ }
+ else
+ {
rdata[1].next = rdata + 2;
rdata[2].buffer = InvalidBuffer;
@@ -133,71 +150,84 @@ xlogVacuumPage(Relation index, Buffer buffer) {
}
static bool
-ginVacuumPostingTreeLeaves( GinVacuumState *gvs, BlockNumber blkno, bool isRoot, Buffer *rootBuffer ) {
- Buffer buffer = ReadBuffer( gvs->index, blkno );
- Page page = BufferGetPage( buffer );
- bool hasVoidPage = FALSE;
+ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, Buffer *rootBuffer)
+{
+ Buffer buffer = ReadBuffer(gvs->index, blkno);
+ Page page = BufferGetPage(buffer);
+ bool hasVoidPage = FALSE;
- /*
+ /*
* We should be sure that we don't concurrent with inserts, insert process
- * never release root page until end (but it can unlock it and lock again).
- * If we lock root with with LockBufferForCleanup, new scan process can't begin,
- * but previous may run.
- * ginmarkpos/start* keeps buffer pinned, so we will wait for it.
- * We lock only one posting tree in whole index, so, it's concurrent enough..
- * Side effect: after this is full complete, tree is unused by any other process
+ * never release root page until end (but it can unlock it and lock
+ * again). If we lock root with with LockBufferForCleanup, new scan
+ * process can't begin, but previous may run. ginmarkpos/start* keeps
+ * buffer pinned, so we will wait for it. We lock only one posting tree in
+ * whole index, so, it's concurrent enough.. Side effect: after this is
+ * full complete, tree is unused by any other process
*/
- LockBufferForCleanup( buffer );
+ LockBufferForCleanup(buffer);
- Assert( GinPageIsData(page) );
+ Assert(GinPageIsData(page));
- if ( GinPageIsLeaf(page) ) {
- OffsetNumber newMaxOff, oldMaxOff = GinPageGetOpaque(page)->maxoff;
+ if (GinPageIsLeaf(page))
+ {
+ OffsetNumber newMaxOff,
+ oldMaxOff = GinPageGetOpaque(page)->maxoff;
ItemPointerData *cleaned = NULL;
- newMaxOff = ginVacuumPostingList( gvs,
- (ItemPointer)GinDataPageGetData(page), oldMaxOff, &cleaned );
+ newMaxOff = ginVacuumPostingList(gvs,
+ (ItemPointer) GinDataPageGetData(page), oldMaxOff, &cleaned);
/* saves changes about deleted tuple ... */
- if ( oldMaxOff != newMaxOff ) {
+ if (oldMaxOff != newMaxOff)
+ {
START_CRIT_SECTION();
- if ( newMaxOff > 0 )
- memcpy( GinDataPageGetData(page), cleaned, sizeof(ItemPointerData) * newMaxOff );
- pfree( cleaned );
+ if (newMaxOff > 0)
+ memcpy(GinDataPageGetData(page), cleaned, sizeof(ItemPointerData) * newMaxOff);
+ pfree(cleaned);
GinPageGetOpaque(page)->maxoff = newMaxOff;
- xlogVacuumPage(gvs->index, buffer);
+ xlogVacuumPage(gvs->index, buffer);
- MarkBufferDirty( buffer );
+ MarkBufferDirty(buffer);
END_CRIT_SECTION();
-
- /* if root is a leaf page, we don't desire futher processing */
- if ( !isRoot && GinPageGetOpaque(page)->maxoff < FirstOffsetNumber )
+
+ /* if root is a leaf page, we don't desire futher processing */
+ if (!isRoot && GinPageGetOpaque(page)->maxoff < FirstOffsetNumber)
hasVoidPage = TRUE;
}
- } else {
+ }
+ else
+ {
OffsetNumber i;
- bool isChildHasVoid = FALSE;
+ bool isChildHasVoid = FALSE;
+
+ for (i = FirstOffsetNumber; i <= GinPageGetOpaque(page)->maxoff; i++)
+ {
+ PostingItem *pitem = (PostingItem *) GinDataPageGetItem(page, i);
- for( i=FirstOffsetNumber ; i <= GinPageGetOpaque(page)->maxoff ; i++ ) {
- PostingItem *pitem = (PostingItem*)GinDataPageGetItem(page, i);
- if ( ginVacuumPostingTreeLeaves( gvs, PostingItemGetBlockNumber(pitem), FALSE, NULL ) )
+ if (ginVacuumPostingTreeLeaves(gvs, PostingItemGetBlockNumber(pitem), FALSE, NULL))
isChildHasVoid = TRUE;
}
- if ( isChildHasVoid )
+ if (isChildHasVoid)
hasVoidPage = TRUE;
}
- /* if we have root and theres void pages in tree, then we don't release lock
- to go further processing and guarantee that tree is unused */
- if ( !(isRoot && hasVoidPage) ) {
- UnlockReleaseBuffer( buffer );
- } else {
- Assert( rootBuffer );
+ /*
+ * if we have root and theres void pages in tree, then we don't release
+ * lock to go further processing and guarantee that tree is unused
+ */
+ if (!(isRoot && hasVoidPage))
+ {
+ UnlockReleaseBuffer(buffer);
+ }
+ else
+ {
+ Assert(rootBuffer);
*rootBuffer = buffer;
}
@@ -205,49 +235,54 @@ ginVacuumPostingTreeLeaves( GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
}
static void
-ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
- BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot ) {
- Buffer dBuffer = ReadBuffer( gvs->index, deleteBlkno );
- Buffer lBuffer = (leftBlkno==InvalidBlockNumber) ? InvalidBuffer : ReadBuffer( gvs->index, leftBlkno );
- Buffer pBuffer = ReadBuffer( gvs->index, parentBlkno );
- Page page, parentPage;
-
- LockBuffer( dBuffer, GIN_EXCLUSIVE );
- if ( !isParentRoot ) /* parent is already locked by LockBufferForCleanup() */
- LockBuffer( pBuffer, GIN_EXCLUSIVE );
+ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
+ BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
+{
+ Buffer dBuffer = ReadBuffer(gvs->index, deleteBlkno);
+ Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ? InvalidBuffer : ReadBuffer(gvs->index, leftBlkno);
+ Buffer pBuffer = ReadBuffer(gvs->index, parentBlkno);
+ Page page,
+ parentPage;
+
+ LockBuffer(dBuffer, GIN_EXCLUSIVE);
+ if (!isParentRoot) /* parent is already locked by
+ * LockBufferForCleanup() */
+ LockBuffer(pBuffer, GIN_EXCLUSIVE);
START_CRIT_SECTION();
- if ( leftBlkno!= InvalidBlockNumber ) {
+ if (leftBlkno != InvalidBlockNumber)
+ {
BlockNumber rightlink;
- LockBuffer( lBuffer, GIN_EXCLUSIVE );
+ LockBuffer(lBuffer, GIN_EXCLUSIVE);
- page = BufferGetPage( dBuffer );
+ page = BufferGetPage(dBuffer);
rightlink = GinPageGetOpaque(page)->rightlink;
- page = BufferGetPage( lBuffer );
+ page = BufferGetPage(lBuffer);
GinPageGetOpaque(page)->rightlink = rightlink;
}
- parentPage = BufferGetPage( pBuffer );
+ parentPage = BufferGetPage(pBuffer);
PageDeletePostingItem(parentPage, myoff);
- page = BufferGetPage( dBuffer );
+ page = BufferGetPage(dBuffer);
GinPageGetOpaque(page)->flags = GIN_DELETED;
- if (!gvs->index->rd_istemp) {
- XLogRecPtr recptr;
+ if (!gvs->index->rd_istemp)
+ {
+ XLogRecPtr recptr;
XLogRecData rdata[4];
- ginxlogDeletePage data;
- int n;
+ ginxlogDeletePage data;
+ int n;
data.node = gvs->index->rd_node;
data.blkno = deleteBlkno;
data.parentBlkno = parentBlkno;
data.parentOffset = myoff;
- data.leftBlkno = leftBlkno;
- data.rightLink = GinPageGetOpaque(page)->rightlink;
+ data.leftBlkno = leftBlkno;
+ data.rightLink = GinPageGetOpaque(page)->rightlink;
rdata[0].buffer = dBuffer;
rdata[0].buffer_std = FALSE;
@@ -261,20 +296,22 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
rdata[1].len = 0;
rdata[1].next = rdata + 2;
- if ( leftBlkno!= InvalidBlockNumber ) {
+ if (leftBlkno != InvalidBlockNumber)
+ {
rdata[2].buffer = lBuffer;
rdata[2].buffer_std = FALSE;
rdata[2].data = NULL;
rdata[2].len = 0;
rdata[2].next = rdata + 3;
n = 3;
- } else
+ }
+ else
n = 2;
rdata[n].buffer = InvalidBuffer;
rdata[n].buffer_std = FALSE;
rdata[n].len = sizeof(ginxlogDeletePage);
- rdata[n].data = (char*)&data;
+ rdata[n].data = (char *) &data;
rdata[n].next = NULL;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_PAGE, rdata);
@@ -282,122 +319,141 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
PageSetTLI(page, ThisTimeLineID);
PageSetLSN(parentPage, recptr);
PageSetTLI(parentPage, ThisTimeLineID);
- if ( leftBlkno!= InvalidBlockNumber ) {
- page = BufferGetPage( lBuffer );
+ if (leftBlkno != InvalidBlockNumber)
+ {
+ page = BufferGetPage(lBuffer);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
}
- MarkBufferDirty( pBuffer );
- if ( !isParentRoot )
- LockBuffer( pBuffer, GIN_UNLOCK );
- ReleaseBuffer( pBuffer );
+ MarkBufferDirty(pBuffer);
+ if (!isParentRoot)
+ LockBuffer(pBuffer, GIN_UNLOCK);
+ ReleaseBuffer(pBuffer);
- if ( leftBlkno!= InvalidBlockNumber ) {
- MarkBufferDirty( lBuffer );
- UnlockReleaseBuffer( lBuffer );
+ if (leftBlkno != InvalidBlockNumber)
+ {
+ MarkBufferDirty(lBuffer);
+ UnlockReleaseBuffer(lBuffer);
}
- MarkBufferDirty( dBuffer );
- UnlockReleaseBuffer( dBuffer );
+ MarkBufferDirty(dBuffer);
+ UnlockReleaseBuffer(dBuffer);
END_CRIT_SECTION();
gvs->result->pages_deleted++;
}
-typedef struct DataPageDeleteStack {
- struct DataPageDeleteStack *child;
- struct DataPageDeleteStack *parent;
+typedef struct DataPageDeleteStack
+{
+ struct DataPageDeleteStack *child;
+ struct DataPageDeleteStack *parent;
- BlockNumber blkno;
- bool isRoot;
+ BlockNumber blkno;
+ bool isRoot;
} DataPageDeleteStack;
/*
* scans posting tree and deletes empty pages
*/
static bool
-ginScanToDelete( GinVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDeleteStack *parent, OffsetNumber myoff ) {
- DataPageDeleteStack *me;
- Buffer buffer;
- Page page;
- bool meDelete = FALSE;
-
- if ( isRoot ) {
+ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDeleteStack *parent, OffsetNumber myoff)
+{
+ DataPageDeleteStack *me;
+ Buffer buffer;
+ Page page;
+ bool meDelete = FALSE;
+
+ if (isRoot)
+ {
me = parent;
- } else {
- if ( ! parent->child ) {
- me = (DataPageDeleteStack*)palloc0(sizeof(DataPageDeleteStack));
- me->parent=parent;
+ }
+ else
+ {
+ if (!parent->child)
+ {
+ me = (DataPageDeleteStack *) palloc0(sizeof(DataPageDeleteStack));
+ me->parent = parent;
parent->child = me;
me->blkno = InvalidBlockNumber;
- } else
+ }
+ else
me = parent->child;
}
- buffer = ReadBuffer( gvs->index, blkno );
- page = BufferGetPage( buffer );
+ buffer = ReadBuffer(gvs->index, blkno);
+ page = BufferGetPage(buffer);
- Assert( GinPageIsData(page) );
+ Assert(GinPageIsData(page));
- if ( !GinPageIsLeaf(page) ) {
+ if (!GinPageIsLeaf(page))
+ {
OffsetNumber i;
- for(i=FirstOffsetNumber;i<=GinPageGetOpaque(page)->maxoff;i++) {
- PostingItem *pitem = (PostingItem*)GinDataPageGetItem(page, i);
+ for (i = FirstOffsetNumber; i <= GinPageGetOpaque(page)->maxoff; i++)
+ {
+ PostingItem *pitem = (PostingItem *) GinDataPageGetItem(page, i);
- if ( ginScanToDelete( gvs, PostingItemGetBlockNumber(pitem), FALSE, me, i ) )
+ if (ginScanToDelete(gvs, PostingItemGetBlockNumber(pitem), FALSE, me, i))
i--;
}
}
- if ( GinPageGetOpaque(page)->maxoff < FirstOffsetNumber ) {
- if ( !( me->blkno == InvalidBlockNumber && GinPageRightMost(page) ) ) {
+ if (GinPageGetOpaque(page)->maxoff < FirstOffsetNumber)
+ {
+ if (!(me->blkno == InvalidBlockNumber && GinPageRightMost(page)))
+ {
/* we never delete right most branch */
- Assert( !isRoot );
- if ( GinPageGetOpaque(page)->maxoff < FirstOffsetNumber ) {
- ginDeletePage( gvs, blkno, me->blkno, me->parent->blkno, myoff, me->parent->isRoot );
+ Assert(!isRoot);
+ if (GinPageGetOpaque(page)->maxoff < FirstOffsetNumber)
+ {
+ ginDeletePage(gvs, blkno, me->blkno, me->parent->blkno, myoff, me->parent->isRoot);
meDelete = TRUE;
}
}
}
- ReleaseBuffer( buffer );
+ ReleaseBuffer(buffer);
- if ( !meDelete )
+ if (!meDelete)
me->blkno = blkno;
return meDelete;
}
static void
-ginVacuumPostingTree( GinVacuumState *gvs, BlockNumber rootBlkno ) {
- Buffer rootBuffer = InvalidBuffer;
- DataPageDeleteStack root, *ptr, *tmp;
-
- if ( ginVacuumPostingTreeLeaves(gvs, rootBlkno, TRUE, &rootBuffer)==FALSE ) {
- Assert( rootBuffer == InvalidBuffer );
+ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
+{
+ Buffer rootBuffer = InvalidBuffer;
+ DataPageDeleteStack root,
+ *ptr,
+ *tmp;
+
+ if (ginVacuumPostingTreeLeaves(gvs, rootBlkno, TRUE, &rootBuffer) == FALSE)
+ {
+ Assert(rootBuffer == InvalidBuffer);
return;
}
- memset(&root,0,sizeof(DataPageDeleteStack));
+ memset(&root, 0, sizeof(DataPageDeleteStack));
root.blkno = rootBlkno;
root.isRoot = TRUE;
vacuum_delay_point();
- ginScanToDelete( gvs, rootBlkno, TRUE, &root, InvalidOffsetNumber );
+ ginScanToDelete(gvs, rootBlkno, TRUE, &root, InvalidOffsetNumber);
ptr = root.child;
- while( ptr ) {
+ while (ptr)
+ {
tmp = ptr->child;
- pfree( ptr );
+ pfree(ptr);
ptr = tmp;
}
- UnlockReleaseBuffer( rootBuffer );
+ UnlockReleaseBuffer(rootBuffer);
}
/*
@@ -406,48 +462,65 @@ ginVacuumPostingTree( GinVacuumState *gvs, BlockNumber rootBlkno ) {
* then page is copied into temprorary one.
*/
static Page
-ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint32 *nroot) {
- Page origpage = BufferGetPage( buffer ), tmppage;
- OffsetNumber i, maxoff = PageGetMaxOffsetNumber( origpage );
+ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint32 *nroot)
+{
+ Page origpage = BufferGetPage(buffer),
+ tmppage;
+ OffsetNumber i,
+ maxoff = PageGetMaxOffsetNumber(origpage);
tmppage = origpage;
- *nroot=0;
+ *nroot = 0;
- for(i=FirstOffsetNumber; i<= maxoff; i++) {
- IndexTuple itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i));
+ for (i = FirstOffsetNumber; i <= maxoff; i++)
+ {
+ IndexTuple itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i));
- if ( GinIsPostingTree(itup) ) {
- /* store posting tree's roots for further processing,
- we can't vacuum it just now due to risk of deadlocks with scans/inserts */
- roots[ *nroot ] = GinItemPointerGetBlockNumber(&itup->t_tid);
+ if (GinIsPostingTree(itup))
+ {
+ /*
+ * store posting tree's roots for further processing, we can't
+ * vacuum it just now due to risk of deadlocks with scans/inserts
+ */
+ roots[*nroot] = GinItemPointerGetBlockNumber(&itup->t_tid);
(*nroot)++;
- } else if ( GinGetNPosting(itup) > 0 ) {
- /* if we already create temrorary page, we will make changes in place */
- ItemPointerData *cleaned = (tmppage==origpage) ? NULL : GinGetPosting(itup );
- uint32 newN = ginVacuumPostingList( gvs, GinGetPosting(itup), GinGetNPosting(itup), &cleaned );
-
- if ( GinGetNPosting(itup) != newN ) {
- bool isnull;
- Datum value;
+ }
+ else if (GinGetNPosting(itup) > 0)
+ {
+ /*
+ * if we already create temrorary page, we will make changes in
+ * place
+ */
+ ItemPointerData *cleaned = (tmppage == origpage) ? NULL : GinGetPosting(itup);
+ uint32 newN = ginVacuumPostingList(gvs, GinGetPosting(itup), GinGetNPosting(itup), &cleaned);
+
+ if (GinGetNPosting(itup) != newN)
+ {
+ bool isnull;
+ Datum value;
/*
- * Some ItemPointers was deleted, so we should remake our tuple
+ * Some ItemPointers was deleted, so we should remake our
+ * tuple
*/
- if ( tmppage==origpage ) {
+ if (tmppage == origpage)
+ {
/*
* On first difference we create temprorary page in memory
* and copies content in to it.
*/
- tmppage=GinPageGetCopyPage ( origpage );
+ tmppage = GinPageGetCopyPage(origpage);
+
+ if (newN > 0)
+ {
+ Size pos = ((char *) GinGetPosting(itup)) - ((char *) origpage);
- if ( newN > 0 ) {
- Size pos = ((char*)GinGetPosting(itup)) - ((char*)origpage);
- memcpy( tmppage+pos, cleaned, sizeof(ItemPointerData)*newN );
+ memcpy(tmppage + pos, cleaned, sizeof(ItemPointerData) * newN);
}
- pfree( cleaned );
+ pfree(cleaned);
/* set itup pointer to new page */
itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i));
@@ -457,30 +530,31 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3
itup = GinFormTuple(&gvs->ginstate, value, GinGetPosting(itup), newN);
PageIndexTupleDelete(tmppage, i);
- if ( PageAddItem( tmppage, (Item)itup, IndexTupleSize(itup), i, LP_USED ) != i )
- elog(ERROR, "failed to add item to index page in \"%s\"",
- RelationGetRelationName(gvs->index));
+ if (PageAddItem(tmppage, (Item) itup, IndexTupleSize(itup), i, LP_USED) != i)
+ elog(ERROR, "failed to add item to index page in \"%s\"",
+ RelationGetRelationName(gvs->index));
- pfree( itup );
+ pfree(itup);
}
}
}
- return ( tmppage==origpage ) ? NULL : tmppage;
+ return (tmppage == origpage) ? NULL : tmppage;
}
Datum
-ginbulkdelete(PG_FUNCTION_ARGS) {
+ginbulkdelete(PG_FUNCTION_ARGS)
+{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
void *callback_state = (void *) PG_GETARG_POINTER(3);
Relation index = info->index;
- BlockNumber blkno = GIN_ROOT_BLKNO;
- GinVacuumState gvs;
- Buffer buffer;
- BlockNumber rootOfPostingTree[ BLCKSZ/ (sizeof(IndexTupleData)+sizeof(ItemId)) ];
- uint32 nRoot;
+ BlockNumber blkno = GIN_ROOT_BLKNO;
+ GinVacuumState gvs;
+ Buffer buffer;
+ BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))];
+ uint32 nRoot;
/* first time through? */
if (stats == NULL)
@@ -494,107 +568,117 @@ ginbulkdelete(PG_FUNCTION_ARGS) {
gvs.callback_state = callback_state;
initGinState(&gvs.ginstate, index);
- buffer = ReadBuffer( index, blkno );
+ buffer = ReadBuffer(index, blkno);
/* find leaf page */
- for(;;) {
- Page page = BufferGetPage( buffer );
- IndexTuple itup;
+ for (;;)
+ {
+ Page page = BufferGetPage(buffer);
+ IndexTuple itup;
- LockBuffer(buffer,GIN_SHARE);
+ LockBuffer(buffer, GIN_SHARE);
- Assert( !GinPageIsData(page) );
+ Assert(!GinPageIsData(page));
- if ( GinPageIsLeaf(page) ) {
- LockBuffer(buffer,GIN_UNLOCK);
- LockBuffer(buffer,GIN_EXCLUSIVE);
+ if (GinPageIsLeaf(page))
+ {
+ LockBuffer(buffer, GIN_UNLOCK);
+ LockBuffer(buffer, GIN_EXCLUSIVE);
- if ( blkno==GIN_ROOT_BLKNO && !GinPageIsLeaf(page) ) {
- LockBuffer(buffer,GIN_UNLOCK);
- continue; /* check it one more */
+ if (blkno == GIN_ROOT_BLKNO && !GinPageIsLeaf(page))
+ {
+ LockBuffer(buffer, GIN_UNLOCK);
+ continue; /* check it one more */
}
- break;
+ break;
}
- Assert( PageGetMaxOffsetNumber(page) >= FirstOffsetNumber );
+ Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber));
blkno = GinItemPointerGetBlockNumber(&(itup)->t_tid);
- Assert( blkno!= InvalidBlockNumber );
+ Assert(blkno != InvalidBlockNumber);
- LockBuffer(buffer,GIN_UNLOCK);
- buffer = ReleaseAndReadBuffer( buffer, index, blkno );
+ LockBuffer(buffer, GIN_UNLOCK);
+ buffer = ReleaseAndReadBuffer(buffer, index, blkno);
}
/* right now we found leftmost page in entry's BTree */
- for(;;) {
- Page page = BufferGetPage( buffer );
- Page resPage;
- uint32 i;
+ for (;;)
+ {
+ Page page = BufferGetPage(buffer);
+ Page resPage;
+ uint32 i;
- Assert( !GinPageIsData(page) );
+ Assert(!GinPageIsData(page));
resPage = ginVacuumEntryPage(&gvs, buffer, rootOfPostingTree, &nRoot);
- blkno = GinPageGetOpaque( page )->rightlink;
+ blkno = GinPageGetOpaque(page)->rightlink;
- if ( resPage ) {
+ if (resPage)
+ {
START_CRIT_SECTION();
- PageRestoreTempPage( resPage, page );
- xlogVacuumPage(gvs.index, buffer);
- MarkBufferDirty( buffer );
+ PageRestoreTempPage(resPage, page);
+ xlogVacuumPage(gvs.index, buffer);
+ MarkBufferDirty(buffer);
UnlockReleaseBuffer(buffer);
END_CRIT_SECTION();
- } else {
+ }
+ else
+ {
UnlockReleaseBuffer(buffer);
}
vacuum_delay_point();
- for(i=0; i<nRoot; i++) {
- ginVacuumPostingTree( &gvs, rootOfPostingTree[i] );
+ for (i = 0; i < nRoot; i++)
+ {
+ ginVacuumPostingTree(&gvs, rootOfPostingTree[i]);
vacuum_delay_point();
}
- if ( blkno==InvalidBlockNumber ) /*rightmost page*/
+ if (blkno == InvalidBlockNumber) /* rightmost page */
break;
- buffer = ReadBuffer( index, blkno );
- LockBuffer(buffer,GIN_EXCLUSIVE);
+ buffer = ReadBuffer(index, blkno);
+ LockBuffer(buffer, GIN_EXCLUSIVE);
}
PG_RETURN_POINTER(gvs.result);
}
-Datum
-ginvacuumcleanup(PG_FUNCTION_ARGS) {
+Datum
+ginvacuumcleanup(PG_FUNCTION_ARGS)
+{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
- Relation index = info->index;
- bool needLock;
- BlockNumber npages,
+ Relation index = info->index;
+ bool needLock;
+ BlockNumber npages,
blkno;
BlockNumber totFreePages,
nFreePages,
*freePages,
- maxFreePages;
+ maxFreePages;
BlockNumber lastBlock = GIN_ROOT_BLKNO,
- lastFilledBlock = GIN_ROOT_BLKNO;
+ lastFilledBlock = GIN_ROOT_BLKNO;
/* Set up all-zero stats if ginbulkdelete wasn't called */
if (stats == NULL)
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+
/*
* XXX we always report the heap tuple count as the number of index
- * entries. This is bogus if the index is partial, but it's real hard
- * to tell how many distinct heap entries are referenced by a GIN index.
+ * entries. This is bogus if the index is partial, but it's real hard to
+ * tell how many distinct heap entries are referenced by a GIN index.
*/
stats->num_index_tuples = info->num_heap_tuples;
/*
- * If vacuum full, we already have exclusive lock on the index.
- * Otherwise, need lock unless it's local to this backend.
+ * If vacuum full, we already have exclusive lock on the index. Otherwise,
+ * need lock unless it's local to this backend.
*/
if (info->vacuum_full)
needLock = false;
@@ -614,32 +698,38 @@ ginvacuumcleanup(PG_FUNCTION_ARGS) {
totFreePages = nFreePages = 0;
freePages = (BlockNumber *) palloc(sizeof(BlockNumber) * maxFreePages);
- for (blkno = GIN_ROOT_BLKNO + 1; blkno < npages; blkno++) {
- Buffer buffer;
- Page page;
+ for (blkno = GIN_ROOT_BLKNO + 1; blkno < npages; blkno++)
+ {
+ Buffer buffer;
+ Page page;
vacuum_delay_point();
-
+
buffer = ReadBuffer(index, blkno);
LockBuffer(buffer, GIN_SHARE);
page = (Page) BufferGetPage(buffer);
- if ( GinPageIsDeleted(page) ) {
+ if (GinPageIsDeleted(page))
+ {
if (nFreePages < maxFreePages)
freePages[nFreePages++] = blkno;
totFreePages++;
- } else
+ }
+ else
lastFilledBlock = blkno;
UnlockReleaseBuffer(buffer);
}
lastBlock = npages - 1;
- if (info->vacuum_full && nFreePages > 0) {
+ if (info->vacuum_full && nFreePages > 0)
+ {
/* try to truncate index */
- int i;
- for (i = 0; i < nFreePages; i++)
- if (freePages[i] >= lastFilledBlock) {
+ int i;
+
+ for (i = 0; i < nFreePages; i++)
+ if (freePages[i] >= lastFilledBlock)
+ {
totFreePages = nFreePages = i;
break;
}
@@ -661,4 +751,3 @@ ginvacuumcleanup(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(stats);
}
-
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index 265e7de70c..788f290b84 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.4 2006/08/07 16:57:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.5 2006/10/04 00:29:48 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -17,12 +17,13 @@
#include "access/heapam.h"
#include "utils/memutils.h"
-static MemoryContext opCtx; /* working memory for operations */
+static MemoryContext opCtx; /* working memory for operations */
static MemoryContext topCtx;
-typedef struct ginIncompleteSplit {
- RelFileNode node;
- BlockNumber leftBlkno;
+typedef struct ginIncompleteSplit
+{
+ RelFileNode node;
+ BlockNumber leftBlkno;
BlockNumber rightBlkno;
BlockNumber rootBlkno;
} ginIncompleteSplit;
@@ -30,10 +31,11 @@ typedef struct ginIncompleteSplit {
static List *incomplete_splits;
static void
-pushIncompleteSplit(RelFileNode node, BlockNumber leftBlkno, BlockNumber rightBlkno, BlockNumber rootBlkno) {
- ginIncompleteSplit *split;
+pushIncompleteSplit(RelFileNode node, BlockNumber leftBlkno, BlockNumber rightBlkno, BlockNumber rootBlkno)
+{
+ ginIncompleteSplit *split;
- MemoryContextSwitchTo( topCtx );
+ MemoryContextSwitchTo(topCtx);
split = palloc(sizeof(ginIncompleteSplit));
@@ -44,17 +46,20 @@ pushIncompleteSplit(RelFileNode node, BlockNumber leftBlkno, BlockNumber rightBl
incomplete_splits = lappend(incomplete_splits, split);
- MemoryContextSwitchTo( opCtx );
+ MemoryContextSwitchTo(opCtx);
}
static void
-forgetIncompleteSplit(RelFileNode node, BlockNumber leftBlkno, BlockNumber updateBlkno) {
+forgetIncompleteSplit(RelFileNode node, BlockNumber leftBlkno, BlockNumber updateBlkno)
+{
ListCell *l;
- foreach(l, incomplete_splits) {
- ginIncompleteSplit *split = (ginIncompleteSplit *) lfirst(l);
+ foreach(l, incomplete_splits)
+ {
+ ginIncompleteSplit *split = (ginIncompleteSplit *) lfirst(l);
- if ( RelFileNodeEquals(node, split->node) && leftBlkno == split->leftBlkno && updateBlkno == split->rightBlkno ) {
+ if (RelFileNodeEquals(node, split->node) && leftBlkno == split->leftBlkno && updateBlkno == split->rightBlkno)
+ {
incomplete_splits = list_delete_ptr(incomplete_splits, split);
break;
}
@@ -62,7 +67,8 @@ forgetIncompleteSplit(RelFileNode node, BlockNumber leftBlkno, BlockNumber updat
}
static void
-ginRedoCreateIndex(XLogRecPtr lsn, XLogRecord *record) {
+ginRedoCreateIndex(XLogRecPtr lsn, XLogRecord *record)
+{
RelFileNode *node = (RelFileNode *) XLogRecGetData(record);
Relation reln;
Buffer buffer;
@@ -83,9 +89,10 @@ ginRedoCreateIndex(XLogRecPtr lsn, XLogRecord *record) {
}
static void
-ginRedoCreatePTree(XLogRecPtr lsn, XLogRecord *record) {
- ginxlogCreatePostingTree *data = (ginxlogCreatePostingTree*)XLogRecGetData(record);
- ItemPointerData *items = (ItemPointerData*)(XLogRecGetData(record) + sizeof(ginxlogCreatePostingTree));
+ginRedoCreatePTree(XLogRecPtr lsn, XLogRecord *record)
+{
+ ginxlogCreatePostingTree *data = (ginxlogCreatePostingTree *) XLogRecGetData(record);
+ ItemPointerData *items = (ItemPointerData *) (XLogRecGetData(record) + sizeof(ginxlogCreatePostingTree));
Relation reln;
Buffer buffer;
Page page;
@@ -95,8 +102,8 @@ ginRedoCreatePTree(XLogRecPtr lsn, XLogRecord *record) {
Assert(BufferIsValid(buffer));
page = (Page) BufferGetPage(buffer);
- GinInitBuffer(buffer, GIN_DATA|GIN_LEAF);
- memcpy( GinDataPageGetData(page), items, sizeof(ItemPointerData) * data->nitem );
+ GinInitBuffer(buffer, GIN_DATA | GIN_LEAF);
+ memcpy(GinDataPageGetData(page), items, sizeof(ItemPointerData) * data->nitem);
GinPageGetOpaque(page)->maxoff = data->nitem;
PageSetLSN(page, lsn);
@@ -107,8 +114,9 @@ ginRedoCreatePTree(XLogRecPtr lsn, XLogRecord *record) {
}
static void
-ginRedoInsert(XLogRecPtr lsn, XLogRecord *record) {
- ginxlogInsert *data = (ginxlogInsert*)XLogRecGetData(record);
+ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
+{
+ ginxlogInsert *data = (ginxlogInsert *) XLogRecGetData(record);
Relation reln;
Buffer buffer;
Page page;
@@ -122,64 +130,73 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record) {
Assert(BufferIsValid(buffer));
page = (Page) BufferGetPage(buffer);
- if ( data->isData ) {
- Assert( data->isDelete == FALSE );
- Assert( GinPageIsData( page ) );
+ if (data->isData)
+ {
+ Assert(data->isDelete == FALSE);
+ Assert(GinPageIsData(page));
- if ( data->isLeaf ) {
+ if (data->isLeaf)
+ {
OffsetNumber i;
- ItemPointerData *items = (ItemPointerData*)( XLogRecGetData(record) + sizeof(ginxlogInsert) );
+ ItemPointerData *items = (ItemPointerData *) (XLogRecGetData(record) + sizeof(ginxlogInsert));
- Assert( GinPageIsLeaf( page ) );
- Assert( data->updateBlkno == InvalidBlockNumber );
+ Assert(GinPageIsLeaf(page));
+ Assert(data->updateBlkno == InvalidBlockNumber);
- for(i=0;i<data->nitem;i++)
- GinDataPageAddItem( page, items+i, data->offset + i );
- } else {
+ for (i = 0; i < data->nitem; i++)
+ GinDataPageAddItem(page, items + i, data->offset + i);
+ }
+ else
+ {
PostingItem *pitem;
- Assert( !GinPageIsLeaf( page ) );
+ Assert(!GinPageIsLeaf(page));
- if ( data->updateBlkno != InvalidBlockNumber ) {
- /* update link to right page after split */
- pitem = (PostingItem*)GinDataPageGetItem(page, data->offset);
- PostingItemSetBlockNumber( pitem, data->updateBlkno );
+ if (data->updateBlkno != InvalidBlockNumber)
+ {
+ /* update link to right page after split */
+ pitem = (PostingItem *) GinDataPageGetItem(page, data->offset);
+ PostingItemSetBlockNumber(pitem, data->updateBlkno);
}
- pitem = (PostingItem*)( XLogRecGetData(record) + sizeof(ginxlogInsert) );
+ pitem = (PostingItem *) (XLogRecGetData(record) + sizeof(ginxlogInsert));
- GinDataPageAddItem( page, pitem, data->offset );
+ GinDataPageAddItem(page, pitem, data->offset);
- if ( data->updateBlkno != InvalidBlockNumber )
- forgetIncompleteSplit(data->node, PostingItemGetBlockNumber( pitem ), data->updateBlkno);
+ if (data->updateBlkno != InvalidBlockNumber)
+ forgetIncompleteSplit(data->node, PostingItemGetBlockNumber(pitem), data->updateBlkno);
}
- } else {
- IndexTuple itup;
+ }
+ else
+ {
+ IndexTuple itup;
- Assert( !GinPageIsData( page ) );
+ Assert(!GinPageIsData(page));
- if ( data->updateBlkno != InvalidBlockNumber ) {
- /* update link to right page after split */
- Assert( !GinPageIsLeaf( page ) );
- Assert( data->offset>=FirstOffsetNumber && data->offset<=PageGetMaxOffsetNumber(page) );
+ if (data->updateBlkno != InvalidBlockNumber)
+ {
+ /* update link to right page after split */
+ Assert(!GinPageIsLeaf(page));
+ Assert(data->offset >= FirstOffsetNumber && data->offset <= PageGetMaxOffsetNumber(page));
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, data->offset));
ItemPointerSet(&itup->t_tid, data->updateBlkno, InvalidOffsetNumber);
}
- if ( data->isDelete ) {
- Assert( GinPageIsLeaf( page ) );
- Assert( data->offset>=FirstOffsetNumber && data->offset<=PageGetMaxOffsetNumber(page) );
+ if (data->isDelete)
+ {
+ Assert(GinPageIsLeaf(page));
+ Assert(data->offset >= FirstOffsetNumber && data->offset <= PageGetMaxOffsetNumber(page));
PageIndexTupleDelete(page, data->offset);
}
- itup = (IndexTuple)( XLogRecGetData(record) + sizeof(ginxlogInsert) );
+ itup = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogInsert));
- if ( PageAddItem( page, (Item)itup, IndexTupleSize(itup), data->offset, LP_USED) == InvalidOffsetNumber )
- elog(ERROR, "failed to add item to index page in %u/%u/%u",
- data->node.spcNode, data->node.dbNode, data->node.relNode );
+ if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), data->offset, LP_USED) == InvalidOffsetNumber)
+ elog(ERROR, "failed to add item to index page in %u/%u/%u",
+ data->node.spcNode, data->node.dbNode, data->node.relNode);
- if ( !data->isLeaf && data->updateBlkno != InvalidBlockNumber )
- forgetIncompleteSplit(data->node, GinItemPointerGetBlockNumber( &itup->t_tid ), data->updateBlkno);
+ if (!data->isLeaf && data->updateBlkno != InvalidBlockNumber)
+ forgetIncompleteSplit(data->node, GinItemPointerGetBlockNumber(&itup->t_tid), data->updateBlkno);
}
PageSetLSN(page, lsn);
@@ -190,18 +207,21 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record) {
}
static void
-ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) {
- ginxlogSplit *data = (ginxlogSplit*)XLogRecGetData(record);
+ginRedoSplit(XLogRecPtr lsn, XLogRecord *record)
+{
+ ginxlogSplit *data = (ginxlogSplit *) XLogRecGetData(record);
Relation reln;
- Buffer lbuffer, rbuffer;
- Page lpage, rpage;
+ Buffer lbuffer,
+ rbuffer;
+ Page lpage,
+ rpage;
uint32 flags = 0;
reln = XLogOpenRelation(data->node);
- if ( data->isLeaf )
+ if (data->isLeaf)
flags |= GIN_LEAF;
- if ( data->isData )
+ if (data->isData)
flags |= GIN_DATA;
lbuffer = XLogReadBuffer(reln, data->lblkno, data->isRootSplit);
@@ -214,50 +234,57 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) {
rpage = (Page) BufferGetPage(rbuffer);
GinInitBuffer(rbuffer, flags);
- GinPageGetOpaque(lpage)->rightlink = BufferGetBlockNumber( rbuffer );
+ GinPageGetOpaque(lpage)->rightlink = BufferGetBlockNumber(rbuffer);
GinPageGetOpaque(rpage)->rightlink = data->rrlink;
- if ( data->isData ) {
- char *ptr = XLogRecGetData(record) + sizeof(ginxlogSplit);
- Size sizeofitem = GinSizeOfItem(lpage);
+ if (data->isData)
+ {
+ char *ptr = XLogRecGetData(record) + sizeof(ginxlogSplit);
+ Size sizeofitem = GinSizeOfItem(lpage);
OffsetNumber i;
- ItemPointer bound;
+ ItemPointer bound;
- for(i=0;i<data->separator;i++) {
- GinDataPageAddItem( lpage, ptr, InvalidOffsetNumber );
+ for (i = 0; i < data->separator; i++)
+ {
+ GinDataPageAddItem(lpage, ptr, InvalidOffsetNumber);
ptr += sizeofitem;
}
- for(i=data->separator;i<data->nitem;i++) {
- GinDataPageAddItem( rpage, ptr, InvalidOffsetNumber );
+ for (i = data->separator; i < data->nitem; i++)
+ {
+ GinDataPageAddItem(rpage, ptr, InvalidOffsetNumber);
ptr += sizeofitem;
}
/* set up right key */
bound = GinDataPageGetRightBound(lpage);
- if ( data->isLeaf )
- *bound = *(ItemPointerData*)GinDataPageGetItem(lpage, GinPageGetOpaque(lpage)->maxoff);
+ if (data->isLeaf)
+ *bound = *(ItemPointerData *) GinDataPageGetItem(lpage, GinPageGetOpaque(lpage)->maxoff);
else
- *bound = ((PostingItem*)GinDataPageGetItem(lpage, GinPageGetOpaque(lpage)->maxoff))->key;
+ *bound = ((PostingItem *) GinDataPageGetItem(lpage, GinPageGetOpaque(lpage)->maxoff))->key;
bound = GinDataPageGetRightBound(rpage);
*bound = data->rightbound;
- } else {
- IndexTuple itup = (IndexTuple)( XLogRecGetData(record) + sizeof(ginxlogSplit) );
+ }
+ else
+ {
+ IndexTuple itup = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogSplit));
OffsetNumber i;
- for(i=0;i<data->separator;i++) {
- if ( PageAddItem( lpage, (Item)itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber )
- elog(ERROR, "failed to add item to index page in %u/%u/%u",
- data->node.spcNode, data->node.dbNode, data->node.relNode );
- itup = (IndexTuple)( ((char*)itup) + MAXALIGN( IndexTupleSize(itup) ) );
+ for (i = 0; i < data->separator; i++)
+ {
+ if (PageAddItem(lpage, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber)
+ elog(ERROR, "failed to add item to index page in %u/%u/%u",
+ data->node.spcNode, data->node.dbNode, data->node.relNode);
+ itup = (IndexTuple) (((char *) itup) + MAXALIGN(IndexTupleSize(itup)));
}
- for(i=data->separator;i<data->nitem;i++) {
- if ( PageAddItem( rpage, (Item)itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber )
- elog(ERROR, "failed to add item to index page in %u/%u/%u",
- data->node.spcNode, data->node.dbNode, data->node.relNode );
- itup = (IndexTuple)( ((char*)itup) + MAXALIGN( IndexTupleSize(itup) ) );
+ for (i = data->separator; i < data->nitem; i++)
+ {
+ if (PageAddItem(rpage, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber)
+ elog(ERROR, "failed to add item to index page in %u/%u/%u",
+ data->node.spcNode, data->node.dbNode, data->node.relNode);
+ itup = (IndexTuple) (((char *) itup) + MAXALIGN(IndexTupleSize(itup)));
}
}
@@ -269,20 +296,24 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) {
PageSetTLI(lpage, ThisTimeLineID);
MarkBufferDirty(lbuffer);
- if ( !data->isLeaf && data->updateBlkno != InvalidBlockNumber )
+ if (!data->isLeaf && data->updateBlkno != InvalidBlockNumber)
forgetIncompleteSplit(data->node, data->leftChildBlkno, data->updateBlkno);
- if ( data->isRootSplit ) {
- Buffer rootBuf = XLogReadBuffer(reln, data->rootBlkno, false);
- Page rootPage = BufferGetPage( rootBuf );
+ if (data->isRootSplit)
+ {
+ Buffer rootBuf = XLogReadBuffer(reln, data->rootBlkno, false);
+ Page rootPage = BufferGetPage(rootBuf);
- GinInitBuffer( rootBuf, flags & ~GIN_LEAF );
+ GinInitBuffer(rootBuf, flags & ~GIN_LEAF);
- if ( data->isData ) {
- Assert( data->rootBlkno != GIN_ROOT_BLKNO );
+ if (data->isData)
+ {
+ Assert(data->rootBlkno != GIN_ROOT_BLKNO);
dataFillRoot(NULL, rootBuf, lbuffer, rbuffer);
- } else {
- Assert( data->rootBlkno == GIN_ROOT_BLKNO );
+ }
+ else
+ {
+ Assert(data->rootBlkno == GIN_ROOT_BLKNO);
entryFillRoot(NULL, rootBuf, lbuffer, rbuffer);
}
@@ -291,7 +322,8 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) {
MarkBufferDirty(rootBuf);
UnlockReleaseBuffer(rootBuf);
- } else
+ }
+ else
pushIncompleteSplit(data->node, data->lblkno, data->rblkno, data->rootBlkno);
UnlockReleaseBuffer(rbuffer);
@@ -299,8 +331,9 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) {
}
static void
-ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record) {
- ginxlogVacuumPage *data = (ginxlogVacuumPage*)XLogRecGetData(record);
+ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record)
+{
+ ginxlogVacuumPage *data = (ginxlogVacuumPage *) XLogRecGetData(record);
Relation reln;
Buffer buffer;
Page page;
@@ -314,25 +347,30 @@ ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record) {
Assert(BufferIsValid(buffer));
page = (Page) BufferGetPage(buffer);
- if ( GinPageIsData( page ) ) {
- memcpy( GinDataPageGetData(page), XLogRecGetData(record) + sizeof(ginxlogVacuumPage),
- GinSizeOfItem(page) * data->nitem );
+ if (GinPageIsData(page))
+ {
+ memcpy(GinDataPageGetData(page), XLogRecGetData(record) + sizeof(ginxlogVacuumPage),
+ GinSizeOfItem(page) *data->nitem);
GinPageGetOpaque(page)->maxoff = data->nitem;
- } else {
- OffsetNumber i, *tod;
- IndexTuple itup = (IndexTuple)( XLogRecGetData(record) + sizeof(ginxlogVacuumPage) );
-
- tod = (OffsetNumber*)palloc( sizeof(OffsetNumber) * PageGetMaxOffsetNumber(page) );
- for(i=FirstOffsetNumber;i<=PageGetMaxOffsetNumber(page);i++)
- tod[i-1] = i;
-
- PageIndexMultiDelete(page, tod, PageGetMaxOffsetNumber(page));
-
- for(i=0;i<data->nitem;i++) {
- if ( PageAddItem( page, (Item)itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber )
- elog(ERROR, "failed to add item to index page in %u/%u/%u",
- data->node.spcNode, data->node.dbNode, data->node.relNode );
- itup = (IndexTuple)( ((char*)itup) + MAXALIGN( IndexTupleSize(itup) ) );
+ }
+ else
+ {
+ OffsetNumber i,
+ *tod;
+ IndexTuple itup = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogVacuumPage));
+
+ tod = (OffsetNumber *) palloc(sizeof(OffsetNumber) * PageGetMaxOffsetNumber(page));
+ for (i = FirstOffsetNumber; i <= PageGetMaxOffsetNumber(page); i++)
+ tod[i - 1] = i;
+
+ PageIndexMultiDelete(page, tod, PageGetMaxOffsetNumber(page));
+
+ for (i = 0; i < data->nitem; i++)
+ {
+ if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, LP_USED) == InvalidOffsetNumber)
+ elog(ERROR, "failed to add item to index page in %u/%u/%u",
+ data->node.spcNode, data->node.dbNode, data->node.relNode);
+ itup = (IndexTuple) (((char *) itup) + MAXALIGN(IndexTupleSize(itup)));
}
}
@@ -344,17 +382,19 @@ ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record) {
}
static void
-ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record) {
- ginxlogDeletePage *data = (ginxlogDeletePage*)XLogRecGetData(record);
+ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record)
+{
+ ginxlogDeletePage *data = (ginxlogDeletePage *) XLogRecGetData(record);
Relation reln;
Buffer buffer;
Page page;
reln = XLogOpenRelation(data->node);
- if ( !( record->xl_info & XLR_BKP_BLOCK_1) ) {
+ if (!(record->xl_info & XLR_BKP_BLOCK_1))
+ {
buffer = XLogReadBuffer(reln, data->blkno, false);
- page = BufferGetPage( buffer );
+ page = BufferGetPage(buffer);
Assert(GinPageIsData(page));
GinPageGetOpaque(page)->flags = GIN_DELETED;
PageSetLSN(page, lsn);
@@ -363,9 +403,10 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record) {
UnlockReleaseBuffer(buffer);
}
- if ( !( record->xl_info & XLR_BKP_BLOCK_2) ) {
+ if (!(record->xl_info & XLR_BKP_BLOCK_2))
+ {
buffer = XLogReadBuffer(reln, data->parentBlkno, false);
- page = BufferGetPage( buffer );
+ page = BufferGetPage(buffer);
Assert(GinPageIsData(page));
Assert(!GinPageIsLeaf(page));
PageDeletePostingItem(page, data->parentOffset);
@@ -375,9 +416,10 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record) {
UnlockReleaseBuffer(buffer);
}
- if ( !( record->xl_info & XLR_BKP_BLOCK_2) && data->leftBlkno != InvalidBlockNumber ) {
+ if (!(record->xl_info & XLR_BKP_BLOCK_2) && data->leftBlkno != InvalidBlockNumber)
+ {
buffer = XLogReadBuffer(reln, data->leftBlkno, false);
- page = BufferGetPage( buffer );
+ page = BufferGetPage(buffer);
Assert(GinPageIsData(page));
GinPageGetOpaque(page)->rightlink = data->rightLink;
PageSetLSN(page, lsn);
@@ -387,28 +429,30 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record) {
}
}
-void
-gin_redo(XLogRecPtr lsn, XLogRecord *record) {
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+void
+gin_redo(XLogRecPtr lsn, XLogRecord *record)
+{
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
topCtx = MemoryContextSwitchTo(opCtx);
- switch (info) {
- case XLOG_GIN_CREATE_INDEX:
+ switch (info)
+ {
+ case XLOG_GIN_CREATE_INDEX:
ginRedoCreateIndex(lsn, record);
break;
- case XLOG_GIN_CREATE_PTREE:
+ case XLOG_GIN_CREATE_PTREE:
ginRedoCreatePTree(lsn, record);
break;
- case XLOG_GIN_INSERT:
+ case XLOG_GIN_INSERT:
ginRedoInsert(lsn, record);
break;
- case XLOG_GIN_SPLIT:
+ case XLOG_GIN_SPLIT:
ginRedoSplit(lsn, record);
break;
- case XLOG_GIN_VACUUM_PAGE:
+ case XLOG_GIN_VACUUM_PAGE:
ginRedoVacuumPage(lsn, record);
break;
- case XLOG_GIN_DELETE_PAGE:
+ case XLOG_GIN_DELETE_PAGE:
ginRedoDeletePage(lsn, record);
break;
default:
@@ -419,110 +463,122 @@ gin_redo(XLogRecPtr lsn, XLogRecord *record) {
}
static void
-desc_node( StringInfo buf, RelFileNode node, BlockNumber blkno ) {
- appendStringInfo(buf,"node: %u/%u/%u blkno: %u",
- node.spcNode, node.dbNode, node.relNode, blkno);
+desc_node(StringInfo buf, RelFileNode node, BlockNumber blkno)
+{
+ appendStringInfo(buf, "node: %u/%u/%u blkno: %u",
+ node.spcNode, node.dbNode, node.relNode, blkno);
}
-void
-gin_desc(StringInfo buf, uint8 xl_info, char *rec) {
- uint8 info = xl_info & ~XLR_INFO_MASK;
+void
+gin_desc(StringInfo buf, uint8 xl_info, char *rec)
+{
+ uint8 info = xl_info & ~XLR_INFO_MASK;
- switch (info) {
- case XLOG_GIN_CREATE_INDEX:
- appendStringInfo(buf,"Create index, ");
- desc_node(buf, *(RelFileNode*)rec, GIN_ROOT_BLKNO );
+ switch (info)
+ {
+ case XLOG_GIN_CREATE_INDEX:
+ appendStringInfo(buf, "Create index, ");
+ desc_node(buf, *(RelFileNode *) rec, GIN_ROOT_BLKNO);
break;
- case XLOG_GIN_CREATE_PTREE:
- appendStringInfo(buf,"Create posting tree, ");
- desc_node(buf, ((ginxlogCreatePostingTree*)rec)->node, ((ginxlogCreatePostingTree*)rec)->blkno );
+ case XLOG_GIN_CREATE_PTREE:
+ appendStringInfo(buf, "Create posting tree, ");
+ desc_node(buf, ((ginxlogCreatePostingTree *) rec)->node, ((ginxlogCreatePostingTree *) rec)->blkno);
break;
- case XLOG_GIN_INSERT:
- appendStringInfo(buf,"Insert item, ");
- desc_node(buf, ((ginxlogInsert*)rec)->node, ((ginxlogInsert*)rec)->blkno );
- appendStringInfo(buf," offset: %u nitem: %u isdata: %c isleaf %c isdelete %c updateBlkno:%u",
- ((ginxlogInsert*)rec)->offset,
- ((ginxlogInsert*)rec)->nitem,
- ( ((ginxlogInsert*)rec)->isData ) ? 'T' : 'F',
- ( ((ginxlogInsert*)rec)->isLeaf ) ? 'T' : 'F',
- ( ((ginxlogInsert*)rec)->isDelete ) ? 'T' : 'F',
- ((ginxlogInsert*)rec)->updateBlkno
- );
+ case XLOG_GIN_INSERT:
+ appendStringInfo(buf, "Insert item, ");
+ desc_node(buf, ((ginxlogInsert *) rec)->node, ((ginxlogInsert *) rec)->blkno);
+ appendStringInfo(buf, " offset: %u nitem: %u isdata: %c isleaf %c isdelete %c updateBlkno:%u",
+ ((ginxlogInsert *) rec)->offset,
+ ((ginxlogInsert *) rec)->nitem,
+ (((ginxlogInsert *) rec)->isData) ? 'T' : 'F',
+ (((ginxlogInsert *) rec)->isLeaf) ? 'T' : 'F',
+ (((ginxlogInsert *) rec)->isDelete) ? 'T' : 'F',
+ ((ginxlogInsert *) rec)->updateBlkno
+ );
break;
- case XLOG_GIN_SPLIT:
- appendStringInfo(buf,"Page split, ");
- desc_node(buf, ((ginxlogSplit*)rec)->node, ((ginxlogSplit*)rec)->lblkno );
- appendStringInfo(buf," isrootsplit: %c", ( ((ginxlogSplit*)rec)->isRootSplit ) ? 'T' : 'F');
+ case XLOG_GIN_SPLIT:
+ appendStringInfo(buf, "Page split, ");
+ desc_node(buf, ((ginxlogSplit *) rec)->node, ((ginxlogSplit *) rec)->lblkno);
+ appendStringInfo(buf, " isrootsplit: %c", (((ginxlogSplit *) rec)->isRootSplit) ? 'T' : 'F');
break;
- case XLOG_GIN_VACUUM_PAGE:
- appendStringInfo(buf,"Vacuum page, ");
- desc_node(buf, ((ginxlogVacuumPage*)rec)->node, ((ginxlogVacuumPage*)rec)->blkno );
+ case XLOG_GIN_VACUUM_PAGE:
+ appendStringInfo(buf, "Vacuum page, ");
+ desc_node(buf, ((ginxlogVacuumPage *) rec)->node, ((ginxlogVacuumPage *) rec)->blkno);
break;
- case XLOG_GIN_DELETE_PAGE:
- appendStringInfo(buf,"Delete page, ");
- desc_node(buf, ((ginxlogDeletePage*)rec)->node, ((ginxlogDeletePage*)rec)->blkno );
+ case XLOG_GIN_DELETE_PAGE:
+ appendStringInfo(buf, "Delete page, ");
+ desc_node(buf, ((ginxlogDeletePage *) rec)->node, ((ginxlogDeletePage *) rec)->blkno);
break;
default:
elog(PANIC, "gin_desc: unknown op code %u", info);
}
}
-void
-gin_xlog_startup(void) {
+void
+gin_xlog_startup(void)
+{
incomplete_splits = NIL;
opCtx = AllocSetContextCreate(CurrentMemoryContext,
- "GIN recovery temporary context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "GIN recovery temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
static void
-ginContinueSplit( ginIncompleteSplit *split ) {
+ginContinueSplit(ginIncompleteSplit *split)
+{
GinBtreeData btree;
Relation reln;
Buffer buffer;
- GinBtreeStack stack;
+ GinBtreeStack stack;
- /* elog(NOTICE,"ginContinueSplit root:%u l:%u r:%u", split->rootBlkno, split->leftBlkno, split->rightBlkno); */
+ /*
+ * elog(NOTICE,"ginContinueSplit root:%u l:%u r:%u", split->rootBlkno,
+ * split->leftBlkno, split->rightBlkno);
+ */
reln = XLogOpenRelation(split->node);
- buffer = XLogReadBuffer(reln, split->leftBlkno, false);
+ buffer = XLogReadBuffer(reln, split->leftBlkno, false);
- if ( split->rootBlkno == GIN_ROOT_BLKNO ) {
- prepareEntryScan( &btree, reln, (Datum)0, NULL );
- btree.entry = ginPageGetLinkItup( buffer );
- } else {
- Page page = BufferGetPage( buffer );
+ if (split->rootBlkno == GIN_ROOT_BLKNO)
+ {
+ prepareEntryScan(&btree, reln, (Datum) 0, NULL);
+ btree.entry = ginPageGetLinkItup(buffer);
+ }
+ else
+ {
+ Page page = BufferGetPage(buffer);
- prepareDataScan( &btree, reln );
+ prepareDataScan(&btree, reln);
- PostingItemSetBlockNumber( &(btree.pitem), split->leftBlkno );
- if ( GinPageIsLeaf(page) )
- btree.pitem.key = *(ItemPointerData*)GinDataPageGetItem(page,
- GinPageGetOpaque(page)->maxoff);
+ PostingItemSetBlockNumber(&(btree.pitem), split->leftBlkno);
+ if (GinPageIsLeaf(page))
+ btree.pitem.key = *(ItemPointerData *) GinDataPageGetItem(page,
+ GinPageGetOpaque(page)->maxoff);
else
- btree.pitem.key = ((PostingItem*)GinDataPageGetItem(page,
- GinPageGetOpaque(page)->maxoff))->key;
+ btree.pitem.key = ((PostingItem *) GinDataPageGetItem(page,
+ GinPageGetOpaque(page)->maxoff))->key;
}
- btree.rightblkno = split->rightBlkno;
+ btree.rightblkno = split->rightBlkno;
stack.blkno = split->leftBlkno;
stack.buffer = buffer;
stack.off = InvalidOffsetNumber;
stack.parent = NULL;
- findParents( &btree, &stack, split->rootBlkno);
- ginInsertValue( &btree, stack.parent );
+ findParents(&btree, &stack, split->rootBlkno);
+ ginInsertValue(&btree, stack.parent);
- UnlockReleaseBuffer( buffer );
+ UnlockReleaseBuffer(buffer);
}
-void
-gin_xlog_cleanup(void) {
+void
+gin_xlog_cleanup(void)
+{
ListCell *l;
MemoryContext topCtx;
@@ -531,8 +587,9 @@ gin_xlog_cleanup(void) {
foreach(l, incomplete_splits)
{
ginIncompleteSplit *split = (ginIncompleteSplit *) lfirst(l);
- ginContinueSplit( split );
- MemoryContextReset( opCtx );
+
+ ginContinueSplit(split);
+ MemoryContextReset(opCtx);
}
MemoryContextSwitchTo(topCtx);
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 7b0c0f6b38..60d0affbfc 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.142 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.143 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -185,7 +185,7 @@ gistbuildCallback(Relation index,
/* form an index tuple and point it at the heap tuple */
itup = gistFormTuple(&buildstate->giststate, index,
- values, isnull, true /* size is currently bogus */);
+ values, isnull, true /* size is currently bogus */ );
itup->t_tid = htup->t_self;
/*
@@ -199,7 +199,7 @@ gistbuildCallback(Relation index,
* after initial build do not.
*/
gistdoinsert(index, itup,
- RelationGetTargetPageFreeSpace(index, GIST_DEFAULT_FILLFACTOR),
+ RelationGetTargetPageFreeSpace(index, GIST_DEFAULT_FILLFACTOR),
&buildstate->giststate);
buildstate->indtuples += 1;
@@ -236,7 +236,7 @@ gistinsert(PG_FUNCTION_ARGS)
initGISTstate(&giststate, r);
itup = gistFormTuple(&giststate, r,
- values, isnull, true /* size is currently bogus */);
+ values, isnull, true /* size is currently bogus */ );
itup->t_tid = *ht_ctid;
gistdoinsert(r, itup, 0, &giststate);
@@ -285,18 +285,17 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
bool is_leaf = (GistPageIsLeaf(state->stack->page)) ? true : false;
/*
- * if (!is_leaf) remove old key:
- * This node's key has been modified, either because a child split
- * occurred or because we needed to adjust our key for an insert in a
- * child node. Therefore, remove the old version of this node's key.
+ * if (!is_leaf) remove old key: This node's key has been modified, either
+ * because a child split occurred or because we needed to adjust our key
+ * for an insert in a child node. Therefore, remove the old version of
+ * this node's key.
*
- * for WAL replay, in the non-split case we handle this by
- * setting up a one-element todelete array; in the split case, it's
- * handled implicitly because the tuple vector passed to gistSplit
- * won't include this tuple.
+ * for WAL replay, in the non-split case we handle this by setting up a
+ * one-element todelete array; in the split case, it's handled implicitly
+ * because the tuple vector passed to gistSplit won't include this tuple.
*
- * XXX: If we want to change fillfactors between node and leaf,
- * fillfactor = (is_leaf ? state->leaf_fillfactor : state->node_fillfactor)
+ * XXX: If we want to change fillfactors between node and leaf, fillfactor
+ * = (is_leaf ? state->leaf_fillfactor : state->node_fillfactor)
*/
if (gistnospace(state->stack->page, state->itup, state->ituplen,
is_leaf ? InvalidOffsetNumber : state->stack->childoffnum,
@@ -307,80 +306,88 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
int tlen;
SplitedPageLayout *dist = NULL,
*ptr;
- BlockNumber rrlink = InvalidBlockNumber;
+ BlockNumber rrlink = InvalidBlockNumber;
GistNSN oldnsn;
is_splitted = true;
/*
- * Form index tuples vector to split:
- * remove old tuple if t's needed and add new tuples to vector
+ * Form index tuples vector to split: remove old tuple if t's needed
+ * and add new tuples to vector
*/
itvec = gistextractpage(state->stack->page, &tlen);
- if ( !is_leaf ) {
+ if (!is_leaf)
+ {
/* on inner page we should remove old tuple */
- int pos = state->stack->childoffnum - FirstOffsetNumber;
+ int pos = state->stack->childoffnum - FirstOffsetNumber;
- tlen--;
- if ( pos != tlen )
- memmove( itvec+pos, itvec + pos + 1, sizeof( IndexTuple ) * (tlen-pos) );
+ tlen--;
+ if (pos != tlen)
+ memmove(itvec + pos, itvec + pos + 1, sizeof(IndexTuple) * (tlen - pos));
}
itvec = gistjoinvector(itvec, &tlen, state->itup, state->ituplen);
dist = gistSplit(state->r, state->stack->page, itvec, tlen, giststate);
- state->itup = (IndexTuple*)palloc( sizeof(IndexTuple) * tlen);
+ state->itup = (IndexTuple *) palloc(sizeof(IndexTuple) * tlen);
state->ituplen = 0;
- if (state->stack->blkno != GIST_ROOT_BLKNO) {
- /* if non-root split then we should not allocate new buffer,
- but we must create temporary page to operate */
+ if (state->stack->blkno != GIST_ROOT_BLKNO)
+ {
+ /*
+ * if non-root split then we should not allocate new buffer, but
+ * we must create temporary page to operate
+ */
dist->buffer = state->stack->buffer;
- dist->page = PageGetTempPage( BufferGetPage(dist->buffer), sizeof(GISTPageOpaqueData) );
+ dist->page = PageGetTempPage(BufferGetPage(dist->buffer), sizeof(GISTPageOpaqueData));
- /*clean all flags except F_LEAF */
+ /* clean all flags except F_LEAF */
GistPageGetOpaque(dist->page)->flags = (is_leaf) ? F_LEAF : 0;
}
/* make new pages and fills them */
- for (ptr = dist; ptr; ptr = ptr->next) {
- int i;
- char *data;
+ for (ptr = dist; ptr; ptr = ptr->next)
+ {
+ int i;
+ char *data;
/* get new page */
- if ( ptr->buffer == InvalidBuffer ) {
- ptr->buffer = gistNewBuffer( state->r );
- GISTInitBuffer( ptr->buffer, (is_leaf) ? F_LEAF : 0 );
+ if (ptr->buffer == InvalidBuffer)
+ {
+ ptr->buffer = gistNewBuffer(state->r);
+ GISTInitBuffer(ptr->buffer, (is_leaf) ? F_LEAF : 0);
ptr->page = BufferGetPage(ptr->buffer);
}
- ptr->block.blkno = BufferGetBlockNumber( ptr->buffer );
+ ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
- /* fill page, we can do it becouse all this pages are new (ie not linked in tree
- or masked by temp page */
- data = (char*)(ptr->list);
- for(i=0;i<ptr->block.num;i++) {
- if ( PageAddItem(ptr->page, (Item)data, IndexTupleSize((IndexTuple)data), i+FirstOffsetNumber, LP_USED) == InvalidOffsetNumber )
+ /*
+ * fill page, we can do it becouse all this pages are new (ie not
+ * linked in tree or masked by temp page
+ */
+ data = (char *) (ptr->list);
+ for (i = 0; i < ptr->block.num; i++)
+ {
+ if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(state->r));
- data += IndexTupleSize((IndexTuple)data);
+ data += IndexTupleSize((IndexTuple) data);
}
/* set up ItemPointer and remmeber it for parent */
ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
- state->itup[ state->ituplen ] = ptr->itup;
+ state->itup[state->ituplen] = ptr->itup;
state->ituplen++;
}
/* saves old rightlink */
- if ( state->stack->blkno != GIST_ROOT_BLKNO )
- rrlink = GistPageGetOpaque(dist->page)->rightlink;
+ if (state->stack->blkno != GIST_ROOT_BLKNO)
+ rrlink = GistPageGetOpaque(dist->page)->rightlink;
START_CRIT_SECTION();
/*
- * must mark buffers dirty before XLogInsert, even though we'll
- * still be changing their opaque fields below.
- * set up right links.
+ * must mark buffers dirty before XLogInsert, even though we'll still
+ * be changing their opaque fields below. set up right links.
*/
- for (ptr = dist; ptr; ptr = ptr->next)
+ for (ptr = dist; ptr; ptr = ptr->next)
{
MarkBufferDirty(ptr->buffer);
GistPageGetOpaque(ptr->page)->rightlink = (ptr->next) ?
@@ -388,9 +395,10 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
}
/* restore splitted non-root page */
- if ( state->stack->blkno != GIST_ROOT_BLKNO ) {
- PageRestoreTempPage( dist->page, BufferGetPage( dist->buffer ) );
- dist->page = BufferGetPage( dist->buffer );
+ if (state->stack->blkno != GIST_ROOT_BLKNO)
+ {
+ PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
+ dist->page = BufferGetPage(dist->buffer);
}
if (!state->r->rd_istemp)
@@ -419,25 +427,27 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
/* set up NSN */
oldnsn = GistPageGetOpaque(dist->page)->nsn;
- if ( state->stack->blkno == GIST_ROOT_BLKNO )
+ if (state->stack->blkno == GIST_ROOT_BLKNO)
/* if root split we should put initial value */
oldnsn = PageGetLSN(dist->page);
- for (ptr = dist; ptr; ptr = ptr->next) {
+ for (ptr = dist; ptr; ptr = ptr->next)
+ {
/* only for last set oldnsn */
GistPageGetOpaque(ptr->page)->nsn = (ptr->next) ?
PageGetLSN(ptr->page) : oldnsn;
}
- /*
- * release buffers, if it was a root split then
- * release all buffers because we create all buffers
+ /*
+ * release buffers, if it was a root split then release all buffers
+ * because we create all buffers
*/
- ptr = ( state->stack->blkno == GIST_ROOT_BLKNO ) ? dist : dist->next;
- for(; ptr; ptr = ptr->next)
+ ptr = (state->stack->blkno == GIST_ROOT_BLKNO) ? dist : dist->next;
+ for (; ptr; ptr = ptr->next)
UnlockReleaseBuffer(ptr->buffer);
- if (state->stack->blkno == GIST_ROOT_BLKNO) {
+ if (state->stack->blkno == GIST_ROOT_BLKNO)
+ {
gistnewroot(state->r, state->stack->buffer, state->itup, state->ituplen, &(state->key));
state->needInsertComplete = false;
}
@@ -470,7 +480,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
}
rdata = formUpdateRdata(state->r->rd_node, state->stack->buffer,
- offs, noffs,
+ offs, noffs,
state->itup, state->ituplen,
&(state->key));
@@ -922,16 +932,16 @@ gistSplit(Relation r,
GistSplitVector v;
GistEntryVector *entryvec;
int i;
- SplitedPageLayout *res = NULL;
+ SplitedPageLayout *res = NULL;
/* generate the item array */
entryvec = palloc(GEVHDRSZ + (len + 1) * sizeof(GISTENTRY));
entryvec->n = len + 1;
- memset( v.spl_lisnull, TRUE, sizeof(bool) * giststate->tupdesc->natts );
- memset( v.spl_risnull, TRUE, sizeof(bool) * giststate->tupdesc->natts );
- gistSplitByKey(r, page, itup, len, giststate,
- &v, entryvec, 0);
+ memset(v.spl_lisnull, TRUE, sizeof(bool) * giststate->tupdesc->natts);
+ memset(v.spl_risnull, TRUE, sizeof(bool) * giststate->tupdesc->natts);
+ gistSplitByKey(r, page, itup, len, giststate,
+ &v, entryvec, 0);
/* form left and right vector */
lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * (len + 1));
@@ -952,19 +962,20 @@ gistSplit(Relation r,
{
ROTATEDIST(res);
res->block.num = v.splitVector.spl_nright;
- res->list = gistfillitupvec(rvectup, v.splitVector.spl_nright, &( res->lenlist ) );
+ res->list = gistfillitupvec(rvectup, v.splitVector.spl_nright, &(res->lenlist));
res->itup = (v.spl_rightvalid) ? gistFormTuple(giststate, r, v.spl_rattr, v.spl_risnull, false)
: gist_form_invalid_tuple(GIST_ROOT_BLKNO);
}
if (!gistfitpage(lvectup, v.splitVector.spl_nleft))
{
- SplitedPageLayout *resptr, *subres;
+ SplitedPageLayout *resptr,
+ *subres;
resptr = subres = gistSplit(r, page, lvectup, v.splitVector.spl_nleft, giststate);
- /* install on list's tail */
- while( resptr->next )
+ /* install on list's tail */
+ while (resptr->next)
resptr = resptr->next;
resptr->next = res;
@@ -974,7 +985,7 @@ gistSplit(Relation r,
{
ROTATEDIST(res);
res->block.num = v.splitVector.spl_nleft;
- res->list = gistfillitupvec(lvectup, v.splitVector.spl_nleft, &( res->lenlist ) );
+ res->list = gistfillitupvec(lvectup, v.splitVector.spl_nleft, &(res->lenlist));
res->itup = (v.spl_leftvalid) ? gistFormTuple(giststate, r, v.spl_lattr, v.spl_lisnull, false)
: gist_form_invalid_tuple(GIST_ROOT_BLKNO);
}
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index ed41f2c335..68a4c18bb8 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.60 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.61 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -360,8 +360,7 @@ gistindex_keytest(IndexTuple tuple,
IncrIndexProcessed();
/*
- * Tuple doesn't restore after crash recovery because of incomplete
- * insert
+ * Tuple doesn't restore after crash recovery because of incomplete insert
*/
if (!GistPageIsLeaf(p) && GistTupleIsInvalid(tuple))
return true;
@@ -378,14 +377,18 @@ gistindex_keytest(IndexTuple tuple,
giststate->tupdesc,
&isNull);
- if ( key->sk_flags & SK_ISNULL ) {
- /* is the compared-to datum NULL? on non-leaf page it's possible
- to have nulls in childs :( */
+ if (key->sk_flags & SK_ISNULL)
+ {
+ /*
+ * is the compared-to datum NULL? on non-leaf page it's possible
+ * to have nulls in childs :(
+ */
- if ( isNull || !GistPageIsLeaf(p) )
+ if (isNull || !GistPageIsLeaf(p))
return true;
return false;
- } else if ( isNull )
+ }
+ else if (isNull)
return false;
gistdentryinit(giststate, key->sk_attno - 1, &de,
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index ccd54bac12..5c021461ee 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.8 2006/09/10 00:29:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.9 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -112,7 +112,8 @@ gist_box_consistent(PG_FUNCTION_ARGS)
}
static void
-adjustBox( BOX *b, BOX *addon ) {
+adjustBox(BOX *b, BOX *addon)
+{
if (b->high.x < addon->high.x)
b->high.x = addon->high.x;
if (b->low.x > addon->low.x)
@@ -146,7 +147,7 @@ gist_box_union(PG_FUNCTION_ARGS)
for (i = 1; i < numranges; i++)
{
cur = DatumGetBoxP(entryvec->vector[i].key);
- adjustBox( pageunion, cur );
+ adjustBox(pageunion, cur);
}
*sizep = sizeof(BOX);
@@ -210,67 +211,79 @@ compare_KB(const void *a, const void *b)
}
static void
-chooseLR( GIST_SPLITVEC *v,
- OffsetNumber *list1, int nlist1, BOX *union1,
- OffsetNumber *list2, int nlist2, BOX *union2 )
+chooseLR(GIST_SPLITVEC *v,
+ OffsetNumber *list1, int nlist1, BOX *union1,
+ OffsetNumber *list2, int nlist2, BOX *union2)
{
- bool firstToLeft = true;
-
- if ( v->spl_ldatum_exists || v->spl_rdatum_exists ) {
- if ( v->spl_ldatum_exists && v->spl_rdatum_exists ) {
- BOX LRl = *union1, LRr = *union2;
- BOX RLl = *union2, RLr = *union1;
- double sizeLR, sizeRL;
-
- adjustBox( &LRl, DatumGetBoxP( v->spl_ldatum ) );
- adjustBox( &LRr, DatumGetBoxP( v->spl_rdatum ) );
- adjustBox( &RLl, DatumGetBoxP( v->spl_ldatum ) );
- adjustBox( &RLr, DatumGetBoxP( v->spl_rdatum ) );
-
- sizeLR = size_box( DirectFunctionCall2(rt_box_inter, BoxPGetDatum(&LRl), BoxPGetDatum(&LRr)) );
- sizeRL = size_box( DirectFunctionCall2(rt_box_inter, BoxPGetDatum(&RLl), BoxPGetDatum(&RLr)) );
+ bool firstToLeft = true;
- if ( sizeLR > sizeRL )
+ if (v->spl_ldatum_exists || v->spl_rdatum_exists)
+ {
+ if (v->spl_ldatum_exists && v->spl_rdatum_exists)
+ {
+ BOX LRl = *union1,
+ LRr = *union2;
+ BOX RLl = *union2,
+ RLr = *union1;
+ double sizeLR,
+ sizeRL;
+
+ adjustBox(&LRl, DatumGetBoxP(v->spl_ldatum));
+ adjustBox(&LRr, DatumGetBoxP(v->spl_rdatum));
+ adjustBox(&RLl, DatumGetBoxP(v->spl_ldatum));
+ adjustBox(&RLr, DatumGetBoxP(v->spl_rdatum));
+
+ sizeLR = size_box(DirectFunctionCall2(rt_box_inter, BoxPGetDatum(&LRl), BoxPGetDatum(&LRr)));
+ sizeRL = size_box(DirectFunctionCall2(rt_box_inter, BoxPGetDatum(&RLl), BoxPGetDatum(&RLr)));
+
+ if (sizeLR > sizeRL)
firstToLeft = false;
- } else {
- float p1, p2;
- GISTENTRY oldUnion, addon;
+ }
+ else
+ {
+ float p1,
+ p2;
+ GISTENTRY oldUnion,
+ addon;
- gistentryinit(oldUnion, ( v->spl_ldatum_exists ) ? v->spl_ldatum : v->spl_rdatum,
+ gistentryinit(oldUnion, (v->spl_ldatum_exists) ? v->spl_ldatum : v->spl_rdatum,
NULL, NULL, InvalidOffsetNumber, FALSE);
-
+
gistentryinit(addon, BoxPGetDatum(union1), NULL, NULL, InvalidOffsetNumber, FALSE);
- DirectFunctionCall3(gist_box_penalty, PointerGetDatum(&oldUnion), PointerGetDatum(&union1), PointerGetDatum(&p1));
+ DirectFunctionCall3(gist_box_penalty, PointerGetDatum(&oldUnion), PointerGetDatum(&union1), PointerGetDatum(&p1));
gistentryinit(addon, BoxPGetDatum(union2), NULL, NULL, InvalidOffsetNumber, FALSE);
DirectFunctionCall3(gist_box_penalty, PointerGetDatum(&oldUnion), PointerGetDatum(&union2), PointerGetDatum(&p2));
- if ( (v->spl_ldatum_exists && p1 > p2) || (v->spl_rdatum_exists && p1 < p2) )
- firstToLeft = false;
+ if ((v->spl_ldatum_exists && p1 > p2) || (v->spl_rdatum_exists && p1 < p2))
+ firstToLeft = false;
}
}
- if ( firstToLeft ) {
+ if (firstToLeft)
+ {
v->spl_left = list1;
v->spl_right = list2;
v->spl_nleft = nlist1;
v->spl_nright = nlist2;
- if ( v->spl_ldatum_exists )
- adjustBox(union1, DatumGetBoxP( v->spl_ldatum ) );
+ if (v->spl_ldatum_exists)
+ adjustBox(union1, DatumGetBoxP(v->spl_ldatum));
v->spl_ldatum = BoxPGetDatum(union1);
- if ( v->spl_rdatum_exists )
- adjustBox(union2, DatumGetBoxP( v->spl_rdatum ) );
+ if (v->spl_rdatum_exists)
+ adjustBox(union2, DatumGetBoxP(v->spl_rdatum));
v->spl_rdatum = BoxPGetDatum(union2);
- } else {
+ }
+ else
+ {
v->spl_left = list2;
v->spl_right = list1;
v->spl_nleft = nlist2;
v->spl_nright = nlist1;
- if ( v->spl_ldatum_exists )
- adjustBox(union2, DatumGetBoxP( v->spl_ldatum ) );
+ if (v->spl_ldatum_exists)
+ adjustBox(union2, DatumGetBoxP(v->spl_ldatum));
v->spl_ldatum = BoxPGetDatum(union2);
- if ( v->spl_rdatum_exists )
- adjustBox(union1, DatumGetBoxP( v->spl_rdatum ) );
+ if (v->spl_rdatum_exists)
+ adjustBox(union1, DatumGetBoxP(v->spl_rdatum));
v->spl_rdatum = BoxPGetDatum(union1);
}
@@ -326,7 +339,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
))
allisequal = false;
- adjustBox( &pageunion, cur );
+ adjustBox(&pageunion, cur);
}
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
@@ -359,12 +372,12 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
}
}
- if ( v->spl_ldatum_exists )
- adjustBox( unionL, DatumGetBoxP( v->spl_ldatum ) );
+ if (v->spl_ldatum_exists)
+ adjustBox(unionL, DatumGetBoxP(v->spl_ldatum));
v->spl_ldatum = BoxPGetDatum(unionL);
- if ( v->spl_rdatum_exists )
- adjustBox( unionR, DatumGetBoxP( v->spl_rdatum ) );
+ if (v->spl_rdatum_exists)
+ adjustBox(unionR, DatumGetBoxP(v->spl_rdatum));
v->spl_rdatum = BoxPGetDatum(unionR);
v->spl_ldatum_exists = v->spl_rdatum_exists = false;
@@ -471,13 +484,13 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
}
if (direction == 'x')
- chooseLR( v,
- listL, posL, unionL,
- listR, posR, unionR );
- else
- chooseLR( v,
- listB, posB, unionB,
- listT, posT, unionT );
+ chooseLR(v,
+ listL, posL, unionL,
+ listR, posR, unionR);
+ else
+ chooseLR(v,
+ listB, posB, unionB,
+ listT, posT, unionT);
PG_RETURN_POINTER(v);
}
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index 0d3c1d2a9a..b0fdb74004 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.64 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.65 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -228,12 +228,12 @@ gistendscan(PG_FUNCTION_ARGS)
static void
gistfreestack(GISTSearchStack *s)
-{
+{
while (s != NULL)
{
GISTSearchStack *p = s->next;
+
pfree(s);
s = p;
}
}
-
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index 8c6683a4cc..14a14509cb 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistsplit.c,v 1.2 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistsplit.c,v 1.3 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -16,12 +16,13 @@
#include "access/gist_private.h"
-typedef struct {
- Datum *attr;
- int len;
+typedef struct
+{
+ Datum *attr;
+ int len;
OffsetNumber *entries;
- bool *isnull;
- bool *equiv;
+ bool *isnull;
+ bool *equiv;
} GistSplitUnion;
@@ -29,25 +30,28 @@ typedef struct {
* Forms unions of subkeys after page split, but
* uses only tuples aren't in groups of equalent tuples
*/
-static void
-gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
- GistSplitUnion *gsvp, int startkey) {
- IndexTuple *cleanedItVec;
- int i, cleanedLen=0;
+static void
+gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
+ GistSplitUnion *gsvp, int startkey)
+{
+ IndexTuple *cleanedItVec;
+ int i,
+ cleanedLen = 0;
- cleanedItVec = (IndexTuple*)palloc(sizeof(IndexTuple) * gsvp->len);
+ cleanedItVec = (IndexTuple *) palloc(sizeof(IndexTuple) * gsvp->len);
- for(i=0;i<gsvp->len;i++) {
- if ( gsvp->equiv && gsvp->equiv[gsvp->entries[i]])
+ for (i = 0; i < gsvp->len; i++)
+ {
+ if (gsvp->equiv && gsvp->equiv[gsvp->entries[i]])
continue;
cleanedItVec[cleanedLen++] = itvec[gsvp->entries[i] - 1];
}
- gistMakeUnionItVec(giststate, cleanedItVec, cleanedLen, startkey,
- gsvp->attr, gsvp->isnull);
+ gistMakeUnionItVec(giststate, cleanedItVec, cleanedLen, startkey,
+ gsvp->attr, gsvp->isnull);
- pfree( cleanedItVec );
+ pfree(cleanedItVec);
}
/*
@@ -56,7 +60,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
static void
gistunionsubkey(GISTSTATE *giststate, IndexTuple *itvec, GistSplitVector *spl, int attno)
{
- GistSplitUnion gsvp;
+ GistSplitUnion gsvp;
gsvp.equiv = spl->spl_equiv;
@@ -76,34 +80,40 @@ gistunionsubkey(GISTSTATE *giststate, IndexTuple *itvec, GistSplitVector *spl, i
}
/*
- * find group in vector with equivalent value
+ * find group in vector with equivalent value
*/
static int
gistfindgroup(Relation r, GISTSTATE *giststate, GISTENTRY *valvec, GistSplitVector *spl, int attno)
{
int i;
GISTENTRY entry;
- int len=0;
+ int len = 0;
/*
- * attno key is always not null (see gistSplitByKey), so we may not check for
- * nulls
+ * attno key is always not null (see gistSplitByKey), so we may not check
+ * for nulls
*/
gistentryinit(entry, spl->splitVector.spl_rdatum, r, NULL, (OffsetNumber) 0, FALSE);
- for (i = 0; i < spl->splitVector.spl_nleft; i++) {
- float penalty = gistpenalty(giststate, attno, &entry, false,
- &valvec[spl->splitVector.spl_left[i]], false);
- if ( penalty == 0.0 ) {
+ for (i = 0; i < spl->splitVector.spl_nleft; i++)
+ {
+ float penalty = gistpenalty(giststate, attno, &entry, false,
+ &valvec[spl->splitVector.spl_left[i]], false);
+
+ if (penalty == 0.0)
+ {
spl->spl_equiv[spl->splitVector.spl_left[i]] = true;
len++;
}
}
gistentryinit(entry, spl->splitVector.spl_ldatum, r, NULL, (OffsetNumber) 0, FALSE);
- for (i = 0; i < spl->splitVector.spl_nright; i++) {
- float penalty = gistpenalty(giststate, attno, &entry, false,
- &valvec[spl->splitVector.spl_right[i]], false);
- if ( penalty == 0.0 ) {
+ for (i = 0; i < spl->splitVector.spl_nright; i++)
+ {
+ float penalty = gistpenalty(giststate, attno, &entry, false,
+ &valvec[spl->splitVector.spl_right[i]], false);
+
+ if (penalty == 0.0)
+ {
spl->spl_equiv[spl->splitVector.spl_right[i]] = true;
len++;
}
@@ -113,24 +123,32 @@ gistfindgroup(Relation r, GISTSTATE *giststate, GISTENTRY *valvec, GistSplitVect
}
static void
-cleanupOffsets( OffsetNumber *a, int *len, bool *equiv, int *LenEquiv ) {
- int curlen,i;
- OffsetNumber *curwpos;
+cleanupOffsets(OffsetNumber *a, int *len, bool *equiv, int *LenEquiv)
+{
+ int curlen,
+ i;
+ OffsetNumber *curwpos;
curlen = *len;
curwpos = a;
- for (i = 0; i < *len; i++) {
- if ( equiv[ a[i] ] == FALSE ) {
+ for (i = 0; i < *len; i++)
+ {
+ if (equiv[a[i]] == FALSE)
+ {
*curwpos = a[i];
curwpos++;
- } else {
+ }
+ else
+ {
/* corner case: we shouldn't make void array */
- if ( curlen==1 ) {
- equiv[ a[i] ] = FALSE; /* mark item as non-equivalent */
- i--; /* redo the same */
+ if (curlen == 1)
+ {
+ equiv[a[i]] = FALSE; /* mark item as non-equivalent */
+ i--; /* redo the same */
*LenEquiv -= 1;
continue;
- } else
+ }
+ else
curlen--;
}
}
@@ -139,33 +157,37 @@ cleanupOffsets( OffsetNumber *a, int *len, bool *equiv, int *LenEquiv ) {
}
static void
-placeOne( Relation r, GISTSTATE *giststate, GistSplitVector *v, IndexTuple itup, OffsetNumber off, int attno ) {
+placeOne(Relation r, GISTSTATE *giststate, GistSplitVector *v, IndexTuple itup, OffsetNumber off, int attno)
+{
GISTENTRY identry[INDEX_MAX_KEYS];
bool isnull[INDEX_MAX_KEYS];
- bool toLeft = true;
+ bool toLeft = true;
gistDeCompressAtt(giststate, r, itup, NULL, (OffsetNumber) 0, identry, isnull);
- for(;attno<giststate->tupdesc->natts;attno++) {
- float lpenalty, rpenalty;
+ for (; attno < giststate->tupdesc->natts; attno++)
+ {
+ float lpenalty,
+ rpenalty;
GISTENTRY entry;
- gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, FALSE);
- lpenalty = gistpenalty(giststate, attno, &entry, v->spl_lisnull[attno], identry+attno, isnull[ attno ]);
- gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, FALSE);
- rpenalty = gistpenalty(giststate, attno, &entry, v->spl_risnull[attno], identry+attno, isnull[ attno ]);
+ gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, FALSE);
+ lpenalty = gistpenalty(giststate, attno, &entry, v->spl_lisnull[attno], identry + attno, isnull[attno]);
+ gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, FALSE);
+ rpenalty = gistpenalty(giststate, attno, &entry, v->spl_risnull[attno], identry + attno, isnull[attno]);
- if ( lpenalty != rpenalty ) {
- if ( lpenalty > rpenalty )
+ if (lpenalty != rpenalty)
+ {
+ if (lpenalty > rpenalty)
toLeft = false;
break;
}
}
- if ( toLeft )
- v->splitVector.spl_left[ v->splitVector.spl_nleft++ ] = off;
+ if (toLeft)
+ v->splitVector.spl_left[v->splitVector.spl_nleft++] = off;
else
- v->splitVector.spl_right[ v->splitVector.spl_nright++ ] = off;
+ v->splitVector.spl_right[v->splitVector.spl_nright++] = off;
}
#define SWAPVAR( s, d, t ) \
@@ -176,71 +198,83 @@ do { \
} while(0)
/*
- * adjust left and right unions according to splits by previous
- * split by firsts columns. This function is called only in case
+ * adjust left and right unions according to splits by previous
+ * split by first columns. This function is called only in case
* when pickSplit doesn't support subspplit.
*/
static void
-supportSecondarySplit( Relation r, GISTSTATE *giststate, int attno, GIST_SPLITVEC *sv, Datum oldL, Datum oldR ) {
- bool leaveOnLeft = true, tmpBool;
- GISTENTRY entryL, entryR, entrySL, entrySR;
-
- gistentryinit(entryL, oldL, r, NULL, 0, FALSE);
- gistentryinit(entryR, oldR, r, NULL, 0, FALSE);
- gistentryinit(entrySL, sv->spl_ldatum , r, NULL, 0, FALSE);
- gistentryinit(entrySR, sv->spl_rdatum , r, NULL, 0, FALSE);
-
- if ( sv->spl_ldatum_exists && sv->spl_rdatum_exists ) {
- float penalty1, penalty2;
+supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, GIST_SPLITVEC *sv, Datum oldL, Datum oldR)
+{
+ bool leaveOnLeft = true,
+ tmpBool;
+ GISTENTRY entryL,
+ entryR,
+ entrySL,
+ entrySR;
+
+ gistentryinit(entryL, oldL, r, NULL, 0, FALSE);
+ gistentryinit(entryR, oldR, r, NULL, 0, FALSE);
+ gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE);
+ gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE);
+
+ if (sv->spl_ldatum_exists && sv->spl_rdatum_exists)
+ {
+ float penalty1,
+ penalty2;
penalty1 = gistpenalty(giststate, attno, &entryL, false, &entrySL, false) +
- gistpenalty(giststate, attno, &entryR, false, &entrySR, false);
+ gistpenalty(giststate, attno, &entryR, false, &entrySR, false);
penalty2 = gistpenalty(giststate, attno, &entryL, false, &entrySR, false) +
- gistpenalty(giststate, attno, &entryR, false, &entrySL, false);
+ gistpenalty(giststate, attno, &entryR, false, &entrySL, false);
- if ( penalty1 > penalty2 )
+ if (penalty1 > penalty2)
leaveOnLeft = false;
- } else {
- GISTENTRY *entry1 = (sv->spl_ldatum_exists) ? &entryL : &entryR;
- float penalty1, penalty2;
+ }
+ else
+ {
+ GISTENTRY *entry1 = (sv->spl_ldatum_exists) ? &entryL : &entryR;
+ float penalty1,
+ penalty2;
/*
- * there is only one previously defined union,
- * so we just choose swap or not by lowest penalty
+ * there is only one previously defined union, so we just choose swap
+ * or not by lowest penalty
*/
penalty1 = gistpenalty(giststate, attno, entry1, false, &entrySL, false);
penalty2 = gistpenalty(giststate, attno, entry1, false, &entrySR, false);
- if ( penalty1 < penalty2 )
- leaveOnLeft = ( sv->spl_ldatum_exists ) ? true : false;
+ if (penalty1 < penalty2)
+ leaveOnLeft = (sv->spl_ldatum_exists) ? true : false;
else
- leaveOnLeft = ( sv->spl_rdatum_exists ) ? true : false;
+ leaveOnLeft = (sv->spl_rdatum_exists) ? true : false;
}
- if ( leaveOnLeft == false ) {
+ if (leaveOnLeft == false)
+ {
/*
- * swap left and right
+ * swap left and right
*/
- OffsetNumber *off, noff;
- Datum datum;
-
- SWAPVAR( sv->spl_left, sv->spl_right, off );
- SWAPVAR( sv->spl_nleft, sv->spl_nright, noff );
- SWAPVAR( sv->spl_ldatum, sv->spl_rdatum, datum );
- gistentryinit(entrySL, sv->spl_ldatum , r, NULL, 0, FALSE);
- gistentryinit(entrySR, sv->spl_rdatum , r, NULL, 0, FALSE);
+ OffsetNumber *off,
+ noff;
+ Datum datum;
+
+ SWAPVAR(sv->spl_left, sv->spl_right, off);
+ SWAPVAR(sv->spl_nleft, sv->spl_nright, noff);
+ SWAPVAR(sv->spl_ldatum, sv->spl_rdatum, datum);
+ gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE);
+ gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE);
}
- if ( sv->spl_ldatum_exists )
+ if (sv->spl_ldatum_exists)
gistMakeUnionKey(giststate, attno, &entryL, false, &entrySL, false,
- &sv->spl_ldatum, &tmpBool);
+ &sv->spl_ldatum, &tmpBool);
- if ( sv->spl_rdatum_exists )
+ if (sv->spl_rdatum_exists)
gistMakeUnionKey(giststate, attno, &entryR, false, &entrySR, false,
- &sv->spl_rdatum, &tmpBool);
+ &sv->spl_rdatum, &tmpBool);
sv->spl_ldatum_exists = sv->spl_rdatum_exists = false;
}
@@ -251,20 +285,21 @@ supportSecondarySplit( Relation r, GISTSTATE *giststate, int attno, GIST_SPLITVE
* get better split.
* Returns TRUE and v->spl_equiv = NULL if left and right unions of attno columns are the same,
* so caller may find better split
- * Returns TRUE and v->spl_equiv != NULL if there is tuples which may be freely moved
+ * Returns TRUE and v->spl_equiv != NULL if there are tuples which may be freely moved
*/
static bool
gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVector *v,
IndexTuple *itup, int len, GISTSTATE *giststate)
{
GIST_SPLITVEC *sv = &v->splitVector;
+
/*
* now let the user-defined picksplit function set up the split vector; in
* entryvec have no null value!!
*/
- sv->spl_ldatum_exists = ( v->spl_lisnull[ attno ] ) ? false : true;
- sv->spl_rdatum_exists = ( v->spl_risnull[ attno ] ) ? false : true;
+ sv->spl_ldatum_exists = (v->spl_lisnull[attno]) ? false : true;
+ sv->spl_rdatum_exists = (v->spl_risnull[attno]) ? false : true;
sv->spl_ldatum = v->spl_lattr[attno];
sv->spl_rdatum = v->spl_rattr[attno];
@@ -278,11 +313,12 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
if (sv->spl_right[sv->spl_nright - 1] == InvalidOffsetNumber)
sv->spl_right[sv->spl_nright - 1] = (OffsetNumber) (entryvec->n - 1);
- if( sv->spl_ldatum_exists || sv->spl_rdatum_exists ) {
- elog(LOG,"PickSplit method of %d columns of index '%s' doesn't support secondary split",
- attno + 1, RelationGetRelationName(r) );
+ if (sv->spl_ldatum_exists || sv->spl_rdatum_exists)
+ {
+ elog(LOG, "PickSplit method of %d columns of index '%s' doesn't support secondary split",
+ attno + 1, RelationGetRelationName(r));
- supportSecondarySplit( r, giststate, attno, sv, v->spl_lattr[attno], v->spl_rattr[attno] );
+ supportSecondarySplit(r, giststate, attno, sv, v->spl_lattr[attno], v->spl_rattr[attno]);
}
v->spl_lattr[attno] = sv->spl_ldatum;
@@ -296,53 +332,64 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
*/
v->spl_equiv = NULL;
- if (giststate->tupdesc->natts > 1 && attno+1 != giststate->tupdesc->natts)
+ if (giststate->tupdesc->natts > 1 && attno + 1 != giststate->tupdesc->natts)
{
- if ( gistKeyIsEQ(giststate, attno, sv->spl_ldatum, sv->spl_rdatum) ) {
+ if (gistKeyIsEQ(giststate, attno, sv->spl_ldatum, sv->spl_rdatum))
+ {
/*
- * Left and right key's unions are equial, so
- * we can get better split by following columns. Note,
- * unions for attno columns are already done.
+			 * Left and right keys' unions are equal, so we can get better
+ * split by following columns. Note, unions for attno columns are
+ * already done.
*/
return true;
- } else {
+ }
+ else
+ {
int LenEquiv;
- v->spl_equiv = (bool *) palloc0(sizeof(bool) * (entryvec->n+1));
+ v->spl_equiv = (bool *) palloc0(sizeof(bool) * (entryvec->n + 1));
LenEquiv = gistfindgroup(r, giststate, entryvec->vector, v, attno);
/*
- * if possible, we should distribute equivalent tuples
- */
- if (LenEquiv == 0 ) {
+ * if possible, we should distribute equivalent tuples
+ */
+ if (LenEquiv == 0)
+ {
gistunionsubkey(giststate, itup, v, attno + 1);
- } else {
- cleanupOffsets( sv->spl_left, &sv->spl_nleft, v->spl_equiv, &LenEquiv );
- cleanupOffsets( sv->spl_right, &sv->spl_nright, v->spl_equiv, &LenEquiv );
+ }
+ else
+ {
+ cleanupOffsets(sv->spl_left, &sv->spl_nleft, v->spl_equiv, &LenEquiv);
+ cleanupOffsets(sv->spl_right, &sv->spl_nright, v->spl_equiv, &LenEquiv);
gistunionsubkey(giststate, itup, v, attno + 1);
- if (LenEquiv == 1 ) {
+ if (LenEquiv == 1)
+ {
/*
- * In case with one tuple we just choose left-right
- * by penalty. It's simplify user-defined pickSplit
+						 * In the case of one tuple we just choose left or right by
+						 * penalty. That simplifies the user-defined pickSplit
*/
OffsetNumber toMove = InvalidOffsetNumber;
- for(toMove=FirstOffsetNumber;toMove<entryvec->n;toMove++)
- if ( v->spl_equiv[ toMove ] )
+ for (toMove = FirstOffsetNumber; toMove < entryvec->n; toMove++)
+ if (v->spl_equiv[toMove])
break;
- Assert( toMove < entryvec->n );
-
- placeOne( r, giststate, v, itup[ toMove-1 ], toMove, attno+1 );
- /* redo gistunionsubkey(): it will not degradate performance,
- * because it's very rarely */
+ Assert(toMove < entryvec->n);
+
+ placeOne(r, giststate, v, itup[toMove - 1], toMove, attno + 1);
+
+ /*
+						 * redo gistunionsubkey(): it will not degrade
+						 * performance, because it happens very rarely
+ */
v->spl_equiv = NULL;
gistunionsubkey(giststate, itup, v, attno + 1);
return false;
- } else if ( LenEquiv > 1 )
+ }
+ else if (LenEquiv > 1)
return true;
}
}
@@ -352,60 +399,65 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
}
/*
- * simple split page
+ * simple split page
*/
static void
-gistSplitHalf(GIST_SPLITVEC *v, int len) {
- int i;
+gistSplitHalf(GIST_SPLITVEC *v, int len)
+{
+ int i;
- v->spl_nright = v->spl_nleft = 0;
+ v->spl_nright = v->spl_nleft = 0;
v->spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
- v->spl_right= (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
- for(i = 1; i <= len; i++)
- if ( i<len/2 )
- v->spl_right[ v->spl_nright++ ] = i;
+ v->spl_right = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
+ for (i = 1; i <= len; i++)
+ if (i < len / 2)
+ v->spl_right[v->spl_nright++] = i;
else
- v->spl_left[ v->spl_nleft++ ] = i;
+ v->spl_left[v->spl_nleft++] = i;
}
/*
* if it was invalid tuple then we need special processing.
- * We move all invalid tuples on right page.
+ * We move all invalid tuples on right page.
*
- * if there is no place on left page, gistSplit will be called one more
+ * if there is no place on left page, gistSplit will be called one more
* time for left page.
*
* Normally, we never exec this code, but after crash replay it's possible
* to get 'invalid' tuples (probability is low enough)
*/
static void
-gistSplitByInvalid(GISTSTATE *giststate, GistSplitVector *v, IndexTuple *itup, int len) {
- int i;
- static OffsetNumber offInvTuples[ MaxOffsetNumber ];
- int nOffInvTuples = 0;
+gistSplitByInvalid(GISTSTATE *giststate, GistSplitVector *v, IndexTuple *itup, int len)
+{
+ int i;
+ static OffsetNumber offInvTuples[MaxOffsetNumber];
+ int nOffInvTuples = 0;
for (i = 1; i <= len; i++)
- if ( GistTupleIsInvalid(itup[i - 1]) )
- offInvTuples[ nOffInvTuples++ ] = i;
+ if (GistTupleIsInvalid(itup[i - 1]))
+ offInvTuples[nOffInvTuples++] = i;
- if ( nOffInvTuples == len ) {
+ if (nOffInvTuples == len)
+ {
/* corner case, all tuples are invalid */
- v->spl_rightvalid= v->spl_leftvalid = false;
- gistSplitHalf( &v->splitVector, len );
- } else {
- GistSplitUnion gsvp;
-
+ v->spl_rightvalid = v->spl_leftvalid = false;
+ gistSplitHalf(&v->splitVector, len);
+ }
+ else
+ {
+ GistSplitUnion gsvp;
+
v->splitVector.spl_right = offInvTuples;
v->splitVector.spl_nright = nOffInvTuples;
v->spl_rightvalid = false;
v->splitVector.spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
v->splitVector.spl_nleft = 0;
- for(i = 1; i <= len; i++)
- if ( !GistTupleIsInvalid(itup[i - 1]) )
- v->splitVector.spl_left[ v->splitVector.spl_nleft++ ] = i;
+ for (i = 1; i <= len; i++)
+ if (!GistTupleIsInvalid(itup[i - 1]))
+ v->splitVector.spl_left[v->splitVector.spl_nleft++] = i;
v->spl_leftvalid = true;
-
+
gsvp.equiv = NULL;
gsvp.attr = v->spl_lattr;
gsvp.len = v->splitVector.spl_nleft;
@@ -418,52 +470,58 @@ gistSplitByInvalid(GISTSTATE *giststate, GistSplitVector *v, IndexTuple *itup, i
/*
* trys to split page by attno key, in a case of null
- * values move its to separate page.
+ * values, move them to a separate page.
*/
void
-gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate,
- GistSplitVector *v, GistEntryVector *entryvec, int attno) {
- int i;
- static OffsetNumber offNullTuples[ MaxOffsetNumber ];
- int nOffNullTuples = 0;
+gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate,
+ GistSplitVector *v, GistEntryVector *entryvec, int attno)
+{
+ int i;
+ static OffsetNumber offNullTuples[MaxOffsetNumber];
+ int nOffNullTuples = 0;
- for (i = 1; i <= len; i++) {
- Datum datum;
- bool IsNull;
+ for (i = 1; i <= len; i++)
+ {
+ Datum datum;
+ bool IsNull;
- if (!GistPageIsLeaf(page) && GistTupleIsInvalid(itup[i - 1])) {
+ if (!GistPageIsLeaf(page) && GistTupleIsInvalid(itup[i - 1]))
+ {
gistSplitByInvalid(giststate, v, itup, len);
return;
}
- datum = index_getattr(itup[i - 1], attno+1, giststate->tupdesc, &IsNull);
+ datum = index_getattr(itup[i - 1], attno + 1, giststate->tupdesc, &IsNull);
gistdentryinit(giststate, attno, &(entryvec->vector[i]),
datum, r, page, i,
FALSE, IsNull);
- if ( IsNull )
- offNullTuples[ nOffNullTuples++ ] = i;
+ if (IsNull)
+ offNullTuples[nOffNullTuples++] = i;
}
v->spl_leftvalid = v->spl_rightvalid = true;
- if ( nOffNullTuples == len ) {
- /*
+ if (nOffNullTuples == len)
+ {
+ /*
* Corner case: All keys in attno column are null, we should try to
- * split by keys in next column. It all keys in all columns
- * are NULL just split page half by half
+		 * split by keys in next column. If all keys in all columns are NULL
+ * just split page half by half
*/
v->spl_risnull[attno] = v->spl_lisnull[attno] = TRUE;
- if ( attno+1 == r->rd_att->natts )
- gistSplitHalf( &v->splitVector, len );
- else
- gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno+1);
- } else if ( nOffNullTuples > 0 ) {
- int j=0;
-
- /*
- * We don't want to mix NULLs and not-NULLs keys
- * on one page, so move nulls to right page
+ if (attno + 1 == r->rd_att->natts)
+ gistSplitHalf(&v->splitVector, len);
+ else
+ gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
+ }
+ else if (nOffNullTuples > 0)
+ {
+ int j = 0;
+
+ /*
+	 * We don't want to mix NULL and not-NULL keys on one page, so move
+ * nulls to right page
*/
v->splitVector.spl_right = offNullTuples;
v->splitVector.spl_nright = nOffNullTuples;
@@ -471,61 +529,71 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *gist
v->splitVector.spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
v->splitVector.spl_nleft = 0;
- for(i = 1; i <= len; i++)
- if ( j<v->splitVector.spl_nright && offNullTuples[j] == i )
+ for (i = 1; i <= len; i++)
+ if (j < v->splitVector.spl_nright && offNullTuples[j] == i)
j++;
else
- v->splitVector.spl_left[ v->splitVector.spl_nleft++ ] = i;
+ v->splitVector.spl_left[v->splitVector.spl_nleft++] = i;
v->spl_equiv = NULL;
gistunionsubkey(giststate, itup, v, attno);
- } else {
+ }
+ else
+ {
/*
* all keys are not-null
*/
- entryvec->n = len+1;
+ entryvec->n = len + 1;
- if ( gistUserPicksplit(r, entryvec, attno, v, itup, len, giststate) && attno+1 != r->rd_att->natts ) {
+ if (gistUserPicksplit(r, entryvec, attno, v, itup, len, giststate) && attno + 1 != r->rd_att->natts)
+ {
/*
- * Splitting on attno column is not optimized: there is a tuples which can be freely
- * left or right page, we will try to split page by
- * following columns
+			 * Splitting on attno column is not optimized: there are tuples
+			 * which can go freely to the left or right page; we will try to split
+ * page by following columns
*/
- if ( v->spl_equiv == NULL ) {
- /* simple case: left and right keys for attno column are equial */
- gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno+1);
- } else {
+ if (v->spl_equiv == NULL)
+ {
+ /*
+ * simple case: left and right keys for attno column are
+				 * equal
+ */
+ gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
+ }
+ else
+ {
/* we should clean up vector from already distributed tuples */
- IndexTuple *newitup = (IndexTuple*)palloc((len + 1) * sizeof(IndexTuple));
- OffsetNumber *map = (OffsetNumber*)palloc((len + 1) * sizeof(IndexTuple));
- int newlen = 0;
+ IndexTuple *newitup = (IndexTuple *) palloc((len + 1) * sizeof(IndexTuple));
+ OffsetNumber *map = (OffsetNumber *) palloc((len + 1) * sizeof(IndexTuple));
+ int newlen = 0;
GIST_SPLITVEC backupSplit = v->splitVector;
- for(i=0; i<len; i++)
- if ( v->spl_equiv[i+1] ) {
- map[ newlen ] = i+1;
- newitup[ newlen++ ] = itup[i];
+ for (i = 0; i < len; i++)
+ if (v->spl_equiv[i + 1])
+ {
+ map[newlen] = i + 1;
+ newitup[newlen++] = itup[i];
}
- Assert( newlen>0 );
+ Assert(newlen > 0);
- backupSplit.spl_left = (OffsetNumber*)palloc(sizeof(OffsetNumber)*len);
- memcpy( backupSplit.spl_left, v->splitVector.spl_left, sizeof(OffsetNumber)*v->splitVector.spl_nleft);
- backupSplit.spl_right = (OffsetNumber*)palloc(sizeof(OffsetNumber)*len);
- memcpy( backupSplit.spl_right, v->splitVector.spl_right, sizeof(OffsetNumber)*v->splitVector.spl_nright);
+ backupSplit.spl_left = (OffsetNumber *) palloc(sizeof(OffsetNumber) * len);
+ memcpy(backupSplit.spl_left, v->splitVector.spl_left, sizeof(OffsetNumber) * v->splitVector.spl_nleft);
+ backupSplit.spl_right = (OffsetNumber *) palloc(sizeof(OffsetNumber) * len);
+ memcpy(backupSplit.spl_right, v->splitVector.spl_right, sizeof(OffsetNumber) * v->splitVector.spl_nright);
- gistSplitByKey(r, page, newitup, newlen, giststate, v, entryvec, attno+1);
+ gistSplitByKey(r, page, newitup, newlen, giststate, v, entryvec, attno + 1);
/* merge result of subsplit */
- for(i=0;i<v->splitVector.spl_nleft;i++)
- backupSplit.spl_left[ backupSplit.spl_nleft++ ] = map[ v->splitVector.spl_left[i]-1 ];
- for(i=0;i<v->splitVector.spl_nright;i++)
- backupSplit.spl_right[ backupSplit.spl_nright++ ] = map[ v->splitVector.spl_right[i]-1 ];
+ for (i = 0; i < v->splitVector.spl_nleft; i++)
+ backupSplit.spl_left[backupSplit.spl_nleft++] = map[v->splitVector.spl_left[i] - 1];
+ for (i = 0; i < v->splitVector.spl_nright; i++)
+ backupSplit.spl_right[backupSplit.spl_nright++] = map[v->splitVector.spl_right[i] - 1];
v->splitVector = backupSplit;
/* reunion left and right datums */
gistunionsubkey(giststate, itup, v, attno);
}
- }
+ }
}
}
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 9b1cfc0cf6..ff22bd2b65 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.19 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.20 2006/10/04 00:29:48 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -22,8 +22,8 @@
* static *S used for temrorary storage (saves stack and palloc() call)
*/
-static Datum attrS[INDEX_MAX_KEYS];
-static bool isnullS[INDEX_MAX_KEYS];
+static Datum attrS[INDEX_MAX_KEYS];
+static bool isnullS[INDEX_MAX_KEYS];
/*
* Write itup vector to page, has no control of free space
@@ -57,14 +57,17 @@ gistfillbuffer(Relation r, Page page, IndexTuple *itup,
bool
gistnospace(Page page, IndexTuple *itvec, int len, OffsetNumber todelete, Size freespace)
{
- unsigned int size = freespace, deleted = 0;
+ unsigned int size = freespace,
+ deleted = 0;
int i;
for (i = 0; i < len; i++)
size += IndexTupleSize(itvec[i]) + sizeof(ItemIdData);
- if ( todelete != InvalidOffsetNumber ) {
- IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, todelete));
+ if (todelete != InvalidOffsetNumber)
+ {
+ IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, todelete));
+
deleted = IndexTupleSize(itup) + sizeof(ItemIdData);
}
@@ -72,11 +75,12 @@ gistnospace(Page page, IndexTuple *itvec, int len, OffsetNumber todelete, Size f
}
bool
-gistfitpage(IndexTuple *itvec, int len) {
- int i;
- Size size=0;
+gistfitpage(IndexTuple *itvec, int len)
+{
+ int i;
+ Size size = 0;
- for(i=0;i<len;i++)
+ for (i = 0; i < len; i++)
size += IndexTupleSize(itvec[i]) + sizeof(ItemIdData);
/* TODO: Consider fillfactor */
@@ -119,56 +123,64 @@ gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
*/
IndexTupleData *
-gistfillitupvec(IndexTuple *vec, int veclen, int *memlen) {
- char *ptr, *ret;
- int i;
+gistfillitupvec(IndexTuple *vec, int veclen, int *memlen)
+{
+ char *ptr,
+ *ret;
+ int i;
+
+ *memlen = 0;
- *memlen=0;
-
for (i = 0; i < veclen; i++)
*memlen += IndexTupleSize(vec[i]);
ptr = ret = palloc(*memlen);
- for (i = 0; i < veclen; i++) {
+ for (i = 0; i < veclen; i++)
+ {
memcpy(ptr, vec[i], IndexTupleSize(vec[i]));
ptr += IndexTupleSize(vec[i]);
}
- return (IndexTupleData*)ret;
+ return (IndexTupleData *) ret;
}
/*
- * Make unions of keys in IndexTuple vector, return FALSE if itvec contains
+ * Make unions of keys in IndexTuple vector, return FALSE if itvec contains
* invalid tuple. Resulting Datums aren't compressed.
*/
-bool
-gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, int startkey,
- Datum *attr, bool *isnull ) {
+bool
+gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, int startkey,
+ Datum *attr, bool *isnull)
+{
int i;
GistEntryVector *evec;
- int attrsize;
+ int attrsize;
- evec = (GistEntryVector *) palloc( ( len + 2 ) * sizeof(GISTENTRY) + GEVHDRSZ);
+ evec = (GistEntryVector *) palloc((len + 2) * sizeof(GISTENTRY) + GEVHDRSZ);
- for (i = startkey; i < giststate->tupdesc->natts; i++) {
- int j;
+ for (i = startkey; i < giststate->tupdesc->natts; i++)
+ {
+ int j;
evec->n = 0;
- if ( !isnull[i] ) {
- gistentryinit( evec->vector[evec->n], attr[i],
- NULL, NULL, (OffsetNumber) 0,
- FALSE);
+ if (!isnull[i])
+ {
+ gistentryinit(evec->vector[evec->n], attr[i],
+ NULL, NULL, (OffsetNumber) 0,
+ FALSE);
evec->n++;
}
- for (j = 0; j < len; j++) {
- Datum datum;
- bool IsNull;
+ for (j = 0; j < len; j++)
+ {
+ Datum datum;
+ bool IsNull;
- if (GistTupleIsInvalid(itvec[j]))
- return FALSE; /* signals that union with invalid tuple => result is invalid */
+ if (GistTupleIsInvalid(itvec[j]))
+ return FALSE; /* signals that union with invalid tuple =>
+ * result is invalid */
datum = index_getattr(itvec[j], i + 1, giststate->tupdesc, &IsNull);
if (IsNull)
@@ -183,19 +195,23 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, int startke
}
/* If this tuple vector was all NULLs, the union is NULL */
- if ( evec->n == 0 ) {
+ if (evec->n == 0)
+ {
attr[i] = (Datum) 0;
isnull[i] = TRUE;
- } else {
- if (evec->n == 1) {
+ }
+ else
+ {
+ if (evec->n == 1)
+ {
evec->n = 2;
evec->vector[1] = evec->vector[0];
- }
+ }
/* Make union and store in attr array */
attr[i] = FunctionCall2(&giststate->unionFn[i],
- PointerGetDatum(evec),
- PointerGetDatum(&attrsize));
+ PointerGetDatum(evec),
+ PointerGetDatum(&attrsize));
isnull[i] = FALSE;
}
@@ -213,57 +229,67 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
{
memset(isnullS, TRUE, sizeof(bool) * giststate->tupdesc->natts);
- if ( !gistMakeUnionItVec(giststate, itvec, len, 0, attrS, isnullS ) )
- return gist_form_invalid_tuple(InvalidBlockNumber);
+ if (!gistMakeUnionItVec(giststate, itvec, len, 0, attrS, isnullS))
+ return gist_form_invalid_tuple(InvalidBlockNumber);
- return gistFormTuple(giststate, r, attrS, isnullS, false);
+ return gistFormTuple(giststate, r, attrS, isnullS, false);
}
-/*
+/*
* makes union of two key
*/
void
-gistMakeUnionKey( GISTSTATE *giststate, int attno,
- GISTENTRY *entry1, bool isnull1,
- GISTENTRY *entry2, bool isnull2,
- Datum *dst, bool *dstisnull ) {
+gistMakeUnionKey(GISTSTATE *giststate, int attno,
+ GISTENTRY *entry1, bool isnull1,
+ GISTENTRY *entry2, bool isnull2,
+ Datum *dst, bool *dstisnull)
+{
- int dstsize;
+ int dstsize;
- static char storage[ 2 * sizeof(GISTENTRY) + GEVHDRSZ ];
- GistEntryVector *evec = (GistEntryVector*)storage;
+ static char storage[2 * sizeof(GISTENTRY) + GEVHDRSZ];
+ GistEntryVector *evec = (GistEntryVector *) storage;
evec->n = 2;
- if ( isnull1 && isnull2 ) {
+ if (isnull1 && isnull2)
+ {
*dstisnull = TRUE;
- *dst = (Datum)0;
- } else {
- if ( isnull1 == FALSE && isnull2 == FALSE ) {
+ *dst = (Datum) 0;
+ }
+ else
+ {
+ if (isnull1 == FALSE && isnull2 == FALSE)
+ {
evec->vector[0] = *entry1;
evec->vector[1] = *entry2;
- } else if ( isnull1 == FALSE ) {
+ }
+ else if (isnull1 == FALSE)
+ {
evec->vector[0] = *entry1;
evec->vector[1] = *entry1;
- } else {
+ }
+ else
+ {
evec->vector[0] = *entry2;
evec->vector[1] = *entry2;
}
*dstisnull = FALSE;
*dst = FunctionCall2(&giststate->unionFn[attno],
- PointerGetDatum(evec),
- PointerGetDatum(&dstsize));
+ PointerGetDatum(evec),
+ PointerGetDatum(&dstsize));
}
}
bool
-gistKeyIsEQ(GISTSTATE *giststate, int attno, Datum a, Datum b) {
- bool result;
+gistKeyIsEQ(GISTSTATE *giststate, int attno, Datum a, Datum b)
+{
+ bool result;
FunctionCall3(&giststate->equalFn[attno],
- a, b,
- PointerGetDatum(&result));
+ a, b,
+ PointerGetDatum(&result));
return result;
}
@@ -309,22 +335,24 @@ gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *gis
gistDeCompressAtt(giststate, r, addtup, NULL,
(OffsetNumber) 0, addentries, addisnull);
- for(i = 0; i < r->rd_att->natts; i++) {
- gistMakeUnionKey( giststate, i,
- oldentries + i, oldisnull[i],
- addentries + i, addisnull[i],
- attrS + i, isnullS + i );
+ for (i = 0; i < r->rd_att->natts; i++)
+ {
+ gistMakeUnionKey(giststate, i,
+ oldentries + i, oldisnull[i],
+ addentries + i, addisnull[i],
+ attrS + i, isnullS + i);
- if ( neednew )
+ if (neednew)
/* we already need new key, so we can skip check */
continue;
- if ( isnullS[i] )
+ if (isnullS[i])
/* union of key may be NULL if and only if both keys are NULL */
continue;
- if ( !addisnull[i] ) {
- if ( oldisnull[i] || gistKeyIsEQ(giststate, i, oldentries[i].key, attrS[i])==false )
+ if (!addisnull[i])
+ {
+ if (oldisnull[i] || gistKeyIsEQ(giststate, i, oldentries[i].key, attrS[i]) == false)
neednew = true;
}
}
@@ -363,8 +391,8 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
it, NULL, (OffsetNumber) 0,
identry, isnull);
- Assert( maxoff >= FirstOffsetNumber );
- Assert( !GistPageIsLeaf(p) );
+ Assert(maxoff >= FirstOffsetNumber);
+ Assert(!GistPageIsLeaf(p));
for (i = FirstOffsetNumber; i <= maxoff && sum_grow; i = OffsetNumberNext(i))
{
@@ -484,7 +512,7 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
{
gistcentryinit(giststate, i, &centry[i], attdata[i],
r, NULL, (OffsetNumber) 0,
- newValues,
+ newValues,
FALSE);
compatt[i] = centry[i].key;
}
@@ -500,18 +528,19 @@ gistpenalty(GISTSTATE *giststate, int attno,
GISTENTRY *orig, bool isNullOrig,
GISTENTRY *add, bool isNullAdd)
{
- float penalty = 0.0;
+ float penalty = 0.0;
- if ( giststate->penaltyFn[attno].fn_strict==FALSE || ( isNullOrig == FALSE && isNullAdd == FALSE ) )
+ if (giststate->penaltyFn[attno].fn_strict == FALSE || (isNullOrig == FALSE && isNullAdd == FALSE))
FunctionCall3(&giststate->penaltyFn[attno],
PointerGetDatum(orig),
PointerGetDatum(add),
PointerGetDatum(&penalty));
- else if ( isNullOrig && isNullAdd )
+ else if (isNullOrig && isNullAdd)
penalty = 0.0;
else
- penalty = 1e10; /* try to prevent to mix null and non-null value */
-
+ penalty = 1e10; /* try to prevent to mix null and non-null
+ * value */
+
return penalty;
}
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index e5c73c8c22..5f5060280d 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.27 2006/09/21 20:31:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.28 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,19 +45,24 @@ typedef struct
} ArrayTuple;
/*
- * Make union of keys on page
+ * Make union of keys on page
*/
static IndexTuple
-PageMakeUnionKey(GistVacuum *gv, Buffer buffer) {
- Page page = BufferGetPage( buffer );
+PageMakeUnionKey(GistVacuum *gv, Buffer buffer)
+{
+ Page page = BufferGetPage(buffer);
IndexTuple *vec,
- tmp, res;
+ tmp,
+ res;
int veclen = 0;
MemoryContext oldCtx = MemoryContextSwitchTo(gv->opCtx);
vec = gistextractpage(page, &veclen);
- /* we call gistunion() in temprorary context because user-defined functions called in gistunion()
- may do not free all memory */
+
+ /*
+	 * we call gistunion() in a temporary context because user-defined
+	 * functions called in gistunion() may not free all memory
+ */
tmp = gistunion(gv->index, vec, veclen, &(gv->giststate));
MemoryContextSwitchTo(oldCtx);
@@ -73,21 +78,25 @@ PageMakeUnionKey(GistVacuum *gv, Buffer buffer) {
}
static void
-gistDeleteSubtree( GistVacuum *gv, BlockNumber blkno ) {
- Buffer buffer;
- Page page;
+gistDeleteSubtree(GistVacuum *gv, BlockNumber blkno)
+{
+ Buffer buffer;
+ Page page;
buffer = ReadBuffer(gv->index, blkno);
LockBuffer(buffer, GIST_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
- if ( !GistPageIsLeaf(page) ) {
- int i;
+ if (!GistPageIsLeaf(page))
+ {
+ int i;
+
+ for (i = FirstOffsetNumber; i <= PageGetMaxOffsetNumber(page); i = OffsetNumberNext(i))
+ {
+ ItemId iid = PageGetItemId(page, i);
+ IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
- for (i = FirstOffsetNumber; i <= PageGetMaxOffsetNumber(page); i = OffsetNumberNext(i)) {
- ItemId iid = PageGetItemId(page, i);
- IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
- gistDeleteSubtree(gv, ItemPointerGetBlockNumber(&(idxtuple->t_tid)));
+ gistDeleteSubtree(gv, ItemPointerGetBlockNumber(&(idxtuple->t_tid)));
}
}
@@ -103,7 +112,7 @@ gistDeleteSubtree( GistVacuum *gv, BlockNumber blkno ) {
{
XLogRecData rdata[2];
XLogRecPtr recptr;
- gistxlogPageDelete xlrec;
+ gistxlogPageDelete xlrec;
xlrec.node = gv->index->rd_node;
xlrec.blkno = blkno;
@@ -125,31 +134,34 @@ gistDeleteSubtree( GistVacuum *gv, BlockNumber blkno ) {
}
else
PageSetLSN(page, XLogRecPtrForTemp);
-
+
END_CRIT_SECTION();
UnlockReleaseBuffer(buffer);
}
-static Page
-GistPageGetCopyPage( Page page ) {
- Size pageSize = PageGetPageSize( page );
- Page tmppage;
+static Page
+GistPageGetCopyPage(Page page)
+{
+ Size pageSize = PageGetPageSize(page);
+ Page tmppage;
- tmppage=(Page)palloc( pageSize );
- memcpy( tmppage, page, pageSize );
+ tmppage = (Page) palloc(pageSize);
+ memcpy(tmppage, page, pageSize);
return tmppage;
}
static ArrayTuple
-vacuumSplitPage(GistVacuum *gv, Page tempPage, Buffer buffer, IndexTuple *addon, int curlenaddon) {
+vacuumSplitPage(GistVacuum *gv, Page tempPage, Buffer buffer, IndexTuple *addon, int curlenaddon)
+{
ArrayTuple res = {NULL, 0, false};
IndexTuple *vec;
SplitedPageLayout *dist = NULL,
- *ptr;
- int i, veclen=0;
- BlockNumber blkno = BufferGetBlockNumber(buffer);
+ *ptr;
+ int i,
+ veclen = 0;
+ BlockNumber blkno = BufferGetBlockNumber(buffer);
MemoryContext oldCtx = MemoryContextSwitchTo(gv->opCtx);
vec = gistextractpage(tempPage, &veclen);
@@ -158,67 +170,73 @@ vacuumSplitPage(GistVacuum *gv, Page tempPage, Buffer buffer, IndexTuple *addon,
MemoryContextSwitchTo(oldCtx);
- if (blkno != GIST_ROOT_BLKNO) {
+ if (blkno != GIST_ROOT_BLKNO)
+ {
/* if non-root split then we should not allocate new buffer */
dist->buffer = buffer;
dist->page = tempPage;
/* during vacuum we never split leaf page */
GistPageGetOpaque(dist->page)->flags = 0;
- } else
+ }
+ else
pfree(tempPage);
res.itup = (IndexTuple *) palloc(sizeof(IndexTuple) * veclen);
res.ituplen = 0;
/* make new pages and fills them */
- for (ptr = dist; ptr; ptr = ptr->next) {
- char *data;
+ for (ptr = dist; ptr; ptr = ptr->next)
+ {
+ char *data;
- if ( ptr->buffer == InvalidBuffer ) {
- ptr->buffer = gistNewBuffer( gv->index );
- GISTInitBuffer( ptr->buffer, 0 );
+ if (ptr->buffer == InvalidBuffer)
+ {
+ ptr->buffer = gistNewBuffer(gv->index);
+ GISTInitBuffer(ptr->buffer, 0);
ptr->page = BufferGetPage(ptr->buffer);
}
- ptr->block.blkno = BufferGetBlockNumber( ptr->buffer );
+ ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
- data = (char*)(ptr->list);
- for(i=0;i<ptr->block.num;i++) {
- if ( PageAddItem(ptr->page, (Item)data, IndexTupleSize((IndexTuple)data), i+FirstOffsetNumber, LP_USED) == InvalidOffsetNumber )
+ data = (char *) (ptr->list);
+ for (i = 0; i < ptr->block.num; i++)
+ {
+ if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(gv->index));
- data += IndexTupleSize((IndexTuple)data);
+ data += IndexTupleSize((IndexTuple) data);
}
ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
- res.itup[ res.ituplen ] = (IndexTuple)palloc(IndexTupleSize(ptr->itup));
- memcpy( res.itup[ res.ituplen ], ptr->itup, IndexTupleSize(ptr->itup) );
+ res.itup[res.ituplen] = (IndexTuple) palloc(IndexTupleSize(ptr->itup));
+ memcpy(res.itup[res.ituplen], ptr->itup, IndexTupleSize(ptr->itup));
res.ituplen++;
}
START_CRIT_SECTION();
- for (ptr = dist; ptr; ptr = ptr->next) {
+ for (ptr = dist; ptr; ptr = ptr->next)
+ {
MarkBufferDirty(ptr->buffer);
GistPageGetOpaque(ptr->page)->rightlink = InvalidBlockNumber;
}
/* restore splitted non-root page */
- if (blkno != GIST_ROOT_BLKNO) {
- PageRestoreTempPage( dist->page, BufferGetPage( dist->buffer ) );
- dist->page = BufferGetPage( dist->buffer );
+ if (blkno != GIST_ROOT_BLKNO)
+ {
+ PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
+ dist->page = BufferGetPage(dist->buffer);
}
if (!gv->index->rd_istemp)
{
XLogRecPtr recptr;
XLogRecData *rdata;
- ItemPointerData key; /* set key for incomplete
- * insert */
+ ItemPointerData key; /* set key for incomplete insert */
char *xlinfo;
ItemPointerSet(&key, blkno, TUPLE_IS_VALID);
rdata = formSplitRdata(gv->index->rd_node, blkno,
- false, &key, dist);
+ false, &key, dist);
xlinfo = rdata->data;
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT, rdata);
@@ -241,13 +259,12 @@ vacuumSplitPage(GistVacuum *gv, Page tempPage, Buffer buffer, IndexTuple *addon,
{
/* we must keep the buffer pin on the head page */
if (BufferGetBlockNumber(ptr->buffer) != blkno)
- UnlockReleaseBuffer( ptr->buffer );
+ UnlockReleaseBuffer(ptr->buffer);
}
if (blkno == GIST_ROOT_BLKNO)
{
- ItemPointerData key; /* set key for incomplete
- * insert */
+ ItemPointerData key; /* set key for incomplete insert */
ItemPointerSet(&key, blkno, TUPLE_IS_VALID);
@@ -266,7 +283,8 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
{
ArrayTuple res = {NULL, 0, false};
Buffer buffer;
- Page page, tempPage = NULL;
+ Page page,
+ tempPage = NULL;
OffsetNumber i,
maxoff;
ItemId iid;
@@ -278,7 +296,7 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
*addon = NULL;
bool needwrite = false;
OffsetNumber offToDelete[MaxOffsetNumber];
- BlockNumber blkToDelete[MaxOffsetNumber];
+ BlockNumber blkToDelete[MaxOffsetNumber];
ItemPointerData *completed = NULL;
int ncompleted = 0,
lencompleted = 16;
@@ -322,7 +340,7 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
if (chldtuple.ituplen || chldtuple.emptypage)
{
/* update tuple or/and inserts new */
- if ( chldtuple.emptypage )
+ if (chldtuple.emptypage)
blkToDelete[nBlkToDelete++] = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
offToDelete[nOffToDelete++] = i;
PageIndexTupleDelete(tempPage, i);
@@ -333,7 +351,7 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
if (chldtuple.ituplen)
{
- Assert( chldtuple.emptypage == false );
+ Assert(chldtuple.emptypage == false);
while (curlenaddon + chldtuple.ituplen >= lenaddon)
{
lenaddon *= 2;
@@ -367,56 +385,63 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
}
}
}
-
- Assert( maxoff == PageGetMaxOffsetNumber(tempPage) );
+
+ Assert(maxoff == PageGetMaxOffsetNumber(tempPage));
if (curlenaddon)
{
/* insert updated tuples */
- if (gistnospace(tempPage, addon, curlenaddon, InvalidOffsetNumber, 0)) {
+ if (gistnospace(tempPage, addon, curlenaddon, InvalidOffsetNumber, 0))
+ {
/* there is no space on page to insert tuples */
res = vacuumSplitPage(gv, tempPage, buffer, addon, curlenaddon);
- tempPage=NULL; /* vacuumSplitPage() free tempPage */
- needwrite = needunion = false; /* gistSplit already forms unions and writes pages */
- } else
+ tempPage = NULL; /* vacuumSplitPage() free tempPage */
+ needwrite = needunion = false; /* gistSplit already forms
+ * unions and writes pages */
+ }
+ else
/* enough free space */
gistfillbuffer(gv->index, tempPage, addon, curlenaddon, InvalidOffsetNumber);
}
}
- /*
- * If page is empty, we should remove pointer to it before
- * deleting page (except root)
+ /*
+ * If page is empty, we should remove pointer to it before deleting page
+ * (except root)
*/
- if ( blkno != GIST_ROOT_BLKNO && ( PageIsEmpty(page) || (tempPage && PageIsEmpty(tempPage)) ) ) {
+ if (blkno != GIST_ROOT_BLKNO && (PageIsEmpty(page) || (tempPage && PageIsEmpty(tempPage))))
+ {
/*
- * New version of page is empty, so leave it unchanged,
- * upper call will mark our page as deleted.
- * In case of page split we never will be here...
+ * New version of page is empty, so leave it unchanged, upper call
+ * will mark our page as deleted. In case of page split we never will
+ * be here...
*
- * If page was empty it can't become non-empty during processing
+ * If page was empty it can't become non-empty during processing
*/
res.emptypage = true;
UnlockReleaseBuffer(buffer);
- } else {
+ }
+ else
+ {
/* write page and remove its childs if it need */
START_CRIT_SECTION();
- if ( tempPage && needwrite ) {
+ if (tempPage && needwrite)
+ {
PageRestoreTempPage(tempPage, page);
tempPage = NULL;
}
- /* Empty index */
- if (PageIsEmpty(page) && blkno == GIST_ROOT_BLKNO )
+ /* Empty index */
+ if (PageIsEmpty(page) && blkno == GIST_ROOT_BLKNO)
{
needwrite = true;
GistPageSetLeaf(page);
}
-
+
if (needwrite)
{
MarkBufferDirty(buffer);
@@ -446,7 +471,7 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
END_CRIT_SECTION();
- if ( needunion && !PageIsEmpty(page) )
+ if (needunion && !PageIsEmpty(page))
{
res.itup = (IndexTuple *) palloc(sizeof(IndexTuple));
res.ituplen = 1;
@@ -456,7 +481,7 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
UnlockReleaseBuffer(buffer);
/* delete empty children, now we havn't any links to pointed subtrees */
- for(i=0;i<nBlkToDelete;i++)
+ for (i = 0; i < nBlkToDelete; i++)
gistDeleteSubtree(gv, blkToDelete[i]);
if (ncompleted && !gv->index->rd_istemp)
@@ -506,9 +531,10 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
/* use heap's tuple count */
Assert(info->num_heap_tuples >= 0);
stats->std.num_index_tuples = info->num_heap_tuples;
+
/*
- * XXX the above is wrong if index is partial. Would it be OK to
- * just return NULL, or is there work we must do below?
+ * XXX the above is wrong if index is partial. Would it be OK to just
+ * return NULL, or is there work we must do below?
*/
}
@@ -545,8 +571,8 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
RelationGetRelationName(rel))));
/*
- * If vacuum full, we already have exclusive lock on the index.
- * Otherwise, need lock unless it's local to this backend.
+ * If vacuum full, we already have exclusive lock on the index. Otherwise,
+ * need lock unless it's local to this backend.
*/
if (info->vacuum_full)
needLock = false;
@@ -725,7 +751,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
if (callback(&(idxtuple->t_tid), callback_state))
{
- todelete[ntodelete] = i-ntodelete;
+ todelete[ntodelete] = i - ntodelete;
ntodelete++;
stats->std.tuples_removed += 1;
}
@@ -739,7 +765,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
MarkBufferDirty(buffer);
- for(i=0;i<ntodelete;i++)
+ for (i = 0; i < ntodelete; i++)
PageIndexTupleDelete(page, todelete[i]);
GistMarkTuplesDeleted(page);
@@ -750,7 +776,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
gistxlogPageUpdate *xlinfo;
rdata = formUpdateRdata(rel->rd_node, buffer,
- todelete, ntodelete,
+ todelete, ntodelete,
NULL, 0,
NULL);
xlinfo = (gistxlogPageUpdate *) rdata->next->data;
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 9680ed9619..7ba2e6d52c 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistxlog.c,v 1.23 2006/08/07 16:57:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistxlog.c,v 1.24 2006/10/04 00:29:48 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -55,11 +55,11 @@ typedef struct gistIncompleteInsert
static MemoryContext opCtx; /* working memory for operations */
-static MemoryContext insertCtx; /* holds incomplete_inserts list */
+static MemoryContext insertCtx; /* holds incomplete_inserts list */
static List *incomplete_inserts;
-#define ItemPointerEQ(a, b) \
+#define ItemPointerEQ(a, b) \
( ItemPointerGetOffsetNumber(a) == ItemPointerGetOffsetNumber(b) && \
ItemPointerGetBlockNumber (a) == ItemPointerGetBlockNumber(b) )
@@ -72,8 +72,9 @@ pushIncompleteInsert(RelFileNode node, XLogRecPtr lsn, ItemPointerData key,
MemoryContext oldCxt;
gistIncompleteInsert *ninsert;
- if ( !ItemPointerIsValid(&key) )
- /*
+ if (!ItemPointerIsValid(&key))
+
+ /*
* if key is null then we should not store insertion as incomplete,
* because it's a vacuum operation..
*/
@@ -108,8 +109,8 @@ pushIncompleteInsert(RelFileNode node, XLogRecPtr lsn, ItemPointerData key,
/*
* Stick the new incomplete insert onto the front of the list, not the
- * back. This is so that gist_xlog_cleanup will process incompletions
- * in last-in-first-out order.
+ * back. This is so that gist_xlog_cleanup will process incompletions in
+ * last-in-first-out order.
*/
incomplete_inserts = lcons(ninsert, incomplete_inserts);
@@ -121,10 +122,10 @@ forgetIncompleteInsert(RelFileNode node, ItemPointerData key)
{
ListCell *l;
- if ( !ItemPointerIsValid(&key) )
+ if (!ItemPointerIsValid(&key))
return;
- if (incomplete_inserts==NIL)
+ if (incomplete_inserts == NIL)
return;
foreach(l, incomplete_inserts)
@@ -241,9 +242,12 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record, bool isnewroot)
if (GistPageIsLeaf(page) && xlrec.len == 0 && xlrec.data->ntodelete == 0)
GistClearTuplesDeleted(page);
- if ( !GistPageIsLeaf(page) && PageGetMaxOffsetNumber(page) == InvalidOffsetNumber && xldata->blkno == GIST_ROOT_BLKNO )
- /* all links on non-leaf root page was deleted by vacuum full,
- so root page becomes a leaf */
+ if (!GistPageIsLeaf(page) && PageGetMaxOffsetNumber(page) == InvalidOffsetNumber && xldata->blkno == GIST_ROOT_BLKNO)
+
+ /*
+ * all links on non-leaf root page was deleted by vacuum full, so root
+ * page becomes a leaf
+ */
GistPageSetLeaf(page);
GistPageGetOpaque(page)->rightlink = InvalidBlockNumber;
@@ -432,11 +436,11 @@ static void
out_target(StringInfo buf, RelFileNode node, ItemPointerData key)
{
appendStringInfo(buf, "rel %u/%u/%u",
- node.spcNode, node.dbNode, node.relNode);
- if ( ItemPointerIsValid( &key ) )
+ node.spcNode, node.dbNode, node.relNode);
+ if (ItemPointerIsValid(&key))
appendStringInfo(buf, "; tid %u/%u",
- ItemPointerGetBlockNumber(&key),
- ItemPointerGetOffsetNumber(&key));
+ ItemPointerGetBlockNumber(&key),
+ ItemPointerGetOffsetNumber(&key));
}
static void
@@ -450,8 +454,8 @@ static void
out_gistxlogPageDelete(StringInfo buf, gistxlogPageDelete *xlrec)
{
appendStringInfo(buf, "page_delete: rel %u/%u/%u; blkno %u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
- xlrec->blkno);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
+ xlrec->blkno);
}
static void
@@ -460,7 +464,7 @@ out_gistxlogPageSplit(StringInfo buf, gistxlogPageSplit *xlrec)
appendStringInfo(buf, "page_split: ");
out_target(buf, xlrec->node, xlrec->key);
appendStringInfo(buf, "; block number %u splits to %d pages",
- xlrec->origblkno, xlrec->npage);
+ xlrec->origblkno, xlrec->npage);
}
void
@@ -486,15 +490,15 @@ gist_desc(StringInfo buf, uint8 xl_info, char *rec)
break;
case XLOG_GIST_CREATE_INDEX:
appendStringInfo(buf, "create_index: rel %u/%u/%u",
- ((RelFileNode *) rec)->spcNode,
- ((RelFileNode *) rec)->dbNode,
- ((RelFileNode *) rec)->relNode);
+ ((RelFileNode *) rec)->spcNode,
+ ((RelFileNode *) rec)->dbNode,
+ ((RelFileNode *) rec)->relNode);
break;
case XLOG_GIST_INSERT_COMPLETE:
appendStringInfo(buf, "complete_insert: rel %u/%u/%u",
- ((gistxlogInsertComplete *) rec)->node.spcNode,
- ((gistxlogInsertComplete *) rec)->node.dbNode,
- ((gistxlogInsertComplete *) rec)->node.relNode);
+ ((gistxlogInsertComplete *) rec)->node.spcNode,
+ ((gistxlogInsertComplete *) rec)->node.dbNode,
+ ((gistxlogInsertComplete *) rec)->node.relNode);
break;
default:
appendStringInfo(buf, "unknown gist op code %u", info);
@@ -547,22 +551,25 @@ gistxlogFindPath(Relation index, gistIncompleteInsert *insert)
elog(ERROR, "lost parent for block %u", insert->origblkno);
}
-static SplitedPageLayout*
-gistMakePageLayout(Buffer *buffers, int nbuffers) {
- SplitedPageLayout *res=NULL, *resptr;
+static SplitedPageLayout *
+gistMakePageLayout(Buffer *buffers, int nbuffers)
+{
+ SplitedPageLayout *res = NULL,
+ *resptr;
- while( nbuffers-- > 0 ) {
- Page page = BufferGetPage( buffers[ nbuffers ] );
- IndexTuple* vec;
- int veclen;
+ while (nbuffers-- > 0)
+ {
+ Page page = BufferGetPage(buffers[nbuffers]);
+ IndexTuple *vec;
+ int veclen;
- resptr = (SplitedPageLayout*)palloc0( sizeof(SplitedPageLayout) );
+ resptr = (SplitedPageLayout *) palloc0(sizeof(SplitedPageLayout));
- resptr->block.blkno = BufferGetBlockNumber( buffers[ nbuffers ] );
- resptr->block.num = PageGetMaxOffsetNumber( page );
+ resptr->block.blkno = BufferGetBlockNumber(buffers[nbuffers]);
+ resptr->block.num = PageGetMaxOffsetNumber(page);
- vec = gistextractpage( page, &veclen );
- resptr->list = gistfillitupvec( vec, veclen, &(resptr->lenlist) );
+ vec = gistextractpage(page, &veclen);
+ resptr->list = gistfillitupvec(vec, veclen, &(resptr->lenlist));
resptr->next = res;
res = resptr;
@@ -580,7 +587,7 @@ gistMakePageLayout(Buffer *buffers, int nbuffers) {
* Note that we assume the index is now in a valid state, except for the
* unfinished insertion. In particular it's safe to invoke gistFindPath();
* there shouldn't be any garbage pages for it to run into.
- *
+ *
* To complete insert we can't use basic insertion algorithm because
* during insertion we can't call user-defined support functions of opclass.
* So, we insert 'invalid' tuples without real key and do it by separate algorithm.
@@ -607,7 +614,7 @@ gistContinueInsert(gistIncompleteInsert *insert)
itup[i] = gist_form_invalid_tuple(insert->blkno[i]);
/*
- * any insertion of itup[] should make LOG message about
+ * any insertion of itup[] should make LOG message about
*/
if (insert->origblkno == GIST_ROOT_BLKNO)
@@ -626,7 +633,7 @@ gistContinueInsert(gistIncompleteInsert *insert)
Buffer *buffers;
Page *pages;
int numbuffer;
- OffsetNumber *todelete;
+ OffsetNumber *todelete;
/* construct path */
gistxlogFindPath(index, insert);
@@ -642,21 +649,22 @@ gistContinueInsert(gistIncompleteInsert *insert)
int j,
k,
pituplen = 0;
- XLogRecData *rdata;
- XLogRecPtr recptr;
- Buffer tempbuffer = InvalidBuffer;
- int ntodelete = 0;
+ XLogRecData *rdata;
+ XLogRecPtr recptr;
+ Buffer tempbuffer = InvalidBuffer;
+ int ntodelete = 0;
numbuffer = 1;
buffers[0] = ReadBuffer(index, insert->path[i]);
LockBuffer(buffers[0], GIST_EXCLUSIVE);
+
/*
* we check buffer, because we restored page earlier
*/
gistcheckpage(index, buffers[0]);
pages[0] = BufferGetPage(buffers[0]);
- Assert( !GistPageIsLeaf(pages[0]) );
+ Assert(!GistPageIsLeaf(pages[0]));
pituplen = PageGetMaxOffsetNumber(pages[0]);
@@ -678,12 +686,12 @@ gistContinueInsert(gistIncompleteInsert *insert)
}
}
- if ( ntodelete == 0 )
- elog(PANIC,"gistContinueInsert: can't find pointer to page(s)");
+ if (ntodelete == 0)
+ elog(PANIC, "gistContinueInsert: can't find pointer to page(s)");
/*
- * we check space with subtraction only first tuple to delete, hope,
- * that wiil be enough space....
+ * we check space with subtraction only first tuple to delete,
+ * hope, that wiil be enough space....
*/
if (gistnospace(pages[0], itup, lenitup, *todelete, 0))
@@ -699,7 +707,7 @@ gistContinueInsert(gistIncompleteInsert *insert)
if (BufferGetBlockNumber(buffers[0]) == GIST_ROOT_BLKNO)
{
- Buffer tmp;
+ Buffer tmp;
/*
* we split root, just copy content from root to new page
@@ -713,44 +721,48 @@ gistContinueInsert(gistIncompleteInsert *insert)
/* fill new page, root will be changed later */
tempbuffer = ReadBuffer(index, P_NEW);
LockBuffer(tempbuffer, GIST_EXCLUSIVE);
- memcpy( BufferGetPage(tempbuffer), pages[0], BufferGetPageSize(tempbuffer) );
+ memcpy(BufferGetPage(tempbuffer), pages[0], BufferGetPageSize(tempbuffer));
/* swap buffers[0] (was root) and temp buffer */
tmp = buffers[0];
buffers[0] = tempbuffer;
- tempbuffer = tmp; /* now in tempbuffer GIST_ROOT_BLKNO, it is still unchanged */
+ tempbuffer = tmp; /* now in tempbuffer GIST_ROOT_BLKNO,
+ * it is still unchanged */
pages[0] = BufferGetPage(buffers[0]);
}
START_CRIT_SECTION();
- for(j=0;j<ntodelete;j++)
+ for (j = 0; j < ntodelete; j++)
PageIndexTupleDelete(pages[0], todelete[j]);
rdata = formSplitRdata(index->rd_node, insert->path[i],
- false, &(insert->key),
- gistMakePageLayout( buffers, numbuffer ) );
+ false, &(insert->key),
+ gistMakePageLayout(buffers, numbuffer));
- } else {
+ }
+ else
+ {
START_CRIT_SECTION();
- for(j=0;j<ntodelete;j++)
+ for (j = 0; j < ntodelete; j++)
PageIndexTupleDelete(pages[0], todelete[j]);
gistfillbuffer(index, pages[0], itup, lenitup, InvalidOffsetNumber);
- rdata = formUpdateRdata(index->rd_node, buffers[0],
- todelete, ntodelete,
- itup, lenitup, &(insert->key));
+ rdata = formUpdateRdata(index->rd_node, buffers[0],
+ todelete, ntodelete,
+ itup, lenitup, &(insert->key));
}
- /*
- * use insert->key as mark for completion of insert (form*Rdata() above)
- * for following possible replays
+ /*
+ * use insert->key as mark for completion of insert (form*Rdata()
+ * above) for following possible replays
*/
/* write pages, we should mark it dirty befor XLogInsert() */
- for (j = 0; j < numbuffer; j++) {
+ for (j = 0; j < numbuffer; j++)
+ {
GistPageGetOpaque(pages[j])->rightlink = InvalidBlockNumber;
MarkBufferDirty(buffers[j]);
}
@@ -764,12 +776,14 @@ gistContinueInsert(gistIncompleteInsert *insert)
END_CRIT_SECTION();
lenitup = numbuffer;
- for (j = 0; j < numbuffer; j++) {
+ for (j = 0; j < numbuffer; j++)
+ {
itup[j] = gist_form_invalid_tuple(BufferGetBlockNumber(buffers[j]));
UnlockReleaseBuffer(buffers[j]);
}
- if ( tempbuffer != InvalidBuffer ) {
+ if (tempbuffer != InvalidBuffer)
+ {
/*
* it was a root split, so fill it by new values
*/
@@ -780,9 +794,9 @@ gistContinueInsert(gistIncompleteInsert *insert)
}
ereport(LOG,
- (errmsg("index %u/%u/%u needs VACUUM FULL or REINDEX to finish crash recovery",
+ (errmsg("index %u/%u/%u needs VACUUM FULL or REINDEX to finish crash recovery",
insert->node.spcNode, insert->node.dbNode, insert->node.relNode),
- errdetail("Incomplete insertion detected during crash replay.")));
+ errdetail("Incomplete insertion detected during crash replay.")));
}
void
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 3b05d11ee1..1e2d779a14 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.47 2006/03/05 15:58:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.48 2006/10/04 00:29:48 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -138,9 +138,9 @@ hashtext(PG_FUNCTION_ARGS)
Datum result;
/*
- * Note: this is currently identical in behavior to hashvarlena, but
- * keep it as a separate function in case we someday want to do something
- * different in non-C locales. (See also hashbpchar, if so.)
+ * Note: this is currently identical in behavior to hashvarlena, but keep
+ * it as a separate function in case we someday want to do something
+ * different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA(key),
VARSIZE(key) - VARHDRSZ);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index f5a1fcfd81..696d4bf616 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.59 2006/07/03 22:45:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.60 2006/10/04 00:29:48 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -224,7 +224,7 @@ _hash_metapinit(Relation rel)
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly if the index datatype is fixed-width, but for var-width there's
* some guessing involved.
*/
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 759f0b1f13..57acaf2bb8 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.219 2006/08/18 16:09:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.220 2006/10/04 00:29:48 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -133,9 +133,9 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
snapshot = scan->rs_snapshot;
/*
- * We must hold share lock on the buffer content while examining
- * tuple visibility. Afterwards, however, the tuples we have found
- * to be visible are guaranteed good as long as we hold the buffer pin.
+ * We must hold share lock on the buffer content while examining tuple
+ * visibility. Afterwards, however, the tuples we have found to be
+ * visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -223,7 +223,7 @@ heapgettup(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
- page = 0; /* first page */
+ page = 0; /* first page */
heapgetpage(scan, page);
lineoff = FirstOffsetNumber; /* first offnum */
scan->rs_inited = true;
@@ -231,8 +231,8 @@ heapgettup(HeapScanDesc scan,
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
- lineoff = /* next offnum */
+ page = scan->rs_cblock; /* current page */
+ lineoff = /* next offnum */
OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
}
@@ -263,7 +263,7 @@ heapgettup(HeapScanDesc scan,
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ page = scan->rs_cblock; /* current page */
}
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
@@ -273,12 +273,12 @@ heapgettup(HeapScanDesc scan,
if (!scan->rs_inited)
{
- lineoff = lines; /* final offnum */
+ lineoff = lines; /* final offnum */
scan->rs_inited = true;
}
else
{
- lineoff = /* previous offnum */
+ lineoff = /* previous offnum */
OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
}
/* page and lineoff now reference the physically previous tid */
@@ -450,7 +450,7 @@ heapgettup_pagemode(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
- page = 0; /* first page */
+ page = 0; /* first page */
heapgetpage(scan, page);
lineindex = 0;
scan->rs_inited = true;
@@ -458,7 +458,7 @@ heapgettup_pagemode(HeapScanDesc scan,
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ page = scan->rs_cblock; /* current page */
lineindex = scan->rs_cindex + 1;
}
@@ -487,7 +487,7 @@ heapgettup_pagemode(HeapScanDesc scan,
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ page = scan->rs_cblock; /* current page */
}
dp = (Page) BufferGetPage(scan->rs_cbuf);
@@ -721,8 +721,8 @@ try_relation_open(Oid relationId, LOCKMODE lockmode)
LockRelationOid(relationId, lockmode);
/*
- * Now that we have the lock, probe to see if the relation really
- * exists or not.
+ * Now that we have the lock, probe to see if the relation really exists
+ * or not.
*/
if (!SearchSysCacheExists(RELOID,
ObjectIdGetDatum(relationId),
@@ -764,7 +764,7 @@ relation_open_nowait(Oid relationId, LOCKMODE lockmode)
if (!ConditionalLockRelationOid(relationId, lockmode))
{
/* try to throw error by name; relation could be deleted... */
- char *relname = get_rel_name(relationId);
+ char *relname = get_rel_name(relationId);
if (relname)
ereport(ERROR,
@@ -774,8 +774,8 @@ relation_open_nowait(Oid relationId, LOCKMODE lockmode)
else
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on relation with OID %u",
- relationId)));
+ errmsg("could not obtain lock on relation with OID %u",
+ relationId)));
}
}
@@ -801,8 +801,8 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
/*
* Check for shared-cache-inval messages before trying to open the
- * relation. This is needed to cover the case where the name identifies
- * a rel that has been dropped and recreated since the start of our
+ * relation. This is needed to cover the case where the name identifies a
+ * rel that has been dropped and recreated since the start of our
* transaction: if we don't flush the old syscache entry then we'll latch
* onto that entry and suffer an error when we do RelationIdGetRelation.
* Note that relation_open does not need to do this, since a relation's
@@ -2723,7 +2723,7 @@ l3:
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
- * of this function in Postgres are extremely limited. Nonetheless we
+ * of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
@@ -2840,6 +2840,7 @@ heap_restrpos(HeapScanDesc scan)
if (!ItemPointerIsValid(&scan->rs_mctid))
{
scan->rs_ctup.t_data = NULL;
+
/*
* unpin scan buffers
*/
@@ -2852,7 +2853,7 @@ heap_restrpos(HeapScanDesc scan)
else
{
/*
- * If we reached end of scan, rs_inited will now be false. We must
+ * If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
@@ -2862,13 +2863,13 @@ heap_restrpos(HeapScanDesc scan)
scan->rs_cindex = scan->rs_mindex;
heapgettup_pagemode(scan,
NoMovementScanDirection,
- 0, /* needn't recheck scan keys */
+ 0, /* needn't recheck scan keys */
NULL);
}
else
heapgettup(scan,
NoMovementScanDirection,
- 0, /* needn't recheck scan keys */
+ 0, /* needn't recheck scan keys */
NULL);
}
}
@@ -2920,7 +2921,7 @@ log_heap_clean(Relation reln, Buffer buffer, OffsetNumber *unused, int uncnt)
}
/*
- * Perform XLogInsert for a heap-update operation. Caller must already
+ * Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
@@ -3173,8 +3174,8 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
buffer = XLogReadBuffer(reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- true);
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ true);
Assert(BufferIsValid(buffer));
page = (Page) BufferGetPage(buffer);
@@ -3183,13 +3184,13 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
else
{
buffer = XLogReadBuffer(reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- false);
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@@ -3308,6 +3309,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move)
/* Set forward chain link in t_ctid */
htup->t_ctid = xlrec->newtid;
}
+
/*
* this test is ugly, but necessary to avoid thinking that insert change
* is already applied
@@ -3345,7 +3347,7 @@ newt:;
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@@ -3548,9 +3550,9 @@ static void
out_target(StringInfo buf, xl_heaptid *target)
{
appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
void
@@ -3586,8 +3588,8 @@ heap_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "update: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; new %u/%u",
- ItemPointerGetBlockNumber(&(xlrec->newtid)),
- ItemPointerGetOffsetNumber(&(xlrec->newtid)));
+ ItemPointerGetBlockNumber(&(xlrec->newtid)),
+ ItemPointerGetOffsetNumber(&(xlrec->newtid)));
}
else if (info == XLOG_HEAP_MOVE)
{
@@ -3599,24 +3601,24 @@ heap_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "move: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; new %u/%u",
- ItemPointerGetBlockNumber(&(xlrec->newtid)),
- ItemPointerGetOffsetNumber(&(xlrec->newtid)));
+ ItemPointerGetBlockNumber(&(xlrec->newtid)),
+ ItemPointerGetOffsetNumber(&(xlrec->newtid)));
}
else if (info == XLOG_HEAP_CLEAN)
{
xl_heap_clean *xlrec = (xl_heap_clean *) rec;
appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->block);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode, xlrec->block);
}
else if (info == XLOG_HEAP_NEWPAGE)
{
xl_heap_newpage *xlrec = (xl_heap_newpage *) rec;
appendStringInfo(buf, "newpage: rel %u/%u/%u; blk %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->blkno);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode, xlrec->blkno);
}
else if (info == XLOG_HEAP_LOCK)
{
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 31386de167..07c0a52990 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.64 2006/09/10 23:33:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.65 2006/10/04 00:29:48 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1331,7 +1331,7 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
if (length == 0)
- return result; /* Can save a lot of work at this point! */
+ return result; /* Can save a lot of work at this point! */
startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE;
endchunk = (sliceoffset + length - 1) / TOAST_MAX_CHUNK_SIZE;
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 347d2b5365..26758cc197 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.58 2006/07/31 20:08:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.59 2006/10/04 00:29:48 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -86,7 +86,7 @@ RelationGetIndexScan(Relation indexRelation,
else
scan->keyData = NULL;
- scan->is_multiscan = false; /* caller may change this */
+ scan->is_multiscan = false; /* caller may change this */
scan->kill_prior_tuple = false;
scan->ignore_killed_tuples = true; /* default setting */
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 2663876f49..493e9f0ad0 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.94 2006/07/31 20:08:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.95 2006/10/04 00:29:48 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -122,7 +122,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation,
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
- * obtained on the index. (Generally, NoLock should only be
+ * obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
@@ -209,7 +209,7 @@ index_insert(Relation indexRelation,
* index_getnext on this scan; index_getnext_indexitem will not use the
* heapRelation link (nor the snapshot). However, the caller had better
* be holding some kind of lock on the heap relation in any case, to ensure
- * no one deletes it (or the index) out from under us. Caller must also
+ * no one deletes it (or the index) out from under us. Caller must also
* be holding a lock on the index.
*/
IndexScanDesc
@@ -553,7 +553,7 @@ index_getmulti(IndexScanDesc scan,
*
* callback routine tells whether a given main-heap tuple is
* to be deleted
- *
+ *
* return value is an optional palloc'd struct of statistics
* ----------------
*/
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 910d654443..d74f1a336a 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.143 2006/08/25 04:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.144 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -252,7 +252,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
*/
htup.t_self = itup->t_tid;
if (heap_fetch(heapRel, SnapshotSelf, &htup, &hbuffer,
- false, NULL))
+ false, NULL))
{
/* Normal case --- it's still live */
ReleaseBuffer(hbuffer);
@@ -355,7 +355,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked. On return,
+ * insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* If 'afteritem' is >0 then the new tuple must be inserted after the
@@ -608,7 +608,7 @@ _bt_insertonpg(Relation rel,
if (!rel->rd_istemp)
{
xl_btree_insert xlrec;
- BlockNumber xldownlink;
+ BlockNumber xldownlink;
xl_btree_metadata xlmeta;
uint8 xlinfo;
XLogRecPtr recptr;
@@ -888,16 +888,17 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
if (sopaque->btpo_prev != ropaque->btpo_prev)
elog(PANIC, "right sibling's left-link doesn't match");
+
/*
* Check to see if we can set the SPLIT_END flag in the right-hand
* split page; this can save some I/O for vacuum since it need not
* proceed to the right sibling. We can set the flag if the right
- * sibling has a different cycleid: that means it could not be part
- * of a group of pages that were all split off from the same ancestor
+ * sibling has a different cycleid: that means it could not be part of
+ * a group of pages that were all split off from the same ancestor
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
- * examine both pages. But if D, our right sibling, has a different
+ * examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
@@ -911,8 +912,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* NO EREPORT(ERROR) till right sibling is updated. We can get away with
* not starting the critical section till here because we haven't been
- * scribbling on the original page yet, and we don't care about the
- * new sibling until it's linked into the btree.
+ * scribbling on the original page yet, and we don't care about the new
+ * sibling until it's linked into the btree.
*/
START_CRIT_SECTION();
@@ -947,8 +948,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* Direct access to page is not good but faster - we should implement
* some new func in page API. Note we only store the tuples
* themselves, knowing that the item pointers are in the same order
- * and can be reconstructed by scanning the tuples. See comments
- * for _bt_restore_page().
+ * and can be reconstructed by scanning the tuples. See comments for
+ * _bt_restore_page().
*/
xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
((PageHeader) leftpage)->pd_upper;
@@ -1708,17 +1709,17 @@ _bt_isequal(TupleDesc itupdesc, Page page, OffsetNumber offnum,
static void
_bt_vacuum_one_page(Relation rel, Buffer buffer)
{
- OffsetNumber deletable[MaxOffsetNumber];
- int ndeletable = 0;
- OffsetNumber offnum,
- minoff,
- maxoff;
- Page page = BufferGetPage(buffer);
- BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ OffsetNumber deletable[MaxOffsetNumber];
+ int ndeletable = 0;
+ OffsetNumber offnum,
+ minoff,
+ maxoff;
+ Page page = BufferGetPage(buffer);
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * Scan over all items to see which ones need deleted
- * according to LP_DELETE flags.
+ * Scan over all items to see which ones need deleted according to
+ * LP_DELETE flags.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
@@ -1726,7 +1727,7 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer)
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- ItemId itemId = PageGetItemId(page, offnum);
+ ItemId itemId = PageGetItemId(page, offnum);
if (ItemIdDeleted(itemId))
deletable[ndeletable++] = offnum;
@@ -1734,10 +1735,11 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer)
if (ndeletable > 0)
_bt_delitems(rel, buffer, deletable, ndeletable);
+
/*
* Note: if we didn't find any LP_DELETE items, then the page's
- * BTP_HAS_GARBAGE hint bit is falsely set. We do not bother
- * expending a separate write to clear it, however. We will clear
- * it when we split the page.
+ * BTP_HAS_GARBAGE hint bit is falsely set. We do not bother expending a
+ * separate write to clear it, however. We will clear it when we split
+ * the page.
*/
}
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 080e10c88c..def14adf1f 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.99 2006/07/25 19:13:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.100 2006/10/04 00:29:49 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -124,10 +124,10 @@ _bt_getroot(Relation rel, int access)
/*
* Since the cache might be stale, we check the page more carefully
- * here than normal. We *must* check that it's not deleted.
- * If it's not alone on its level, then we reject too --- this
- * may be overly paranoid but better safe than sorry. Note we
- * don't check P_ISROOT, because that's not set in a "fast root".
+ * here than normal. We *must* check that it's not deleted. If it's
+ * not alone on its level, then we reject too --- this may be overly
+ * paranoid but better safe than sorry. Note we don't check P_ISROOT,
+ * because that's not set in a "fast root".
*/
if (!P_IGNORE(rootopaque) &&
rootopaque->btpo.level == rootlevel &&
@@ -662,18 +662,18 @@ _bt_delitems(Relation rel, Buffer buf,
PageIndexMultiDelete(page, itemnos, nitems);
/*
- * We can clear the vacuum cycle ID since this page has certainly
- * been processed by the current vacuum scan.
+ * We can clear the vacuum cycle ID since this page has certainly been
+ * processed by the current vacuum scan.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_cycleid = 0;
/*
* Mark the page as not containing any LP_DELETE items. This is not
- * certainly true (there might be some that have recently been marked,
- * but weren't included in our target-item list), but it will almost
- * always be true and it doesn't seem worth an additional page scan
- * to check it. Remember that BTP_HAS_GARBAGE is only a hint anyway.
+ * certainly true (there might be some that have recently been marked, but
+ * weren't included in our target-item list), but it will almost always be
+ * true and it doesn't seem worth an additional page scan to check it.
+ * Remember that BTP_HAS_GARBAGE is only a hint anyway.
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index fa5b162c90..c9a7a8b5b2 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.151 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.152 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ typedef struct
BlockNumber *freePages;
int nFreePages; /* number of entries in freePages[] */
int maxFreePages; /* allocated size of freePages[] */
- BlockNumber totFreePages; /* true total # of free pages */
+ BlockNumber totFreePages; /* true total # of free pages */
MemoryContext pagedelcontext;
} BTVacState;
@@ -70,7 +70,7 @@ static void btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state,
BTCycleId cycleid);
static void btvacuumpage(BTVacState *vstate, BlockNumber blkno,
- BlockNumber orig_blkno);
+ BlockNumber orig_blkno);
/*
@@ -109,8 +109,8 @@ btbuild(PG_FUNCTION_ARGS)
buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique, false);
/*
- * If building a unique index, put dead tuples in a second spool to
- * keep them out of the uniqueness check.
+ * If building a unique index, put dead tuples in a second spool to keep
+ * them out of the uniqueness check.
*/
if (indexInfo->ii_Unique)
buildstate.spool2 = _bt_spoolinit(index, false, true);
@@ -146,11 +146,11 @@ btbuild(PG_FUNCTION_ARGS)
#endif /* BTREE_BUILD_STATS */
/*
- * If we are reindexing a pre-existing index, it is critical to send out
- * a relcache invalidation SI message to ensure all backends re-read the
- * index metapage. We expect that the caller will ensure that happens
- * (typically as a side effect of updating index stats, but it must
- * happen even if the stats don't change!)
+ * If we are reindexing a pre-existing index, it is critical to send out a
+ * relcache invalidation SI message to ensure all backends re-read the
+ * index metapage. We expect that the caller will ensure that happens
+ * (typically as a side effect of updating index stats, but it must happen
+ * even if the stats don't change!)
*/
/*
@@ -252,11 +252,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (scan->kill_prior_tuple)
{
/*
- * Yes, remember it for later. (We'll deal with all such tuples
+ * Yes, remember it for later. (We'll deal with all such tuples
* at once right before leaving the index page.) The test for
* numKilled overrun is not just paranoia: if the caller reverses
* direction in the indexscan then the same item might get entered
- * multiple times. It's not worth trying to optimize that, so we
+ * multiple times. It's not worth trying to optimize that, so we
* don't detect it, but instead just forget any excess entries.
*/
if (so->killedItems == NULL)
@@ -316,8 +316,8 @@ btgetmulti(PG_FUNCTION_ARGS)
while (ntids < max_tids)
{
/*
- * Advance to next tuple within page. This is the same as the
- * easy case in _bt_next().
+ * Advance to next tuple within page. This is the same as the easy
+ * case in _bt_next().
*/
if (++so->currPos.itemIndex > so->currPos.lastItem)
{
@@ -373,7 +373,7 @@ btrescan(PG_FUNCTION_ARGS)
so->keyData = (ScanKey) palloc(scan->numberOfKeys * sizeof(ScanKeyData));
else
so->keyData = NULL;
- so->killedItems = NULL; /* until needed */
+ so->killedItems = NULL; /* until needed */
so->numKilled = 0;
scan->opaque = so;
}
@@ -461,9 +461,9 @@ btmarkpos(PG_FUNCTION_ARGS)
/*
* Just record the current itemIndex. If we later step to next page
- * before releasing the marked position, _bt_steppage makes a full copy
- * of the currPos struct in markPos. If (as often happens) the mark is
- * moved before we leave the page, we don't have to do that work.
+ * before releasing the marked position, _bt_steppage makes a full copy of
+ * the currPos struct in markPos. If (as often happens) the mark is moved
+ * before we leave the page, we don't have to do that work.
*/
if (BTScanPosIsValid(so->currPos))
so->markItemIndex = so->currPos.itemIndex;
@@ -485,11 +485,11 @@ btrestrpos(PG_FUNCTION_ARGS)
if (so->markItemIndex >= 0)
{
/*
- * The mark position is on the same page we are currently on.
- * Just restore the itemIndex.
+ * The mark position is on the same page we are currently on. Just
+ * restore the itemIndex.
*/
so->currPos.itemIndex = so->markItemIndex;
- }
+ }
else
{
/* we aren't holding any read locks, but gotta drop the pin */
@@ -527,7 +527,7 @@ Datum
btbulkdelete(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
- IndexBulkDeleteResult * volatile stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ IndexBulkDeleteResult *volatile stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
void *callback_state = (void *) PG_GETARG_POINTER(3);
Relation rel = info->index;
@@ -569,10 +569,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
/*
- * If btbulkdelete was called, we need not do anything, just return
- * the stats from the latest btbulkdelete call. If it wasn't called,
- * we must still do a pass over the index, to recycle any newly-recyclable
- * pages and to obtain index statistics.
+ * If btbulkdelete was called, we need not do anything, just return the
+ * stats from the latest btbulkdelete call. If it wasn't called, we must
+ * still do a pass over the index, to recycle any newly-recyclable pages
+ * and to obtain index statistics.
*
* Since we aren't going to actually delete any leaf items, there's no
* need to go through all the vacuum-cycle-ID pushups.
@@ -586,8 +586,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* During a non-FULL vacuum it's quite possible for us to be fooled by
* concurrent page splits into double-counting some index tuples, so
- * disbelieve any total that exceeds the underlying heap's count.
- * (We can't check this during btbulkdelete.)
+ * disbelieve any total that exceeds the underlying heap's count. (We
+ * can't check this during btbulkdelete.)
*/
if (!info->vacuum_full)
{
@@ -622,8 +622,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
bool needLock;
/*
- * Reset counts that will be incremented during the scan; needed in
- * case of multiple scans during a single VACUUM command
+ * Reset counts that will be incremented during the scan; needed in case
+ * of multiple scans during a single VACUUM command
*/
stats->num_index_tuples = 0;
stats->pages_deleted = 0;
@@ -647,24 +647,24 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * The outer loop iterates over all index pages except the metapage,
- * in physical order (we hope the kernel will cooperate in providing
+ * The outer loop iterates over all index pages except the metapage, in
+ * physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to
* delete some deletable tuples. Hence, we must repeatedly check the
* relation length. We must acquire the relation-extension lock while
* doing so to avoid a race condition: if someone else is extending the
* relation, there is a window where bufmgr/smgr have created a new
- * all-zero page but it hasn't yet been write-locked by _bt_getbuf().
- * If we manage to scan such a page here, we'll improperly assume it can
- * be recycled. Taking the lock synchronizes things enough to prevent a
+ * all-zero page but it hasn't yet been write-locked by _bt_getbuf(). If
+ * we manage to scan such a page here, we'll improperly assume it can be
+ * recycled. Taking the lock synchronizes things enough to prevent a
* problem: either num_pages won't include the new page, or _bt_getbuf
* already has write lock on the buffer and it will be fully initialized
* before we can examine it. (See also vacuumlazy.c, which has the same
- * issue.) Also, we need not worry if a page is added immediately after
+ * issue.) Also, we need not worry if a page is added immediately after
* we look; the page splitting code already has write-lock on the left
- * page before it adds a right page, so we must already have processed
- * any tuples due to be moved into such a page.
+ * page before it adds a right page, so we must already have processed any
+ * tuples due to be moved into such a page.
*
* We can skip locking for new or temp relations, however, since no one
* else could be accessing them.
@@ -771,7 +771,7 @@ btvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno)
void *callback_state = vstate->callback_state;
Relation rel = info->index;
bool delete_now;
- BlockNumber recurse_to;
+ BlockNumber recurse_to;
Buffer buf;
Page page;
BTPageOpaque opaque;
@@ -796,10 +796,10 @@ restart:
_bt_checkpage(rel, buf);
/*
- * If we are recursing, the only case we want to do anything with is
- * a live leaf page having the current vacuum cycle ID. Any other state
- * implies we already saw the page (eg, deleted it as being empty).
- * In particular, we don't want to risk adding it to freePages twice.
+ * If we are recursing, the only case we want to do anything with is a
+ * live leaf page having the current vacuum cycle ID. Any other state
+ * implies we already saw the page (eg, deleted it as being empty). In
+ * particular, we don't want to risk adding it to freePages twice.
*/
if (blkno != orig_blkno)
{
@@ -838,25 +838,24 @@ restart:
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable;
OffsetNumber offnum,
- minoff,
- maxoff;
+ minoff,
+ maxoff;
/*
- * Trade in the initial read lock for a super-exclusive write
- * lock on this page. We must get such a lock on every leaf page
- * over the course of the vacuum scan, whether or not it actually
- * contains any deletable tuples --- see nbtree/README.
+ * Trade in the initial read lock for a super-exclusive write lock on
+ * this page. We must get such a lock on every leaf page over the
+ * course of the vacuum scan, whether or not it actually contains any
+ * deletable tuples --- see nbtree/README.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
/*
- * Check whether we need to recurse back to earlier pages. What
- * we are concerned about is a page split that happened since we
- * started the vacuum scan. If the split moved some tuples to a
- * lower page then we might have missed 'em. If so, set up for
- * tail recursion. (Must do this before possibly clearing
- * btpo_cycleid below!)
+ * Check whether we need to recurse back to earlier pages. What we
+ * are concerned about is a page split that happened since we started
+ * the vacuum scan. If the split moved some tuples to a lower page
+ * then we might have missed 'em. If so, set up for tail recursion.
+ * (Must do this before possibly clearing btpo_cycleid below!)
*/
if (vstate->cycleid != 0 &&
opaque->btpo_cycleid == vstate->cycleid &&
@@ -866,8 +865,8 @@ restart:
recurse_to = opaque->btpo_next;
/*
- * Scan over all items to see which ones need deleted
- * according to the callback function.
+ * Scan over all items to see which ones need deleted according to the
+ * callback function.
*/
ndeletable = 0;
minoff = P_FIRSTDATAKEY(opaque);
@@ -890,8 +889,8 @@ restart:
}
/*
- * Apply any needed deletes. We issue just one _bt_delitems()
- * call per page, so as to minimize WAL traffic.
+ * Apply any needed deletes. We issue just one _bt_delitems() call
+ * per page, so as to minimize WAL traffic.
*/
if (ndeletable > 0)
{
@@ -908,8 +907,8 @@ restart:
* have any deletions to do. (If we do, _bt_delitems takes care
* of this.) This ensures we won't process the page again.
*
- * We treat this like a hint-bit update because there's no need
- * to WAL-log it.
+ * We treat this like a hint-bit update because there's no need to
+ * WAL-log it.
*/
if (vstate->cycleid != 0 &&
opaque->btpo_cycleid == vstate->cycleid)
@@ -920,10 +919,10 @@ restart:
}
/*
- * If it's now empty, try to delete; else count the live tuples.
- * We don't delete when recursing, though, to avoid putting entries
- * into freePages out-of-order (doesn't seem worth any extra code to
- * handle the case).
+ * If it's now empty, try to delete; else count the live tuples. We
+ * don't delete when recursing, though, to avoid putting entries into
+ * freePages out-of-order (doesn't seem worth any extra code to handle
+ * the case).
*/
if (minoff > maxoff)
delete_now = (blkno == orig_blkno);
@@ -947,13 +946,12 @@ restart:
stats->pages_deleted++;
/*
- * During VACUUM FULL it's okay to recycle deleted pages
- * immediately, since there can be no other transactions scanning
- * the index. Note that we will only recycle the current page and
- * not any parent pages that _bt_pagedel might have recursed to;
- * this seems reasonable in the name of simplicity. (Trying to do
- * otherwise would mean we'd have to sort the list of recyclable
- * pages we're building.)
+ * During VACUUM FULL it's okay to recycle deleted pages immediately,
+ * since there can be no other transactions scanning the index. Note
+ * that we will only recycle the current page and not any parent pages
+ * that _bt_pagedel might have recursed to; this seems reasonable in
+ * the name of simplicity. (Trying to do otherwise would mean we'd
+ * have to sort the list of recyclable pages we're building.)
*/
if (ndel && info->vacuum_full)
{
@@ -969,11 +967,11 @@ restart:
_bt_relbuf(rel, buf);
/*
- * This is really tail recursion, but if the compiler is too stupid
- * to optimize it as such, we'd eat an uncomfortably large amount of
- * stack space per recursion level (due to the deletable[] array).
- * A failure is improbable since the number of levels isn't likely to be
- * large ... but just in case, let's hand-optimize into a loop.
+ * This is really tail recursion, but if the compiler is too stupid to
+ * optimize it as such, we'd eat an uncomfortably large amount of stack
+ * space per recursion level (due to the deletable[] array). A failure is
+ * improbable since the number of levels isn't likely to be large ... but
+ * just in case, let's hand-optimize into a loop.
*/
if (recurse_to != P_NONE)
{
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 07bc076e49..6d9be1b017 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.106 2006/08/24 01:18:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.107 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,7 +22,7 @@
static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir,
- OffsetNumber offnum);
+ OffsetNumber offnum);
static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir);
static Buffer _bt_walk_left(Relation rel, Buffer buf);
static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
@@ -417,7 +417,7 @@ _bt_compare(Relation rel,
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
- * conditions, and the tree ordering. We find the first item (or,
+ * conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
@@ -604,7 +604,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
{
ScanKey cur = startKeys[i];
- Assert(cur->sk_attno == i+1);
+ Assert(cur->sk_attno == i + 1);
if (cur->sk_flags & SK_ROW_HEADER)
{
@@ -612,16 +612,17 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* Row comparison header: look to the first row member instead.
*
* The member scankeys are already in insertion format (ie, they
- * have sk_func = 3-way-comparison function), but we have to
- * watch out for nulls, which _bt_preprocess_keys didn't check.
- * A null in the first row member makes the condition unmatchable,
- * just like qual_ok = false.
+ * have sk_func = 3-way-comparison function), but we have to watch
+ * out for nulls, which _bt_preprocess_keys didn't check. A null
+ * in the first row member makes the condition unmatchable, just
+ * like qual_ok = false.
*/
cur = (ScanKey) DatumGetPointer(cur->sk_argument);
Assert(cur->sk_flags & SK_ROW_MEMBER);
if (cur->sk_flags & SK_ISNULL)
return false;
memcpy(scankeys + i, cur, sizeof(ScanKeyData));
+
/*
* If the row comparison is the last positioning key we accepted,
* try to add additional keys from the lower-order row members.
@@ -833,10 +834,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
- * the last item on this page. Adjust the starting offset if needed.
- * (If this results in an offset before the first item or after the last
- * one, _bt_readpage will report no items found, and then we'll step to
- * the next page as needed.)
+ * the last item on this page. Adjust the starting offset if needed. (If
+ * this results in an offset before the first item or after the last one,
+ * _bt_readpage will report no items found, and then we'll step to the
+ * next page as needed.)
*/
if (goback)
offnum = OffsetNumberPrev(offnum);
@@ -882,8 +883,8 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
BTScanOpaque so = (BTScanOpaque) scan->opaque;
/*
- * Advance to next tuple on current page; or if there's no more,
- * try to step to the next page with data.
+ * Advance to next tuple on current page; or if there's no more, try to
+ * step to the next page with data.
*/
if (ScanDirectionIsForward(dir))
{
@@ -954,8 +955,8 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum)
/*
* we must save the page's right-link while scanning it; this tells us
- * where to step right to after we're done with these items. There is
- * no corresponding need for the left-link, since splits always go right.
+ * where to step right to after we're done with these items. There is no
+ * corresponding need for the left-link, since splits always go right.
*/
so->currPos.nextPage = opaque->btpo_next;
@@ -1055,8 +1056,8 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
_bt_killitems(scan, true);
/*
- * Before we modify currPos, make a copy of the page data if there
- * was a mark position that needs it.
+ * Before we modify currPos, make a copy of the page data if there was a
+ * mark position that needs it.
*/
if (so->markItemIndex >= 0)
{
@@ -1112,11 +1113,11 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
so->currPos.moreRight = true;
/*
- * Walk left to the next page with data. This is much more
- * complex than the walk-right case because of the possibility
- * that the page to our left splits while we are in flight to it,
- * plus the possibility that the page we were on gets deleted
- * after we leave it. See nbtree/README for details.
+ * Walk left to the next page with data. This is much more complex
+ * than the walk-right case because of the possibility that the page
+ * to our left splits while we are in flight to it, plus the
+ * possibility that the page we were on gets deleted after we leave
+ * it. See nbtree/README for details.
*/
for (;;)
{
@@ -1136,9 +1137,9 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
return false;
/*
- * Okay, we managed to move left to a non-deleted page.
- * Done if it's not half-dead and contains matching tuples.
- * Else loop back and do it all again.
+ * Okay, we managed to move left to a non-deleted page. Done if
+ * it's not half-dead and contains matching tuples. Else loop back
+ * and do it all again.
*/
page = BufferGetPage(so->currPos.buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index da2b191d5c..4951dca218 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -57,7 +57,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.106 2006/07/14 14:52:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.107 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,7 +125,7 @@ static void _bt_slideleft(Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
IndexTuple itup, OffsetNumber itup_off);
static void _bt_buildadd(BTWriteState *wstate, BTPageState *state,
- IndexTuple itup);
+ IndexTuple itup);
static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state);
static void _bt_load(BTWriteState *wstate,
BTSpool *btspool, BTSpool *btspool2);
@@ -351,7 +351,7 @@ _bt_pagestate(BTWriteState *wstate, uint32 level)
state->btps_full = (BLCKSZ * (100 - BTREE_NONLEAF_FILLFACTOR) / 100);
else
state->btps_full = RelationGetTargetPageFreeSpace(wstate->index,
- BTREE_DEFAULT_FILLFACTOR);
+ BTREE_DEFAULT_FILLFACTOR);
/* no parent level, yet */
state->btps_next = NULL;
@@ -464,8 +464,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
Size itupsz;
/*
- * This is a handy place to check for cancel interrupts during the
- * btree load phase of index creation.
+ * This is a handy place to check for cancel interrupts during the btree
+ * load phase of index creation.
*/
CHECK_FOR_INTERRUPTS();
@@ -499,10 +499,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
"or use full text indexing.")));
/*
- * Check to see if page is "full". It's definitely full if the item
- * won't fit. Otherwise, compare to the target freespace derived from
- * the fillfactor. However, we must put at least two items on each
- * page, so disregard fillfactor if we don't have that many.
+ * Check to see if page is "full". It's definitely full if the item won't
+ * fit. Otherwise, compare to the target freespace derived from the
+ * fillfactor. However, we must put at least two items on each page, so
+ * disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
{
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 8b68054112..a562ee6cbd 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.78 2006/07/25 19:13:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.79 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,8 +28,8 @@
static void _bt_mark_scankey_required(ScanKey skey);
static bool _bt_check_rowcompare(ScanKey skey,
- IndexTuple tuple, TupleDesc tupdesc,
- ScanDirection dir, bool *continuescan);
+ IndexTuple tuple, TupleDesc tupdesc,
+ ScanDirection dir, bool *continuescan);
/*
@@ -83,7 +83,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
@@ -388,7 +388,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
@@ -461,7 +461,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
+ * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns.
@@ -472,12 +472,12 @@ _bt_preprocess_keys(IndexScanDesc scan)
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
{
- int addflags;
+ int addflags;
switch (skey->sk_strategy)
{
@@ -503,8 +503,8 @@ _bt_mark_scankey_required(ScanKey skey)
if (skey->sk_flags & SK_ROW_HEADER)
{
- ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
- AttrNumber attno = skey->sk_attno;
+ ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
+ AttrNumber attno = skey->sk_attno;
/* First subkey should be same as the header says */
Assert(subkey->sk_attno == attno);
@@ -558,12 +558,12 @@ _bt_checkkeys(IndexScanDesc scan,
*continuescan = true; /* default assumption */
/*
- * If the scan specifies not to return killed tuples, then we treat
- * a killed tuple as not passing the qual. Most of the time, it's a
- * win to not bother examining the tuple's index keys, but just return
+ * If the scan specifies not to return killed tuples, then we treat a
+ * killed tuple as not passing the qual. Most of the time, it's a win to
+ * not bother examining the tuple's index keys, but just return
* immediately with continuescan = true to proceed to the next tuple.
- * However, if this is the last tuple on the page, we should check
- * the index keys to prevent uselessly advancing to the next page.
+ * However, if this is the last tuple on the page, we should check the
+ * index keys to prevent uselessly advancing to the next page.
*/
if (scan->ignore_killed_tuples && ItemIdDeleted(iid))
{
@@ -580,9 +580,10 @@ _bt_checkkeys(IndexScanDesc scan,
if (offnum > P_FIRSTDATAKEY(opaque))
return false;
}
+
/*
- * OK, we want to check the keys, but we'll return FALSE even
- * if the tuple passes the key tests.
+ * OK, we want to check the keys, but we'll return FALSE even if the
+ * tuple passes the key tests.
*/
tuple_valid = false;
}
@@ -734,10 +735,9 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
- * columns are required for the scan direction, we can stop
- * the scan, because there can't be another tuple that will
- * succeed.
+ * But it can never match. If all the earlier row comparison
+ * columns are required for the scan direction, we can stop the
+ * scan, because there can't be another tuple that will succeed.
*/
if (subkey != (ScanKey) DatumGetPointer(skey->sk_argument))
subkey--;
@@ -771,7 +771,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
*/
switch (subkey->sk_strategy)
{
- /* EQ and NE cases aren't allowed here */
+ /* EQ and NE cases aren't allowed here */
case BTLessStrategyNumber:
result = (cmpresult < 0);
break;
@@ -795,8 +795,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
{
/*
* Tuple fails this qual. If it's a required qual for the current
- * scan direction, then we can conclude no further tuples will
- * pass, either. Note we have to look at the deciding column, not
+ * scan direction, then we can conclude no further tuples will pass,
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@@ -822,7 +822,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* is sufficient for setting LP_DELETE hint bits.
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
@@ -856,9 +856,9 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
for (i = 0; i < so->numKilled; i++)
{
- int itemIndex = so->killedItems[i];
- BTScanPosItem *kitem = &so->currPos.items[itemIndex];
- OffsetNumber offnum = kitem->indexOffset;
+ int itemIndex = so->killedItems[i];
+ BTScanPosItem *kitem = &so->currPos.items[itemIndex];
+ OffsetNumber offnum = kitem->indexOffset;
Assert(itemIndex >= so->currPos.firstItem &&
itemIndex <= so->currPos.lastItem);
@@ -881,9 +881,9 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
}
/*
- * Since this can be redone later if needed, it's treated the same
- * as a commit-hint-bit status update for heap tuples: we mark the
- * buffer dirty but don't make a WAL log entry.
+ * Since this can be redone later if needed, it's treated the same as a
+ * commit-hint-bit status update for heap tuples: we mark the buffer dirty
+ * but don't make a WAL log entry.
*
* Whenever we mark anything LP_DELETEd, we also set the page's
* BTP_HAS_GARBAGE flag, which is likewise just a hint.
@@ -898,8 +898,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
/*
- * Always reset the scan state, so we don't look for same items
- * on other pages.
+ * Always reset the scan state, so we don't look for same items on other
+ * pages.
*/
so->numKilled = 0;
}
@@ -908,8 +908,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
@@ -987,7 +987,8 @@ _bt_start_vacuum(Relation rel)
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/* Assign the next cycle ID, being careful to avoid zero */
- do {
+ do
+ {
result = ++(btvacinfo->cycle_ctr);
} while (result == 0);
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 88962ccfce..e22ce04bbc 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.37 2006/08/07 16:57:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.38 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,7 +82,7 @@ forget_matching_split(RelFileNode node, BlockNumber downlink, bool is_root)
* in correct itemno sequence, but physically the opposite order from the
* original, because we insert them in the opposite of itemno order. This
* does not matter in any current btree code, but it's something to keep an
- * eye on. Is it worth changing just on general principles?
+ * eye on. Is it worth changing just on general principles?
*/
static void
_bt_restore_page(Page page, char *from, int len)
@@ -155,7 +155,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
char *datapos;
int datalen;
xl_btree_metadata md;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
datapos = (char *) xlrec + SizeOfBtreeInsert;
datalen = record->xl_len - SizeOfBtreeInsert;
@@ -180,7 +180,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
false);
if (BufferIsValid(buffer))
{
@@ -193,7 +193,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
else
{
if (PageAddItem(page, (Item) datapos, datalen,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
elog(PANIC, "btree_insert_redo: failed to add item");
@@ -225,7 +225,7 @@ btree_xlog_split(bool onleft, bool isroot,
OffsetNumber targetoff;
BlockNumber leftsib;
BlockNumber rightsib;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@@ -376,8 +376,8 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
}
/*
- * Mark the page as not containing any LP_DELETE items --- see comments
- * in _bt_delitems().
+ * Mark the page as not containing any LP_DELETE items --- see comments in
+ * _bt_delitems().
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
@@ -543,7 +543,7 @@ btree_xlog_newroot(XLogRecPtr lsn, XLogRecord *record)
Buffer buffer;
Page page;
BTPageOpaque pageop;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
reln = XLogOpenRelation(xlrec->node);
buffer = XLogReadBuffer(reln, xlrec->rootblk, true);
@@ -637,9 +637,9 @@ static void
out_target(StringInfo buf, xl_btreetid *target)
{
appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
void
@@ -680,7 +680,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "split_l: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_R:
@@ -690,7 +690,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "split_r: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_L_ROOT:
@@ -700,7 +700,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "split_l_root: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_R_ROOT:
@@ -710,7 +710,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "split_r_root: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_DELETE:
@@ -718,8 +718,8 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
appendStringInfo(buf, "delete: rel %u/%u/%u; blk %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->block);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode, xlrec->block);
break;
}
case XLOG_BTREE_DELETE_PAGE:
@@ -730,7 +730,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "delete_page: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; dead %u; left %u; right %u",
- xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
+ xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_NEWROOT:
@@ -738,9 +738,9 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
appendStringInfo(buf, "newroot: rel %u/%u/%u; root %u lev %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode,
- xlrec->rootblk, xlrec->level);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode,
+ xlrec->rootblk, xlrec->level);
break;
}
default:
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 0621d3bc60..f57bdefa3a 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -24,7 +24,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.39 2006/07/13 16:49:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.40 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -414,7 +414,7 @@ clog_redo(XLogRecPtr lsn, XLogRecord *record)
void
clog_desc(StringInfo buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == CLOG_ZEROPAGE)
{
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index ae5272286a..167d65fd2d 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.20 2006/07/20 00:46:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.21 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1493,10 +1493,10 @@ CheckPointMultiXact(void)
/*
* Truncate the SLRU files. This could be done at any time, but
- * checkpoint seems a reasonable place for it. There is one exception:
- * if we are called during xlog recovery, then shared->latest_page_number
- * isn't valid (because StartupMultiXact hasn't been called yet) and
- * so SimpleLruTruncate would get confused. It seems best not to risk
+ * checkpoint seems a reasonable place for it. There is one exception: if
+ * we are called during xlog recovery, then shared->latest_page_number
+ * isn't valid (because StartupMultiXact hasn't been called yet) and so
+ * SimpleLruTruncate would get confused. It seems best not to risk
* removing any data during recovery anyway, so don't truncate.
*/
if (!InRecovery)
@@ -1917,7 +1917,7 @@ multixact_desc(StringInfo buf, uint8 xl_info, char *rec)
int i;
appendStringInfo(buf, "create multixact %u offset %u:",
- xlrec->mid, xlrec->moff);
+ xlrec->mid, xlrec->moff);
for (i = 0; i < xlrec->nxids; i++)
appendStringInfo(buf, " %u", xlrec->xids[i]);
}
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 7c3884628b..08bb3598ac 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -41,7 +41,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.38 2006/07/14 14:52:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.39 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,8 +92,8 @@
typedef struct SlruFlushData
{
- int num_files; /* # files actually open */
- int fd[MAX_FLUSH_BUFFERS]; /* their FD's */
+ int num_files; /* # files actually open */
+ int fd[MAX_FLUSH_BUFFERS]; /* their FD's */
int segno[MAX_FLUSH_BUFFERS]; /* their log seg#s */
} SlruFlushData;
@@ -113,7 +113,7 @@ typedef struct SlruFlushData
* page_lru_count entries to be "reset" to lower values than they should have,
* in case a process is delayed while it executes this macro. With care in
* SlruSelectLRUPage(), this does little harm, and in any case the absolute
- * worst possible consequence is a nonoptimal choice of page to evict. The
+ * worst possible consequence is a nonoptimal choice of page to evict. The
* gain from allowing concurrent reads of SLRU pages seems worth it.
*/
#define SlruRecentlyUsed(shared, slotno) \
@@ -158,13 +158,13 @@ SimpleLruShmemSize(int nslots)
/* we assume nslots isn't so large as to risk overflow */
sz = MAXALIGN(sizeof(SlruSharedData));
- sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
+ sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
- sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
- sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
- sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
- sz += MAXALIGN(nslots * sizeof(LWLockId)); /* buffer_locks[] */
-
+ sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
+ sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
+ sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
+ sz += MAXALIGN(nslots * sizeof(LWLockId)); /* buffer_locks[] */
+
return BUFFERALIGN(sz) + BLCKSZ * nslots;
}
@@ -653,9 +653,9 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
* Easiest way to deal with that is to accept references to
* nonexistent files here and in SlruPhysicalReadPage.)
*
- * Note: it is possible for more than one backend to be executing
- * this code simultaneously for different pages of the same file.
- * Hence, don't use O_EXCL or O_TRUNC or anything like that.
+ * Note: it is possible for more than one backend to be executing this
+ * code simultaneously for different pages of the same file. Hence,
+ * don't use O_EXCL or O_TRUNC or anything like that.
*/
SlruFileName(ctl, path, segno);
fd = BasicOpenFile(path, O_RDWR | O_CREAT | PG_BINARY,
@@ -759,22 +759,22 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("Could not seek in file \"%s\" to offset %u: %m.",
- path, offset)));
+ errdetail("Could not seek in file \"%s\" to offset %u: %m.",
+ path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("Could not read from file \"%s\" at offset %u: %m.",
- path, offset)));
+ errdetail("Could not read from file \"%s\" at offset %u: %m.",
+ path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("Could not write to file \"%s\" at offset %u: %m.",
- path, offset)));
+ errdetail("Could not write to file \"%s\" at offset %u: %m.",
+ path, offset)));
break;
case SLRU_FSYNC_FAILED:
ereport(ERROR,
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 6c37d097fc..bd0b9b131e 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.23 2006/10/03 21:21:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.24 2006/10/04 00:29:49 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@@ -1250,8 +1250,8 @@ RemoveTwoPhaseFile(TransactionId xid, bool giveWarning)
if (errno != ENOENT || giveWarning)
ereport(WARNING,
(errcode_for_file_access(),
- errmsg("could not remove twophase state file \"%s\": %m",
- path)));
+ errmsg("could not remove twophase state file \"%s\": %m",
+ path)));
}
/*
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 101f065489..04e9840cb5 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.74 2006/09/26 17:21:39 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.75 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -133,10 +133,10 @@ GetNewTransactionId(bool isSubXact)
{
/*
* Use volatile pointer to prevent code rearrangement; other backends
- * could be examining my subxids info concurrently, and we don't
- * want them to see an invalid intermediate state, such as
- * incrementing nxids before filling the array entry. Note we are
- * assuming that TransactionId and int fetch/store are atomic.
+ * could be examining my subxids info concurrently, and we don't want
+ * them to see an invalid intermediate state, such as incrementing
+ * nxids before filling the array entry. Note we are assuming that
+ * TransactionId and int fetch/store are atomic.
*/
volatile PGPROC *myproc = MyProc;
@@ -144,7 +144,7 @@ GetNewTransactionId(bool isSubXact)
myproc->xid = xid;
else
{
- int nxids = myproc->subxids.nxids;
+ int nxids = myproc->subxids.nxids;
if (nxids < PGPROC_MAX_CACHED_SUBXIDS)
{
@@ -196,7 +196,7 @@ SetTransactionIdLimit(TransactionId oldest_datminxid,
* The place where we actually get into deep trouble is halfway around
* from the oldest existing XID. (This calculation is probably off by one
* or two counts, because the special XIDs reduce the size of the loop a
- * little bit. But we throw in plenty of slop below, so it doesn't
+ * little bit. But we throw in plenty of slop below, so it doesn't
* matter.)
*/
xidWrapLimit = oldest_datminxid + (MaxTransactionId >> 1);
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 6fda0ce1f9..8e1724989c 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.226 2006/08/27 19:11:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.227 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1376,12 +1376,12 @@ StartTransaction(void)
XactLockTableInsert(s->transactionId);
- PG_TRACE1 (transaction__start, s->transactionId);
+ PG_TRACE1(transaction__start, s->transactionId);
/*
- * set transaction_timestamp() (a/k/a now()). We want this to be the
- * same as the first command's statement_timestamp(), so don't do a
- * fresh GetCurrentTimestamp() call (which'd be expensive anyway).
+ * set transaction_timestamp() (a/k/a now()). We want this to be the same
+ * as the first command's statement_timestamp(), so don't do a fresh
+ * GetCurrentTimestamp() call (which'd be expensive anyway).
*/
xactStartTimestamp = stmtStartTimestamp;
@@ -1521,7 +1521,7 @@ CommitTransaction(void)
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
MyProc->xid = InvalidTransactionId;
MyProc->xmin = InvalidTransactionId;
- MyProc->inVacuum = false; /* must be cleared with xid/xmin */
+ MyProc->inVacuum = false; /* must be cleared with xid/xmin */
/* Clear the subtransaction-XID cache too while holding the lock */
MyProc->subxids.nxids = 0;
@@ -1530,7 +1530,7 @@ CommitTransaction(void)
LWLockRelease(ProcArrayLock);
}
- PG_TRACE1 (transaction__commit, s->transactionId);
+ PG_TRACE1(transaction__commit, s->transactionId);
/*
* This is all post-commit cleanup. Note that if an error is raised here,
@@ -1921,7 +1921,7 @@ AbortTransaction(void)
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
MyProc->xid = InvalidTransactionId;
MyProc->xmin = InvalidTransactionId;
- MyProc->inVacuum = false; /* must be cleared with xid/xmin */
+ MyProc->inVacuum = false; /* must be cleared with xid/xmin */
/* Clear the subtransaction-XID cache too while holding the lock */
MyProc->subxids.nxids = 0;
@@ -1930,7 +1930,7 @@ AbortTransaction(void)
LWLockRelease(ProcArrayLock);
}
- PG_TRACE1 (transaction__abort, s->transactionId);
+ PG_TRACE1(transaction__abort, s->transactionId);
/*
* Post-abort cleanup. See notes in CommitTransaction() concerning
@@ -4206,8 +4206,8 @@ xact_desc_commit(StringInfo buf, xl_xact_commit *xlrec)
int i;
appendStringInfo(buf, "%04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
if (xlrec->nrels > 0)
{
appendStringInfo(buf, "; rels:");
@@ -4216,7 +4216,7 @@ xact_desc_commit(StringInfo buf, xl_xact_commit *xlrec)
RelFileNode rnode = xlrec->xnodes[i];
appendStringInfo(buf, " %u/%u/%u",
- rnode.spcNode, rnode.dbNode, rnode.relNode);
+ rnode.spcNode, rnode.dbNode, rnode.relNode);
}
}
if (xlrec->nsubxacts > 0)
@@ -4237,8 +4237,8 @@ xact_desc_abort(StringInfo buf, xl_xact_abort *xlrec)
int i;
appendStringInfo(buf, "%04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
if (xlrec->nrels > 0)
{
appendStringInfo(buf, "; rels:");
@@ -4247,7 +4247,7 @@ xact_desc_abort(StringInfo buf, xl_xact_abort *xlrec)
RelFileNode rnode = xlrec->xnodes[i];
appendStringInfo(buf, " %u/%u/%u",
- rnode.spcNode, rnode.dbNode, rnode.relNode);
+ rnode.spcNode, rnode.dbNode, rnode.relNode);
}
}
if (xlrec->nsubxacts > 0)
@@ -4264,7 +4264,7 @@ xact_desc_abort(StringInfo buf, xl_xact_abort *xlrec)
void
xact_desc(StringInfo buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 16fb6b5e5e..7b5780b66d 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.249 2006/08/21 16:16:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.250 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -349,9 +349,9 @@ typedef struct XLogCtlInsert
*/
typedef struct XLogCtlWrite
{
- XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
- int curridx; /* cache index of next block to write */
- time_t lastSegSwitchTime; /* time of last xlog segment switch */
+ XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
+ int curridx; /* cache index of next block to write */
+ time_t lastSegSwitchTime; /* time of last xlog segment switch */
} XLogCtlWrite;
/*
@@ -481,7 +481,7 @@ static bool InstallXLogFileSegment(uint32 *log, uint32 *seg, char *tmppath,
bool use_lock);
static int XLogFileOpen(uint32 log, uint32 seg);
static int XLogFileRead(uint32 log, uint32 seg, int emode);
-static void XLogFileClose(void);
+static void XLogFileClose(void);
static bool RestoreArchivedFile(char *path, const char *xlogfname,
const char *recovername, off_t expectedSize);
static int PreallocXlogFiles(XLogRecPtr endptr);
@@ -506,7 +506,7 @@ static void issue_xlog_fsync(void);
static void xlog_outrec(StringInfo buf, XLogRecord *record);
#endif
static bool read_backup_label(XLogRecPtr *checkPointLoc,
- XLogRecPtr *minRecoveryLoc);
+ XLogRecPtr *minRecoveryLoc);
static void rm_redo_error_callback(void *arg);
@@ -697,9 +697,9 @@ begin:;
/*
* NOTE: We disallow len == 0 because it provides a useful bit of extra
* error checking in ReadRecord. This means that all callers of
- * XLogInsert must supply at least some not-in-a-buffer data. However,
- * we make an exception for XLOG SWITCH records because we don't want
- * them to ever cross a segment boundary.
+ * XLogInsert must supply at least some not-in-a-buffer data. However, we
+ * make an exception for XLOG SWITCH records because we don't want them to
+ * ever cross a segment boundary.
*/
if (len == 0 && !isLogSwitch)
elog(PANIC, "invalid xlog record length %u", len);
@@ -752,8 +752,8 @@ begin:;
* checkpoint, so it's better to be slow in this case and fast otherwise.
*
* If we aren't doing full-page writes then RedoRecPtr doesn't actually
- * affect the contents of the XLOG record, so we'll update our local
- * copy but not force a recomputation.
+ * affect the contents of the XLOG record, so we'll update our local copy
+ * but not force a recomputation.
*/
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{
@@ -782,10 +782,10 @@ begin:;
}
/*
- * Also check to see if forcePageWrites was just turned on; if we
- * weren't already doing full-page writes then go back and recompute.
- * (If it was just turned off, we could recompute the record without
- * full pages, but we choose not to bother.)
+ * Also check to see if forcePageWrites was just turned on; if we weren't
+ * already doing full-page writes then go back and recompute. (If it was
+ * just turned off, we could recompute the record without full pages, but
+ * we choose not to bother.)
*/
if (Insert->forcePageWrites && !doPageWrites)
{
@@ -870,11 +870,11 @@ begin:;
INSERT_RECPTR(RecPtr, Insert, curridx);
/*
- * If the record is an XLOG_SWITCH, and we are exactly at the start
- * of a segment, we need not insert it (and don't want to because
- * we'd like consecutive switch requests to be no-ops). Instead,
- * make sure everything is written and flushed through the end of
- * the prior segment, and return the prior segment's end address.
+ * If the record is an XLOG_SWITCH, and we are exactly at the start of a
+ * segment, we need not insert it (and don't want to because we'd like
+ * consecutive switch requests to be no-ops). Instead, make sure
+ * everything is written and flushed through the end of the prior segment,
+ * and return the prior segment's end address.
*/
if (isLogSwitch &&
(RecPtr.xrecoff % XLogSegSize) == SizeOfXLogLongPHD)
@@ -926,7 +926,7 @@ begin:;
#ifdef WAL_DEBUG
if (XLOG_DEBUG)
{
- StringInfoData buf;
+ StringInfoData buf;
initStringInfo(&buf);
appendStringInfo(&buf, "INSERT @ %X/%X: ",
@@ -1019,8 +1019,8 @@ begin:;
LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
/*
- * Flush through the end of the page containing XLOG_SWITCH,
- * and perform end-of-segment actions (eg, notifying archiver).
+ * Flush through the end of the page containing XLOG_SWITCH, and
+ * perform end-of-segment actions (eg, notifying archiver).
*/
WriteRqst = XLogCtl->xlblocks[curridx];
FlushRqst.Write = WriteRqst;
@@ -1667,8 +1667,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
* switch.
*
* This is also the right place to notify the Archiver that the
- * segment is ready to copy to archival storage, and to update
- * the timer for archive_timeout.
+ * segment is ready to copy to archival storage, and to update the
+ * timer for archive_timeout.
*/
if (finishing_seg || (xlog_switch && last_iteration))
{
@@ -2300,36 +2300,35 @@ XLogFileClose(void)
Assert(openLogFile >= 0);
/*
- * posix_fadvise is problematic on many platforms: on older x86 Linux
- * it just dumps core, and there are reports of problems on PPC platforms
- * as well. The following is therefore disabled for the time being.
- * We could consider some kind of configure test to see if it's safe to
- * use, but since we lack hard evidence that there's any useful performance
- * gain to be had, spending time on that seems unprofitable for now.
+ * posix_fadvise is problematic on many platforms: on older x86 Linux it
+ * just dumps core, and there are reports of problems on PPC platforms as
+ * well. The following is therefore disabled for the time being. We could
+ * consider some kind of configure test to see if it's safe to use, but
+ * since we lack hard evidence that there's any useful performance gain to
+ * be had, spending time on that seems unprofitable for now.
*/
#ifdef NOT_USED
/*
* WAL segment files will not be re-read in normal operation, so we advise
- * OS to release any cached pages. But do not do so if WAL archiving is
+ * OS to release any cached pages. But do not do so if WAL archiving is
* active, because archiver process could use the cache to read the WAL
* segment.
*
- * While O_DIRECT works for O_SYNC, posix_fadvise() works for fsync()
- * and O_SYNC, and some platforms only have posix_fadvise().
+ * While O_DIRECT works for O_SYNC, posix_fadvise() works for fsync() and
+ * O_SYNC, and some platforms only have posix_fadvise().
*/
#if defined(HAVE_DECL_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
if (!XLogArchivingActive())
posix_fadvise(openLogFile, 0, 0, POSIX_FADV_DONTNEED);
#endif
-
-#endif /* NOT_USED */
+#endif /* NOT_USED */
if (close(openLogFile))
ereport(PANIC,
- (errcode_for_file_access(),
- errmsg("could not close log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ (errcode_for_file_access(),
+ errmsg("could not close log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
@@ -2978,8 +2977,8 @@ ReadRecord(XLogRecPtr *RecPtr, int emode)
got_record:;
/*
- * xl_len == 0 is bad data for everything except XLOG SWITCH, where
- * it is required.
+ * xl_len == 0 is bad data for everything except XLOG SWITCH, where it is
+ * required.
*/
if (record->xl_rmid == RM_XLOG_ID && record->xl_info == XLOG_SWITCH)
{
@@ -3168,6 +3167,7 @@ got_record:;
EndRecPtr.xrecoff = RecPtr->xrecoff + MAXALIGN(total_len);
ReadRecPtr = *RecPtr;
memcpy(buffer, record, total_len);
+
/*
* Special processing if it's an XLOG SWITCH record
*/
@@ -3177,10 +3177,11 @@ got_record:;
EndRecPtr.xrecoff += XLogSegSize - 1;
EndRecPtr.xrecoff -= EndRecPtr.xrecoff % XLogSegSize;
nextRecord = NULL; /* definitely not on same page */
+
/*
- * Pretend that readBuf contains the last page of the segment.
- * This is just to avoid Assert failure in StartupXLOG if XLOG
- * ends with this segment.
+ * Pretend that readBuf contains the last page of the segment. This is
+ * just to avoid Assert failure in StartupXLOG if XLOG ends with this
+ * segment.
*/
readOff = XLogSegSize - XLOG_BLCKSZ;
}
@@ -3661,7 +3662,7 @@ static void
WriteControlFile(void)
{
int fd;
- char buffer[PG_CONTROL_SIZE]; /* need not be aligned */
+ char buffer[PG_CONTROL_SIZE]; /* need not be aligned */
char *localeptr;
/*
@@ -3846,9 +3847,9 @@ ReadControlFile(void)
if (ControlFile->xlog_blcksz != XLOG_BLCKSZ)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with XLOG_BLCKSZ %d,"
- " but the server was compiled with XLOG_BLCKSZ %d.",
- ControlFile->xlog_blcksz, XLOG_BLCKSZ),
+ errdetail("The database cluster was initialized with XLOG_BLCKSZ %d,"
+ " but the server was compiled with XLOG_BLCKSZ %d.",
+ ControlFile->xlog_blcksz, XLOG_BLCKSZ),
errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->xlog_seg_size != XLOG_SEG_SIZE)
ereport(FATAL,
@@ -4027,7 +4028,7 @@ XLOGShmemInit(void)
* Do basic initialization of XLogCtl shared data. (StartupXLOG will fill
* in additional info.)
*/
- XLogCtl->XLogCacheByte = (Size) XLOG_BLCKSZ * XLOGbuffers;
+ XLogCtl->XLogCacheByte = (Size) XLOG_BLCKSZ *XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
XLogCtl->Insert.currpage = (XLogPageHeader) (XLogCtl->pages);
@@ -4649,10 +4650,10 @@ StartupXLOG(void)
" you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY)
ereport(LOG,
- (errmsg("database system was interrupted while in recovery at log time %s",
- str_time(ControlFile->checkPointCopy.time)),
- errhint("If this has occurred more than once some data may be corrupted"
- " and you may need to choose an earlier recovery target.")));
+ (errmsg("database system was interrupted while in recovery at log time %s",
+ str_time(ControlFile->checkPointCopy.time)),
+ errhint("If this has occurred more than once some data may be corrupted"
+ " and you may need to choose an earlier recovery target.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted at %s",
@@ -4812,10 +4813,10 @@ StartupXLOG(void)
int rmid;
/*
- * Update pg_control to show that we are recovering and to show
- * the selected checkpoint as the place we are starting from.
- * We also mark pg_control with any minimum recovery stop point
- * obtained from a backup history file.
+ * Update pg_control to show that we are recovering and to show the
+ * selected checkpoint as the place we are starting from. We also mark
+ * pg_control with any minimum recovery stop point obtained from a
+ * backup history file.
*/
if (InArchiveRecovery)
{
@@ -4839,12 +4840,12 @@ StartupXLOG(void)
UpdateControlFile();
/*
- * If there was a backup label file, it's done its job and the
- * info has now been propagated into pg_control. We must get rid of
- * the label file so that if we crash during recovery, we'll pick up
- * at the latest recovery restartpoint instead of going all the way
- * back to the backup start point. It seems prudent though to just
- * rename the file out of the way rather than delete it completely.
+ * If there was a backup label file, it's done its job and the info
+ * has now been propagated into pg_control. We must get rid of the
+ * label file so that if we crash during recovery, we'll pick up at
+ * the latest recovery restartpoint instead of going all the way back
+ * to the backup start point. It seems prudent though to just rename
+ * the file out of the way rather than delete it completely.
*/
if (haveBackupLabel)
{
@@ -4884,7 +4885,7 @@ StartupXLOG(void)
{
bool recoveryContinue = true;
bool recoveryApply = true;
- ErrorContextCallback errcontext;
+ ErrorContextCallback errcontext;
InRedo = true;
ereport(LOG,
@@ -4899,17 +4900,17 @@ StartupXLOG(void)
#ifdef WAL_DEBUG
if (XLOG_DEBUG)
{
- StringInfoData buf;
+ StringInfoData buf;
initStringInfo(&buf);
appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
- EndRecPtr.xlogid, EndRecPtr.xrecoff);
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
+ EndRecPtr.xlogid, EndRecPtr.xrecoff);
xlog_outrec(&buf, record);
appendStringInfo(&buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(&buf,
record->xl_info,
- XLogRecGetData(record));
+ XLogRecGetData(record));
elog(LOG, "%s", buf.data);
pfree(buf.data);
}
@@ -5383,9 +5384,9 @@ GetRecentNextXid(void)
void
GetNextXidAndEpoch(TransactionId *xid, uint32 *epoch)
{
- uint32 ckptXidEpoch;
- TransactionId ckptXid;
- TransactionId nextXid;
+ uint32 ckptXidEpoch;
+ TransactionId ckptXid;
+ TransactionId nextXid;
/* Must read checkpoint info first, else have race condition */
{
@@ -5718,7 +5719,7 @@ CheckPointGuts(XLogRecPtr checkPointRedo)
CheckPointCLOG();
CheckPointSUBTRANS();
CheckPointMultiXact();
- FlushBufferPool(); /* performs all required fsyncs */
+ FlushBufferPool(); /* performs all required fsyncs */
/* We deliberately delay 2PC checkpointing as long as possible */
CheckPointTwoPhase(checkPointRedo);
}
@@ -5735,12 +5736,12 @@ CheckPointGuts(XLogRecPtr checkPointRedo)
static void
RecoveryRestartPoint(const CheckPoint *checkPoint)
{
- int elapsed_secs;
- int rmid;
+ int elapsed_secs;
+ int rmid;
/*
- * Do nothing if the elapsed time since the last restartpoint is less
- * than half of checkpoint_timeout. (We use a value less than
+ * Do nothing if the elapsed time since the last restartpoint is less than
+ * half of checkpoint_timeout. (We use a value less than
* checkpoint_timeout so that variations in the timing of checkpoints on
* the master, or speed of transmission of WAL segments to a slave, won't
* make the slave skip a restartpoint once it's synced with the master.)
@@ -5770,9 +5771,9 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
CheckPointGuts(checkPoint->redo);
/*
- * Update pg_control so that any subsequent crash will restart from
- * this checkpoint. Note: ReadRecPtr gives the XLOG address of the
- * checkpoint record itself.
+ * Update pg_control so that any subsequent crash will restart from this
+ * checkpoint. Note: ReadRecPtr gives the XLOG address of the checkpoint
+ * record itself.
*/
ControlFile->prevCheckPoint = ControlFile->checkPoint;
ControlFile->checkPoint = ReadRecPtr;
@@ -5926,7 +5927,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
void
xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_CHECKPOINT_SHUTDOWN ||
info == XLOG_CHECKPOINT_ONLINE)
@@ -5934,15 +5935,15 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
CheckPoint *checkpoint = (CheckPoint *) rec;
appendStringInfo(buf, "checkpoint: redo %X/%X; undo %X/%X; "
- "tli %u; xid %u/%u; oid %u; multi %u; offset %u; %s",
- checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
- checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
- checkpoint->ThisTimeLineID,
- checkpoint->nextXidEpoch, checkpoint->nextXid,
- checkpoint->nextOid,
- checkpoint->nextMulti,
- checkpoint->nextMultiOffset,
- (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
+ "tli %u; xid %u/%u; oid %u; multi %u; offset %u; %s",
+ checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
+ checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
+ checkpoint->ThisTimeLineID,
+ checkpoint->nextXidEpoch, checkpoint->nextXid,
+ checkpoint->nextOid,
+ checkpoint->nextMulti,
+ checkpoint->nextMultiOffset,
+ (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
}
else if (info == XLOG_NEXTOID)
{
@@ -5973,7 +5974,7 @@ xlog_outrec(StringInfo buf, XLogRecord *record)
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (record->xl_info & XLR_SET_BKP_BLOCK(i))
- appendStringInfo(buf, "; bkpb%d", i+1);
+ appendStringInfo(buf, "; bkpb%d", i + 1);
}
appendStringInfo(buf, ": %s", RmgrTable[record->xl_rmid].rm_name);
@@ -6142,18 +6143,18 @@ pg_start_backup(PG_FUNCTION_ARGS)
* Mark backup active in shared memory. We must do full-page WAL writes
* during an on-line backup even if not doing so at other times, because
* it's quite possible for the backup dump to obtain a "torn" (partially
- * written) copy of a database page if it reads the page concurrently
- * with our write to the same page. This can be fixed as long as the
- * first write to the page in the WAL sequence is a full-page write.
- * Hence, we turn on forcePageWrites and then force a CHECKPOINT, to
- * ensure there are no dirty pages in shared memory that might get
- * dumped while the backup is in progress without having a corresponding
- * WAL record. (Once the backup is complete, we need not force full-page
- * writes anymore, since we expect that any pages not modified during
- * the backup interval must have been correctly captured by the backup.)
+ * written) copy of a database page if it reads the page concurrently with
+ * our write to the same page. This can be fixed as long as the first
+ * write to the page in the WAL sequence is a full-page write. Hence, we
+ * turn on forcePageWrites and then force a CHECKPOINT, to ensure there
+ * are no dirty pages in shared memory that might get dumped while the
+ * backup is in progress without having a corresponding WAL record. (Once
+ * the backup is complete, we need not force full-page writes anymore,
+ * since we expect that any pages not modified during the backup interval
+ * must have been correctly captured by the backup.)
*
- * We must hold WALInsertLock to change the value of forcePageWrites,
- * to ensure adequate interlocking against XLogInsert().
+ * We must hold WALInsertLock to change the value of forcePageWrites, to
+ * ensure adequate interlocking against XLogInsert().
*/
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
if (XLogCtl->Insert.forcePageWrites)
@@ -6171,7 +6172,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
PG_TRY();
{
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs will
* have different checkpoint positions and hence different history
* file names, even if nothing happened in between.
@@ -6303,10 +6304,9 @@ pg_stop_backup(PG_FUNCTION_ARGS)
LWLockRelease(WALInsertLock);
/*
- * Force a switch to a new xlog segment file, so that the backup
- * is valid as soon as archiver moves out the current segment file.
- * We'll report the end address of the XLOG SWITCH record as the backup
- * stopping point.
+ * Force a switch to a new xlog segment file, so that the backup is valid
+ * as soon as archiver moves out the current segment file. We'll report
+ * the end address of the XLOG SWITCH record as the backup stopping point.
*/
stoppoint = RequestXLogSwitch();
@@ -6392,9 +6392,9 @@ pg_stop_backup(PG_FUNCTION_ARGS)
BACKUP_LABEL_FILE)));
/*
- * Clean out any no-longer-needed history files. As a side effect,
- * this will post a .ready file for the newly created history file,
- * notifying the archiver that history file may be archived immediately.
+ * Clean out any no-longer-needed history files. As a side effect, this
+ * will post a .ready file for the newly created history file, notifying
+ * the archiver that history file may be archived immediately.
*/
CleanupBackupHistory();
@@ -6415,7 +6415,7 @@ Datum
pg_switch_xlog(PG_FUNCTION_ARGS)
{
text *result;
- XLogRecPtr switchpoint;
+ XLogRecPtr switchpoint;
char location[MAXFNAMELEN];
if (!superuser())
@@ -6514,17 +6514,17 @@ pg_xlogfile_name_offset(PG_FUNCTION_ARGS)
uint32 xrecoff;
XLogRecPtr locationpoint;
char xlogfilename[MAXFNAMELEN];
- Datum values[2];
- bool isnull[2];
- TupleDesc resultTupleDesc;
- HeapTuple resultHeapTuple;
- Datum result;
+ Datum values[2];
+ bool isnull[2];
+ TupleDesc resultTupleDesc;
+ HeapTuple resultHeapTuple;
+ Datum result;
/*
* Read input and parse
*/
locationstr = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(location)));
+ PointerGetDatum(location)));
if (sscanf(locationstr, "%X/%X", &uxlogid, &uxrecoff) != 2)
ereport(ERROR,
@@ -6536,8 +6536,8 @@ pg_xlogfile_name_offset(PG_FUNCTION_ARGS)
locationpoint.xrecoff = uxrecoff;
/*
- * Construct a tuple descriptor for the result row. This must match
- * this function's pg_proc entry!
+ * Construct a tuple descriptor for the result row. This must match this
+ * function's pg_proc entry!
*/
resultTupleDesc = CreateTemplateTupleDesc(2, false);
TupleDescInitEntry(resultTupleDesc, (AttrNumber) 1, "file_name",
@@ -6593,7 +6593,7 @@ pg_xlogfile_name(PG_FUNCTION_ARGS)
char xlogfilename[MAXFNAMELEN];
locationstr = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(location)));
+ PointerGetDatum(location)));
if (sscanf(locationstr, "%X/%X", &uxlogid, &uxrecoff) != 2)
ereport(ERROR,
@@ -6608,7 +6608,7 @@ pg_xlogfile_name(PG_FUNCTION_ARGS)
XLogFileName(xlogfilename, ThisTimeLineID, xlogid, xlogseg);
result = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(xlogfilename)));
+ CStringGetDatum(xlogfilename)));
PG_RETURN_TEXT_P(result);
}
@@ -6734,8 +6734,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, XLogRecPtr *minRecoveryLoc)
static void
rm_redo_error_callback(void *arg)
{
- XLogRecord *record = (XLogRecord *) arg;
- StringInfoData buf;
+ XLogRecord *record = (XLogRecord *) arg;
+ StringInfoData buf;
initStringInfo(&buf);
RmgrTable[record->xl_rmid].rm_desc(&buf,
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index d7fb9359de..e1d3b70001 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.47 2006/07/14 14:52:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.48 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -172,8 +172,8 @@ XLogCheckInvalidPages(void)
hash_seq_init(&status, invalid_page_tab);
/*
- * Our strategy is to emit WARNING messages for all remaining entries
- * and only PANIC after we've dumped all the available info.
+ * Our strategy is to emit WARNING messages for all remaining entries and
+ * only PANIC after we've dumped all the available info.
*/
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
@@ -255,7 +255,7 @@ XLogReadBuffer(Relation reln, BlockNumber blkno, bool init)
if (!init)
{
/* check that page has been initialized */
- Page page = (Page) BufferGetPage(buffer);
+ Page page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
{
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 06d8a5ca4f..70b9172cd7 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.224 2006/08/15 22:36:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.225 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -381,8 +381,8 @@ BootstrapMain(int argc, char *argv[])
/*
* When we are a dummy process, we aren't going to do the full
- * InitPostgres pushups, but there are a couple of things that need
- * to get lit up even in a dummy process.
+ * InitPostgres pushups, but there are a couple of things that need to get
+ * lit up even in a dummy process.
*/
if (IsUnderPostmaster)
{
@@ -421,6 +421,7 @@ BootstrapMain(int argc, char *argv[])
case BS_XLOG_STARTUP:
bootstrap_signals();
StartupXLOG();
+
/*
* These next two functions don't consider themselves critical,
* but we'd best PANIC anyway if they fail.
@@ -1229,7 +1230,7 @@ AddStr(char *str, int strlength, int mderef)
* We postpone actually building the indexes until just before we're
* finished with initialization, however. This is because the indexes
* themselves have catalog entries, and those have to be included in the
- * indexes on those catalogs. Doing it in two phases is the simplest
+ * indexes on those catalogs. Doing it in two phases is the simplest
* way of making sure the indexes have the right contents at the end.
*/
void
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index e74c9b4410..27ca6e011a 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.131 2006/09/05 21:08:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.132 2006/10/04 00:29:50 momjian Exp $
*
* NOTES
* See acl.h.
@@ -53,9 +53,9 @@ static List *objectNamesToOids(GrantObjectType objtype, List *objnames);
static AclMode string_to_privilege(const char *privname);
static const char *privilege_to_string(AclMode privilege);
static AclMode restrict_and_check_grant(bool is_grant, AclMode avail_goptions,
- bool all_privs, AclMode privileges,
- Oid objectId, Oid grantorId,
- AclObjectKind objkind, char *objname);
+ bool all_privs, AclMode privileges,
+ Oid objectId, Oid grantorId,
+ AclObjectKind objkind, char *objname);
static AclMode pg_aclmask(AclObjectKind objkind, Oid table_oid, Oid roleid,
AclMode mask, AclMaskHow how);
@@ -156,8 +156,8 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
AclMode privileges, Oid objectId, Oid grantorId,
AclObjectKind objkind, char *objname)
{
- AclMode this_privileges;
- AclMode whole_mask;
+ AclMode this_privileges;
+ AclMode whole_mask;
switch (objkind)
{
@@ -189,9 +189,9 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
}
/*
- * If we found no grant options, consider whether to issue a hard
- * error. Per spec, having any privilege at all on the object will
- * get you by here.
+ * If we found no grant options, consider whether to issue a hard error.
+ * Per spec, having any privilege at all on the object will get you by
+ * here.
*/
if (avail_goptions == ACL_NO_RIGHTS)
{
@@ -203,11 +203,10 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
/*
* Restrict the operation to what we can actually grant or revoke, and
- * issue a warning if appropriate. (For REVOKE this isn't quite what
- * the spec says to do: the spec seems to want a warning only if no
- * privilege bits actually change in the ACL. In practice that
- * behavior seems much too noisy, as well as inconsistent with the
- * GRANT case.)
+ * issue a warning if appropriate. (For REVOKE this isn't quite what the
+ * spec says to do: the spec seems to want a warning only if no privilege
+ * bits actually change in the ACL. In practice that behavior seems much
+ * too noisy, as well as inconsistent with the GRANT case.)
*/
this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions);
if (is_grant)
@@ -215,18 +214,18 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
if (this_privileges == 0)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
- errmsg("no privileges were granted for \"%s\"", objname)));
+ errmsg("no privileges were granted for \"%s\"", objname)));
else if (!all_privs && this_privileges != privileges)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
- errmsg("not all privileges were granted for \"%s\"", objname)));
+ errmsg("not all privileges were granted for \"%s\"", objname)));
}
else
{
if (this_privileges == 0)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
- errmsg("no privileges could be revoked for \"%s\"", objname)));
+ errmsg("no privileges could be revoked for \"%s\"", objname)));
else if (!all_privs && this_privileges != privileges)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
@@ -285,11 +284,11 @@ ExecuteGrantStmt(GrantStmt *stmt)
*/
switch (stmt->objtype)
{
- /*
- * Because this might be a sequence, we test both relation
- * and sequence bits, and later do a more limited test
- * when we know the object type.
- */
+ /*
+ * Because this might be a sequence, we test both relation and
+ * sequence bits, and later do a more limited test when we know
+ * the object type.
+ */
case ACL_OBJECT_RELATION:
all_privileges = ACL_ALL_RIGHTS_RELATION | ACL_ALL_RIGHTS_SEQUENCE;
errormsg = _("invalid privilege type %s for relation");
@@ -329,6 +328,7 @@ ExecuteGrantStmt(GrantStmt *stmt)
if (stmt->privileges == NIL)
{
istmt.all_privs = true;
+
/*
* will be turned into ACL_ALL_RIGHTS_* by the internal routines
* depending on the object type
@@ -595,28 +595,28 @@ ExecGrant_Relation(InternalGrant *istmt)
}
else
this_privileges = istmt->privileges;
-
+
/*
- * The GRANT TABLE syntax can be used for sequences and
- * non-sequences, so we have to look at the relkind to
- * determine the supported permissions. The OR of
- * table and sequence permissions were already checked.
+ * The GRANT TABLE syntax can be used for sequences and non-sequences,
+ * so we have to look at the relkind to determine the supported
+ * permissions. The OR of table and sequence permissions were already
+ * checked.
*/
if (istmt->objtype == ACL_OBJECT_RELATION)
{
if (pg_class_tuple->relkind == RELKIND_SEQUENCE)
{
/*
- * For backward compatibility, throw just a warning
- * for invalid sequence permissions when using the
- * non-sequence GRANT syntax is used.
+ * For backward compatibility, throw just a warning for
+ * invalid sequence permissions when using the non-sequence
+ * GRANT syntax is used.
*/
if (this_privileges & ~((AclMode) ACL_ALL_RIGHTS_SEQUENCE))
{
/*
- * Mention the object name because the user needs to
- * know which operations succeeded. This is required
- * because WARNING allows the command to continue.
+ * Mention the object name because the user needs to know
+ * which operations succeeded. This is required because
+ * WARNING allows the command to continue.
*/
ereport(WARNING,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
@@ -628,15 +628,16 @@ ExecGrant_Relation(InternalGrant *istmt)
else
{
if (this_privileges & ~((AclMode) ACL_ALL_RIGHTS_RELATION))
+
/*
- * USAGE is the only permission supported by sequences
- * but not by non-sequences. Don't mention the object
- * name because we didn't in the combined TABLE |
- * SEQUENCE check.
+ * USAGE is the only permission supported by sequences but
+ * not by non-sequences. Don't mention the object name
+ * because we didn't in the combined TABLE | SEQUENCE
+ * check.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("invalid privilege type USAGE for table")));
+ errmsg("invalid privilege type USAGE for table")));
}
}
@@ -660,15 +661,15 @@ ExecGrant_Relation(InternalGrant *istmt)
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
istmt->all_privs, this_privileges,
relOid, grantorId,
- pg_class_tuple->relkind == RELKIND_SEQUENCE
- ? ACL_KIND_SEQUENCE : ACL_KIND_CLASS,
+ pg_class_tuple->relkind == RELKIND_SEQUENCE
+ ? ACL_KIND_SEQUENCE : ACL_KIND_CLASS,
NameStr(pg_class_tuple->relname));
/*
@@ -777,8 +778,8 @@ ExecGrant_Database(InternalGrant *istmt)
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
@@ -893,8 +894,8 @@ ExecGrant_Function(InternalGrant *istmt)
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
@@ -1019,8 +1020,8 @@ ExecGrant_Language(InternalGrant *istmt)
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
@@ -1136,8 +1137,8 @@ ExecGrant_Namespace(InternalGrant *istmt)
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
@@ -1259,8 +1260,8 @@ ExecGrant_Tablespace(InternalGrant *istmt)
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
@@ -1565,7 +1566,7 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
*
* As of 7.4 we have some updatable system views; those shouldn't be
* protected in this way. Assume the view rules can take care of
- * themselves. ACL_USAGE is if we ever have system sequences.
+ * themselves. ACL_USAGE is if we ever have system sequences.
*/
if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_USAGE)) &&
IsSystemClass(classForm) &&
@@ -1602,7 +1603,7 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
{
/* No ACL, so build default ACL */
acl = acldefault(classForm->relkind == RELKIND_SEQUENCE ?
- ACL_OBJECT_SEQUENCE : ACL_OBJECT_RELATION,
+ ACL_OBJECT_SEQUENCE : ACL_OBJECT_RELATION,
ownerId);
aclDatum = (Datum) 0;
}
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 5d99acafc9..d302f023a9 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.67 2006/07/31 20:09:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.68 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -233,7 +233,7 @@ IsReservedName(const char *name)
*
* Hard-wiring this list is pretty grotty, but we really need it so that
* we can compute the locktag for a relation (and then lock it) without
- * having already read its pg_class entry. If we try to retrieve relisshared
+ * having already read its pg_class entry. If we try to retrieve relisshared
* from pg_class with no pre-existing lock, there is a race condition against
* anyone who is concurrently committing a change to the pg_class entry:
* since we read system catalog entries under SnapshotNow, it's possible
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 7007e83873..cab4f1006b 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.59 2006/08/20 21:56:16 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.60 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,6 +63,7 @@ struct ObjectAddresses
int numrefs; /* current number of references */
int maxrefs; /* current size of palloc'd array */
};
+
/* typedef ObjectAddresses appears in dependency.h */
/* for find_expr_references_walker */
@@ -244,8 +245,8 @@ performMultipleDeletions(const ObjectAddresses *objects,
{
ObjectAddresses *implicit;
ObjectAddresses *alreadyDeleted;
- Relation depRel;
- int i;
+ Relation depRel;
+ int i;
implicit = new_object_addresses();
alreadyDeleted = new_object_addresses();
@@ -271,8 +272,8 @@ performMultipleDeletions(const ObjectAddresses *objects,
continue;
/*
- * Add the objects dependent on this one to the global list of implicit
- * objects.
+ * Add the objects dependent on this one to the global list of
+ * implicit objects.
*/
findAutoDeletableObjects(&obj, implicit, depRel, false);
}
@@ -707,8 +708,8 @@ recursiveDeletion(const ObjectAddress *object,
*/
/*
- * Step 3: delete the object itself, and save it to the list of
- * deleted objects if appropiate.
+ * Step 3: delete the object itself, and save it to the list of deleted
+ * objects if appropiate.
*/
doDeletion(object);
if (alreadyDeleted != NULL)
@@ -1284,7 +1285,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, RelabelType))
{
- RelabelType *relab = (RelabelType *) node;
+ RelabelType *relab = (RelabelType *) node;
/* since there is no function dependency, need to depend on type */
add_object_address(OCLASS_TYPE, relab->resulttype, 0,
@@ -1300,7 +1301,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, RowExpr))
{
- RowExpr *rowexpr = (RowExpr *) node;
+ RowExpr *rowexpr = (RowExpr *) node;
add_object_address(OCLASS_TYPE, rowexpr->row_typeid, 0,
context->addrs);
@@ -1471,7 +1472,7 @@ object_address_comparator(const void *a, const void *b)
ObjectAddresses *
new_object_addresses(void)
{
- ObjectAddresses *addrs;
+ ObjectAddresses *addrs;
addrs = palloc(sizeof(ObjectAddresses));
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index e1f3e194b6..d30556d48c 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.312 2006/08/02 01:59:44 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.313 2006/10/04 00:29:50 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -607,8 +607,8 @@ InsertPgClassTuple(Relation pg_class_desc,
tup = heap_formtuple(RelationGetDescr(pg_class_desc), values, nulls);
/*
- * The new tuple must have the oid already chosen for the rel. Sure
- * would be embarrassing to do this sort of thing in polite company.
+ * The new tuple must have the oid already chosen for the rel. Sure would
+ * be embarrassing to do this sort of thing in polite company.
*/
HeapTupleSetOid(tup, new_rel_oid);
@@ -666,8 +666,8 @@ AddNewRelationTuple(Relation pg_class_desc,
else
{
/*
- * Other relations will not have Xids in them, so set the initial value
- * to InvalidTransactionId.
+ * Other relations will not have Xids in them, so set the initial
+ * value to InvalidTransactionId.
*/
new_rel_reltup->relminxid = InvalidTransactionId;
new_rel_reltup->relvacuumxid = InvalidTransactionId;
@@ -1975,7 +1975,7 @@ RemoveStatistics(Oid relid, AttrNumber attnum)
* with the heap relation to zero tuples.
*
* The routine will truncate and then reconstruct the indexes on
- * the specified relation. Caller must hold exclusive lock on rel.
+ * the specified relation. Caller must hold exclusive lock on rel.
*/
static void
RelationTruncateIndexes(Relation heapRelation)
@@ -2103,7 +2103,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
return;
/*
- * Otherwise, must scan pg_constraint. We make one pass with all the
+ * Otherwise, must scan pg_constraint. We make one pass with all the
* relations considered; if this finds nothing, then all is well.
*/
dependents = heap_truncate_find_FKs(oids);
@@ -2119,19 +2119,19 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
*/
foreach(cell, oids)
{
- Oid relid = lfirst_oid(cell);
- ListCell *cell2;
+ Oid relid = lfirst_oid(cell);
+ ListCell *cell2;
dependents = heap_truncate_find_FKs(list_make1_oid(relid));
foreach(cell2, dependents)
{
- Oid relid2 = lfirst_oid(cell2);
+ Oid relid2 = lfirst_oid(cell2);
if (!list_member_oid(oids, relid2))
{
- char *relname = get_rel_name(relid);
- char *relname2 = get_rel_name(relid2);
+ char *relname = get_rel_name(relid);
+ char *relname2 = get_rel_name(relid2);
if (tempTables)
ereport(ERROR,
@@ -2145,9 +2145,9 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
errmsg("cannot truncate a table referenced in a foreign key constraint"),
errdetail("Table \"%s\" references \"%s\".",
relname2, relname),
- errhint("Truncate table \"%s\" at the same time, "
- "or use TRUNCATE ... CASCADE.",
- relname2)));
+ errhint("Truncate table \"%s\" at the same time, "
+ "or use TRUNCATE ... CASCADE.",
+ relname2)));
}
}
}
@@ -2164,7 +2164,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables)
* behavior to change depending on chance locations of rows in pg_constraint.)
*
* Note: caller should already have appropriate lock on all rels mentioned
- * in relationIds. Since adding or dropping an FK requires exclusive lock
+ * in relationIds. Since adding or dropping an FK requires exclusive lock
* on both rels, this ensures that the answer will be stable.
*/
List *
@@ -2176,8 +2176,8 @@ heap_truncate_find_FKs(List *relationIds)
HeapTuple tuple;
/*
- * Must scan pg_constraint. Right now, it is a seqscan because
- * there is no available index on confrelid.
+ * Must scan pg_constraint. Right now, it is a seqscan because there is
+ * no available index on confrelid.
*/
fkeyRel = heap_open(ConstraintRelationId, AccessShareLock);
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 60a30ce372..290cd27688 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.273 2006/08/25 04:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.274 2006/10/04 00:29:50 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -58,7 +58,9 @@ typedef struct
{
Tuplesortstate *tuplesort; /* for sorting the index TIDs */
/* statistics (for debug purposes only): */
- double htups, itups, tups_inserted;
+ double htups,
+ itups,
+ tups_inserted;
} v_i_state;
/* non-export function prototypes */
@@ -449,7 +451,7 @@ UpdateIndexRelation(Oid indexoid,
* allow_system_table_mods: allow table to be a system catalog
* skip_build: true to skip the index_build() step for the moment; caller
* must do it later (typically via reindex_index())
- * concurrent: if true, do not lock the table against writers. The index
+ * concurrent: if true, do not lock the table against writers. The index
* will be marked "invalid" and the caller must take additional steps
* to fix it up.
*
@@ -486,7 +488,7 @@ index_create(Oid heapRelationId,
* (but not VACUUM).
*/
heapRelation = heap_open(heapRelationId,
- (concurrent ? ShareUpdateExclusiveLock : ShareLock));
+ (concurrent ? ShareUpdateExclusiveLock : ShareLock));
/*
* The index will be in the same namespace as its parent table, and is
@@ -509,8 +511,8 @@ index_create(Oid heapRelationId,
errmsg("user-defined indexes on system catalog tables are not supported")));
/*
- * concurrent index build on a system catalog is unsafe because we tend
- * to release locks before committing in catalogs
+ * concurrent index build on a system catalog is unsafe because we tend to
+ * release locks before committing in catalogs
*/
if (concurrent &&
IsSystemRelation(heapRelation))
@@ -766,8 +768,8 @@ index_create(Oid heapRelationId,
{
/*
* Caller is responsible for filling the index later on. However,
- * we'd better make sure that the heap relation is correctly marked
- * as having an index.
+ * we'd better make sure that the heap relation is correctly marked as
+ * having an index.
*/
index_update_stats(heapRelation,
true,
@@ -932,7 +934,7 @@ BuildIndexInfo(Relation index)
/* other info */
ii->ii_Unique = indexStruct->indisunique;
- ii->ii_Concurrent = false; /* assume normal case */
+ ii->ii_Concurrent = false; /* assume normal case */
return ii;
}
@@ -1018,7 +1020,7 @@ FormIndexDatum(IndexInfo *indexInfo,
* index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX
*
* This routine updates the pg_class row of either an index or its parent
- * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
+ * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
* to ensure we can do all the necessary work in just one update.
*
* hasindex: set relhasindex to this value
@@ -1031,7 +1033,7 @@ FormIndexDatum(IndexInfo *indexInfo,
*
* NOTE: an important side-effect of this operation is that an SI invalidation
* message is sent out to all backends --- including me --- causing relcache
- * entries to be flushed or updated with the new data. This must happen even
+ * entries to be flushed or updated with the new data. This must happen even
* if we find that no change is needed in the pg_class row. When updating
* a heap entry, this ensures that other backends find out about the new
* index. When updating an index, it's important because some index AMs
@@ -1041,7 +1043,7 @@ static void
index_update_stats(Relation rel, bool hasindex, bool isprimary,
Oid reltoastidxid, double reltuples)
{
- BlockNumber relpages = RelationGetNumberOfBlocks(rel);
+ BlockNumber relpages = RelationGetNumberOfBlocks(rel);
Oid relid = RelationGetRelid(rel);
Relation pg_class;
HeapTuple tuple;
@@ -1054,9 +1056,9 @@ index_update_stats(Relation rel, bool hasindex, bool isprimary,
*
* 1. In bootstrap mode, we have no choice --- UPDATE wouldn't work.
*
- * 2. We could be reindexing pg_class itself, in which case we can't
- * move its pg_class row because CatalogUpdateIndexes might not know
- * about all the indexes yet (see reindex_relation).
+ * 2. We could be reindexing pg_class itself, in which case we can't move
+ * its pg_class row because CatalogUpdateIndexes might not know about all
+ * the indexes yet (see reindex_relation).
*
* 3. Because we execute CREATE INDEX with just share lock on the parent
* rel (to allow concurrent index creations), an ordinary update could
@@ -1069,24 +1071,24 @@ index_update_stats(Relation rel, bool hasindex, bool isprimary,
* 4. Even with just a single CREATE INDEX, there's a risk factor because
* someone else might be trying to open the rel while we commit, and this
* creates a race condition as to whether he will see both or neither of
- * the pg_class row versions as valid. Again, a non-transactional update
+ * the pg_class row versions as valid. Again, a non-transactional update
* avoids the risk. It is indeterminate which state of the row the other
* process will see, but it doesn't matter (if he's only taking
* AccessShareLock, then it's not critical that he see relhasindex true).
*
* It is safe to use a non-transactional update even though our
- * transaction could still fail before committing. Setting relhasindex
+ * transaction could still fail before committing. Setting relhasindex
* true is safe even if there are no indexes (VACUUM will eventually fix
- * it), and of course the relpages and reltuples counts are correct (or
- * at least more so than the old values) regardless.
+ * it), and of course the relpages and reltuples counts are correct (or at
+ * least more so than the old values) regardless.
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
/*
- * Make a copy of the tuple to update. Normally we use the syscache,
- * but we can't rely on that during bootstrap or while reindexing
- * pg_class itself.
+ * Make a copy of the tuple to update. Normally we use the syscache, but
+ * we can't rely on that during bootstrap or while reindexing pg_class
+ * itself.
*/
if (IsBootstrapProcessingMode() ||
ReindexIsProcessingHeap(RelationRelationId))
@@ -1245,7 +1247,7 @@ setNewRelfilenode(Relation relation)
* index_build - invoke access-method-specific index build procedure
*
* On entry, the index's catalog entries are valid, and its physical disk
- * file has been created but is empty. We call the AM-specific build
+ * file has been created but is empty. We call the AM-specific build
* procedure to fill in the index contents. We then update the pg_class
* entries of the index and heap relation as needed, using statistics
* returned by ambuild as well as data passed by the caller.
@@ -1365,22 +1367,21 @@ IndexBuildHeapScan(Relation heapRelation,
estate);
/*
- * Prepare for scan of the base relation. In a normal index build,
- * we use SnapshotAny because we must retrieve all tuples and do our own
- * time qual checks (because we have to index RECENTLY_DEAD tuples).
- * In a concurrent build, we take a regular MVCC snapshot and index
- * whatever's live according to that. During bootstrap we just use
- * SnapshotNow.
+ * Prepare for scan of the base relation. In a normal index build, we use
+ * SnapshotAny because we must retrieve all tuples and do our own time
+ * qual checks (because we have to index RECENTLY_DEAD tuples). In a
+ * concurrent build, we take a regular MVCC snapshot and index whatever's
+ * live according to that. During bootstrap we just use SnapshotNow.
*/
if (IsBootstrapProcessingMode())
{
snapshot = SnapshotNow;
- OldestXmin = InvalidTransactionId; /* not used */
+ OldestXmin = InvalidTransactionId; /* not used */
}
else if (indexInfo->ii_Concurrent)
{
snapshot = CopySnapshot(GetTransactionSnapshot());
- OldestXmin = InvalidTransactionId; /* not used */
+ OldestXmin = InvalidTransactionId; /* not used */
}
else
{
@@ -1391,8 +1392,8 @@ IndexBuildHeapScan(Relation heapRelation,
scan = heap_beginscan(heapRelation, /* relation */
snapshot, /* seeself */
- 0, /* number of keys */
- NULL); /* scan key */
+ 0, /* number of keys */
+ NULL); /* scan key */
reltuples = 0;
@@ -1435,9 +1436,9 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* If tuple is recently deleted then we must index it
* anyway to preserve MVCC semantics. (Pre-existing
- * transactions could try to use the index after we
- * finish building it, and may need to see such tuples.)
- * Exclude it from unique-checking, however.
+ * transactions could try to use the index after we finish
+ * building it, and may need to see such tuples.) Exclude
+ * it from unique-checking, however.
*/
indexIt = true;
tupleIsAlive = false;
@@ -1573,7 +1574,7 @@ IndexBuildHeapScan(Relation heapRelation,
* which is passed to validate_index(). Any tuples that are valid according
* to this snap, but are not in the index, must be added to the index.
* (Any tuples committed live after the snap will be inserted into the
- * index by their originating transaction. Any tuples committed dead before
+ * index by their originating transaction. Any tuples committed dead before
* the snap need not be indexed, because we will wait out all transactions
* that might care about them before we mark the index valid.)
*
@@ -1582,7 +1583,7 @@ IndexBuildHeapScan(Relation heapRelation,
* ever say "delete it". (This should be faster than a plain indexscan;
* also, not all index AMs support full-index indexscan.) Then we sort the
* TIDs, and finally scan the table doing a "merge join" against the TID list
- * to see which tuples are missing from the index. Thus we will ensure that
+ * to see which tuples are missing from the index. Thus we will ensure that
* all tuples valid according to the reference snapshot are in the index.
*
* Building a unique index this way is tricky: we might try to insert a
@@ -1598,7 +1599,7 @@ IndexBuildHeapScan(Relation heapRelation,
* were alive at the time of the reference snapshot are gone; this is
* necessary to be sure there are none left with a serializable snapshot
* older than the reference (and hence possibly able to see tuples we did
- * not index). Then we mark the index valid and commit.
+ * not index). Then we mark the index valid and commit.
*
* Doing two full table scans is a brute-force strategy. We could try to be
* cleverer, eg storing new tuples in a special area of the table (perhaps
@@ -1608,10 +1609,11 @@ IndexBuildHeapScan(Relation heapRelation,
void
validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
{
- Relation heapRelation, indexRelation;
- IndexInfo *indexInfo;
+ Relation heapRelation,
+ indexRelation;
+ IndexInfo *indexInfo;
IndexVacuumInfo ivinfo;
- v_i_state state;
+ v_i_state state;
/* Open and lock the parent heap relation */
heapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
@@ -1619,9 +1621,9 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
indexRelation = index_open(indexId, RowExclusiveLock);
/*
- * Fetch info needed for index_insert. (You might think this should
- * be passed in from DefineIndex, but its copy is long gone due to
- * having been built in a previous transaction.)
+ * Fetch info needed for index_insert. (You might think this should be
+ * passed in from DefineIndex, but its copy is long gone due to having
+ * been built in a previous transaction.)
*/
indexInfo = BuildIndexInfo(indexRelation);
@@ -1675,7 +1677,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
static bool
validate_index_callback(ItemPointer itemptr, void *opaque)
{
- v_i_state *state = (v_i_state *) opaque;
+ v_i_state *state = (v_i_state *) opaque;
tuplesort_putdatum(state->tuplesort, PointerGetDatum(itemptr), false);
state->itups += 1;
@@ -1703,9 +1705,10 @@ validate_index_heapscan(Relation heapRelation,
TupleTableSlot *slot;
EState *estate;
ExprContext *econtext;
+
/* state variables for the merge */
ItemPointer indexcursor = NULL;
- bool tuplesort_empty = false;
+ bool tuplesort_empty = false;
/*
* sanity checks
@@ -1734,8 +1737,8 @@ validate_index_heapscan(Relation heapRelation,
*/
scan = heap_beginscan(heapRelation, /* relation */
snapshot, /* seeself */
- 0, /* number of keys */
- NULL); /* scan key */
+ 0, /* number of keys */
+ NULL); /* scan key */
/*
* Scan all tuples matching the snapshot.
@@ -1749,15 +1752,15 @@ validate_index_heapscan(Relation heapRelation,
state->htups += 1;
/*
- * "merge" by skipping through the index tuples until we find or
- * pass the current heap tuple.
+ * "merge" by skipping through the index tuples until we find or pass
+ * the current heap tuple.
*/
while (!tuplesort_empty &&
(!indexcursor ||
ItemPointerCompare(indexcursor, heapcursor) < 0))
{
- Datum ts_val;
- bool ts_isnull;
+ Datum ts_val;
+ bool ts_isnull;
if (indexcursor)
pfree(indexcursor);
@@ -1774,7 +1777,7 @@ validate_index_heapscan(Relation heapRelation,
* We've overshot which means this heap tuple is missing from the
* index, so insert it.
*/
- bool check_unique;
+ bool check_unique;
MemoryContextReset(econtext->ecxt_per_tuple_memory);
@@ -1793,7 +1796,7 @@ validate_index_heapscan(Relation heapRelation,
/*
* For the current heap tuple, extract all the attributes we use
- * in this index, and note which are null. This also performs
+ * in this index, and note which are null. This also performs
* evaluation of any expressions needed.
*/
FormIndexDatum(indexInfo,
@@ -1803,11 +1806,11 @@ validate_index_heapscan(Relation heapRelation,
isnull);
/*
- * If the tuple is already committed dead, we still have to
- * put it in the index (because some xacts might be able to
- * see it), but we might as well suppress uniqueness checking.
- * This is just an optimization because the index AM is not
- * supposed to raise a uniqueness failure anyway.
+ * If the tuple is already committed dead, we still have to put it
+ * in the index (because some xacts might be able to see it), but
+ * we might as well suppress uniqueness checking. This is just an
+ * optimization because the index AM is not supposed to raise a
+ * uniqueness failure anyway.
*/
if (indexInfo->ii_Unique)
{
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index abd3547e5e..1d6162ca12 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.87 2006/09/14 22:05:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.88 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -718,7 +718,7 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright)
ObjectIdGetDatum(namespaceId));
if (HeapTupleIsValid(opertup))
{
- Oid result = HeapTupleGetOid(opertup);
+ Oid result = HeapTupleGetOid(opertup);
ReleaseSysCache(opertup);
return result;
@@ -741,9 +741,9 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright)
}
/*
- * We have to find the list member that is first in the search path,
- * if there's more than one. This doubly-nested loop looks ugly,
- * but in practice there should usually be few catlist members.
+ * We have to find the list member that is first in the search path, if
+ * there's more than one. This doubly-nested loop looks ugly, but in
+ * practice there should usually be few catlist members.
*/
recomputeNamespacePath();
@@ -759,7 +759,7 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright)
if (operform->oprnamespace == namespaceId)
{
- Oid result = HeapTupleGetOid(opertup);
+ Oid result = HeapTupleGetOid(opertup);
ReleaseSysCacheList(catlist);
return result;
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 747543e077..6420defe4f 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.82 2006/07/27 19:52:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.83 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -89,15 +89,15 @@ AggregateCreate(const char *aggName,
}
/*
- * If transtype is polymorphic, must have polymorphic argument also;
- * else we will have no way to deduce the actual transtype.
+ * If transtype is polymorphic, must have polymorphic argument also; else
+ * we will have no way to deduce the actual transtype.
*/
if (!hasPolyArg &&
(aggTransType == ANYARRAYOID || aggTransType == ANYELEMENTOID))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot determine transition data type"),
- errdetail("An aggregate using \"anyarray\" or \"anyelement\" as transition type must have at least one argument of either type.")));
+ errdetail("An aggregate using \"anyarray\" or \"anyelement\" as transition type must have at least one argument of either type.")));
/* find the transfn */
nargs_transfn = numArgs + 1;
@@ -133,8 +133,8 @@ AggregateCreate(const char *aggName,
/*
* If the transfn is strict and the initval is NULL, make sure first input
- * type and transtype are the same (or at least binary-compatible), so that
- * it's OK to use the first input value as the initial transValue.
+ * type and transtype are the same (or at least binary-compatible), so
+ * that it's OK to use the first input value as the initial transValue.
*/
if (proc->proisstrict && agginitval == NULL)
{
@@ -211,7 +211,7 @@ AggregateCreate(const char *aggName,
PROVOLATILE_IMMUTABLE, /* volatility (not
* needed for agg) */
buildoidvector(aggArgTypes,
- numArgs), /* paramTypes */
+ numArgs), /* paramTypes */
PointerGetDatum(NULL), /* allParamTypes */
PointerGetDatum(NULL), /* parameterModes */
PointerGetDatum(NULL)); /* parameterNames */
@@ -355,7 +355,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
}
/* Check aggregate creator has permission to call the function */
diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c
index 99cdf5e7e6..b0c5986251 100644
--- a/src/backend/catalog/pg_depend.c
+++ b/src/backend/catalog/pg_depend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.22 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.23 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -264,7 +264,7 @@ changeDependencyFor(Oid classId, Oid objectId,
* Detect whether a sequence is marked as "owned" by a column
*
* An ownership marker is an AUTO dependency from the sequence to the
- * column. If we find one, store the identity of the owning column
+ * column. If we find one, store the identity of the owning column
* into *tableId and *colId and return TRUE; else return FALSE.
*
* Note: if there's more than one such pg_depend entry then you get
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 85e3d968d4..5195cb06be 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.14 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.15 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1101,9 +1101,9 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
while ((tuple = systable_getnext(scan)) != NULL)
{
- ObjectAddress obj;
- GrantObjectType objtype;
- InternalGrant istmt;
+ ObjectAddress obj;
+ GrantObjectType objtype;
+ InternalGrant istmt;
Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tuple);
/* We only operate on objects in the current database */
@@ -1112,7 +1112,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
switch (sdepForm->deptype)
{
- /* Shouldn't happen */
+ /* Shouldn't happen */
case SHARED_DEPENDENCY_PIN:
case SHARED_DEPENDENCY_INVALID:
elog(ERROR, "unexpected dependency type");
@@ -1267,10 +1267,11 @@ shdepReassignOwned(List *roleids, Oid newrole)
break;
case RelationRelationId:
+
/*
- * Pass recursing = true so that we don't fail on
- * indexes, owned sequences, etc when we happen
- * to visit them before their parent table.
+ * Pass recursing = true so that we don't fail on indexes,
+ * owned sequences, etc when we happen to visit them
+ * before their parent table.
*/
ATExecChangeOwner(sdepForm->objid, newrole, true);
break;
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 1b9eea29dc..32de0b90dc 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.107 2006/07/14 14:52:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.108 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,34 +73,34 @@ TypeShellMake(const char *typeName, Oid typeNamespace)
/*
* initialize *values with the type name and dummy values
*
- * The representational details are the same as int4 ... it doesn't
- * really matter what they are so long as they are consistent. Also
- * note that we give it typtype = 'p' (pseudotype) as extra insurance
- * that it won't be mistaken for a usable type.
+ * The representational details are the same as int4 ... it doesn't really
+ * matter what they are so long as they are consistent. Also note that we
+ * give it typtype = 'p' (pseudotype) as extra insurance that it won't be
+ * mistaken for a usable type.
*/
i = 0;
namestrcpy(&name, typeName);
values[i++] = NameGetDatum(&name); /* typname */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
- values[i++] = Int16GetDatum(sizeof(int4)); /* typlen */
- values[i++] = BoolGetDatum(true); /* typbyval */
- values[i++] = CharGetDatum('p'); /* typtype */
- values[i++] = BoolGetDatum(false); /* typisdefined */
+ values[i++] = Int16GetDatum(sizeof(int4)); /* typlen */
+ values[i++] = BoolGetDatum(true); /* typbyval */
+ values[i++] = CharGetDatum('p'); /* typtype */
+ values[i++] = BoolGetDatum(false); /* typisdefined */
values[i++] = CharGetDatum(DEFAULT_TYPDELIM); /* typdelim */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typrelid */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typelem */
- values[i++] = ObjectIdGetDatum(F_SHELL_IN); /* typinput */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typrelid */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typelem */
+ values[i++] = ObjectIdGetDatum(F_SHELL_IN); /* typinput */
values[i++] = ObjectIdGetDatum(F_SHELL_OUT); /* typoutput */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typreceive */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typsend */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typanalyze */
- values[i++] = CharGetDatum('i'); /* typalign */
- values[i++] = CharGetDatum('p'); /* typstorage */
- values[i++] = BoolGetDatum(false); /* typnotnull */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */
- values[i++] = Int32GetDatum(-1); /* typtypmod */
- values[i++] = Int32GetDatum(0); /* typndims */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typreceive */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typsend */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typanalyze */
+ values[i++] = CharGetDatum('i'); /* typalign */
+ values[i++] = CharGetDatum('p'); /* typstorage */
+ values[i++] = BoolGetDatum(false); /* typnotnull */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */
+ values[i++] = Int32GetDatum(-1); /* typtypmod */
+ values[i++] = Int32GetDatum(0); /* typndims */
nulls[i++] = 'n'; /* typdefaultbin */
nulls[i++] = 'n'; /* typdefault */
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index aa3e7b8fef..c4416d5e01 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.2 2006/08/25 04:06:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.3 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,7 +70,7 @@ AlterTableCreateToastTable(Oid relOid)
void
BootstrapToastTable(char *relName, Oid toastOid, Oid toastIndexOid)
{
- Relation rel;
+ Relation rel;
rel = heap_openrv(makeRangeVar(NULL, relName), AccessExclusiveLock);
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 7923199f63..e072a23f27 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.40 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.41 2006/10/04 00:29:50 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -58,7 +58,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
TypeName *transType = NULL;
char *initval = NULL;
Oid *aggArgTypes;
- int numArgs;
+ int numArgs;
Oid transTypeId;
ListCell *pl;
@@ -122,8 +122,8 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
if (oldstyle)
{
/*
- * Old style: use basetype parameter. This supports aggregates
- * of zero or one input, with input type ANY meaning zero inputs.
+ * Old style: use basetype parameter. This supports aggregates of
+ * zero or one input, with input type ANY meaning zero inputs.
*
* Historically we allowed the command to look like basetype = 'ANY'
* so we must do a case-insensitive comparison for the name ANY. Ugh.
@@ -150,8 +150,8 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
/*
* New style: args is a list of TypeNames (possibly zero of 'em).
*/
- ListCell *lc;
- int i = 0;
+ ListCell *lc;
+ int i = 0;
if (baseType != NULL)
ereport(ERROR,
@@ -162,7 +162,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
aggArgTypes = (Oid *) palloc(sizeof(Oid) * numArgs);
foreach(lc, args)
{
- TypeName *curTypeName = (TypeName *) lfirst(lc);
+ TypeName *curTypeName = (TypeName *) lfirst(lc);
aggArgTypes[i++] = typenameTypeId(NULL, curTypeName);
}
@@ -171,9 +171,9 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
/*
* look up the aggregate's transtype.
*
- * transtype can't be a pseudo-type, since we need to be
- * able to store values of the transtype. However, we can allow
- * polymorphic transtype in some cases (AggregateCreate will check).
+ * transtype can't be a pseudo-type, since we need to be able to store
+ * values of the transtype. However, we can allow polymorphic transtype
+ * in some cases (AggregateCreate will check).
*/
transTypeId = typenameTypeId(NULL, transType);
if (get_typtype(transTypeId) == 'p' &&
@@ -189,7 +189,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
*/
AggregateCreate(aggName, /* aggregate name */
aggNamespace, /* namespace */
- aggArgTypes, /* input data type(s) */
+ aggArgTypes, /* input data type(s) */
numArgs,
transfuncName, /* step function name */
finalfuncName, /* final function name */
@@ -289,7 +289,7 @@ RenameAggregate(List *name, List *args, const char *newname)
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes.values),
+ procForm->proargtypes.values),
get_namespace_name(namespaceOid))));
/* must be owner */
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index f9e41e3531..1ce768f046 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.98 2006/09/17 22:50:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.99 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -129,12 +129,11 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
CHECK_FOR_INTERRUPTS();
/*
- * Open the relation, getting ShareUpdateExclusiveLock to ensure that
- * two ANALYZEs don't run on it concurrently. (This also locks out
- * a concurrent VACUUM, which doesn't matter much at the moment but
- * might matter if we ever try to accumulate stats on dead tuples.)
- * If the rel has been dropped since we last saw it, we don't need
- * to process it.
+ * Open the relation, getting ShareUpdateExclusiveLock to ensure that two
+ * ANALYZEs don't run on it concurrently. (This also locks out a
+ * concurrent VACUUM, which doesn't matter much at the moment but might
+ * matter if we ever try to accumulate stats on dead tuples.) If the rel
+ * has been dropped since we last saw it, we don't need to process it.
*/
onerel = try_relation_open(relid, ShareUpdateExclusiveLock);
if (!onerel)
@@ -216,8 +215,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
if (i == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- col, RelationGetRelationName(onerel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ col, RelationGetRelationName(onerel))));
vacattrstats[tcnt] = examine_attribute(onerel, i);
if (vacattrstats[tcnt] != NULL)
tcnt++;
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index bd2301dd62..665c66cad5 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.153 2006/08/18 16:09:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.154 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -288,7 +288,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(rvtc->indexOid),
0, 0, 0);
- if (!HeapTupleIsValid(tuple)) /* probably can't happen */
+ if (!HeapTupleIsValid(tuple)) /* probably can't happen */
{
relation_close(OldHeap, AccessExclusiveLock);
return;
@@ -350,7 +350,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
errmsg("cannot cluster on partial index \"%s\"",
RelationGetRelationName(OldIndex))));
- if (!OldIndex->rd_am->amclusterable)
+ if (!OldIndex->rd_am->amclusterable)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on index \"%s\" because access method does not support clustering",
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index f54243a495..df4416d37c 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.91 2006/09/05 21:08:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.92 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -261,11 +261,12 @@ CreateComments(Oid oid, Oid classoid, int32 subid, char *comment)
* If the comment given is null or an empty string, instead delete any
* existing comment for the specified key.
*/
-void CreateSharedComments(Oid oid, Oid classoid, char *comment)
+void
+CreateSharedComments(Oid oid, Oid classoid, char *comment)
{
Relation shdescription;
- ScanKeyData skey[2];
- SysScanDesc sd;
+ ScanKeyData skey[2];
+ SysScanDesc sd;
HeapTuple oldtuple;
HeapTuple newtuple = NULL;
Datum values[Natts_pg_shdescription];
@@ -294,18 +295,18 @@ void CreateSharedComments(Oid oid, Oid classoid, char *comment)
/* Use the index to search for a matching old tuple */
ScanKeyInit(&skey[0],
- Anum_pg_shdescription_objoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(oid));
+ Anum_pg_shdescription_objoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(oid));
ScanKeyInit(&skey[1],
- Anum_pg_shdescription_classoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(classoid));
+ Anum_pg_shdescription_classoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(classoid));
shdescription = heap_open(SharedDescriptionRelationId, RowExclusiveLock);
sd = systable_beginscan(shdescription, SharedDescriptionObjIndexId, true,
- SnapshotNow, 2, skey);
+ SnapshotNow, 2, skey);
while ((oldtuple = systable_getnext(sd)) != NULL)
{
@@ -316,11 +317,11 @@ void CreateSharedComments(Oid oid, Oid classoid, char *comment)
else
{
newtuple = heap_modifytuple(oldtuple, RelationGetDescr(shdescription),
- values, nulls, replaces);
+ values, nulls, replaces);
simple_heap_update(shdescription, &oldtuple->t_self, newtuple);
}
- break; /* Assume there can be only one match */
+ break; /* Assume there can be only one match */
}
systable_endscan(sd);
@@ -330,7 +331,7 @@ void CreateSharedComments(Oid oid, Oid classoid, char *comment)
if (newtuple == NULL && comment != NULL)
{
newtuple = heap_formtuple(RelationGetDescr(shdescription),
- values, nulls);
+ values, nulls);
simple_heap_insert(shdescription, newtuple);
}
@@ -405,25 +406,25 @@ void
DeleteSharedComments(Oid oid, Oid classoid)
{
Relation shdescription;
- ScanKeyData skey[2];
- SysScanDesc sd;
+ ScanKeyData skey[2];
+ SysScanDesc sd;
HeapTuple oldtuple;
/* Use the index to search for all matching old tuples */
ScanKeyInit(&skey[0],
- Anum_pg_shdescription_objoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(oid));
+ Anum_pg_shdescription_objoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(oid));
ScanKeyInit(&skey[1],
- Anum_pg_shdescription_classoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(classoid));
+ Anum_pg_shdescription_classoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(classoid));
shdescription = heap_open(SharedDescriptionRelationId, RowExclusiveLock);
sd = systable_beginscan(shdescription, SharedDescriptionObjIndexId, true,
- SnapshotNow, 2, skey);
+ SnapshotNow, 2, skey);
while ((oldtuple = systable_getnext(sd)) != NULL)
simple_heap_delete(shdescription, &oldtuple->t_self);
@@ -620,8 +621,8 @@ CommentDatabase(List *qualname, char *comment)
static void
CommentTablespace(List *qualname, char *comment)
{
- char *tablespace;
- Oid oid;
+ char *tablespace;
+ Oid oid;
if (list_length(qualname) != 1)
ereport(ERROR,
@@ -657,8 +658,8 @@ CommentTablespace(List *qualname, char *comment)
static void
CommentRole(List *qualname, char *comment)
{
- char *role;
- Oid oid;
+ char *role;
+ Oid oid;
if (list_length(qualname) != 1)
ereport(ERROR,
@@ -672,7 +673,7 @@ CommentRole(List *qualname, char *comment)
if (!has_privs_of_role(GetUserId(), oid))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be member of role \"%s\" to comment upon it", role)));
+ errmsg("must be member of role \"%s\" to comment upon it", role)));
/* Call CreateSharedComments() to create/drop the comments */
CreateSharedComments(oid, AuthIdRelationId, comment);
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 23d9742234..0cbfc237ff 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.271 2006/08/31 03:17:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.272 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,8 +88,8 @@ typedef struct CopyStateData
/* low-level state data */
CopyDest copy_dest; /* type of copy source/destination */
FILE *copy_file; /* used if copy_dest == COPY_FILE */
- StringInfo fe_msgbuf; /* used for all dests during COPY TO, only
- * for dest == COPY_NEW_FE in COPY FROM */
+ StringInfo fe_msgbuf; /* used for all dests during COPY TO, only for
+ * dest == COPY_NEW_FE in COPY FROM */
bool fe_copy; /* true for all FE copy dests */
bool fe_eof; /* true if detected end of copy data */
EolType eol_type; /* EOL type of input */
@@ -109,7 +109,7 @@ typedef struct CopyStateData
bool header_line; /* CSV header line? */
char *null_print; /* NULL marker string (server encoding!) */
int null_print_len; /* length of same */
- char *null_print_client; /* same converted to client encoding */
+ char *null_print_client; /* same converted to client encoding */
char *delim; /* column delimiter (must be 1 byte) */
char *quote; /* CSV quote char (must be 1 byte) */
char *escape; /* CSV escape char (must be 1 byte) */
@@ -125,8 +125,8 @@ typedef struct CopyStateData
/*
* Working state for COPY TO
*/
- FmgrInfo *out_functions; /* lookup info for output functions */
- MemoryContext rowcontext; /* per-row evaluation context */
+ FmgrInfo *out_functions; /* lookup info for output functions */
+ MemoryContext rowcontext; /* per-row evaluation context */
/*
* These variables are used to reduce overhead in textual COPY FROM.
@@ -177,7 +177,7 @@ typedef struct
* function call overhead in tight COPY loops.
*
* We must use "if (1)" because "do {} while(0)" overrides the continue/break
- * processing. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
+ * processing. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
*/
/*
@@ -243,7 +243,7 @@ static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";
static void DoCopyTo(CopyState cstate);
static void CopyTo(CopyState cstate);
static void CopyOneRowTo(CopyState cstate, Oid tupleOid,
- Datum *values, bool *nulls);
+ Datum *values, bool *nulls);
static void CopyFrom(CopyState cstate);
static bool CopyReadLine(CopyState cstate);
static bool CopyReadLineText(CopyState cstate);
@@ -259,7 +259,7 @@ static void CopyAttributeOutText(CopyState cstate, char *string);
static void CopyAttributeOutCSV(CopyState cstate, char *string,
bool use_quote, bool single_attr);
static List *CopyGetAttnums(TupleDesc tupDesc, Relation rel,
- List *attnamelist);
+ List *attnamelist);
static char *limit_printout_length(const char *str);
/* Low-level communications functions */
@@ -863,10 +863,10 @@ DoCopy(const CopyStmt *stmt)
/* Disallow end-of-line characters */
if (strchr(cstate->delim, '\r') != NULL ||
- strchr(cstate->delim, '\n') != NULL)
+ strchr(cstate->delim, '\n') != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("COPY delimiter cannot be newline or carriage return")));
+ errmsg("COPY delimiter cannot be newline or carriage return")));
if (strchr(cstate->null_print, '\r') != NULL ||
strchr(cstate->null_print, '\n') != NULL)
@@ -956,7 +956,7 @@ DoCopy(const CopyStmt *stmt)
/* Open and lock the relation, using the appropriate lock type. */
cstate->rel = heap_openrv(stmt->relation,
- (is_from ? RowExclusiveLock : AccessShareLock));
+ (is_from ? RowExclusiveLock : AccessShareLock));
/* Check relation permissions. */
aclresult = pg_class_aclcheck(RelationGetRelid(cstate->rel),
@@ -1009,7 +1009,7 @@ DoCopy(const CopyStmt *stmt)
* rewriting or planning. Do that now.
*
* Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * we make a preliminary copy of the source querytree. This prevents
* problems in the case that the COPY is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
* EXPLAIN, DECLARE CURSOR and PREPARE.) XXX the planner really
@@ -1124,8 +1124,8 @@ DoCopy(const CopyStmt *stmt)
/*
* Set up encoding conversion info. Even if the client and server
- * encodings are the same, we must apply pg_client_to_server() to
- * validate data in multibyte encodings.
+ * encodings are the same, we must apply pg_client_to_server() to validate
+ * data in multibyte encodings.
*/
cstate->client_encoding = pg_get_client_encoding();
cstate->need_transcoding =
@@ -1139,7 +1139,8 @@ DoCopy(const CopyStmt *stmt)
if (is_from) /* copy from file to database */
CopyFrom(cstate);
- else /* copy from database to file */
+ else
+ /* copy from database to file */
DoCopyTo(cstate);
/*
@@ -1210,12 +1211,12 @@ DoCopyTo(CopyState cstate)
}
else
{
- mode_t oumask; /* Pre-existing umask value */
+ mode_t oumask; /* Pre-existing umask value */
struct stat st;
/*
- * Prevent write to relative path ... too easy to shoot oneself in
- * the foot by overwriting a database file ...
+ * Prevent write to relative path ... too easy to shoot oneself in the
+ * foot by overwriting a database file ...
*/
if (!is_absolute_path(cstate->filename))
ereport(ERROR,
@@ -1351,7 +1352,7 @@ CopyTo(CopyState cstate)
*/
if (cstate->need_transcoding)
cstate->null_print_client = pg_server_to_client(cstate->null_print,
- cstate->null_print_len);
+ cstate->null_print_len);
/* if a header has been requested send the line */
if (cstate->header_line)
@@ -1508,7 +1509,7 @@ CopyOneRowTo(CopyState cstate, Oid tupleOid, Datum *values, bool *nulls)
CopySendEndOfRow(cstate);
MemoryContextSwitchTo(oldcontext);
-
+
cstate->processed++;
}
@@ -2237,6 +2238,7 @@ CopyReadLineText(CopyState cstate)
bool hit_eof = false;
bool result = false;
char mblen_str[2];
+
/* CSV variables */
bool first_char_in_line = true;
bool in_quote = false,
@@ -2268,10 +2270,10 @@ CopyReadLineText(CopyState cstate)
* assumed the same in frontend and backend encodings.
*
* For speed, we try to move data from raw_buf to line_buf in chunks
- * rather than one character at a time. raw_buf_ptr points to the next
+ * rather than one character at a time. raw_buf_ptr points to the next
* character to examine; any characters from raw_buf_index to raw_buf_ptr
- * have been determined to be part of the line, but not yet transferred
- * to line_buf.
+ * have been determined to be part of the line, but not yet transferred to
+ * line_buf.
*
* For a little extra speed within the loop, we copy raw_buf and
* raw_buf_len into local variables.
@@ -2286,14 +2288,14 @@ CopyReadLineText(CopyState cstate)
char c;
/*
- * Load more data if needed. Ideally we would just force four bytes
- * of read-ahead and avoid the many calls to
- * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE
- * protocol does not allow us to read too far ahead or we might
- * read into the next data, so we read-ahead only as far we know
- * we can. One optimization would be to read-ahead four byte here
- * if cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
- * considering the size of the buffer.
+ * Load more data if needed. Ideally we would just force four bytes
+ * of read-ahead and avoid the many calls to
+ * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
+ * does not allow us to read too far ahead or we might read into the
+ * next data, so we read-ahead only as far we know we can. One
+ * optimization would be to read-ahead four byte here if
+ * cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
+ * considering the size of the buffer.
*/
if (raw_buf_ptr >= copy_buf_len || need_data)
{
@@ -2328,12 +2330,12 @@ CopyReadLineText(CopyState cstate)
{
/*
* If character is '\\' or '\r', we may need to look ahead below.
- * Force fetch of the next character if we don't already have it. We
- * need to do this before changing CSV state, in case one of these
- * characters is also the quote or escape character.
+ * Force fetch of the next character if we don't already have it.
+ * We need to do this before changing CSV state, in case one of
+ * these characters is also the quote or escape character.
*
- * Note: old-protocol does not like forced prefetch, but it's OK here
- * since we cannot validly be at EOF.
+ * Note: old-protocol does not like forced prefetch, but it's OK
+ * here since we cannot validly be at EOF.
*/
if (c == '\\' || c == '\r')
{
@@ -2341,12 +2343,12 @@ CopyReadLineText(CopyState cstate)
}
/*
- * Dealing with quotes and escapes here is mildly tricky. If the quote
- * char is also the escape char, there's no problem - we just use the
- * char as a toggle. If they are different, we need to ensure that we
- * only take account of an escape inside a quoted field and
- * immediately preceding a quote char, and not the second in a
- * escape-escape sequence.
+ * Dealing with quotes and escapes here is mildly tricky. If the
+ * quote char is also the escape char, there's no problem - we
+ * just use the char as a toggle. If they are different, we need
+ * to ensure that we only take account of an escape inside a
+ * quoted field and immediately preceding a quote char, and not
+ * the second in a escape-escape sequence.
*/
if (in_quote && c == escapec)
last_was_esc = !last_was_esc;
@@ -2357,9 +2359,9 @@ CopyReadLineText(CopyState cstate)
/*
* Updating the line count for embedded CR and/or LF chars is
- * necessarily a little fragile - this test is probably about the best
- * we can do. (XXX it's arguable whether we should do this at all ---
- * is cur_lineno a physical or logical count?)
+ * necessarily a little fragile - this test is probably about the
+ * best we can do. (XXX it's arguable whether we should do this
+ * at all --- is cur_lineno a physical or logical count?)
*/
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
cstate->cur_lineno++;
@@ -2394,12 +2396,13 @@ CopyReadLineText(CopyState cstate)
if (cstate->eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg(!cstate->csv_mode ?
+ errmsg(!cstate->csv_mode ?
"literal carriage return found in data" :
- "unquoted carriage return found in data"),
+ "unquoted carriage return found in data"),
errhint(!cstate->csv_mode ?
- "Use \"\\r\" to represent carriage return." :
- "Use quoted CSV field to represent carriage return.")));
+ "Use \"\\r\" to represent carriage return." :
+ "Use quoted CSV field to represent carriage return.")));
+
/*
* if we got here, it is the first line and we didn't find
* \n, so don't consume the peeked character
@@ -2410,12 +2413,12 @@ CopyReadLineText(CopyState cstate)
else if (cstate->eol_type == EOL_NL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg(!cstate->csv_mode ?
+ errmsg(!cstate->csv_mode ?
"literal carriage return found in data" :
"unquoted carriage return found in data"),
errhint(!cstate->csv_mode ?
- "Use \"\\r\" to represent carriage return." :
- "Use quoted CSV field to represent carriage return.")));
+ "Use \"\\r\" to represent carriage return." :
+ "Use quoted CSV field to represent carriage return.")));
/* If reach here, we have found the line terminator */
break;
}
@@ -2431,15 +2434,15 @@ CopyReadLineText(CopyState cstate)
"unquoted newline found in data"),
errhint(!cstate->csv_mode ?
"Use \"\\n\" to represent newline." :
- "Use quoted CSV field to represent newline.")));
+ "Use quoted CSV field to represent newline.")));
cstate->eol_type = EOL_NL; /* in case not set yet */
/* If reach here, we have found the line terminator */
break;
}
/*
- * In CSV mode, we only recognize \. alone on a line. This is
- * because \. is a valid CSV data value.
+ * In CSV mode, we only recognize \. alone on a line. This is because
+ * \. is a valid CSV data value.
*/
if (c == '\\' && (!cstate->csv_mode || first_char_in_line))
{
@@ -2529,23 +2532,24 @@ CopyReadLineText(CopyState cstate)
break;
}
else if (!cstate->csv_mode)
+
/*
- * If we are here, it means we found a backslash followed by
- * something other than a period. In non-CSV mode, anything
- * after a backslash is special, so we skip over that second
- * character too. If we didn't do that \\. would be
- * considered an eof-of copy, while in non-CVS mode it is a
- * literal backslash followed by a period. In CSV mode,
- * backslashes are not special, so we want to process the
- * character after the backslash just like a normal character,
- * so we don't increment in those cases.
+ * If we are here, it means we found a backslash followed by
+ * something other than a period. In non-CSV mode, anything
+ * after a backslash is special, so we skip over that second
+ * character too. If we didn't do that \\. would be
+ * considered an eof-of copy, while in non-CVS mode it is a
+ * literal backslash followed by a period. In CSV mode,
+ * backslashes are not special, so we want to process the
+ * character after the backslash just like a normal character,
+ * so we don't increment in those cases.
*/
raw_buf_ptr++;
}
/*
- * This label is for CSV cases where \. appears at the start of a line,
- * but there is more text after it, meaning it was a data value.
+ * This label is for CSV cases where \. appears at the start of a
+ * line, but there is more text after it, meaning it was a data value.
* We are more strict for \. in CSV mode because \. could be a data
* value, while in non-CSV mode, \. cannot be a data value.
*/
@@ -2554,9 +2558,9 @@ not_end_of_copy:
/*
* Process all bytes of a multi-byte character as a group.
*
- * We only support multi-byte sequences where the first byte
- * has the high-bit set, so as an optimization we can avoid
- * this block entirely if it is not set.
+ * We only support multi-byte sequences where the first byte has the
+ * high-bit set, so as an optimization we can avoid this block
+ * entirely if it is not set.
*/
if (cstate->encoding_embeds_ascii && IS_HIGHBIT_SET(c))
{
@@ -3040,10 +3044,10 @@ CopyAttributeOutText(CopyState cstate, char *string)
/*
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
- * are infrequent. To avoid overhead from calling CopySendData once per
- * character, we dump out all characters between replaceable characters
- * in a single call. The loop invariant is that the data from "start"
- * to "ptr" can be sent literally, but hasn't yet been.
+ * are infrequent. To avoid overhead from calling CopySendData once per
+ * character, we dump out all characters between replaceable characters in
+ * a single call. The loop invariant is that the data from "start" to
+ * "ptr" can be sent literally, but hasn't yet been.
*/
start = ptr;
while ((c = *ptr) != '\0')
@@ -3090,7 +3094,7 @@ CopyAttributeOutText(CopyState cstate, char *string)
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
- start = ptr; /* we include char in next run */
+ start = ptr; /* we include char in next run */
}
/*
@@ -3139,14 +3143,13 @@ CopyAttributeOutCSV(CopyState cstate, char *string,
if (!use_quote)
{
/*
- * Because '\.' can be a data value, quote it if it appears
- * alone on a line so it is not interpreted as the end-of-data
- * marker.
+ * Because '\.' can be a data value, quote it if it appears alone on a
+ * line so it is not interpreted as the end-of-data marker.
*/
if (single_attr && strcmp(ptr, "\\.") == 0)
- use_quote = true;
- else
- {
+ use_quote = true;
+ else
+ {
char *tptr = ptr;
while ((c = *tptr) != '\0')
@@ -3251,14 +3254,14 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist)
{
if (rel != NULL)
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- name, RelationGetRelationName(rel))));
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ name, RelationGetRelationName(rel))));
else
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" does not exist",
- name)));
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" does not exist",
+ name)));
}
/* Check for duplicates */
if (list_member_int(attnums, attnum))
@@ -3289,7 +3292,7 @@ copy_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
static void
copy_dest_receive(TupleTableSlot *slot, DestReceiver *self)
{
- DR_copy *myState = (DR_copy *) self;
+ DR_copy *myState = (DR_copy *) self;
CopyState cstate = myState->cstate;
/* Make sure the tuple is fully deconstructed */
@@ -3323,7 +3326,7 @@ copy_dest_destroy(DestReceiver *self)
DestReceiver *
CreateCopyDestReceiver(void)
{
- DR_copy *self = (DR_copy *) palloc(sizeof(DR_copy));
+ DR_copy *self = (DR_copy *) palloc(sizeof(DR_copy));
self->pub.receiveSlot = copy_dest_receive;
self->pub.rStartup = copy_dest_startup;
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 853c7e6626..d7d4cdbfbc 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.184 2006/07/14 14:52:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.185 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -326,9 +326,9 @@ createdb(const CreatedbStmt *stmt)
}
/*
- * Check for db name conflict. This is just to give a more friendly
- * error message than "unique index violation". There's a race condition
- * but we're willing to accept the less friendly message in that case.
+ * Check for db name conflict. This is just to give a more friendly error
+ * message than "unique index violation". There's a race condition but
+ * we're willing to accept the less friendly message in that case.
*/
if (OidIsValid(get_database_oid(dbname)))
ereport(ERROR,
@@ -336,10 +336,10 @@ createdb(const CreatedbStmt *stmt)
errmsg("database \"%s\" already exists", dbname)));
/*
- * Insert a new tuple into pg_database. This establishes our ownership
- * of the new database name (anyone else trying to insert the same name
- * will block on the unique index, and fail after we commit). It also
- * assigns the OID that the new database will have.
+ * Insert a new tuple into pg_database. This establishes our ownership of
+ * the new database name (anyone else trying to insert the same name will
+ * block on the unique index, and fail after we commit). It also assigns
+ * the OID that the new database will have.
*/
pg_database_rel = heap_open(DatabaseRelationId, RowExclusiveLock);
@@ -361,9 +361,9 @@ createdb(const CreatedbStmt *stmt)
/*
* We deliberately set datconfig and datacl to defaults (NULL), rather
- * than copying them from the template database. Copying datacl would
- * be a bad idea when the owner is not the same as the template's
- * owner. It's more debatable whether datconfig should be copied.
+ * than copying them from the template database. Copying datacl would be
+ * a bad idea when the owner is not the same as the template's owner. It's
+ * more debatable whether datconfig should be copied.
*/
new_record_nulls[Anum_pg_database_datconfig - 1] = 'n';
new_record_nulls[Anum_pg_database_datacl - 1] = 'n';
@@ -497,8 +497,8 @@ createdb(const CreatedbStmt *stmt)
RequestCheckpoint(true, false);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_database_rel, NoLock);
@@ -543,8 +543,8 @@ dropdb(const char *dbname, bool missing_ok)
errmsg("cannot drop the currently open database")));
/*
- * Look up the target database's OID, and get exclusive lock on it.
- * We need this to ensure that no new backend starts up in the target
+ * Look up the target database's OID, and get exclusive lock on it. We
+ * need this to ensure that no new backend starts up in the target
* database while we are deleting it (see postinit.c), and that no one is
* using it as a CREATE DATABASE template or trying to delete it for
* themselves.
@@ -589,8 +589,8 @@ dropdb(const char *dbname, bool missing_ok)
errmsg("cannot drop a template database")));
/*
- * Check for active backends in the target database. (Because we hold
- * the database lock, no new ones can start after this.)
+ * Check for active backends in the target database. (Because we hold the
+ * database lock, no new ones can start after this.)
*/
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
@@ -647,8 +647,8 @@ dropdb(const char *dbname, bool missing_ok)
remove_dbtablespaces(db_id);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pgdbrel, NoLock);
@@ -670,8 +670,8 @@ RenameDatabase(const char *oldname, const char *newname)
Relation rel;
/*
- * Look up the target database's OID, and get exclusive lock on it.
- * We need this for the same reasons as DROP DATABASE.
+ * Look up the target database's OID, and get exclusive lock on it. We
+ * need this for the same reasons as DROP DATABASE.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
@@ -693,8 +693,8 @@ RenameDatabase(const char *oldname, const char *newname)
errmsg("current database may not be renamed")));
/*
- * Make sure the database does not have active sessions. This is the
- * same concern as above, but applied to other sessions.
+ * Make sure the database does not have active sessions. This is the same
+ * concern as above, but applied to other sessions.
*/
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
@@ -730,8 +730,8 @@ RenameDatabase(const char *oldname, const char *newname)
CatalogUpdateIndexes(rel, newtup);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(rel, NoLock);
@@ -1067,9 +1067,9 @@ get_db_info(const char *name, LOCKMODE lockmode,
relation = heap_open(DatabaseRelationId, AccessShareLock);
/*
- * Loop covers the rare case where the database is renamed before we
- * can lock it. We try again just in case we can find a new one of
- * the same name.
+ * Loop covers the rare case where the database is renamed before we can
+ * lock it. We try again just in case we can find a new one of the same
+ * name.
*/
for (;;)
{
@@ -1079,8 +1079,8 @@ get_db_info(const char *name, LOCKMODE lockmode,
Oid dbOid;
/*
- * there's no syscache for database-indexed-by-name,
- * so must do it the hard way
+ * there's no syscache for database-indexed-by-name, so must do it the
+ * hard way
*/
ScanKeyInit(&scanKey,
Anum_pg_database_datname,
@@ -1110,9 +1110,9 @@ get_db_info(const char *name, LOCKMODE lockmode,
LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode);
/*
- * And now, re-fetch the tuple by OID. If it's still there and
- * still the same name, we win; else, drop the lock and loop
- * back to try again.
+ * And now, re-fetch the tuple by OID. If it's still there and still
+ * the same name, we win; else, drop the lock and loop back to try
+ * again.
*/
tuple = SearchSysCache(DATABASEOID,
ObjectIdGetDatum(dbOid),
@@ -1267,8 +1267,8 @@ get_database_oid(const char *dbname)
Oid oid;
/*
- * There's no syscache for pg_database indexed by name,
- * so we must look the hard way.
+ * There's no syscache for pg_database indexed by name, so we must look
+ * the hard way.
*/
pg_database = heap_open(DatabaseRelationId, AccessShareLock);
ScanKeyInit(&entry[0],
@@ -1399,15 +1399,15 @@ dbase_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_dbase_create_rec *xlrec = (xl_dbase_create_rec *) rec;
appendStringInfo(buf, "create db: copy dir %u/%u to %u/%u",
- xlrec->src_db_id, xlrec->src_tablespace_id,
- xlrec->db_id, xlrec->tablespace_id);
+ xlrec->src_db_id, xlrec->src_tablespace_id,
+ xlrec->db_id, xlrec->tablespace_id);
}
else if (info == XLOG_DBASE_DROP)
{
xl_dbase_drop_rec *xlrec = (xl_dbase_drop_rec *) rec;
appendStringInfo(buf, "drop db: dir %u/%u",
- xlrec->db_id, xlrec->tablespace_id);
+ xlrec->db_id, xlrec->tablespace_id);
}
else
appendStringInfo(buf, "UNKNOWN");
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index 149e1b6dae..697678c4aa 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.97 2006/07/03 22:45:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.98 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -131,6 +131,7 @@ defGetBoolean(DefElem *def)
*/
if (def->arg == NULL)
return true;
+
/*
* Allow 0, 1, "true", "false"
*/
@@ -150,7 +151,7 @@ defGetBoolean(DefElem *def)
break;
default:
{
- char *sval = defGetString(def);
+ char *sval = defGetString(def);
if (pg_strcasecmp(sval, "true") == 0)
return true;
@@ -310,9 +311,9 @@ defGetTypeLength(DefElem *def)
DefElem *
defWithOids(bool value)
{
- DefElem *f = makeNode(DefElem);
+ DefElem *f = makeNode(DefElem);
f->defname = "oids";
- f->arg = (Node *)makeInteger(value);
+ f->arg = (Node *) makeInteger(value);
return f;
}
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 48db000ea9..00ada3fc72 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.151 2006/09/06 20:40:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.152 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@ typedef struct ExplainState
} ExplainState;
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
- ParamListInfo params, TupOutputState *tstate);
+ ParamListInfo params, TupOutputState *tstate);
static double elapsed_time(instr_time *starttime);
static void explain_outNode(StringInfo str,
Plan *plan, PlanState *planstate,
@@ -760,7 +760,7 @@ explain_outNode(StringInfo str,
* The tidquals list has OR semantics, so be sure to show it
* as an OR condition.
*/
- List *tidquals = ((TidScan *) plan)->tidquals;
+ List *tidquals = ((TidScan *) plan)->tidquals;
if (list_length(tidquals) > 1)
tidquals = list_make1(make_orclause(tidquals));
@@ -928,8 +928,8 @@ explain_outNode(StringInfo str,
/*
* Ordinarily we don't pass down our own outer_plan value to our
* child nodes, but in an Append we must, since we might be
- * looking at an appendrel indexscan with outer references
- * from the member scans.
+ * looking at an appendrel indexscan with outer references from
+ * the member scans.
*/
explain_outNode(str, subnode,
appendstate->appendplans[j],
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 615d4c93b2..29954a27a8 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.78 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.79 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -686,7 +686,7 @@ RemoveFunction(RemoveFuncStmt *stmt)
* Find the function, do permissions and validity checks
*/
funcOid = LookupFuncNameTypeNames(functionName, argTypes, stmt->missing_ok);
- if (!OidIsValid(funcOid))
+ if (!OidIsValid(funcOid))
{
/* can only get here if stmt->missing_ok */
ereport(NOTICE,
@@ -1394,7 +1394,7 @@ DropCast(DropCastStmt *stmt)
0, 0);
if (!HeapTupleIsValid(tuple))
{
- if (! stmt->missing_ok)
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("cast from type %s to type %s does not exist",
@@ -1402,7 +1402,7 @@ DropCast(DropCastStmt *stmt)
TypeNameToString(stmt->targettype))));
else
ereport(NOTICE,
- (errmsg("cast from type %s to type %s does not exist ... skipping",
+ (errmsg("cast from type %s to type %s does not exist ... skipping",
TypeNameToString(stmt->sourcetype),
TypeNameToString(stmt->targettype))));
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 81246768bc..5f54f66f59 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.148 2006/08/27 19:14:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.149 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,8 +125,8 @@ DefineIndex(RangeVar *heapRelation,
LockRelId heaprelid;
LOCKTAG heaplocktag;
Snapshot snapshot;
- Relation pg_index;
- HeapTuple indexTuple;
+ Relation pg_index;
+ HeapTuple indexTuple;
Form_pg_index indexForm;
/*
@@ -450,18 +450,18 @@ DefineIndex(RangeVar *heapRelation,
* for an overview of how this works)
*
* We must commit our current transaction so that the index becomes
- * visible; then start another. Note that all the data structures
- * we just built are lost in the commit. The only data we keep past
- * here are the relation IDs.
+ * visible; then start another. Note that all the data structures we just
+ * built are lost in the commit. The only data we keep past here are the
+ * relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
- * that neither it nor the index can be dropped before we finish.
- * This cannot block, even if someone else is waiting for access, because
- * we already have the same lock within our transaction.
+ * that neither it nor the index can be dropped before we finish. This
+ * cannot block, even if someone else is waiting for access, because we
+ * already have the same lock within our transaction.
*
* Note: we don't currently bother with a session lock on the index,
- * because there are no operations that could change its state while
- * we hold lock on the parent table. This might need to change later.
+ * because there are no operations that could change its state while we
+ * hold lock on the parent table. This might need to change later.
*/
heaprelid = rel->rd_lockInfo.lockRelId;
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
@@ -471,15 +471,15 @@ DefineIndex(RangeVar *heapRelation,
/*
* Now we must wait until no running transaction could have the table open
- * with the old list of indexes. To do this, inquire which xacts currently
- * would conflict with ShareLock on the table -- ie, which ones have
- * a lock that permits writing the table. Then wait for each of these
- * xacts to commit or abort. Note we do not need to worry about xacts
- * that open the table for writing after this point; they will see the
- * new index when they open it.
+ * with the old list of indexes. To do this, inquire which xacts
+ * currently would conflict with ShareLock on the table -- ie, which ones
+ * have a lock that permits writing the table. Then wait for each of
+ * these xacts to commit or abort. Note we do not need to worry about
+ * xacts that open the table for writing after this point; they will see
+ * the new index when they open it.
*
- * Note: GetLockConflicts() never reports our own xid,
- * hence we need not check for that.
+ * Note: GetLockConflicts() never reports our own xid, hence we need not
+ * check for that.
*/
SET_LOCKTAG_RELATION(heaplocktag, heaprelid.dbId, heaprelid.relId);
old_xact_list = GetLockConflicts(&heaplocktag, ShareLock);
@@ -493,12 +493,12 @@ DefineIndex(RangeVar *heapRelation,
/*
* Now take the "reference snapshot" that will be used by validate_index()
- * to filter candidate tuples. All other transactions running at this
+ * to filter candidate tuples. All other transactions running at this
* time will have to be out-waited before we can commit, because we can't
* guarantee that tuples deleted just before this will be in the index.
*
- * We also set ActiveSnapshot to this snap, since functions in indexes
- * may need a snapshot.
+ * We also set ActiveSnapshot to this snap, since functions in indexes may
+ * need a snapshot.
*/
snapshot = CopySnapshot(GetTransactionSnapshot());
ActiveSnapshot = snapshot;
@@ -510,13 +510,13 @@ DefineIndex(RangeVar *heapRelation,
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted
- * just before the reference snap was taken, we have to wait out any
- * transactions older than the reference snap. We can do this by
- * waiting for each xact explicitly listed in the snap.
+ * interesting tuples. But since it might not contain tuples deleted just
+ * before the reference snap was taken, we have to wait out any
+ * transactions older than the reference snap. We can do this by waiting
+ * for each xact explicitly listed in the snap.
*
- * Note: GetSnapshotData() never stores our own xid into a snap,
- * hence we need not check for that.
+ * Note: GetSnapshotData() never stores our own xid into a snap, hence we
+ * need not check for that.
*/
for (ixcnt = 0; ixcnt < snapshot->xcnt; ixcnt++)
XactLockTableWait(snapshot->xip[ixcnt]);
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 5d77e056e1..4407e2785c 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.48 2006/07/18 17:42:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.49 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -128,7 +128,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* A minimum expectation therefore is that the caller have execute
* privilege with grant option. Since we don't have a way to make the
* opclass go away if the grant option is revoked, we choose instead to
- * require ownership of the functions. It's also not entirely clear what
+ * require ownership of the functions. It's also not entirely clear what
* permissions should be required on the datatype, but ownership seems
* like a safe choice.
*
@@ -699,7 +699,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
opcID = OpclassnameGetOpcid(amID, opcname);
if (!OidIsValid(opcID))
{
- if (! stmt -> missing_ok )
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
@@ -708,10 +708,10 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
ereport(NOTICE,
(errmsg("operator class \"%s\" does not exist for access method \"%s\"",
opcname, stmt->amname)));
-
+
return;
}
-
+
tuple = SearchSysCache(CLAOID,
ObjectIdGetDatum(opcID),
0, 0, 0);
@@ -719,19 +719,19 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
if (!HeapTupleIsValid(tuple))
{
-
- if (! stmt->missing_ok )
+
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
else
ereport(NOTICE,
(errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
return;
}
-
+
opcID = HeapTupleGetOid(tuple);
/* Permission check: must own opclass or its namespace */
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 2aed55ed45..76884e8cd8 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.32 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.33 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -214,14 +214,14 @@ RemoveOperator(RemoveFuncStmt *stmt)
operOid = LookupOperNameTypeNames(NULL, operatorName,
typeName1, typeName2,
stmt->missing_ok, -1);
-
- if (stmt->missing_ok &&!OidIsValid(operOid) )
- {
- ereport(NOTICE,
- (errmsg("operator %s does not exist, skipping",
- NameListToString(operatorName))));
- return;
- }
+
+ if (stmt->missing_ok && !OidIsValid(operOid))
+ {
+ ereport(NOTICE,
+ (errmsg("operator %s does not exist, skipping",
+ NameListToString(operatorName))));
+ return;
+ }
tup = SearchSysCache(OPEROID,
ObjectIdGetDatum(operOid),
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 48833e7dc0..8907aac8a9 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.55 2006/09/07 22:52:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.56 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -108,8 +108,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params)
plan = copyObject(plan);
/*
- * XXX: debug_query_string is wrong here: the user might have
- * submitted multiple semicolon delimited queries.
+ * XXX: debug_query_string is wrong here: the user might have submitted
+ * multiple semicolon delimited queries.
*/
PortalDefineQuery(portal,
NULL,
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 46824a48e5..6b7c11a189 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.65 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.66 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -162,7 +162,7 @@ ExecuteQuery(ExecuteStmt *stmt, ParamListInfo params,
portal = CreateNewPortal();
/* Don't display the portal in pg_cursors, it is for internal use only */
portal->visible = false;
-
+
/*
* For CREATE TABLE / AS EXECUTE, make a copy of the stored query so that
* we can modify its destination (yech, but this has always been ugly).
@@ -251,7 +251,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) * sizeof(ParamExternData));
+ (nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = nargs;
forboth(le, exprstates, la, argtypes)
@@ -674,22 +674,21 @@ ExplainExecuteQuery(ExplainStmt *stmt, ParamListInfo params,
Datum
pg_prepared_statement(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- HASH_SEQ_STATUS *hash_seq;
- PreparedStatement *prep_stmt;
+ FuncCallContext *funcctx;
+ HASH_SEQ_STATUS *hash_seq;
+ PreparedStatement *prep_stmt;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- TupleDesc tupdesc;
- MemoryContext oldcontext;
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -704,9 +703,8 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
funcctx->user_fctx = NULL;
/*
- * build tupdesc for result tuples. This must match the
- * definition of the pg_prepared_statements view in
- * system_views.sql
+ * build tupdesc for result tuples. This must match the definition of
+ * the pg_prepared_statements view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(5, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
@@ -735,21 +733,21 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
prep_stmt = hash_seq_search(hash_seq);
if (prep_stmt)
{
- Datum result;
- HeapTuple tuple;
- Datum values[5];
- bool nulls[5];
+ Datum result;
+ HeapTuple tuple;
+ Datum values[5];
+ bool nulls[5];
MemSet(nulls, 0, sizeof(nulls));
values[0] = DirectFunctionCall1(textin,
- CStringGetDatum(prep_stmt->stmt_name));
+ CStringGetDatum(prep_stmt->stmt_name));
if (prep_stmt->query_string == NULL)
nulls[1] = true;
else
values[1] = DirectFunctionCall1(textin,
- CStringGetDatum(prep_stmt->query_string));
+ CStringGetDatum(prep_stmt->query_string));
values[2] = TimestampTzGetDatum(prep_stmt->prepare_time);
values[3] = build_regtype_array(prep_stmt->argtype_list);
@@ -783,8 +781,8 @@ build_regtype_array(List *oid_list)
i = 0;
foreach(lc, oid_list)
{
- Oid oid;
- Datum oid_str;
+ Oid oid;
+ Datum oid_str;
oid = lfirst_oid(lc);
oid_str = DirectFunctionCall1(oidout, ObjectIdGetDatum(oid));
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index f91242470a..7906f587b7 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.68 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.69 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -395,15 +395,15 @@ DropProceduralLanguage(DropPLangStmt *stmt)
0, 0, 0);
if (!HeapTupleIsValid(langTup))
{
- if (! stmt->missing_ok)
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("language \"%s\" does not exist", languageName)));
- else
+ else
ereport(NOTICE,
- (errmsg("language \"%s\" does not exist, skipping",
+ (errmsg("language \"%s\" does not exist, skipping",
languageName)));
-
+
return;
}
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 865c2f60fe..9d769cb052 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.139 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.140 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -85,7 +85,7 @@ static Relation open_share_lock(SeqTable seq);
static void init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(SeqTable elm, Relation rel, Buffer *buf);
static void init_params(List *options, bool isInit,
- Form_pg_sequence new, List **owned_by);
+ Form_pg_sequence new, List **owned_by);
static void do_setval(Oid relid, int64 next, bool iscalled);
static void process_owned_by(Relation seqrel, List *owned_by);
@@ -862,7 +862,7 @@ open_share_lock(SeqTable seq)
static void
init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
{
- SeqTable elm;
+ SeqTable elm;
Relation seqrel;
/* Look to see if we already have a seqtable entry for relation */
@@ -1180,7 +1180,7 @@ process_owned_by(Relation seqrel, List *owned_by)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid OWNED BY option"),
- errhint("Specify OWNED BY table.column or OWNED BY NONE.")));
+ errhint("Specify OWNED BY table.column or OWNED BY NONE.")));
tablerel = NULL;
attnum = 0;
}
@@ -1209,7 +1209,7 @@ process_owned_by(Relation seqrel, List *owned_by)
if (seqrel->rd_rel->relowner != tablerel->rd_rel->relowner)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("sequence must have same owner as table it is owned by")));
+ errmsg("sequence must have same owner as table it is owned by")));
if (RelationGetNamespace(seqrel) != RelationGetNamespace(tablerel))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -1225,8 +1225,8 @@ process_owned_by(Relation seqrel, List *owned_by)
}
/*
- * OK, we are ready to update pg_depend. First remove any existing
- * AUTO dependencies for the sequence, then optionally add a new one.
+ * OK, we are ready to update pg_depend. First remove any existing AUTO
+ * dependencies for the sequence, then optionally add a new one.
*/
markSequenceUnowned(RelationGetRelid(seqrel));
@@ -1304,5 +1304,5 @@ seq_desc(StringInfo buf, uint8 xl_info, char *rec)
}
appendStringInfo(buf, "rel %u/%u/%u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
}
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 45167b816a..04c2a03aa8 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.202 2006/09/04 21:15:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.203 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -166,7 +166,7 @@ static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
static bool change_varattnos_walker(Node *node, const AttrNumber *newattno);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void StoreCatalogInheritance1(Oid relationId, Oid parentOid,
- int16 seqNumber, Relation catalogRelation);
+ int16 seqNumber, Relation catalogRelation);
static int findAttrByName(const char *attributeName, List *schema);
static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static void AlterIndexNamespaces(Relation classRel, Relation rel,
@@ -566,18 +566,18 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * In CASCADE mode, suck in all referencing relations as well. This
- * requires multiple iterations to find indirectly-dependent relations.
- * At each phase, we need to exclusive-lock new rels before looking
- * for their dependencies, else we might miss something. Also, we
- * check each rel as soon as we open it, to avoid a faux pas such as
- * holding lock for a long time on a rel we have no permissions for.
+ * In CASCADE mode, suck in all referencing relations as well. This
+ * requires multiple iterations to find indirectly-dependent relations. At
+ * each phase, we need to exclusive-lock new rels before looking for their
+ * dependencies, else we might miss something. Also, we check each rel as
+ * soon as we open it, to avoid a faux pas such as holding lock for a long
+ * time on a rel we have no permissions for.
*/
if (stmt->behavior == DROP_CASCADE)
{
for (;;)
{
- List *newrelids;
+ List *newrelids;
newrelids = heap_truncate_find_FKs(relids);
if (newrelids == NIL)
@@ -585,7 +585,7 @@ ExecuteTruncate(TruncateStmt *stmt)
foreach(cell, newrelids)
{
- Oid relid = lfirst_oid(cell);
+ Oid relid = lfirst_oid(cell);
Relation rel;
rel = heap_open(relid, AccessExclusiveLock);
@@ -601,8 +601,8 @@ ExecuteTruncate(TruncateStmt *stmt)
/*
* Check foreign key references. In CASCADE mode, this should be
- * unnecessary since we just pulled in all the references; but as
- * a cross-check, do it anyway if in an Assert-enabled build.
+ * unnecessary since we just pulled in all the references; but as a
+ * cross-check, do it anyway if in an Assert-enabled build.
*/
#ifdef USE_ASSERT_CHECKING
heap_truncate_check_FKs(rels, false);
@@ -612,9 +612,9 @@ ExecuteTruncate(TruncateStmt *stmt)
#endif
/*
- * Also check for pending AFTER trigger events on the target relations.
- * We can't just leave those be, since they will try to fetch tuples
- * that the TRUNCATE removes.
+ * Also check for pending AFTER trigger events on the target relations. We
+ * can't just leave those be, since they will try to fetch tuples that the
+ * TRUNCATE removes.
*/
AfterTriggerCheckTruncate(relids);
@@ -657,7 +657,7 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
- * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
+ * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
*/
static void
truncate_check_rel(Relation rel)
@@ -681,9 +681,8 @@ truncate_check_rel(Relation rel)
RelationGetRelationName(rel))));
/*
- * We can never allow truncation of shared or nailed-in-cache
- * relations, because we can't support changing their relfilenode
- * values.
+ * We can never allow truncation of shared or nailed-in-cache relations,
+ * because we can't support changing their relfilenode values.
*/
if (rel->rd_rel->relisshared || rel->rd_isnailed)
ereport(ERROR,
@@ -692,13 +691,13 @@ truncate_check_rel(Relation rel)
RelationGetRelationName(rel))));
/*
- * Don't allow truncate on temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow truncate on temp tables of other backends ... their local
+ * buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temporary tables of other sessions")));
+ errmsg("cannot truncate temporary tables of other sessions")));
}
/*----------
@@ -1141,16 +1140,20 @@ change_varattnos_of_a_node(Node *node, const AttrNumber *newattno)
AttrNumber *
varattnos_map(TupleDesc old, TupleDesc new)
{
- int i,j;
- AttrNumber *attmap = palloc0(sizeof(AttrNumber)*old->natts);
- for (i=1; i <= old->natts; i++) {
- if (old->attrs[i-1]->attisdropped) {
- attmap[i-1] = 0;
+ int i,
+ j;
+ AttrNumber *attmap = palloc0(sizeof(AttrNumber) * old->natts);
+
+ for (i = 1; i <= old->natts; i++)
+ {
+ if (old->attrs[i - 1]->attisdropped)
+ {
+ attmap[i - 1] = 0;
continue;
}
- for (j=1; j<= new->natts; j++)
- if (!strcmp(NameStr(old->attrs[i-1]->attname), NameStr(new->attrs[j-1]->attname)))
- attmap[i-1] = j;
+ for (j = 1; j <= new->natts; j++)
+ if (!strcmp(NameStr(old->attrs[i - 1]->attname), NameStr(new->attrs[j - 1]->attname)))
+ attmap[i - 1] = j;
}
return attmap;
}
@@ -1160,16 +1163,19 @@ varattnos_map(TupleDesc old, TupleDesc new)
* ColumnDefs
*/
AttrNumber *
-varattnos_map_schema(TupleDesc old, List *schema)
+varattnos_map_schema(TupleDesc old, List *schema)
{
- int i;
- AttrNumber *attmap = palloc0(sizeof(AttrNumber)*old->natts);
- for (i=1; i <= old->natts; i++) {
- if (old->attrs[i-1]->attisdropped) {
- attmap[i-1] = 0;
+ int i;
+ AttrNumber *attmap = palloc0(sizeof(AttrNumber) * old->natts);
+
+ for (i = 1; i <= old->natts; i++)
+ {
+ if (old->attrs[i - 1]->attisdropped)
+ {
+ attmap[i - 1] = 0;
continue;
}
- attmap[i-1] = findAttrByName(NameStr(old->attrs[i-1]->attname), schema);
+ attmap[i - 1] = findAttrByName(NameStr(old->attrs[i - 1]->attname), schema);
}
return attmap;
}
@@ -1244,14 +1250,14 @@ StoreCatalogInheritance(Oid relationId, List *supers)
static void
StoreCatalogInheritance1(Oid relationId, Oid parentOid,
- int16 seqNumber, Relation relation)
+ int16 seqNumber, Relation relation)
{
- Datum datum[Natts_pg_inherits];
- char nullarr[Natts_pg_inherits];
- ObjectAddress childobject,
- parentobject;
- HeapTuple tuple;
- TupleDesc desc = RelationGetDescr(relation);
+ Datum datum[Natts_pg_inherits];
+ char nullarr[Natts_pg_inherits];
+ ObjectAddress childobject,
+ parentobject;
+ HeapTuple tuple;
+ TupleDesc desc = RelationGetDescr(relation);
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */
@@ -2100,8 +2106,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
ATPrepSetTableSpace(tab, rel, cmd->name);
pass = AT_PASS_MISC; /* doesn't actually matter */
break;
- case AT_SetRelOptions: /* SET (...) */
- case AT_ResetRelOptions: /* RESET (...) */
+ case AT_SetRelOptions: /* SET (...) */
+ case AT_ResetRelOptions: /* RESET (...) */
ATSimplePermissionsRelationOrIndex(rel);
/* This command never recurses */
/* No command-specific prep needed */
@@ -2274,10 +2280,10 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
* Nothing to do here; Phase 3 does the work
*/
break;
- case AT_SetRelOptions: /* SET (...) */
+ case AT_SetRelOptions: /* SET (...) */
ATExecSetRelOptions(rel, (List *) cmd->def, false);
break;
- case AT_ResetRelOptions: /* RESET (...) */
+ case AT_ResetRelOptions: /* RESET (...) */
ATExecSetRelOptions(rel, (List *) cmd->def, true);
break;
case AT_EnableTrig: /* ENABLE TRIGGER name */
@@ -2564,8 +2570,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
/*
* If we are rebuilding the tuples OR if we added any new NOT NULL
* constraints, check all not-null constraints. This is a bit of
- * overkill but it minimizes risk of bugs, and heap_attisnull is
- * a pretty cheap test anyway.
+ * overkill but it minimizes risk of bugs, and heap_attisnull is a
+ * pretty cheap test anyway.
*/
for (i = 0; i < newTupDesc->natts; i++)
{
@@ -2679,13 +2685,13 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
foreach(l, notnull_attrs)
{
- int attn = lfirst_int(l);
+ int attn = lfirst_int(l);
- if (heap_attisnull(tuple, attn+1))
+ if (heap_attisnull(tuple, attn + 1))
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" contains null values",
- NameStr(newTupDesc->attrs[attn]->attname))));
+ NameStr(newTupDesc->attrs[attn]->attname))));
}
foreach(l, tab->constraints)
@@ -5105,7 +5111,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
if (!list_member_oid(tab->changedConstraintOids,
foundObject.objectId))
{
- char *defstring = pg_get_constraintdef_string(foundObject.objectId);
+ char *defstring = pg_get_constraintdef_string(foundObject.objectId);
/*
* Put NORMAL dependencies at the front of the list and
@@ -5302,10 +5308,10 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab)
/*
* Now we can drop the existing constraints and indexes --- constraints
* first, since some of them might depend on the indexes. In fact, we
- * have to delete FOREIGN KEY constraints before UNIQUE constraints,
- * but we already ordered the constraint list to ensure that would happen.
- * It should be okay to use DROP_RESTRICT here, since nothing else should
- * be depending on these objects.
+ * have to delete FOREIGN KEY constraints before UNIQUE constraints, but
+ * we already ordered the constraint list to ensure that would happen. It
+ * should be okay to use DROP_RESTRICT here, since nothing else should be
+ * depending on these objects.
*/
foreach(l, tab->changedConstraintOids)
{
@@ -5482,17 +5488,17 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing)
tuple_class->relowner != newOwnerId)
{
/* if it's an owned sequence, disallow changing it by itself */
- Oid tableId;
- int32 colId;
+ Oid tableId;
+ int32 colId;
if (sequenceIsOwned(relationOid, &tableId, &colId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot change owner of sequence \"%s\"",
NameStr(tuple_class->relname)),
- errdetail("Sequence \"%s\" is linked to table \"%s\".",
- NameStr(tuple_class->relname),
- get_rel_name(tableId))));
+ errdetail("Sequence \"%s\" is linked to table \"%s\".",
+ NameStr(tuple_class->relname),
+ get_rel_name(tableId))));
}
break;
case RELKIND_TOASTVALUE:
@@ -6051,12 +6057,12 @@ ATExecEnableDisableTrigger(Relation rel, char *trigname,
}
static char *
-decompile_conbin(HeapTuple contup, TupleDesc tupdesc)
+decompile_conbin(HeapTuple contup, TupleDesc tupdesc)
{
- Form_pg_constraint con;
- bool isnull;
- Datum attr;
- Datum expr;
+ Form_pg_constraint con;
+ bool isnull;
+ Datum attr;
+ Datum expr;
con = (Form_pg_constraint) GETSTRUCT(contup);
attr = heap_getattr(contup, Anum_pg_constraint_conbin, tupdesc, &isnull);
@@ -6107,7 +6113,7 @@ ATExecAddInherits(Relation child_rel, RangeVar *parent)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs",
- RelationGetRelationName(child_rel), parent->relname)));
+ RelationGetRelationName(child_rel), parent->relname)));
/*
* Don't allow any duplicates in the list of parents. We scan through the
@@ -6140,8 +6146,8 @@ ATExecAddInherits(Relation child_rel, RangeVar *parent)
heap_close(catalogRelation, RowExclusiveLock);
/*
- * If the new parent is found in our list of inheritors, we have a circular
- * structure
+ * If the new parent is found in our list of inheritors, we have a
+ * circular structure
*/
children = find_all_inheritors(RelationGetRelid(child_rel));
@@ -6183,12 +6189,12 @@ ATExecAddInherits(Relation child_rel, RangeVar *parent)
static void
MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
{
- Relation attrdesc;
- AttrNumber parent_attno;
- int parent_natts;
- TupleDesc tupleDesc;
+ Relation attrdesc;
+ AttrNumber parent_attno;
+ int parent_natts;
+ TupleDesc tupleDesc;
TupleConstr *constr;
- HeapTuple tuple;
+ HeapTuple tuple;
tupleDesc = RelationGetDescr(parent_rel);
parent_natts = tupleDesc->natts;
@@ -6221,13 +6227,13 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(child_rel), NameStr(attribute->attname))));
+ RelationGetRelationName(child_rel), NameStr(attribute->attname))));
if (attribute->attnotnull && !childatt->attnotnull)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" in child table must be NOT NULL",
- NameStr(attribute->attname))));
+ errmsg("column \"%s\" in child table must be NOT NULL",
+ NameStr(attribute->attname))));
childatt->attinhcount++;
simple_heap_update(attrdesc, &tuple->t_self, tuple);
@@ -6555,13 +6561,13 @@ AlterTableNamespace(RangeVar *relation, const char *newschema)
/* if it's an owned sequence, disallow moving it by itself */
if (rel->rd_rel->relkind == RELKIND_SEQUENCE)
{
- Oid tableId;
- int32 colId;
+ Oid tableId;
+ int32 colId;
if (sequenceIsOwned(relid, &tableId, &colId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move an owned sequence into another schema"),
+ errmsg("cannot move an owned sequence into another schema"),
errdetail("Sequence \"%s\" is linked to table \"%s\".",
RelationGetRelationName(rel),
get_rel_name(tableId))));
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index e53ae3b61b..50d5e7d84e 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.38 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.39 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -402,7 +402,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
if (!HeapTupleIsValid(tuple))
{
- if ( ! stmt->missing_ok )
+ if (!stmt->missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -481,8 +481,8 @@ DropTableSpace(DropTableSpaceStmt *stmt)
}
/*
- * Note: because we checked that the tablespace was empty, there should
- * be no need to worry about flushing shared buffers or free space map
+ * Note: because we checked that the tablespace was empty, there should be
+ * no need to worry about flushing shared buffers or free space map
* entries for relations in the tablespace.
*/
@@ -1069,7 +1069,7 @@ tblspc_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_tblspc_create_rec *xlrec = (xl_tblspc_create_rec *) rec;
appendStringInfo(buf, "create ts: %u \"%s\"",
- xlrec->ts_id, xlrec->ts_path);
+ xlrec->ts_id, xlrec->ts_path);
}
else if (info == XLOG_TBLSPC_DROP)
{
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 6d63356e10..1ed15614ce 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.208 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.209 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -480,7 +480,7 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior,
if (!HeapTupleIsValid(tup))
{
- if (! missing_ok)
+ if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("trigger \"%s\" for table \"%s\" does not exist",
@@ -856,8 +856,8 @@ RelationBuildTriggers(Relation relation)
/*
* Note: since we scan the triggers using TriggerRelidNameIndexId, we will
* be reading the triggers in name order, except possibly during
- * emergency-recovery operations (ie, IgnoreSystemIndexes). This in
- * turn ensures that triggers will be fired in name order.
+ * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
+ * ensures that triggers will be fired in name order.
*/
ScanKeyInit(&skey,
Anum_pg_trigger_tgrelid,
@@ -2940,26 +2940,30 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
- constraint->catalogname, constraint->schemaname,
+ constraint->catalogname, constraint->schemaname,
constraint->relname)));
}
- /*
+ /*
* If we're given the schema name with the constraint, look only
- * in that schema. If given a bare constraint name, use the
+ * in that schema. If given a bare constraint name, use the
* search path to find the first matching constraint.
*/
- if (constraint->schemaname) {
- Oid namespaceId = LookupExplicitNamespace(constraint->schemaname);
+ if (constraint->schemaname)
+ {
+ Oid namespaceId = LookupExplicitNamespace(constraint->schemaname);
+
namespaceSearchList = list_make1_oid(namespaceId);
- } else {
+ }
+ else
+ {
namespaceSearchList = fetch_search_path(true);
}
found = false;
foreach(namespaceSearchCell, namespaceSearchList)
{
- Oid searchNamespaceId = lfirst_oid(namespaceSearchCell);
+ Oid searchNamespaceId = lfirst_oid(namespaceSearchCell);
/*
* Setup to scan pg_trigger by tgconstrname ...
@@ -2978,18 +2982,18 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
{
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
- Oid constraintNamespaceId;
+ Oid constraintNamespaceId;
/*
* Foreign key constraints have triggers on both the
- * parent and child tables. Since these tables may be
- * in different schemas we must pick the child table
- * because that table "owns" the constraint.
+ * parent and child tables. Since these tables may be in
+ * different schemas we must pick the child table because
+ * that table "owns" the constraint.
*
* Referential triggers on the parent table other than
- * NOACTION_DEL and NOACTION_UPD are ignored below, so
- * it is possible to not check them here, but it seems
- * safer to always check.
+ * NOACTION_DEL and NOACTION_UPD are ignored below, so it
+ * is possible to not check them here, but it seems safer
+ * to always check.
*/
if (pg_trigger->tgfoid == F_RI_FKEY_NOACTION_DEL ||
pg_trigger->tgfoid == F_RI_FKEY_NOACTION_UPD ||
@@ -3006,16 +3010,16 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
constraintNamespaceId = get_rel_namespace(pg_trigger->tgrelid);
/*
- * If this constraint is not in the schema we're
- * currently searching for, keep looking.
+ * If this constraint is not in the schema we're currently
+ * searching for, keep looking.
*/
if (constraintNamespaceId != searchNamespaceId)
continue;
/*
- * If we found some, check that they fit the deferrability but
- * skip referential action ones, since they are silently never
- * deferrable.
+ * If we found some, check that they fit the deferrability
+ * but skip referential action ones, since they are
+ * silently never deferrable.
*/
if (pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL &&
@@ -3029,8 +3033,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
if (stmt->deferred && !pg_trigger->tgdeferrable)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("constraint \"%s\" is not deferrable",
- constraint->relname)));
+ errmsg("constraint \"%s\" is not deferrable",
+ constraint->relname)));
oidlist = lappend_oid(oidlist, HeapTupleGetOid(htup));
}
found = true;
@@ -3147,9 +3151,9 @@ AfterTriggerCheckTruncate(List *relids)
event = event->ate_next)
{
/*
- * We can ignore completed events. (Even if a DONE flag is rolled
- * back by subxact abort, it's OK because the effects of the
- * TRUNCATE must get rolled back too.)
+ * We can ignore completed events. (Even if a DONE flag is rolled
+ * back by subxact abort, it's OK because the effects of the TRUNCATE
+ * must get rolled back too.)
*/
if (event->ate_event & AFTER_TRIGGER_DONE)
continue;
@@ -3162,10 +3166,9 @@ AfterTriggerCheckTruncate(List *relids)
}
/*
- * Also scan events queued by incomplete queries. This could only
- * matter if a TRUNCATE is executed by a function or trigger within
- * an updating query on the same relation, which is pretty perverse,
- * but let's check.
+ * Also scan events queued by incomplete queries. This could only matter
+ * if a TRUNCATE is executed by a function or trigger within an updating
+ * query on the same relation, which is pretty perverse, but let's check.
*/
for (depth = 0; depth <= afterTriggers->query_depth; depth++)
{
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 6dfa6296d5..53cca73a9d 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.96 2006/07/31 20:09:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.97 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -343,12 +343,12 @@ DefineType(List *names, List *parameters)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
/*
- * Check permissions on functions. We choose to require the creator/owner
- * of a type to also own the underlying functions. Since creating a type
+ * Check permissions on functions. We choose to require the creator/owner
+ * of a type to also own the underlying functions. Since creating a type
* is tantamount to granting public execute access on the functions, the
- * minimum sane check would be for execute-with-grant-option. But we don't
- * have a way to make the type go away if the grant option is revoked, so
- * ownership seems better.
+ * minimum sane check would be for execute-with-grant-option. But we
+ * don't have a way to make the type go away if the grant option is
+ * revoked, so ownership seems better.
*/
if (inputOid && !pg_proc_ownercheck(inputOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
@@ -587,8 +587,8 @@ DefineDomain(CreateDomainStmt *stmt)
/*
* Base type must be a plain base type or another domain. Domains over
- * pseudotypes would create a security hole. Domains over composite
- * types might be made to work in the future, but not today.
+ * pseudotypes would create a security hole. Domains over composite types
+ * might be made to work in the future, but not today.
*/
typtype = baseType->typtype;
if (typtype != 'b' && typtype != 'd')
@@ -1840,7 +1840,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in check constraint")));
+ errmsg("cannot use aggregate function in check constraint")));
/*
* Convert to string form for storage.
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 589111713b..2e9b27a598 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.173 2006/07/13 16:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.174 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -377,8 +377,8 @@ CreateRole(CreateRoleStmt *stmt)
GetUserId(), false);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
@@ -696,8 +696,8 @@ AlterRole(AlterRoleStmt *stmt)
false);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
@@ -845,7 +845,7 @@ DropRole(DropRoleStmt *stmt)
else
{
ereport(NOTICE,
- (errmsg("role \"%s\" does not exist, skipping",
+ (errmsg("role \"%s\" does not exist, skipping",
role)));
}
@@ -1075,8 +1075,8 @@ RenameRole(const char *oldname, const char *newname)
ReleaseSysCache(oldtuple);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(rel, NoLock);
@@ -1132,8 +1132,8 @@ GrantRole(GrantRoleStmt *stmt)
}
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
@@ -1149,7 +1149,7 @@ GrantRole(GrantRoleStmt *stmt)
* Drop the objects owned by a given list of roles.
*/
void
-DropOwnedObjects(DropOwnedStmt * stmt)
+DropOwnedObjects(DropOwnedStmt *stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
@@ -1175,7 +1175,7 @@ DropOwnedObjects(DropOwnedStmt * stmt)
* Give the objects owned by a given list of roles away to another user.
*/
void
-ReassignOwnedObjects(ReassignOwnedStmt * stmt)
+ReassignOwnedObjects(ReassignOwnedStmt *stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
@@ -1360,8 +1360,8 @@ AddRoleMems(const char *rolename, Oid roleid,
}
/*
- * Close pg_authmem, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authmem, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authmem_rel, NoLock);
}
@@ -1473,8 +1473,8 @@ DelRoleMems(const char *rolename, Oid roleid,
}
/*
- * Close pg_authmem, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authmem, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authmem_rel, NoLock);
}
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 5a6d5a04b0..e9f0bf363e 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.340 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.341 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,7 +125,7 @@ typedef struct VRelStats
Size min_tlen;
Size max_tlen;
bool hasindex;
- TransactionId minxid; /* Minimum Xid present anywhere on table */
+ TransactionId minxid; /* Minimum Xid present anywhere on table */
/* vtlinks array for tuple chain following - sorted by new_tid */
int num_vtlinks;
VTupleLink vtlinks;
@@ -238,7 +238,7 @@ static int vac_cmp_blk(const void *left, const void *right);
static int vac_cmp_offno(const void *left, const void *right);
static int vac_cmp_vtlinks(const void *left, const void *right);
static bool enough_space(VacPage vacpage, Size len);
-static Size PageGetFreeSpaceWithFillFactor(Relation relation, Page page);
+static Size PageGetFreeSpaceWithFillFactor(Relation relation, Page page);
/****************************************************************************
@@ -320,8 +320,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
errhint("Use VACUUM FULL, then VACUUM FREEZE.")));
/*
- * Send info about dead objects to the statistics collector, unless
- * we are in autovacuum --- autovacuum.c does this for itself.
+ * Send info about dead objects to the statistics collector, unless we are
+ * in autovacuum --- autovacuum.c does this for itself.
*/
if (vacstmt->vacuum && !IsAutoVacuumProcess())
pgstat_vacuum_tabstat();
@@ -481,20 +481,21 @@ vacuum(VacuumStmt *vacstmt, List *relids)
* PostgresMain().
*/
StartTransactionCommand();
+
/*
- * Re-establish the transaction snapshot. This is wasted effort
- * when we are called as a normal utility command, because the
- * new transaction will be dropped immediately by PostgresMain();
- * but it's necessary if we are called from autovacuum because
- * autovacuum might continue on to do an ANALYZE-only call.
+ * Re-establish the transaction snapshot. This is wasted effort when
+ * we are called as a normal utility command, because the new
+ * transaction will be dropped immediately by PostgresMain(); but it's
+ * necessary if we are called from autovacuum because autovacuum might
+ * continue on to do an ANALYZE-only call.
*/
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
}
if (vacstmt->vacuum)
{
- TransactionId minxid,
- vacuumxid;
+ TransactionId minxid,
+ vacuumxid;
/*
* If it was a database-wide VACUUM, print FSM usage statistics (we
@@ -593,9 +594,9 @@ vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
TransactionId limit;
/*
- * We can always ignore processes running lazy vacuum. This is because we
+ * We can always ignore processes running lazy vacuum. This is because we
* use these values only for deciding which tuples we must keep in the
- * tables. Since lazy vacuum doesn't write its xid to the table, it's
+ * tables. Since lazy vacuum doesn't write its xid to the table, it's
* safe to ignore it. In theory it could be problematic to ignore lazy
* vacuums on a full vacuum, but keep in mind that only one vacuum process
* can be working on a particular table at any time, and that each vacuum
@@ -704,6 +705,7 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
pgcform->relhasindex = hasindex;
dirty = true;
}
+
/*
* If we have discovered that there are no indexes, then there's no
* primary key either. This could be done more thoroughly...
@@ -740,11 +742,11 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
/*
* vac_update_dbminxid() -- update the minimum Xid present in one database
*
- * Update pg_database's datminxid and datvacuumxid, and the flat-file copy
- * of it. datminxid is updated to the minimum of all relminxid found in
- * pg_class. datvacuumxid is updated to the minimum of all relvacuumxid
- * found in pg_class. The values are also returned in minxid and
- * vacuumxid, respectively.
+ * Update pg_database's datminxid and datvacuumxid, and the flat-file copy
+ * of it. datminxid is updated to the minimum of all relminxid found in
+ * pg_class. datvacuumxid is updated to the minimum of all relvacuumxid
+ * found in pg_class. The values are also returned in minxid and
+ * vacuumxid, respectively.
*
* We violate transaction semantics here by overwriting the database's
* existing pg_database tuple with the new values. This is reasonably
@@ -760,15 +762,15 @@ vac_update_dbminxid(Oid dbid, TransactionId *minxid, TransactionId *vacuumxid)
HeapTuple tuple;
Form_pg_database dbform;
Relation relation;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple classTup;
- TransactionId newMinXid = InvalidTransactionId;
- TransactionId newVacXid = InvalidTransactionId;
+ TransactionId newMinXid = InvalidTransactionId;
+ TransactionId newVacXid = InvalidTransactionId;
bool dirty = false;
- /*
- * We must seqscan pg_class to find the minimum Xid, because there
- * is no index that can help us here.
+ /*
+ * We must seqscan pg_class to find the minimum Xid, because there is no
+ * index that can help us here.
*/
relation = heap_open(RelationRelationId, AccessShareLock);
@@ -845,7 +847,7 @@ vac_update_dbminxid(Oid dbid, TransactionId *minxid, TransactionId *vacuumxid)
*vacuumxid = newVacXid;
/* Mark the flat-file copy of pg_database for update at commit */
- database_file_update_needed();
+ database_file_update_needed();
}
@@ -970,14 +972,14 @@ vac_truncate_clog(TransactionId myminxid, TransactionId myvacxid)
* XXX -- the test we use here is fairly arbitrary. Note that in the
* autovacuum database-wide code, a template database is always processed
* with VACUUM FREEZE, so we can be sure that it will be truly frozen so
- * it won't be need to be processed here again soon.
+ * it won't be need to be processed here again soon.
*
* FIXME -- here we could get into a kind of loop if the database being
* chosen is not actually a template database, because we'll not freeze
* it, so its age may not really decrease if there are any live
* non-freezable tuples. Consider forcing a vacuum freeze if autovacuum
- * is invoked by a backend. On the other hand, forcing a vacuum freeze
- * on a user database may not a be a very polite thing to do.
+ * is invoked by a backend. On the other hand, forcing a vacuum freeze on
+ * a user database may not a be a very polite thing to do.
*/
if (!AutoVacuumingActive() && age > (int32) ((MaxTransactionId >> 3) * 3))
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC);
@@ -1022,18 +1024,18 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
else
{
/*
- * During a lazy VACUUM we do not run any user-supplied functions,
- * and so it should be safe to not create a transaction snapshot.
+ * During a lazy VACUUM we do not run any user-supplied functions, and
+ * so it should be safe to not create a transaction snapshot.
*
* We can furthermore set the inVacuum flag, which lets other
* concurrent VACUUMs know that they can ignore this one while
* determining their OldestXmin. (The reason we don't set inVacuum
* during a full VACUUM is exactly that we may have to run user-
- * defined functions for functional indexes, and we want to make
- * sure that if they use the snapshot set above, any tuples it
- * requires can't get removed from other tables. An index function
- * that depends on the contents of other tables is arguably broken,
- * but we won't break it here by violating transaction semantics.)
+ * defined functions for functional indexes, and we want to make sure
+ * that if they use the snapshot set above, any tuples it requires
+ * can't get removed from other tables. An index function that
+ * depends on the contents of other tables is arguably broken, but we
+ * won't break it here by violating transaction semantics.)
*
* Note: the inVacuum flag remains set until CommitTransaction or
* AbortTransaction. We don't want to clear it until we reset
@@ -1059,8 +1061,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/*
* Open the relation and get the appropriate lock on it.
*
- * There's a race condition here: the rel may have gone away since
- * the last time we saw it. If so, we don't need to vacuum it.
+ * There's a race condition here: the rel may have gone away since the
+ * last time we saw it. If so, we don't need to vacuum it.
*/
onerel = try_relation_open(relid, lmode);
@@ -1116,7 +1118,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
{
relation_close(onerel, lmode);
CommitTransactionCommand();
- return; /* assume no long-lived data in temp tables */
+ return; /* assume no long-lived data in temp tables */
}
/*
@@ -1207,7 +1209,7 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
i;
VRelStats *vacrelstats;
TransactionId FreezeLimit,
- OldestXmin;
+ OldestXmin;
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit);
@@ -1221,13 +1223,13 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
vacrelstats->hasindex = false;
/*
- * Set initial minimum Xid, which will be updated if a smaller Xid is found
- * in the relation by scan_heap.
+ * Set initial minimum Xid, which will be updated if a smaller Xid is
+ * found in the relation by scan_heap.
*
* We use RecentXmin here (the minimum Xid that belongs to a transaction
* that is still open according to our snapshot), because it is the
- * earliest transaction that could insert new tuples in the table after our
- * VACUUM is done.
+ * earliest transaction that could insert new tuples in the table after
+ * our VACUUM is done.
*/
vacrelstats->minxid = RecentXmin;
@@ -1557,7 +1559,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
}
else
{
- TransactionId min;
+ TransactionId min;
num_tuples += 1;
notup = false;
@@ -1566,7 +1568,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (tuple.t_len > max_tlen)
max_tlen = tuple.t_len;
- /*
+ /*
* If the tuple is alive, we consider it for the "minxid"
* calculations.
*/
@@ -1710,23 +1712,23 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
TransactionId
vactuple_get_minxid(HeapTuple tuple)
{
- TransactionId min = InvalidTransactionId;
+ TransactionId min = InvalidTransactionId;
- /*
- * Initialize calculations with Xmin. NB -- may be FrozenXid and
- * we don't want that one.
+ /*
+ * Initialize calculations with Xmin. NB -- may be FrozenXid and we don't
+ * want that one.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple->t_data)))
min = HeapTupleHeaderGetXmin(tuple->t_data);
/*
* If Xmax is not marked INVALID, we assume it's valid without making
- * further checks on it --- it must be recently obsoleted or still running,
- * else HeapTupleSatisfiesVacuum would have deemed it removable.
+ * further checks on it --- it must be recently obsoleted or still
+ * running, else HeapTupleSatisfiesVacuum would have deemed it removable.
*/
if (!(tuple->t_data->t_infomask | HEAP_XMAX_INVALID))
{
- TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data);
+ TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data);
/* If xmax is a plain Xid, consider it by itself */
if (!(tuple->t_data->t_infomask | HEAP_XMAX_IS_MULTI))
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index c839b951d9..c89dc20404 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -36,7 +36,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.79 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.80 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -90,7 +90,7 @@ typedef struct LVRelStats
int num_free_pages; /* current # of entries */
int max_free_pages; /* # slots allocated in array */
PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
- BlockNumber tot_free_pages; /* total pages with >= threshold space */
+ BlockNumber tot_free_pages; /* total pages with >= threshold space */
} LVRelStats;
@@ -103,15 +103,15 @@ static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
TransactionId OldestXmin);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static void lazy_vacuum_index(Relation indrel,
- IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats);
+ IndexBulkDeleteResult **stats,
+ LVRelStats *vacrelstats);
static void lazy_cleanup_index(Relation indrel,
- IndexBulkDeleteResult *stats,
- LVRelStats *vacrelstats);
+ IndexBulkDeleteResult *stats,
+ LVRelStats *vacrelstats);
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats,
- TransactionId OldestXmin);
+ TransactionId OldestXmin);
static BlockNumber count_nondeletable_pages(Relation onerel,
LVRelStats *vacrelstats, TransactionId OldestXmin);
static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
@@ -143,7 +143,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
int nindexes;
BlockNumber possibly_freeable;
TransactionId OldestXmin,
- FreezeLimit;
+ FreezeLimit;
if (vacstmt->verbose)
elevel = INFO;
@@ -160,8 +160,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
vacrelstats->threshold = GetAvgFSMRequestSize(&onerel->rd_node);
/*
- * Set initial minimum Xid, which will be updated if a smaller Xid is found
- * in the relation by lazy_scan_heap.
+ * Set initial minimum Xid, which will be updated if a smaller Xid is
+ * found in the relation by lazy_scan_heap.
*
* We use RecentXmin here (the minimum Xid that belongs to a transaction
* that is still open according to our snapshot), because it is the
@@ -440,7 +440,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
num_tuples += 1;
hastup = true;
- /*
+ /*
* If the tuple is alive, we consider it for the "minxid"
* calculations.
*/
@@ -472,8 +472,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record
* its post-compaction free space. If not, then we're done with this
- * page, so remember its free space as-is. (This path will always
- * be taken if there are no indexes.)
+ * page, so remember its free space as-is. (This path will always be
+ * taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
{
@@ -803,11 +803,12 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats,
}
}
vacrelstats->num_free_pages = j;
+
/*
* If tot_free_pages was more than num_free_pages, we can't tell for sure
* what its correct value is now, because we don't know which of the
- * forgotten pages are getting truncated. Conservatively set it equal
- * to num_free_pages.
+ * forgotten pages are getting truncated. Conservatively set it equal to
+ * num_free_pages.
*/
vacrelstats->tot_free_pages = j;
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 2e270cde56..c910f6376f 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.118 2006/07/14 14:52:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.119 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -585,7 +585,7 @@ assign_client_encoding(const char *value, bool doit, GucSource source)
* limit on names, so we can tell whether we're being passed an initial
* role name or a saved/restored value. (NOTE: we rely on guc.c to have
* properly truncated any incoming value, but not to truncate already-stored
- * values. See GUC_IS_NAME processing.)
+ * values. See GUC_IS_NAME processing.)
*/
extern char *session_authorization_string; /* in guc.c */
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index df7f479f31..a79bd3cd4f 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.97 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.98 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,7 +119,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
def->colname = pstrdup(tle->resname);
def->typename = makeTypeNameFromOid(exprType((Node *) tle->expr),
- exprTypmod((Node *) tle->expr));
+ exprTypmod((Node *) tle->expr));
def->inhcount = 0;
def->is_local = true;
def->is_not_null = false;
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 4b8a166ffa..5e5ca085ba 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.279 2006/08/12 20:05:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.280 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,17 +76,17 @@ static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
ScanDirection direction,
DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
- DestReceiver *dest, EState *estate);
+ DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
- TupleTableSlot *planSlot,
- DestReceiver *dest, EState *estate);
+ TupleTableSlot *planSlot,
+ DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
- TupleTableSlot *planSlot,
- DestReceiver *dest, EState *estate);
+ TupleTableSlot *planSlot,
+ DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
- TupleTableSlot *planSlot,
- DestReceiver *dest, EState *estate);
-static void ExecProcessReturning(ProjectionInfo *projectReturning,
+ TupleTableSlot *planSlot,
+ DestReceiver *dest, EState *estate);
+static void ExecProcessReturning(ProjectionInfo *projectReturning,
TupleTableSlot *tupleSlot,
TupleTableSlot *planSlot,
DestReceiver *dest);
@@ -758,32 +758,33 @@ InitPlan(QueryDesc *queryDesc, int eflags)
econtext = CreateExprContext(estate);
/*
- * Build a projection for each result rel. Note that any SubPlans
- * in the RETURNING lists get attached to the topmost plan node.
+ * Build a projection for each result rel. Note that any SubPlans in
+ * the RETURNING lists get attached to the topmost plan node.
*/
Assert(list_length(parseTree->returningLists) == estate->es_num_result_relations);
resultRelInfo = estate->es_result_relations;
foreach(l, parseTree->returningLists)
{
- List *rlist = (List *) lfirst(l);
- List *rliststate;
+ List *rlist = (List *) lfirst(l);
+ List *rliststate;
rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
resultRelInfo->ri_projectReturning =
ExecBuildProjectionInfo(rliststate, econtext, slot);
resultRelInfo++;
}
+
/*
- * Because we already ran ExecInitNode() for the top plan node,
- * any subplans we just attached to it won't have been initialized;
- * so we have to do it here. (Ugly, but the alternatives seem worse.)
+ * Because we already ran ExecInitNode() for the top plan node, any
+ * subplans we just attached to it won't have been initialized; so we
+ * have to do it here. (Ugly, but the alternatives seem worse.)
*/
foreach(l, planstate->subPlan)
{
SubPlanState *sstate = (SubPlanState *) lfirst(l);
Assert(IsA(sstate, SubPlanState));
- if (sstate->planstate == NULL) /* already inited? */
+ if (sstate->planstate == NULL) /* already inited? */
ExecInitSubPlan(sstate, estate, eflags);
}
}
@@ -1191,7 +1192,7 @@ lnext: ;
erm->rti,
&update_ctid,
update_xmax,
- estate->es_snapshot->curcid);
+ estate->es_snapshot->curcid);
if (!TupIsNull(newSlot))
{
slot = planSlot = newSlot;
@@ -1215,9 +1216,9 @@ lnext: ;
}
/*
- * Create a new "clean" tuple with all junk attributes removed.
- * We don't need to do this for DELETE, however (there will
- * in fact be no non-junk attributes in a DELETE!)
+ * Create a new "clean" tuple with all junk attributes removed. We
+ * don't need to do this for DELETE, however (there will in fact
+ * be no non-junk attributes in a DELETE!)
*/
if (operation != CMD_DELETE)
slot = ExecFilterJunk(junkfilter, slot);
@@ -1515,8 +1516,8 @@ ldelete:;
if (resultRelInfo->ri_projectReturning)
{
/*
- * We have to put the target tuple into a slot, which means
- * first we gotta fetch it. We can use the trigger tuple slot.
+ * We have to put the target tuple into a slot, which means first we
+ * gotta fetch it. We can use the trigger tuple slot.
*/
TupleTableSlot *slot = estate->es_trig_tuple_slot;
HeapTupleData deltuple;
@@ -1815,13 +1816,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
* dest: where to send the output
*/
static void
-ExecProcessReturning(ProjectionInfo *projectReturning,
+ExecProcessReturning(ProjectionInfo *projectReturning,
TupleTableSlot *tupleSlot,
TupleTableSlot *planSlot,
DestReceiver *dest)
{
- ExprContext *econtext = projectReturning->pi_exprContext;
- TupleTableSlot *retSlot;
+ ExprContext *econtext = projectReturning->pi_exprContext;
+ TupleTableSlot *retSlot;
/*
* Reset per-tuple memory context to free any expression evaluation
@@ -1942,12 +1943,12 @@ EvalPlanQual(EState *estate, Index rti,
* If tuple was inserted by our own transaction, we have to check
* cmin against curCid: cmin >= curCid means our command cannot
* see the tuple, so we should ignore it. Without this we are
- * open to the "Halloween problem" of indefinitely re-updating
- * the same tuple. (We need not check cmax because
- * HeapTupleSatisfiesDirty will consider a tuple deleted by
- * our transaction dead, regardless of cmax.) We just checked
- * that priorXmax == xmin, so we can test that variable instead
- * of doing HeapTupleHeaderGetXmin again.
+ * open to the "Halloween problem" of indefinitely re-updating the
+ * same tuple. (We need not check cmax because
+ * HeapTupleSatisfiesDirty will consider a tuple deleted by our
+ * transaction dead, regardless of cmax.) We just checked that
+ * priorXmax == xmin, so we can test that variable instead of
+ * doing HeapTupleHeaderGetXmin again.
*/
if (TransactionIdIsCurrentTransactionId(priorXmax) &&
HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
@@ -2379,7 +2380,8 @@ OpenIntoRel(QueryDesc *queryDesc)
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("tablespace \"%s\" does not exist",
parseTree->intoTableSpaceName)));
- } else
+ }
+ else
{
tablespaceId = GetDefaultTablespace();
/* note InvalidOid is OK in this case */
@@ -2426,15 +2428,15 @@ OpenIntoRel(QueryDesc *queryDesc)
FreeTupleDesc(tupdesc);
/*
- * Advance command counter so that the newly-created relation's
- * catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's catalog
+ * tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the INTO relation. Note that
- * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
- * that the TOAST table will be visible for insertion.
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
+ * the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId);
@@ -2449,11 +2451,11 @@ OpenIntoRel(QueryDesc *queryDesc)
/*
* We can skip WAL-logging the insertions, unless PITR is in use.
*
- * Note that for a non-temp INTO table, this is safe only because we
- * know that the catalog changes above will have been WAL-logged, and
- * so RecordTransactionCommit will think it needs to WAL-log the
- * eventual transaction commit. Else the commit might be lost, even
- * though all the data is safely fsync'd ...
+ * Note that for a non-temp INTO table, this is safe only because we know
+ * that the catalog changes above will have been WAL-logged, and so
+ * RecordTransactionCommit will think it needs to WAL-log the eventual
+ * transaction commit. Else the commit might be lost, even though all the
+ * data is safely fsync'd ...
*/
estate->es_into_relation_use_wal = XLogArchivingActive();
estate->es_into_relation_descriptor = intoRelationDesc;
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 9f1fd54a8e..cd13bc6597 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.58 2006/08/02 01:59:45 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.59 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -197,7 +197,7 @@ ExecInitNode(Plan *node, EState *estate, int eflags)
case T_ValuesScan:
result = (PlanState *) ExecInitValuesScan((ValuesScan *) node,
- estate, eflags);
+ estate, eflags);
break;
/*
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 7e9e51f139..3f9c090725 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.194 2006/09/28 20:51:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.195 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -108,8 +108,8 @@ static Datum ExecEvalRow(RowExprState *rstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalRowCompare(RowCompareExprState *rstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
@@ -882,7 +882,7 @@ get_cached_rowtype(Oid type_id, int32 typmod,
static void
ShutdownTupleDescRef(Datum arg)
{
- TupleDesc *cache_field = (TupleDesc *) DatumGetPointer(arg);
+ TupleDesc *cache_field = (TupleDesc *) DatumGetPointer(arg);
if (*cache_field)
ReleaseTupleDesc(*cache_field);
@@ -2015,8 +2015,8 @@ ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate,
/* if first time through, initialize */
if (cstate->attrMap == NULL)
{
- MemoryContext old_cxt;
- int n;
+ MemoryContext old_cxt;
+ int n;
/* allocate state in long-lived memory context */
old_cxt = MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
@@ -2502,7 +2502,7 @@ ExecEvalRowCompare(RowCompareExprState *rstate,
switch (rctype)
{
- /* EQ and NE cases aren't allowed here */
+ /* EQ and NE cases aren't allowed here */
case ROWCOMPARE_LT:
result = (cmpresult < 0);
break;
@@ -2722,7 +2722,7 @@ ExecEvalNullTest(NullTestState *nstate,
for (att = 1; att <= tupDesc->natts; att++)
{
/* ignore dropped columns */
- if (tupDesc->attrs[att-1]->attisdropped)
+ if (tupDesc->attrs[att - 1]->attisdropped)
continue;
if (heap_attisnull(&tmptup, att))
{
@@ -2764,7 +2764,7 @@ ExecEvalNullTest(NullTestState *nstate,
default:
elog(ERROR, "unrecognized nulltesttype: %d",
(int) ntest->nulltesttype);
- return (Datum) 0; /* keep compiler quiet */
+ return (Datum) 0; /* keep compiler quiet */
}
}
}
@@ -3564,16 +3564,17 @@ ExecInitExpr(Expr *node, PlanState *parent)
i = 0;
forboth(l, rcexpr->opnos, l2, rcexpr->opclasses)
{
- Oid opno = lfirst_oid(l);
- Oid opclass = lfirst_oid(l2);
- int strategy;
- Oid subtype;
- bool recheck;
- Oid proc;
+ Oid opno = lfirst_oid(l);
+ Oid opclass = lfirst_oid(l2);
+ int strategy;
+ Oid subtype;
+ bool recheck;
+ Oid proc;
get_op_opclass_properties(opno, opclass,
&strategy, &subtype, &recheck);
proc = get_opclass_proc(opclass, subtype, BTORDER_PROC);
+
/*
* If we enforced permissions checks on index support
* functions, we'd need to make a check here. But the
@@ -3632,11 +3633,12 @@ ExecInitExpr(Expr *node, PlanState *parent)
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("could not identify a comparison function for type %s",
format_type_be(minmaxexpr->minmaxtype))));
+
/*
* If we enforced permissions checks on index support
- * functions, we'd need to make a check here. But the
- * index support machinery doesn't do that, and neither
- * does this code.
+ * functions, we'd need to make a check here. But the index
+ * support machinery doesn't do that, and neither does this
+ * code.
*/
fmgr_info(typentry->cmp_proc, &(mstate->cfunc));
state = (ExprState *) mstate;
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 6e5f30bde4..fb1fdf9e72 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.97 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.98 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -722,7 +722,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot)
* Fetch the slot's minimal physical tuple.
*
* If the slot contains a virtual tuple, we convert it to minimal
- * physical form. The slot retains ownership of the physical tuple.
+ * physical form. The slot retains ownership of the physical tuple.
* Likewise, if it contains a regular tuple we convert to minimal form.
*
* As above, the result must be treated as read-only.
@@ -792,8 +792,8 @@ ExecMaterializeSlot(TupleTableSlot *slot)
Assert(!slot->tts_isempty);
/*
- * If we have a regular physical tuple, and it's locally palloc'd,
- * we have nothing to do.
+ * If we have a regular physical tuple, and it's locally palloc'd, we have
+ * nothing to do.
*/
if (slot->tts_tuple && slot->tts_shouldFree && slot->tts_mintuple == NULL)
return slot->tts_tuple;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index aa5aeb57f3..d8290b3b5b 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.139 2006/08/04 21:33:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.140 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -898,8 +898,8 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
resultRelInfo->ri_IndexRelationInfo = indexInfoArray;
/*
- * For each index, open the index relation and save pg_index info.
- * We acquire RowExclusiveLock, signifying we will update the index.
+ * For each index, open the index relation and save pg_index info. We
+ * acquire RowExclusiveLock, signifying we will update the index.
*/
i = 0;
foreach(l, indexoidlist)
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index e2e5654383..f9b8067109 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.106 2006/09/06 20:40:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.107 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -433,7 +433,7 @@ postquel_sub_params(SQLFunctionCachePtr fcache,
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) * sizeof(ParamExternData));
+ (nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = nargs;
for (i = 0; i < nargs; i++)
@@ -897,8 +897,8 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
parse = (Query *) lfirst(list_tail(queryTreeList));
/*
- * Note: eventually replace this with QueryReturnsTuples? We'd need
- * a more general method of determining the output type, though.
+ * Note: eventually replace this with QueryReturnsTuples? We'd need a
+ * more general method of determining the output type, though.
*/
isSelect = (parse->commandType == CMD_SELECT && parse->into == NULL);
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 19410997b2..44609bb40b 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -61,7 +61,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.145 2006/07/27 19:52:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.146 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -331,16 +331,16 @@ advance_transition_function(AggState *aggstate,
AggStatePerGroup pergroupstate,
FunctionCallInfoData *fcinfo)
{
- int numArguments = peraggstate->numArguments;
+ int numArguments = peraggstate->numArguments;
MemoryContext oldContext;
- Datum newVal;
- int i;
+ Datum newVal;
+ int i;
if (peraggstate->transfn.fn_strict)
{
/*
- * For a strict transfn, nothing happens when there's a NULL input;
- * we just keep the prior transValue.
+ * For a strict transfn, nothing happens when there's a NULL input; we
+ * just keep the prior transValue.
*/
for (i = 1; i <= numArguments; i++)
{
@@ -434,14 +434,14 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
{
- AggStatePerAgg peraggstate = &aggstate->peragg[aggno];
- AggStatePerGroup pergroupstate = &pergroup[aggno];
- AggrefExprState *aggrefstate = peraggstate->aggrefstate;
- Aggref *aggref = peraggstate->aggref;
+ AggStatePerAgg peraggstate = &aggstate->peragg[aggno];
+ AggStatePerGroup pergroupstate = &pergroup[aggno];
+ AggrefExprState *aggrefstate = peraggstate->aggrefstate;
+ Aggref *aggref = peraggstate->aggref;
FunctionCallInfoData fcinfo;
- int i;
- ListCell *arg;
- MemoryContext oldContext;
+ int i;
+ ListCell *arg;
+ MemoryContext oldContext;
/* Switch memory context just once for all args */
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
@@ -495,8 +495,8 @@ process_sorted_aggregate(AggState *aggstate,
bool haveOldVal = false;
MemoryContext workcontext = aggstate->tmpcontext->ecxt_per_tuple_memory;
MemoryContext oldContext;
- Datum *newVal;
- bool *isNull;
+ Datum *newVal;
+ bool *isNull;
FunctionCallInfoData fcinfo;
tuplesort_performsort(peraggstate->sortstate);
@@ -624,7 +624,7 @@ static Bitmapset *
find_unaggregated_cols(AggState *aggstate)
{
Agg *node = (Agg *) aggstate->ss.ps.plan;
- Bitmapset *colnos;
+ Bitmapset *colnos;
colnos = NULL;
(void) find_unaggregated_cols_walker((Node *) node->plan.targetlist,
@@ -686,20 +686,20 @@ build_hash_table(AggState *aggstate)
tmpmem);
/*
- * Create a list of the tuple columns that actually need to be stored
- * in hashtable entries. The incoming tuples from the child plan node
- * will contain grouping columns, other columns referenced in our
- * targetlist and qual, columns used to compute the aggregate functions,
- * and perhaps just junk columns we don't use at all. Only columns of the
- * first two types need to be stored in the hashtable, and getting rid of
- * the others can make the table entries significantly smaller. To avoid
- * messing up Var numbering, we keep the same tuple descriptor for
- * hashtable entries as the incoming tuples have, but set unwanted columns
- * to NULL in the tuples that go into the table.
+ * Create a list of the tuple columns that actually need to be stored in
+ * hashtable entries. The incoming tuples from the child plan node will
+ * contain grouping columns, other columns referenced in our targetlist
+ * and qual, columns used to compute the aggregate functions, and perhaps
+ * just junk columns we don't use at all. Only columns of the first two
+ * types need to be stored in the hashtable, and getting rid of the others
+ * can make the table entries significantly smaller. To avoid messing up
+ * Var numbering, we keep the same tuple descriptor for hashtable entries
+ * as the incoming tuples have, but set unwanted columns to NULL in the
+ * tuples that go into the table.
*
* To eliminate duplicates, we build a bitmapset of the needed columns,
- * then convert it to an integer list (cheaper to scan at runtime).
- * The list is in decreasing order so that the first entry is the largest;
+ * then convert it to an integer list (cheaper to scan at runtime). The
+ * list is in decreasing order so that the first entry is the largest;
* lookup_hash_entry depends on this to use slot_getsomeattrs correctly.
*
* Note: at present, searching the tlist/qual is not really necessary
@@ -767,7 +767,7 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot)
slot_getsomeattrs(inputslot, linitial_int(aggstate->hash_needed));
foreach(l, aggstate->hash_needed)
{
- int varNumber = lfirst_int(l) - 1;
+ int varNumber = lfirst_int(l) - 1;
hashslot->tts_values[varNumber] = inputslot->tts_values[varNumber];
hashslot->tts_isnull[varNumber] = inputslot->tts_isnull[varNumber];
@@ -968,10 +968,10 @@ agg_retrieve_direct(AggState *aggstate)
/*
* Use the representative input tuple for any references to
- * non-aggregated input columns in the qual and tlist. (If we are
- * not grouping, and there are no input rows at all, we will come
- * here with an empty firstSlot ... but if not grouping, there can't
- * be any references to non-aggregated input columns, so no problem.)
+ * non-aggregated input columns in the qual and tlist. (If we are not
+ * grouping, and there are no input rows at all, we will come here
+ * with an empty firstSlot ... but if not grouping, there can't be any
+ * references to non-aggregated input columns, so no problem.)
*/
econtext->ecxt_scantuple = firstSlot;
@@ -1226,8 +1226,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/*
* initialize child nodes
*
- * If we are doing a hashed aggregation then the child plan does not
- * need to handle REWIND efficiently; see ExecReScanAgg.
+ * If we are doing a hashed aggregation then the child plan does not need
+ * to handle REWIND efficiently; see ExecReScanAgg.
*/
if (node->aggstrategy == AGG_HASHED)
eflags &= ~EXEC_FLAG_REWIND;
@@ -1321,7 +1321,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
Aggref *aggref = (Aggref *) aggrefstate->xprstate.expr;
AggStatePerAgg peraggstate;
Oid inputTypes[FUNC_MAX_ARGS];
- int numArguments;
+ int numArguments;
HeapTuple aggTuple;
Form_pg_aggregate aggform;
Oid aggtranstype;
@@ -1332,7 +1332,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*finalfnexpr;
Datum textInitVal;
int i;
- ListCell *lc;
+ ListCell *lc;
/* Planner should have assigned aggregate to correct level */
Assert(aggref->agglevelsup == 0);
@@ -1364,9 +1364,9 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
peraggstate->numArguments = numArguments;
/*
- * Get actual datatypes of the inputs. These could be different
- * from the agg's declared input types, when the agg accepts ANY,
- * ANYARRAY or ANYELEMENT.
+ * Get actual datatypes of the inputs. These could be different from
+ * the agg's declared input types, when the agg accepts ANY, ANYARRAY
+ * or ANYELEMENT.
*/
i = 0;
foreach(lc, aggref->args)
@@ -1481,8 +1481,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/*
* If the transfn is strict and the initval is NULL, make sure input
- * type and transtype are the same (or at least binary-compatible),
- * so that it's OK to use the first input value as the initial
+ * type and transtype are the same (or at least binary-compatible), so
+ * that it's OK to use the first input value as the initial
* transValue. This should have been checked at agg definition time,
* but just in case...
*/
@@ -1504,9 +1504,9 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
Assert(node->aggstrategy != AGG_HASHED);
/*
- * We don't currently implement DISTINCT aggs for aggs having
- * more than one argument. This isn't required for anything
- * in the SQL spec, but really it ought to be implemented for
+ * We don't currently implement DISTINCT aggs for aggs having more
+ * than one argument. This isn't required for anything in the SQL
+ * spec, but really it ought to be implemented for
* feature-completeness. FIXME someday.
*/
if (numArguments != 1)
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index 719381f10b..1034f96b6e 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.70 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.71 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -345,9 +345,9 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt)
/*
* If chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. However, if caller is passing us an exprCtxt
- * then forcibly rescan all the subnodes now, so that we can pass
- * the exprCtxt down to the subnodes (needed for appendrel indexscan).
+ * first ExecProcNode. However, if caller is passing us an exprCtxt
+ * then forcibly rescan all the subnodes now, so that we can pass the
+ * exprCtxt down to the subnodes (needed for appendrel indexscan).
*/
if (subnode->chgParam == NULL || exprCtxt != NULL)
{
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 62f166cdeb..e1e0062269 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.13 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.14 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -192,8 +192,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
pgstat_count_heap_fetch(&scan->rs_pgstat_info);
/*
- * Set up the result slot to point to this tuple. Note that the
- * slot acquires a pin on the buffer.
+ * Set up the result slot to point to this tuple. Note that the slot
+ * acquires a pin on the buffer.
*/
ExecStoreTuple(&scan->rs_ctup,
slot,
@@ -201,8 +201,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
false);
/*
- * If we are using lossy info, we have to recheck the qual
- * conditions at every tuple.
+ * If we are using lossy info, we have to recheck the qual conditions
+ * at every tuple.
*/
if (tbmres->ntuples < 0)
{
@@ -237,7 +237,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
static void
bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
{
- BlockNumber page = tbmres->blockno;
+ BlockNumber page = tbmres->blockno;
Buffer buffer;
Snapshot snapshot;
Page dp;
@@ -259,9 +259,9 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
snapshot = scan->rs_snapshot;
/*
- * We must hold share lock on the buffer content while examining
- * tuple visibility. Afterwards, however, the tuples we have found
- * to be visible are guaranteed good as long as we hold the buffer pin.
+ * We must hold share lock on the buffer content while examining tuple
+ * visibility. Afterwards, however, the tuples we have found to be
+ * visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -269,10 +269,9 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
maxoff = PageGetMaxOffsetNumber(dp);
/*
- * Determine how many entries we need to look at on this page. If
- * the bitmap is lossy then we need to look at each physical item
- * pointer; otherwise we just look through the offsets listed in
- * tbmres.
+ * Determine how many entries we need to look at on this page. If the
+ * bitmap is lossy then we need to look at each physical item pointer;
+ * otherwise we just look through the offsets listed in tbmres.
*/
if (tbmres->ntuples >= 0)
{
@@ -467,8 +466,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
/*
- * Assert caller didn't ask for an unsafe snapshot --- see comments
- * at head of file.
+ * Assert caller didn't ask for an unsafe snapshot --- see comments at
+ * head of file.
*/
Assert(IsMVCCSnapshot(estate->es_snapshot));
@@ -549,8 +548,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
* initialize child nodes
*
* We do this last because the child nodes will open indexscans on our
- * relation's indexes, and we want to be sure we have acquired a lock
- * on the relation first.
+ * relation's indexes, and we want to be sure we have acquired a lock on
+ * the relation first.
*/
outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags);
diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c
index 6a0303cddd..1b2e5a7652 100644
--- a/src/backend/executor/nodeBitmapIndexscan.c
+++ b/src/backend/executor/nodeBitmapIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.20 2006/07/31 20:09:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.21 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,8 +57,8 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)
/*
* If we have runtime keys and they've not already been set up, do it now.
* Array keys are also treated as runtime keys; note that if ExecReScan
- * returns with biss_RuntimeKeysReady still false, then there is an
- * empty array key so we should do nothing.
+ * returns with biss_RuntimeKeysReady still false, then there is an empty
+ * array key so we should do nothing.
*/
if (!node->biss_RuntimeKeysReady &&
(node->biss_NumRuntimeKeys != 0 || node->biss_NumArrayKeys != 0))
@@ -152,9 +152,9 @@ ExecBitmapIndexReScan(BitmapIndexScanState *node, ExprContext *exprCtxt)
* If we are doing runtime key calculations (ie, the index keys depend on
* data from an outer scan), compute the new key values.
*
- * Array keys are also treated as runtime keys; note that if we
- * return with biss_RuntimeKeysReady still false, then there is an
- * empty array key so no index scan is needed.
+ * Array keys are also treated as runtime keys; note that if we return
+ * with biss_RuntimeKeysReady still false, then there is an empty array
+ * key so no index scan is needed.
*/
if (node->biss_NumRuntimeKeys != 0)
ExecIndexEvalRuntimeKeys(econtext,
@@ -249,8 +249,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
/*
* We do not open or lock the base relation here. We assume that an
- * ancestor BitmapHeapScan node is holding AccessShareLock (or better)
- * on the heap relation throughout the execution of the plan tree.
+ * ancestor BitmapHeapScan node is holding AccessShareLock (or better) on
+ * the heap relation throughout the execution of the plan tree.
*/
indexstate->ss.ss_currentRelation = NULL;
@@ -265,7 +265,7 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
*/
relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
indexstate->biss_RelationDesc = index_open(node->indexid,
- relistarget ? NoLock : AccessShareLock);
+ relistarget ? NoLock : AccessShareLock);
/*
* Initialize index-specific scan state
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 43084ed05b..14fd016e5a 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.84 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.85 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,11 +119,11 @@ ExecHashJoin(HashJoinState *node)
* since we aren't going to be able to skip the join on the strength
* of an empty inner relation anyway.)
*
- * If we are rescanning the join, we make use of information gained
- * on the previous scan: don't bother to try the prefetch if the
- * previous scan found the outer relation nonempty. This is not
- * 100% reliable since with new parameters the outer relation might
- * yield different results, but it's a good heuristic.
+ * If we are rescanning the join, we make use of information gained on
+ * the previous scan: don't bother to try the prefetch if the previous
+ * scan found the outer relation nonempty. This is not 100% reliable
+ * since with new parameters the outer relation might yield different
+ * results, but it's a good heuristic.
*
* The only way to make the check is to try to fetch a tuple from the
* outer plan node. If we succeed, we have to stash it away for later
@@ -173,8 +173,8 @@ ExecHashJoin(HashJoinState *node)
/*
* Reset OuterNotEmpty for scan. (It's OK if we fetched a tuple
- * above, because ExecHashJoinOuterGetTuple will immediately
- * set it again.)
+ * above, because ExecHashJoinOuterGetTuple will immediately set it
+ * again.)
*/
node->hj_OuterNotEmpty = false;
}
@@ -788,12 +788,12 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
MinimalTuple tuple;
/*
- * Since both the hash value and the MinimalTuple length word are
- * uint32, we can read them both in one BufFileRead() call without
- * any type cheating.
+ * Since both the hash value and the MinimalTuple length word are uint32,
+ * we can read them both in one BufFileRead() call without any type
+ * cheating.
*/
nread = BufFileRead(file, (void *) header, sizeof(header));
- if (nread == 0) /* end of file */
+ if (nread == 0) /* end of file */
{
ExecClearTuple(tupleSlot);
return NULL;
@@ -834,8 +834,8 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
/*
* okay to reuse the hash table; needn't rescan inner, either.
*
- * What we do need to do is reset our state about the emptiness
- * of the outer relation, so that the new scan of the outer will
+ * What we do need to do is reset our state about the emptiness of
+ * the outer relation, so that the new scan of the outer will
* update it correctly if it turns out to be empty this time.
* (There's no harm in clearing it now because ExecHashJoin won't
* need the info. In the other cases, where the hash table
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 84ee56beb0..9773f2341e 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.116 2006/07/31 20:09:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.117 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -233,9 +233,9 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
bool isNull;
/*
- * For each run-time key, extract the run-time expression and
- * evaluate it with respect to the current outer tuple. We then stick
- * the result into the proper scan key.
+ * For each run-time key, extract the run-time expression and evaluate
+ * it with respect to the current outer tuple. We then stick the
+ * result into the proper scan key.
*
* Note: the result of the eval could be a pass-by-ref value that's
* stored in the outer scan's tuple, not in
@@ -290,8 +290,8 @@ ExecIndexEvalArrayKeys(ExprContext *econtext,
bool *elem_nulls;
/*
- * Compute and deconstruct the array expression.
- * (Notes in ExecIndexEvalRuntimeKeys() apply here too.)
+ * Compute and deconstruct the array expression. (Notes in
+ * ExecIndexEvalRuntimeKeys() apply here too.)
*/
arraydatum = ExecEvalExpr(array_expr,
econtext,
@@ -317,8 +317,9 @@ ExecIndexEvalArrayKeys(ExprContext *econtext,
}
/*
- * Note: we expect the previous array data, if any, to be automatically
- * freed by resetting the per-tuple context; hence no pfree's here.
+ * Note: we expect the previous array data, if any, to be
+ * automatically freed by resetting the per-tuple context; hence no
+ * pfree's here.
*/
arrayKeys[j].elem_values = elem_values;
arrayKeys[j].elem_nulls = elem_nulls;
@@ -524,7 +525,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
*/
relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
indexstate->iss_RelationDesc = index_open(node->indexid,
- relistarget ? NoLock : AccessShareLock);
+ relistarget ? NoLock : AccessShareLock);
/*
* Initialize index-specific scan state
@@ -543,7 +544,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
&indexstate->iss_NumScanKeys,
&indexstate->iss_RuntimeKeys,
&indexstate->iss_NumRuntimeKeys,
- NULL, /* no ArrayKeys */
+ NULL, /* no ArrayKeys */
NULL);
/*
@@ -661,7 +662,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
/*
* If there are any RowCompareExpr quals, we need extra ScanKey entries
* for them, and possibly extra runtime-key entries. Count up what's
- * needed. (The subsidiary ScanKey arrays for the RowCompareExprs could
+ * needed. (The subsidiary ScanKey arrays for the RowCompareExprs could
* be allocated as separate chunks, but we have to count anyway to make
* runtime_keys large enough, so might as well just do one palloc.)
*/
@@ -784,9 +785,9 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
{
/* (indexkey, indexkey, ...) op (expression, expression, ...) */
RowCompareExpr *rc = (RowCompareExpr *) clause;
- ListCell *largs_cell = list_head(rc->largs);
- ListCell *rargs_cell = list_head(rc->rargs);
- ListCell *opnos_cell = list_head(rc->opnos);
+ ListCell *largs_cell = list_head(rc->largs);
+ ListCell *rargs_cell = list_head(rc->rargs);
+ ListCell *opnos_cell = list_head(rc->opnos);
ScanKey first_sub_key = &scan_keys[extra_scan_keys];
/* Scan RowCompare columns and generate subsidiary ScanKey items */
@@ -859,7 +860,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
opclass = index->rd_indclass->values[varattno - 1];
get_op_opclass_properties(opno, opclass,
- &op_strategy, &op_subtype, &op_recheck);
+ &op_strategy, &op_subtype, &op_recheck);
if (op_strategy != rc->rctype)
elog(ERROR, "RowCompare index qualification contains wrong operator");
@@ -871,11 +872,11 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
*/
ScanKeyEntryInitialize(this_sub_key,
flags,
- varattno, /* attribute number */
- op_strategy, /* op's strategy */
- op_subtype, /* strategy subtype */
- opfuncid, /* reg proc to use */
- scanvalue); /* constant */
+ varattno, /* attribute number */
+ op_strategy, /* op's strategy */
+ op_subtype, /* strategy subtype */
+ opfuncid, /* reg proc to use */
+ scanvalue); /* constant */
extra_scan_keys++;
}
@@ -883,8 +884,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
scan_keys[extra_scan_keys - 1].sk_flags |= SK_ROW_END;
/*
- * We don't use ScanKeyEntryInitialize for the header because
- * it isn't going to contain a valid sk_func pointer.
+ * We don't use ScanKeyEntryInitialize for the header because it
+ * isn't going to contain a valid sk_func pointer.
*/
MemSet(this_scan_key, 0, sizeof(ScanKeyData));
this_scan_key->sk_flags = SK_ROW_HEADER;
@@ -937,7 +938,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index,
* initialize the scan key's fields appropriately
*/
ScanKeyEntryInitialize(this_scan_key,
- 0, /* flags */
+ 0, /* flags */
varattno, /* attribute number to scan */
strategy, /* op's strategy */
subtype, /* strategy subtype */
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 84a494cf0d..3774edc612 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.56 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.57 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,16 +124,16 @@ ExecMaterial(MaterialState *node)
}
/*
- * Append returned tuple to tuplestore. NOTE: because the
- * tuplestore is certainly in EOF state, its read position will move
- * forward over the added tuple. This is what we want.
+ * Append returned tuple to tuplestore. NOTE: because the tuplestore
+ * is certainly in EOF state, its read position will move forward over
+ * the added tuple. This is what we want.
*/
if (tuplestorestate)
tuplestore_puttupleslot(tuplestorestate, outerslot);
/*
- * And return a copy of the tuple. (XXX couldn't we just return
- * the outerslot?)
+ * And return a copy of the tuple. (XXX couldn't we just return the
+ * outerslot?)
*/
return ExecCopySlot(slot, outerslot);
}
@@ -162,10 +162,10 @@ ExecInitMaterial(Material *node, EState *estate, int eflags)
matstate->ss.ps.state = estate;
/*
- * We must have random access to the subplan output to do backward scan
- * or mark/restore. We also prefer to materialize the subplan output
- * if we might be called on to rewind and replay it many times.
- * However, if none of these cases apply, we can skip storing the data.
+ * We must have random access to the subplan output to do backward scan or
+ * mark/restore. We also prefer to materialize the subplan output if we
+ * might be called on to rewind and replay it many times. However, if none
+ * of these cases apply, we can skip storing the data.
*/
matstate->randomAccess = (eflags & (EXEC_FLAG_REWIND |
EXEC_FLAG_BACKWARD |
@@ -194,8 +194,8 @@ ExecInitMaterial(Material *node, EState *estate, int eflags)
/*
* initialize child nodes
*
- * We shield the child node from the need to support REWIND, BACKWARD,
- * or MARK/RESTORE.
+ * We shield the child node from the need to support REWIND, BACKWARD, or
+ * MARK/RESTORE.
*/
eflags &= ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 75403dfa71..8a9f6fe230 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.81 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.82 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -957,10 +957,10 @@ ExecMergeJoin(MergeJoinState *node)
}
/*
- * Load up the new inner tuple's comparison values. If we
- * see that it contains a NULL and hence can't match any
- * outer tuple, we can skip the comparison and assume the
- * new tuple is greater than current outer.
+ * Load up the new inner tuple's comparison values. If we see
+ * that it contains a NULL and hence can't match any outer
+ * tuple, we can skip the comparison and assume the new tuple
+ * is greater than current outer.
*/
if (!MJEvalInnerValues(node, innerTupleSlot))
{
@@ -1377,8 +1377,8 @@ ExecMergeJoin(MergeJoinState *node)
else
{
/*
- * current inner can't possibly match any outer;
- * better to advance the inner scan than the outer.
+ * current inner can't possibly match any outer; better to
+ * advance the inner scan than the outer.
*/
node->mj_JoinState = EXEC_MJ_SKIPINNER_ADVANCE;
}
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index f12f2f624f..1c0a696f53 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.42 2006/03/05 15:58:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.43 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -314,10 +314,10 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags)
* initialize child nodes
*
* Tell the inner child that cheap rescans would be good. (This is
- * unnecessary if we are doing nestloop with inner indexscan, because
- * the rescan will always be with a fresh parameter --- but since
- * nodeIndexscan doesn't actually care about REWIND, there's no point
- * in dealing with that refinement.)
+ * unnecessary if we are doing nestloop with inner indexscan, because the
+ * rescan will always be with a fresh parameter --- but since
+ * nodeIndexscan doesn't actually care about REWIND, there's no point in
+ * dealing with that refinement.)
*/
outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags);
innerPlanState(nlstate) = ExecInitNode(innerPlan(node), estate,
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index c2ed276e56..a6287793d4 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.60 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.61 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,7 +148,7 @@ InitScanRelation(SeqScanState *node, EState *estate)
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
- ((SeqScan *) node->ps.plan)->scanrelid);
+ ((SeqScan *) node->ps.plan)->scanrelid);
currentScanDesc = heap_beginscan(currentRelation,
estate->es_snapshot,
diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c
index b586a37a64..dd47348997 100644
--- a/src/backend/executor/nodeSort.c
+++ b/src/backend/executor/nodeSort.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.57 2006/06/27 16:53:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.58 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -158,9 +158,9 @@ ExecInitSort(Sort *node, EState *estate, int eflags)
sortstate->ss.ps.state = estate;
/*
- * We must have random access to the sort output to do backward scan
- * or mark/restore. We also prefer to materialize the sort output
- * if we might be called on to rewind and replay it many times.
+ * We must have random access to the sort output to do backward scan or
+ * mark/restore. We also prefer to materialize the sort output if we
+ * might be called on to rewind and replay it many times.
*/
sortstate->randomAccess = (eflags & (EXEC_FLAG_REWIND |
EXEC_FLAG_BACKWARD |
@@ -189,8 +189,8 @@ ExecInitSort(Sort *node, EState *estate, int eflags)
/*
* initialize child nodes
*
- * We shield the child node from the need to support REWIND, BACKWARD,
- * or MARK/RESTORE.
+ * We shield the child node from the need to support REWIND, BACKWARD, or
+ * MARK/RESTORE.
*/
eflags &= ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
@@ -316,6 +316,7 @@ ExecReScanSort(SortState *node, ExprContext *exprCtxt)
node->sort_Done = false;
tuplesort_end((Tuplesortstate *) node->tuplesortstate);
node->tuplesortstate = NULL;
+
/*
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 600a39d4ee..2a5fd6d955 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.79 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.80 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -332,8 +332,8 @@ ExecScanSubPlan(SubPlanState *node,
/*
* For ALL, ANY, and ROWCOMPARE sublinks, load up the Params
- * representing the columns of the sub-select, and then evaluate
- * the combining expression.
+ * representing the columns of the sub-select, and then evaluate the
+ * combining expression.
*/
col = 1;
foreach(plst, subplan->paramIds)
@@ -434,8 +434,8 @@ buildSubPlanHash(SubPlanState *node)
* NULL) results of the IN operation, then we have to store subplan output
* rows that are partly or wholly NULL. We store such rows in a separate
* hash table that we expect will be much smaller than the main table. (We
- * can use hashing to eliminate partly-null rows that are not distinct.
- * We keep them separate to minimize the cost of the inevitable full-table
+ * can use hashing to eliminate partly-null rows that are not distinct. We
+ * keep them separate to minimize the cost of the inevitable full-table
* searches; see findPartialMatch.)
*
* If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
@@ -682,9 +682,9 @@ ExecInitSubPlan(SubPlanState *node, EState *estate, int eflags)
/*
* Start up the subplan (this is a very cut-down form of InitPlan())
*
- * The subplan will never need to do BACKWARD scan or MARK/RESTORE.
- * If it is a parameterless subplan (not initplan), we suggest that it
- * be prepared to handle REWIND efficiently; otherwise there is no need.
+ * The subplan will never need to do BACKWARD scan or MARK/RESTORE. If it
+ * is a parameterless subplan (not initplan), we suggest that it be
+ * prepared to handle REWIND efficiently; otherwise there is no need.
*/
eflags &= EXEC_FLAG_EXPLAIN_ONLY;
if (subplan->parParam == NIL && subplan->setParam == NIL)
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 0436ea82be..22e148ec6c 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.31 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.32 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -204,7 +204,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
* too soon during shutdown.
*/
ExecAssignScanType(&subquerystate->ss,
- CreateTupleDescCopy(ExecGetResultType(subquerystate->subplan)));
+ CreateTupleDescCopy(ExecGetResultType(subquerystate->subplan)));
/*
* Initialize result tuple type and projection info.
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index b59ff9ca8e..52d9290457 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.50 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.51 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,9 +60,9 @@ TidListCreate(TidScanState *tidstate)
ListCell *l;
/*
- * We initialize the array with enough slots for the case that all
- * quals are simple OpExprs. If there's any ScalarArrayOpExprs,
- * we may have to enlarge the array.
+ * We initialize the array with enough slots for the case that all quals
+ * are simple OpExprs. If there's any ScalarArrayOpExprs, we may have to
+ * enlarge the array.
*/
numAllocTids = list_length(evalList);
tidList = (ItemPointerData *)
@@ -78,9 +78,9 @@ TidListCreate(TidScanState *tidstate)
if (is_opclause(expr))
{
- FuncExprState *fexstate = (FuncExprState *) exstate;
- Node *arg1;
- Node *arg2;
+ FuncExprState *fexstate = (FuncExprState *) exstate;
+ Node *arg1;
+ Node *arg2;
arg1 = get_leftop(expr);
arg2 = get_rightop(expr);
@@ -154,15 +154,14 @@ TidListCreate(TidScanState *tidstate)
/*
* Sort the array of TIDs into order, and eliminate duplicates.
- * Eliminating duplicates is necessary since we want OR semantics
- * across the list. Sorting makes it easier to detect duplicates,
- * and as a bonus ensures that we will visit the heap in the most
- * efficient way.
+ * Eliminating duplicates is necessary since we want OR semantics across
+ * the list. Sorting makes it easier to detect duplicates, and as a bonus
+ * ensures that we will visit the heap in the most efficient way.
*/
if (numTids > 1)
{
- int lastTid;
- int i;
+ int lastTid;
+ int i;
qsort((void *) tidList, numTids, sizeof(ItemPointerData),
itemptr_comparator);
@@ -188,8 +187,8 @@ itemptr_comparator(const void *a, const void *b)
{
const ItemPointerData *ipa = (const ItemPointerData *) a;
const ItemPointerData *ipb = (const ItemPointerData *) b;
- BlockNumber ba = ItemPointerGetBlockNumber(ipa);
- BlockNumber bb = ItemPointerGetBlockNumber(ipb);
+ BlockNumber ba = ItemPointerGetBlockNumber(ipa);
+ BlockNumber bb = ItemPointerGetBlockNumber(ipb);
OffsetNumber oa = ItemPointerGetOffsetNumber(ipa);
OffsetNumber ob = ItemPointerGetOffsetNumber(ipb);
diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c
index 1d4bb08d4d..c6a1a94038 100644
--- a/src/backend/executor/nodeValuesscan.c
+++ b/src/backend/executor/nodeValuesscan.c
@@ -2,14 +2,14 @@
*
* nodeValuesscan.c
* Support routines for scanning Values lists
- * ("VALUES (...), (...), ..." in rangetable).
+ * ("VALUES (...), (...), ..." in rangetable).
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeValuesscan.c,v 1.2 2006/08/02 18:58:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeValuesscan.c,v 1.3 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,10 +47,10 @@ static TupleTableSlot *
ValuesNext(ValuesScanState *node)
{
TupleTableSlot *slot;
- EState *estate;
- ExprContext *econtext;
- ScanDirection direction;
- List *exprlist;
+ EState *estate;
+ ExprContext *econtext;
+ ScanDirection direction;
+ List *exprlist;
/*
* get information from the estate and scan state
@@ -83,9 +83,9 @@ ValuesNext(ValuesScanState *node)
}
/*
- * Always clear the result slot; this is appropriate if we are at the
- * end of the data, and if we're not, we still need it as the first step
- * of the store-virtual-tuple protocol. It seems wise to clear the slot
+ * Always clear the result slot; this is appropriate if we are at the end
+ * of the data, and if we're not, we still need it as the first step of
+ * the store-virtual-tuple protocol. It seems wise to clear the slot
* before we reset the context it might have pointers into.
*/
ExecClearTuple(slot);
@@ -107,18 +107,18 @@ ValuesNext(ValuesScanState *node)
ReScanExprContext(econtext);
/*
- * Build the expression eval state in the econtext's per-tuple
- * memory. This is a tad unusual, but we want to delete the eval
- * state again when we move to the next row, to avoid growth of
- * memory requirements over a long values list.
+ * Build the expression eval state in the econtext's per-tuple memory.
+ * This is a tad unusual, but we want to delete the eval state again
+ * when we move to the next row, to avoid growth of memory
+ * requirements over a long values list.
*/
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
- * Pass NULL, not my plan node, because we don't want anything
- * in this transient state linking into permanent state. The
- * only possibility is a SubPlan, and there shouldn't be any
- * (any subselects in the VALUES list should be InitPlans).
+ * Pass NULL, not my plan node, because we don't want anything in this
+ * transient state linking into permanent state. The only possibility
+ * is a SubPlan, and there shouldn't be any (any subselects in the
+ * VALUES list should be InitPlans).
*/
exprstatelist = (List *) ExecInitExpr((Expr *) exprlist, NULL);
@@ -126,8 +126,8 @@ ValuesNext(ValuesScanState *node)
Assert(list_length(exprstatelist) == slot->tts_tupleDescriptor->natts);
/*
- * Compute the expressions and build a virtual result tuple.
- * We already did ExecClearTuple(slot).
+ * Compute the expressions and build a virtual result tuple. We
+ * already did ExecClearTuple(slot).
*/
values = slot->tts_values;
isnull = slot->tts_isnull;
@@ -135,7 +135,7 @@ ValuesNext(ValuesScanState *node)
resind = 0;
foreach(lc, exprstatelist)
{
- ExprState *estate = (ExprState *) lfirst(lc);
+ ExprState *estate = (ExprState *) lfirst(lc);
values[resind] = ExecEvalExpr(estate,
econtext,
@@ -181,12 +181,12 @@ ExecValuesScan(ValuesScanState *node)
ValuesScanState *
ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
{
- ValuesScanState *scanstate;
- RangeTblEntry *rte;
- TupleDesc tupdesc;
- ListCell *vtl;
- int i;
- PlanState *planstate;
+ ValuesScanState *scanstate;
+ RangeTblEntry *rte;
+ TupleDesc tupdesc;
+ ListCell *vtl;
+ int i;
+ PlanState *planstate;
/*
* ValuesScan should not have any children.
@@ -208,8 +208,8 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
/*
* Create expression contexts. We need two, one for per-sublist
- * processing and one for execScan.c to use for quals and projections.
- * We cheat a little by using ExecAssignExprContext() to build both.
+ * processing and one for execScan.c to use for quals and projections. We
+ * cheat a little by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, planstate);
scanstate->rowcontext = planstate->ps_ExprContext;
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index eb8d11fed9..5410c364e0 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.163 2006/09/07 22:52:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.164 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -827,8 +827,8 @@ SPI_cursor_open(const char *name, void *plan,
int k;
/*
- * Check that the plan is something the Portal code will special-case
- * as returning one tupleset.
+ * Check that the plan is something the Portal code will special-case as
+ * returning one tupleset.
*/
if (!SPI_is_cursor_plan(spiplan))
{
@@ -846,7 +846,7 @@ SPI_cursor_open(const char *name, void *plan,
errmsg("cannot open empty query as cursor")));
ereport(ERROR,
(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
- /* translator: %s is name of a SQL command, eg INSERT */
+ /* translator: %s is name of a SQL command, eg INSERT */
errmsg("cannot open %s query as cursor",
CreateQueryTag(queryTree))));
}
@@ -885,7 +885,7 @@ SPI_cursor_open(const char *name, void *plan,
{
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (spiplan->nargs - 1) * sizeof(ParamExternData));
+ (spiplan->nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = spiplan->nargs;
for (k = 0; k < spiplan->nargs; k++)
@@ -1348,7 +1348,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) * sizeof(ParamExternData));
+ (nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = nargs;
for (k = 0; k < nargs; k++)
@@ -1482,9 +1482,9 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
ActiveSnapshot = NULL;
/*
- * The last canSetTag query sets the status values returned
- * to the caller. Be careful to free any tuptables not
- * returned, to avoid intratransaction memory leak.
+ * The last canSetTag query sets the status values returned to
+ * the caller. Be careful to free any tuptables not returned,
+ * to avoid intratransaction memory leak.
*/
if (queryTree->canSetTag)
{
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index defe45334c..6e3ef149f3 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.143 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.144 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,16 +75,17 @@ static Port *pam_port_cludge; /* Workaround for passing "Port *port" into
#include <winldap.h>
/* Correct header from the Platform SDK */
-typedef ULONG (*__ldap_start_tls_sA)(
- IN PLDAP ExternalHandle,
- OUT PULONG ServerReturnValue,
- OUT LDAPMessage **result,
- IN PLDAPControlA *ServerControls,
- IN PLDAPControlA *ClientControls
+typedef
+ULONG(*__ldap_start_tls_sA) (
+ IN PLDAP ExternalHandle,
+ OUT PULONG ServerReturnValue,
+ OUT LDAPMessage ** result,
+ IN PLDAPControlA * ServerControls,
+ IN PLDAPControlA * ClientControls
);
#endif
-static int CheckLDAPAuth(Port *port);
+static int CheckLDAPAuth(Port *port);
#endif
@@ -347,9 +348,9 @@ auth_failed(Port *port, int status)
break;
#endif /* USE_PAM */
#ifdef USE_LDAP
- case uaLDAP:
- errstr = gettext_noop("LDAP authentication failed for user \"%s\"");
- break;
+ case uaLDAP:
+ errstr = gettext_noop("LDAP authentication failed for user \"%s\"");
+ break;
#endif /* USE_LDAP */
default:
errstr = gettext_noop("authentication failed for user \"%s\": invalid authentication method");
@@ -480,9 +481,9 @@ ClientAuthentication(Port *port)
#endif /* USE_PAM */
#ifdef USE_LDAP
- case uaLDAP:
- status = CheckLDAPAuth(port);
- break;
+ case uaLDAP:
+ status = CheckLDAPAuth(port);
+ break;
#endif
case uaTrust:
@@ -709,100 +710,102 @@ CheckPAMAuth(Port *port, char *user, char *password)
static int
CheckLDAPAuth(Port *port)
{
- char *passwd;
- char server[128];
- char basedn[128];
- char prefix[128];
- char suffix[128];
- LDAP *ldap;
- int ssl = 0;
- int r;
- int ldapversion = LDAP_VERSION3;
- int ldapport = LDAP_PORT;
- char fulluser[128];
-
- if (!port->auth_arg || port->auth_arg[0] == '\0')
- {
- ereport(LOG,
- (errmsg("LDAP configuration URL not specified")));
- return STATUS_ERROR;
- }
-
- /*
- * Crack the LDAP url. We do a very trivial parse..
- * ldap[s]://<server>[:<port>]/<basedn>[;prefix[;suffix]]
- */
-
- server[0] = '\0';
- basedn[0] = '\0';
- prefix[0] = '\0';
- suffix[0] = '\0';
-
- /* ldap, including port number */
- r = sscanf(port->auth_arg,
- "ldap://%127[^:]:%i/%127[^;];%127[^;];%127s",
- server, &ldapport, basedn, prefix, suffix);
- if (r < 3)
- {
- /* ldaps, including port number */
- r = sscanf(port->auth_arg,
- "ldaps://%127[^:]:%i/%127[^;];%127[^;];%127s",
- server, &ldapport, basedn, prefix, suffix);
- if (r >=3) ssl = 1;
- }
- if (r < 3)
- {
- /* ldap, no port number */
- r = sscanf(port->auth_arg,
- "ldap://%127[^/]/%127[^;];%127[^;];%127s",
- server, basedn, prefix, suffix);
- }
- if (r < 2)
- {
- /* ldaps, no port number */
- r = sscanf(port->auth_arg,
- "ldaps://%127[^/]/%127[^;];%127[^;];%127s",
- server, basedn, prefix, suffix);
- if (r >= 2) ssl = 1;
- }
- if (r < 2)
- {
- ereport(LOG,
- (errmsg("invalid LDAP URL: \"%s\"",
+ char *passwd;
+ char server[128];
+ char basedn[128];
+ char prefix[128];
+ char suffix[128];
+ LDAP *ldap;
+ int ssl = 0;
+ int r;
+ int ldapversion = LDAP_VERSION3;
+ int ldapport = LDAP_PORT;
+ char fulluser[128];
+
+ if (!port->auth_arg || port->auth_arg[0] == '\0')
+ {
+ ereport(LOG,
+ (errmsg("LDAP configuration URL not specified")));
+ return STATUS_ERROR;
+ }
+
+ /*
+ * Crack the LDAP url. We do a very trivial parse..
+ * ldap[s]://<server>[:<port>]/<basedn>[;prefix[;suffix]]
+ */
+
+ server[0] = '\0';
+ basedn[0] = '\0';
+ prefix[0] = '\0';
+ suffix[0] = '\0';
+
+ /* ldap, including port number */
+ r = sscanf(port->auth_arg,
+ "ldap://%127[^:]:%i/%127[^;];%127[^;];%127s",
+ server, &ldapport, basedn, prefix, suffix);
+ if (r < 3)
+ {
+ /* ldaps, including port number */
+ r = sscanf(port->auth_arg,
+ "ldaps://%127[^:]:%i/%127[^;];%127[^;];%127s",
+ server, &ldapport, basedn, prefix, suffix);
+ if (r >= 3)
+ ssl = 1;
+ }
+ if (r < 3)
+ {
+ /* ldap, no port number */
+ r = sscanf(port->auth_arg,
+ "ldap://%127[^/]/%127[^;];%127[^;];%127s",
+ server, basedn, prefix, suffix);
+ }
+ if (r < 2)
+ {
+ /* ldaps, no port number */
+ r = sscanf(port->auth_arg,
+ "ldaps://%127[^/]/%127[^;];%127[^;];%127s",
+ server, basedn, prefix, suffix);
+ if (r >= 2)
+ ssl = 1;
+ }
+ if (r < 2)
+ {
+ ereport(LOG,
+ (errmsg("invalid LDAP URL: \"%s\"",
port->auth_arg)));
- return STATUS_ERROR;
- }
-
- sendAuthRequest(port, AUTH_REQ_PASSWORD);
-
- passwd = recv_password_packet(port);
- if (passwd == NULL)
- return STATUS_EOF; /* client wouldn't send password */
-
- ldap = ldap_init(server, ldapport);
- if (!ldap)
- {
+ return STATUS_ERROR;
+ }
+
+ sendAuthRequest(port, AUTH_REQ_PASSWORD);
+
+ passwd = recv_password_packet(port);
+ if (passwd == NULL)
+ return STATUS_EOF; /* client wouldn't send password */
+
+ ldap = ldap_init(server, ldapport);
+ if (!ldap)
+ {
#ifndef WIN32
- ereport(LOG,
- (errmsg("could not initialize LDAP: error code %d",
- errno)));
+ ereport(LOG,
+ (errmsg("could not initialize LDAP: error code %d",
+ errno)));
#else
- ereport(LOG,
- (errmsg("could not initialize LDAP: error code %d",
- (int) LdapGetLastError())));
+ ereport(LOG,
+ (errmsg("could not initialize LDAP: error code %d",
+ (int) LdapGetLastError())));
#endif
- return STATUS_ERROR;
- }
-
- if ((r = ldap_set_option(ldap, LDAP_OPT_PROTOCOL_VERSION, &ldapversion)) != LDAP_SUCCESS)
- {
- ereport(LOG,
- (errmsg("could not set LDAP protocol version: error code %d", r)));
- return STATUS_ERROR;
- }
-
- if (ssl)
- {
+ return STATUS_ERROR;
+ }
+
+ if ((r = ldap_set_option(ldap, LDAP_OPT_PROTOCOL_VERSION, &ldapversion)) != LDAP_SUCCESS)
+ {
+ ereport(LOG,
+ (errmsg("could not set LDAP protocol version: error code %d", r)));
+ return STATUS_ERROR;
+ }
+
+ if (ssl)
+ {
#ifndef WIN32
if ((r = ldap_start_tls_s(ldap, NULL, NULL)) != LDAP_SUCCESS)
#else
@@ -815,17 +818,20 @@ CheckLDAPAuth(Port *port)
* exist on Windows 2000, and causes a load error for the whole
* exe if referenced.
*/
- HANDLE ldaphandle;
-
+ HANDLE ldaphandle;
+
ldaphandle = LoadLibrary("WLDAP32.DLL");
if (ldaphandle == NULL)
{
- /* should never happen since we import other files from wldap32, but check anyway */
+ /*
+ * should never happen since we import other files from
+ * wldap32, but check anyway
+ */
ereport(LOG,
(errmsg("could not load wldap32.dll")));
return STATUS_ERROR;
}
- _ldap_start_tls_sA = (__ldap_start_tls_sA)GetProcAddress(ldaphandle, "ldap_start_tls_sA");
+ _ldap_start_tls_sA = (__ldap_start_tls_sA) GetProcAddress(ldaphandle, "ldap_start_tls_sA");
if (_ldap_start_tls_sA == NULL)
{
ereport(LOG,
@@ -839,33 +845,32 @@ CheckLDAPAuth(Port *port)
* process and is automatically cleaned up on process exit.
*/
}
- if ((r = _ldap_start_tls_sA(ldap, NULL, NULL, NULL, NULL)) != LDAP_SUCCESS)
+ if ((r = _ldap_start_tls_sA(ldap, NULL, NULL, NULL, NULL)) != LDAP_SUCCESS)
#endif
- {
- ereport(LOG,
- (errmsg("could not start LDAP TLS session: error code %d", r)));
- return STATUS_ERROR;
- }
- }
-
- snprintf(fulluser, sizeof(fulluser)-1, "%s%s%s",
+ {
+ ereport(LOG,
+ (errmsg("could not start LDAP TLS session: error code %d", r)));
+ return STATUS_ERROR;
+ }
+ }
+
+ snprintf(fulluser, sizeof(fulluser) - 1, "%s%s%s",
prefix, port->user_name, suffix);
- fulluser[sizeof(fulluser)-1] = '\0';
+ fulluser[sizeof(fulluser) - 1] = '\0';
- r = ldap_simple_bind_s(ldap, fulluser, passwd);
- ldap_unbind(ldap);
+ r = ldap_simple_bind_s(ldap, fulluser, passwd);
+ ldap_unbind(ldap);
- if (r != LDAP_SUCCESS)
- {
- ereport(LOG,
- (errmsg("LDAP login failed for user \"%s\" on server \"%s\": error code %d",
+ if (r != LDAP_SUCCESS)
+ {
+ ereport(LOG,
+ (errmsg("LDAP login failed for user \"%s\" on server \"%s\": error code %d",
fulluser, server, r)));
- return STATUS_ERROR;
- }
-
- return STATUS_OK;
-}
+ return STATUS_ERROR;
+ }
+ return STATUS_OK;
+}
#endif /* USE_LDAP */
/*
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 20c2cc7a3b..06a5b9a6c0 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.72 2006/09/04 14:57:27 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.73 2006/10/04 00:29:53 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@@ -110,7 +110,7 @@ static DH *load_dh_buffer(const char *, size_t);
static DH *tmp_dh_cb(SSL *s, int is_export, int keylength);
static int verify_cb(int, X509_STORE_CTX *);
static void info_cb(const SSL *ssl, int type, int args);
-static void initialize_SSL(void);
+static void initialize_SSL(void);
static void destroy_SSL(void);
static int open_server_SSL(Port *);
static void close_SSL(Port *);
@@ -795,8 +795,9 @@ initialize_SSL(void)
else
{
/*
- * Check the Certificate Revocation List (CRL) if file exists.
- * http://searchsecurity.techtarget.com/sDefinition/0,,sid14_gci803160,00.html
+ * Check the Certificate Revocation List (CRL) if file exists.
+ * http://searchsecurity.techtarget.com/sDefinition/0,,sid14_gci803160,
+ * 00.html
*/
X509_STORE *cvstore = SSL_CTX_get_cert_store(SSL_context);
@@ -807,19 +808,19 @@ initialize_SSL(void)
/* OpenSSL 0.96 does not support X509_V_FLAG_CRL_CHECK */
#ifdef X509_V_FLAG_CRL_CHECK
X509_STORE_set_flags(cvstore,
- X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL);
+ X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
#else
ereport(LOG,
- (errmsg("SSL Certificate Revocation List (CRL) file \"%s\" ignored",
- ROOT_CRL_FILE),
- errdetail("Installed SSL library does not support CRL.")));
+ (errmsg("SSL Certificate Revocation List (CRL) file \"%s\" ignored",
+ ROOT_CRL_FILE),
+ errdetail("Installed SSL library does not support CRL.")));
#endif
else
{
/* Not fatal - we do not require CRL */
ereport(LOG,
- (errmsg("SSL Certificate Revocation List (CRL) file \"%s\" not found, skipping: %s",
- ROOT_CRL_FILE, SSLerrmessage()),
+ (errmsg("SSL Certificate Revocation List (CRL) file \"%s\" not found, skipping: %s",
+ ROOT_CRL_FILE, SSLerrmessage()),
errdetail("Will not check certificates against CRL.")));
}
}
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index e2f1f6c869..776d167ff2 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.155 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.156 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -613,8 +613,8 @@ parse_hba_auth(ListCell **line_item, UserAuth *userauth_p,
*userauth_p = uaPAM;
#endif
#ifdef USE_LDAP
- else if (strcmp(token,"ldap") == 0)
- *userauth_p = uaLDAP;
+ else if (strcmp(token, "ldap") == 0)
+ *userauth_p = uaLDAP;
#endif
else
{
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 3a4c74b0a8..9bf244410e 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -30,7 +30,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.187 2006/08/11 20:44:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.188 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -594,13 +594,14 @@ StreamConnection(int server_fd, Port *port)
}
#ifdef WIN32
+
/*
- * This is a Win32 socket optimization. The ideal size is 32k.
- * http://support.microsoft.com/kb/823764/EN-US/
+ * This is a Win32 socket optimization. The ideal size is 32k.
+ * http://support.microsoft.com/kb/823764/EN-US/
*/
on = PQ_BUFFER_SIZE * 4;
if (setsockopt(port->sock, SOL_SOCKET, SO_SNDBUF, (char *) &on,
- sizeof(on)) < 0)
+ sizeof(on)) < 0)
{
elog(LOG, "setsockopt(SO_SNDBUF) failed: %m");
return STATUS_ERROR;
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 150d28b44f..a750cfe9fd 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.104 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.105 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -90,6 +90,7 @@ main(int argc, char *argv[])
set_pglocale_pgservice(argv[0], "postgres");
#ifdef WIN32
+
/*
* Windows uses codepages rather than the environment, so we work around
* that by querying the environment explicitly first for LC_COLLATE and
@@ -156,8 +157,7 @@ main(int argc, char *argv[])
check_root(progname);
/*
- * Dispatch to one of various subprograms depending on first
- * argument.
+ * Dispatch to one of various subprograms depending on first argument.
*/
#ifdef EXEC_BACKEND
@@ -166,6 +166,7 @@ main(int argc, char *argv[])
#endif
#ifdef WIN32
+
/*
* Start our win32 signal implementation
*
@@ -190,7 +191,7 @@ main(int argc, char *argv[])
/*
- * Place platform-specific startup hacks here. This is the right
+ * Place platform-specific startup hacks here. This is the right
* place to put code that must be executed early in launch of either a
* postmaster, a standalone backend, or a standalone bootstrap run.
* Note that this code will NOT be executed when a backend or
@@ -211,12 +212,11 @@ startup_hacks(const char *progname)
/*
- * On some platforms, unaligned memory accesses result in a kernel
- * trap; the default kernel behavior is to emulate the memory
- * access, but this results in a significant performance penalty.
- * We ought to fix PG not to make such unaligned memory accesses,
- * so this code disables the kernel emulation: unaligned accesses
- * will result in SIGBUS instead.
+ * On some platforms, unaligned memory accesses result in a kernel trap;
+ * the default kernel behavior is to emulate the memory access, but this
+ * results in a significant performance penalty. We ought to fix PG not to
+ * make such unaligned memory accesses, so this code disables the kernel
+ * emulation: unaligned accesses will result in SIGBUS instead.
*/
#ifdef NOFIXADE
@@ -230,8 +230,7 @@ startup_hacks(const char *progname)
write_stderr("%s: setsysinfo failed: %s\n",
progname, strerror(errno));
#endif
-
-#endif /* NOFIXADE */
+#endif /* NOFIXADE */
#ifdef WIN32
@@ -253,9 +252,9 @@ startup_hacks(const char *progname)
}
/* In case of general protection fault, don't show GUI popup box */
- SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
}
-#endif /* WIN32 */
+#endif /* WIN32 */
}
@@ -315,7 +314,7 @@ help(const char *progname)
printf(_(" -x NUM internal use\n"));
printf(_("\nPlease read the documentation for the complete list of run-time\n"
- "configuration settings and how to set them on the command line or in\n"
+ "configuration settings and how to set them on the command line or in\n"
"the configuration file.\n\n"
"Report bugs to <pgsql-bugs@postgresql.org>.\n"));
}
@@ -330,19 +329,18 @@ check_root(const char *progname)
{
write_stderr("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
- "possible system security compromise. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "possible system security compromise. See the documentation for\n"
+ "more information on how to properly start the server.\n");
exit(1);
}
/*
- * Also make sure that real and effective uids are the same.
- * Executing as a setuid program from a root shell is a security
- * hole, since on many platforms a nefarious subroutine could
- * setuid back to root if real uid is root. (Since nobody
- * actually uses postgres as a setuid program, trying to
- * actively fix this situation seems more trouble than it's worth;
- * we'll just expend the effort to check for it.)
+ * Also make sure that real and effective uids are the same. Executing as
+ * a setuid program from a root shell is a security hole, since on many
+ * platforms a nefarious subroutine could setuid back to root if real uid
+ * is root. (Since nobody actually uses postgres as a setuid program,
+ * trying to actively fix this situation seems more trouble than it's
+ * worth; we'll just expend the effort to check for it.)
*/
if (getuid() != geteuid())
{
@@ -350,17 +348,17 @@ check_root(const char *progname)
progname);
exit(1);
}
-#else /* WIN32 */
+#else /* WIN32 */
if (pgwin32_is_admin())
{
write_stderr("Execution of PostgreSQL by a user with administrative permissions is not\n"
"permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
- "possible system security compromises. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "possible system security compromises. See the documentation for\n"
+ "more information on how to properly start the server.\n");
exit(1);
}
-#endif /* WIN32 */
+#endif /* WIN32 */
}
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index fb1037f170..f58a2ad3ec 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.350 2006/08/30 23:34:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.351 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1050,7 +1050,7 @@ _copyRowExpr(RowExpr *from)
static RowCompareExpr *
_copyRowCompareExpr(RowCompareExpr *from)
{
- RowCompareExpr *newnode = makeNode(RowCompareExpr);
+ RowCompareExpr *newnode = makeNode(RowCompareExpr);
COPY_SCALAR_FIELD(rctype);
COPY_NODE_FIELD(opnos);
@@ -2682,7 +2682,7 @@ _copyDeallocateStmt(DeallocateStmt *from)
}
static DropOwnedStmt *
-_copyDropOwnedStmt(DropOwnedStmt * from)
+_copyDropOwnedStmt(DropOwnedStmt *from)
{
DropOwnedStmt *newnode = makeNode(DropOwnedStmt);
@@ -2693,7 +2693,7 @@ _copyDropOwnedStmt(DropOwnedStmt * from)
}
static ReassignOwnedStmt *
-_copyReassignOwnedStmt(ReassignOwnedStmt * from)
+_copyReassignOwnedStmt(ReassignOwnedStmt *from)
{
ReassignOwnedStmt *newnode = makeNode(ReassignOwnedStmt);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 1912cdd319..45dc76af9b 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -18,7 +18,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.284 2006/08/30 23:34:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.285 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1496,7 +1496,7 @@ _equalDeallocateStmt(DeallocateStmt *a, DeallocateStmt *b)
}
static bool
-_equalDropOwnedStmt(DropOwnedStmt * a, DropOwnedStmt * b)
+_equalDropOwnedStmt(DropOwnedStmt *a, DropOwnedStmt *b)
{
COMPARE_NODE_FIELD(roles);
COMPARE_SCALAR_FIELD(behavior);
@@ -1505,7 +1505,7 @@ _equalDropOwnedStmt(DropOwnedStmt * a, DropOwnedStmt * b)
}
static bool
-_equalReassignOwnedStmt(ReassignOwnedStmt * a, ReassignOwnedStmt * b)
+_equalReassignOwnedStmt(ReassignOwnedStmt *a, ReassignOwnedStmt *b)
{
COMPARE_NODE_FIELD(roles);
COMPARE_NODE_FIELD(newrole);
diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c
index 7555cbd0dd..9f6aa22707 100644
--- a/src/backend/nodes/makefuncs.c
+++ b/src/backend/nodes/makefuncs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.51 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.52 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -330,7 +330,7 @@ makeFuncExpr(Oid funcid, Oid rettype, List *args, CoercionForm fformat)
DefElem *
makeDefElem(char *name, Node *arg)
{
- DefElem *res = makeNode(DefElem);
+ DefElem *res = makeNode(DefElem);
res->defname = name;
res->arg = arg;
diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c
index 1d4e1d48e8..7274f59788 100644
--- a/src/backend/nodes/params.c
+++ b/src/backend/nodes/params.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.6 2006/04/22 01:25:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.7 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,14 +37,14 @@ copyParamList(ParamListInfo from)
/* sizeof(ParamListInfoData) includes the first array element */
size = sizeof(ParamListInfoData) +
- (from->numParams - 1) * sizeof(ParamExternData);
+ (from->numParams - 1) *sizeof(ParamExternData);
retval = (ParamListInfo) palloc(size);
memcpy(retval, from, size);
/*
- * Flat-copy is not good enough for pass-by-ref data values, so make
- * a pass over the array to copy those.
+ * Flat-copy is not good enough for pass-by-ref data values, so make a
+ * pass over the array to copy those.
*/
for (i = 0; i < retval->numParams; i++)
{
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 0c2b6920cd..12da0781a0 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.153 2006/09/19 22:49:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.154 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,13 +43,13 @@ static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti);
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte);
+ Index rti, RangeTblEntry *rte);
static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
Index rti, RangeTblEntry *rte);
static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
static RelOptInfo *make_one_rel_by_joins(PlannerInfo *root, int levels_needed,
List *initial_rels);
@@ -253,7 +253,7 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
* set_append_rel_pathlist
* Build access paths for an "append relation"
*
- * The passed-in rel and RTE represent the entire append relation. The
+ * The passed-in rel and RTE represent the entire append relation. The
* relation's contents are computed by appending together the output of
* the individual member relations. Note that in the inheritance case,
* the first member relation is actually the same table as is mentioned in
@@ -271,8 +271,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE; can
* we do better? (This will take some redesign because the executor
- * currently supposes that every rowMark relation is involved in every
- * row returned by the query.)
+ * currently supposes that every rowMark relation is involved in every row
+ * returned by the query.)
*/
if (get_rowmark(root->parse, parentRTindex))
ereport(ERROR,
@@ -336,16 +336,16 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
childrel->max_attr);
/*
- * Compute the child's access paths, and add the cheapest one
- * to the Append path we are constructing for the parent.
+ * Compute the child's access paths, and add the cheapest one to the
+ * Append path we are constructing for the parent.
*
- * It's possible that the child is itself an appendrel, in which
- * case we can "cut out the middleman" and just add its child
- * paths to our own list. (We don't try to do this earlier because
- * we need to apply both levels of transformation to the quals.)
- * This test also handles the case where the child rel need not
- * be scanned because of constraint exclusion: it'll have an
- * Append path with no subpaths, and will vanish from our list.
+ * It's possible that the child is itself an appendrel, in which case
+ * we can "cut out the middleman" and just add its child paths to our
+ * own list. (We don't try to do this earlier because we need to
+ * apply both levels of transformation to the quals.) This test also
+ * handles the case where the child rel need not be scanned because of
+ * constraint exclusion: it'll have an Append path with no subpaths,
+ * and will vanish from our list.
*/
set_rel_pathlist(root, childrel, childRTindex);
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index 269a6ed7e7..f51bac9b0e 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.81 2006/07/14 14:52:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.82 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -429,16 +429,16 @@ clause_selectivity(PlannerInfo *root,
rinfo = (RestrictInfo *) clause;
/*
- * If the clause is marked pseudoconstant, then it will be used as
- * a gating qual and should not affect selectivity estimates; hence
- * return 1.0. The only exception is that a constant FALSE may
- * be taken as having selectivity 0.0, since it will surely mean
- * no rows out of the plan. This case is simple enough that we
- * need not bother caching the result.
+ * If the clause is marked pseudoconstant, then it will be used as a
+ * gating qual and should not affect selectivity estimates; hence
+ * return 1.0. The only exception is that a constant FALSE may be
+ * taken as having selectivity 0.0, since it will surely mean no rows
+ * out of the plan. This case is simple enough that we need not
+ * bother caching the result.
*/
if (rinfo->pseudoconstant)
{
- if (! IsA(rinfo->clause, Const))
+ if (!IsA(rinfo->clause, Const))
return s1;
}
@@ -529,7 +529,7 @@ clause_selectivity(PlannerInfo *root,
else if (IsA(clause, Const))
{
/* bool constant is pretty easy... */
- Const *con = (Const *) clause;
+ Const *con = (Const *) clause;
s1 = con->constisnull ? 0.0 :
DatumGetBool(con->constvalue) ? 1.0 : 0.0;
@@ -542,7 +542,7 @@ clause_selectivity(PlannerInfo *root,
if (IsA(subst, Const))
{
/* bool constant is pretty easy... */
- Const *con = (Const *) subst;
+ Const *con = (Const *) subst;
s1 = con->constisnull ? 0.0 :
DatumGetBool(con->constvalue) ? 1.0 : 0.0;
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index cd289423ec..f2a6d294ee 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -54,7 +54,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.166 2006/09/19 22:49:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.167 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,7 +92,7 @@ double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
-int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
+int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
Cost disable_cost = 100000000.0;
@@ -276,13 +276,13 @@ cost_index(IndexPath *path, PlannerInfo *root,
if (outer_rel != NULL && outer_rel->rows > 1)
{
/*
- * For repeated indexscans, scale up the number of tuples fetched
- * in the Mackert and Lohman formula by the number of scans, so
- * that we estimate the number of pages fetched by all the scans.
- * Then pro-rate the costs for one scan. In this case we assume
- * all the fetches are random accesses. XXX it'd be good to
- * include correlation in this model, but it's not clear how to do
- * that without double-counting cache effects.
+ * For repeated indexscans, scale up the number of tuples fetched in
+ * the Mackert and Lohman formula by the number of scans, so that we
+ * estimate the number of pages fetched by all the scans. Then
+ * pro-rate the costs for one scan. In this case we assume all the
+ * fetches are random accesses. XXX it'd be good to include
+ * correlation in this model, but it's not clear how to do that
+ * without double-counting cache effects.
*/
double num_scans = outer_rel->rows;
@@ -385,7 +385,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
* computed for us by query_planner.
*
* Caller is expected to have ensured that tuples_fetched is greater than zero
- * and rounded to integer (see clamp_row_est). The result will likewise be
+ * and rounded to integer (see clamp_row_est). The result will likewise be
* greater than zero and integral.
*/
double
@@ -406,7 +406,8 @@ index_pages_fetched(double tuples_fetched, BlockNumber pages,
Assert(T <= total_pages);
/* b is pro-rated share of effective_cache_size */
- b = (double) effective_cache_size * T / total_pages;
+ b = (double) effective_cache_size *T / total_pages;
+
/* force it positive and integral */
if (b <= 1.0)
b = 1.0;
@@ -543,10 +544,10 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
if (outer_rel != NULL && outer_rel->rows > 1)
{
/*
- * For repeated bitmap scans, scale up the number of tuples fetched
- * in the Mackert and Lohman formula by the number of scans, so
- * that we estimate the number of pages fetched by all the scans.
- * Then pro-rate for one scan.
+ * For repeated bitmap scans, scale up the number of tuples fetched in
+ * the Mackert and Lohman formula by the number of scans, so that we
+ * estimate the number of pages fetched by all the scans. Then
+ * pro-rate for one scan.
*/
double num_scans = outer_rel->rows;
@@ -573,7 +574,7 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* For small numbers of pages we should charge random_page_cost apiece,
* while if nearly all the table's pages are being read, it's more
- * appropriate to charge seq_page_cost apiece. The effect is nonlinear,
+ * appropriate to charge seq_page_cost apiece. The effect is nonlinear,
* too. For lack of a better idea, interpolate like this to determine the
* cost per page.
*/
@@ -748,7 +749,7 @@ cost_tidscan(Path *path, PlannerInfo *root,
{
/* Each element of the array yields 1 tuple */
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
- Node *arraynode = (Node *) lsecond(saop->args);
+ Node *arraynode = (Node *) lsecond(saop->args);
ntuples += estimate_array_length(arraynode);
}
@@ -849,8 +850,8 @@ cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
Assert(baserel->rtekind == RTE_VALUES);
/*
- * For now, estimate list evaluation cost at one operator eval per
- * list (probably pretty bogus, but is it worth being smarter?)
+ * For now, estimate list evaluation cost at one operator eval per list
+ * (probably pretty bogus, but is it worth being smarter?)
*/
cpu_per_tuple = cpu_operator_cost;
@@ -875,7 +876,7 @@ cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
* If the total volume exceeds work_mem, we switch to a tape-style merge
* algorithm. There will still be about t*log2(t) tuple comparisons in
* total, but we will also need to write and read each tuple once per
- * merge pass. We expect about ceil(logM(r)) merge passes where r is the
+ * merge pass. We expect about ceil(logM(r)) merge passes where r is the
* number of initial runs formed and M is the merge order used by tuplesort.c.
* Since the average initial run should be about twice work_mem, we have
* disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
@@ -1745,7 +1746,7 @@ cost_qual_eval_walker(Node *node, QualCost *total)
* array elements before the answer is determined.
*/
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
- Node *arraynode = (Node *) lsecond(saop->args);
+ Node *arraynode = (Node *) lsecond(saop->args);
total->per_tuple +=
cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
@@ -1967,7 +1968,7 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
*
* If we are doing an outer join, take that into account: the output must
* be at least as large as the non-nullable input. (Is there any chance
- * of being even smarter?) (XXX this is not really right, because it
+ * of being even smarter?) (XXX this is not really right, because it
* assumes all the restriction clauses are join clauses; we should figure
* pushed-down clauses separately.)
*
@@ -2132,10 +2133,10 @@ set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
Assert(rte->rtekind == RTE_VALUES);
/*
- * Estimate number of rows the values list will return.
- * We know this precisely based on the list length (well,
- * barring set-returning functions in list items, but that's
- * a refinement not catered for anywhere else either).
+ * Estimate number of rows the values list will return. We know this
+ * precisely based on the list length (well, barring set-returning
+ * functions in list items, but that's a refinement not catered for
+ * anywhere else either).
*/
rel->tuples = list_length(rte->values_lists);
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 7810012b2b..b15affa54d 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.211 2006/07/22 15:41:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.212 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,10 +54,10 @@ static List *find_saop_paths(PlannerInfo *root, RelOptInfo *rel,
List *clauses, List *outer_clauses,
bool istoplevel, RelOptInfo *outer_rel);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
- List *paths, RelOptInfo *outer_rel);
+ List *paths, RelOptInfo *outer_rel);
static int bitmap_path_comparator(const void *a, const void *b);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel,
- List *paths, RelOptInfo *outer_rel);
+ List *paths, RelOptInfo *outer_rel);
static List *pull_indexpath_quals(Path *bitmapqual);
static bool lists_intersect_ptr(List *list1, List *list2);
static bool match_clause_to_indexcol(IndexOptInfo *index,
@@ -66,7 +66,7 @@ static bool match_clause_to_indexcol(IndexOptInfo *index,
Relids outer_relids,
SaOpControl saop_control);
static bool is_indexable_operator(Oid expr_op, Oid opclass,
- bool indexkey_on_left);
+ bool indexkey_on_left);
static bool match_rowcompare_to_indexcol(IndexOptInfo *index,
int indexcol,
Oid opclass,
@@ -324,8 +324,8 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
/*
* 1. Match the index against the available restriction clauses.
* found_clause is set true only if at least one of the current
- * clauses was used (and, if saop_control is SAOP_REQUIRE, it
- * has to have been a ScalarArrayOpExpr clause).
+ * clauses was used (and, if saop_control is SAOP_REQUIRE, it has to
+ * have been a ScalarArrayOpExpr clause).
*/
restrictclauses = group_clauses_by_indexkey(index,
clauses,
@@ -422,8 +422,8 @@ find_saop_paths(PlannerInfo *root, RelOptInfo *rel,
ListCell *l;
/*
- * Since find_usable_indexes is relatively expensive, don't bother to
- * run it unless there are some top-level ScalarArrayOpExpr clauses.
+ * Since find_usable_indexes is relatively expensive, don't bother to run
+ * it unless there are some top-level ScalarArrayOpExpr clauses.
*/
foreach(l, clauses)
{
@@ -588,15 +588,15 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. As a compromise, we sort the paths by selectivity. We
+ * OR clauses. As a compromise, we sort the paths by selectivity. We
* always take the first, and sequentially add on paths that result in a
* lower estimated cost.
*
* We also make some effort to detect directly redundant input paths, as
- * can happen if there are multiple possibly usable indexes. (Another
- * way it can happen is that best_inner_indexscan will find the same OR
- * join clauses that create_or_index_quals has pulled OR restriction
- * clauses out of, and then both versions show up as duplicate paths.) We
+ * can happen if there are multiple possibly usable indexes. (Another way
+ * it can happen is that best_inner_indexscan will find the same OR join
+ * clauses that create_or_index_quals has pulled OR restriction clauses
+ * out of, and then both versions show up as duplicate paths.) We
* consider an index redundant if any of its index conditions were already
* used by earlier indexes. (We could use predicate_implied_by to have a
* more intelligent, but much more expensive, check --- but in most cases
@@ -796,7 +796,7 @@ lists_intersect_ptr(List *list1, List *list2)
foreach(cell1, list1)
{
- void *datum1 = lfirst(cell1);
+ void *datum1 = lfirst(cell1);
ListCell *cell2;
foreach(cell2, list2)
@@ -963,7 +963,7 @@ group_clauses_by_indexkey(IndexOptInfo *index,
* It is also possible to match RowCompareExpr clauses to indexes (but
* currently, only btree indexes handle this). In this routine we will
* report a match if the first column of the row comparison matches the
- * target index column. This is sufficient to guarantee that some index
+ * target index column. This is sufficient to guarantee that some index
* condition can be constructed from the RowCompareExpr --- whether the
* remaining columns match the index too is considered in
* expand_indexqual_rowcompare().
@@ -1004,10 +1004,10 @@ match_clause_to_indexcol(IndexOptInfo *index,
bool plain_op;
/*
- * Never match pseudoconstants to indexes. (Normally this could not
- * happen anyway, since a pseudoconstant clause couldn't contain a
- * Var, but what if someone builds an expression index on a constant?
- * It's not totally unreasonable to do so with a partial index, either.)
+ * Never match pseudoconstants to indexes. (Normally this could not
+ * happen anyway, since a pseudoconstant clause couldn't contain a Var,
+ * but what if someone builds an expression index on a constant? It's not
+ * totally unreasonable to do so with a partial index, either.)
*/
if (rinfo->pseudoconstant)
return false;
@@ -1421,9 +1421,9 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
* NOTE: because we cache on outer_relids rather than outer_rel->relids,
* we will report the same path and hence path cost for joins with
* different sets of irrelevant rels on the outside. Now that cost_index
- * is sensitive to outer_rel->rows, this is not really right. However
- * the error is probably not large. Is it worth establishing a separate
- * cache entry for each distinct outer_rel->relids set to get this right?
+ * is sensitive to outer_rel->rows, this is not really right. However the
+ * error is probably not large. Is it worth establishing a separate cache
+ * entry for each distinct outer_rel->relids set to get this right?
*/
foreach(l, rel->index_inner_paths)
{
@@ -1442,11 +1442,11 @@ best_inner_indexscan(PlannerInfo *root, RelOptInfo *rel,
*
* Note: because we include restriction clauses, we will find indexscans
* that could be plain indexscans, ie, they don't require the join context
- * at all. This may seem redundant, but we need to include those scans in
+ * at all. This may seem redundant, but we need to include those scans in
* the input given to choose_bitmap_and() to be sure we find optimal AND
- * combinations of join and non-join scans. Also, even if the "best
- * inner indexscan" is just a plain indexscan, it will have a different
- * cost estimate because of cache effects.
+ * combinations of join and non-join scans. Also, even if the "best inner
+ * indexscan" is just a plain indexscan, it will have a different cost
+ * estimate because of cache effects.
*/
clause_list = find_clauses_for_join(root, rel, outer_relids, isouterjoin);
@@ -2210,7 +2210,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
foreach(l, (List *) lfirst(clausegroup_item))
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Expr *clause = rinfo->clause;
+ Expr *clause = rinfo->clause;
/* First check for boolean cases */
if (IsBooleanOpclass(curClass))
@@ -2240,7 +2240,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
{
resultquals = list_concat(resultquals,
expand_indexqual_opclause(rinfo,
- curClass));
+ curClass));
}
else if (IsA(clause, ScalarArrayOpExpr))
{
@@ -2340,6 +2340,7 @@ static List *
expand_indexqual_opclause(RestrictInfo *rinfo, Oid opclass)
{
Expr *clause = rinfo->clause;
+
/* we know these will succeed */
Node *leftop = get_leftop(clause);
Node *rightop = get_rightop(clause);
@@ -2421,7 +2422,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opclass)
* column matches) or a simple OpExpr (if the first-column match is all
* there is). In these cases the modified clause is always "<=" or ">="
* even when the original was "<" or ">" --- this is necessary to match all
- * the rows that could match the original. (We are essentially building a
+ * the rows that could match the original. (We are essentially building a
* lossy version of the row comparison when we do this.)
*/
static RestrictInfo *
@@ -2430,18 +2431,18 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
int indexcol)
{
RowCompareExpr *clause = (RowCompareExpr *) rinfo->clause;
- bool var_on_left;
- int op_strategy;
- Oid op_subtype;
- bool op_recheck;
- int matching_cols;
- Oid expr_op;
- List *opclasses;
- List *subtypes;
- List *new_ops;
- ListCell *largs_cell;
- ListCell *rargs_cell;
- ListCell *opnos_cell;
+ bool var_on_left;
+ int op_strategy;
+ Oid op_subtype;
+ bool op_recheck;
+ int matching_cols;
+ Oid expr_op;
+ List *opclasses;
+ List *subtypes;
+ List *new_ops;
+ ListCell *largs_cell;
+ ListCell *rargs_cell;
+ ListCell *opnos_cell;
/* We have to figure out (again) how the first col matches */
var_on_left = match_index_to_operand((Node *) linitial(clause->largs),
@@ -2459,12 +2460,12 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
subtypes = list_make1_oid(op_subtype);
/*
- * See how many of the remaining columns match some index column
- * in the same way. A note about rel membership tests: we assume
- * that the clause as a whole is already known to use only Vars from
- * the indexed relation and possibly some acceptable outer relations.
- * So the "other" side of any potential index condition is OK as long
- * as it doesn't use Vars from the indexed relation.
+ * See how many of the remaining columns match some index column in the
+ * same way. A note about rel membership tests: we assume that the clause
+ * as a whole is already known to use only Vars from the indexed relation
+ * and possibly some acceptable outer relations. So the "other" side of
+ * any potential index condition is OK as long as it doesn't use Vars from
+ * the indexed relation.
*/
matching_cols = 1;
largs_cell = lnext(list_head(clause->largs));
@@ -2498,10 +2499,10 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
break; /* no good, volatile comparison value */
/*
- * The Var side can match any column of the index. If the user
- * does something weird like having multiple identical index
- * columns, we insist the match be on the first such column,
- * to avoid confusing the executor.
+ * The Var side can match any column of the index. If the user does
+ * something weird like having multiple identical index columns, we
+ * insist the match be on the first such column, to avoid confusing
+ * the executor.
*/
for (i = 0; i < index->ncolumns; i++)
{
@@ -2534,9 +2535,9 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
return rinfo;
/*
- * We have to generate a subset rowcompare (possibly just one OpExpr).
- * The painful part of this is changing < to <= or > to >=, so deal with
- * that first.
+ * We have to generate a subset rowcompare (possibly just one OpExpr). The
+ * painful part of this is changing < to <= or > to >=, so deal with that
+ * first.
*/
if (op_strategy == BTLessEqualStrategyNumber ||
op_strategy == BTGreaterEqualStrategyNumber)
@@ -2546,8 +2547,8 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
}
else
{
- ListCell *opclasses_cell;
- ListCell *subtypes_cell;
+ ListCell *opclasses_cell;
+ ListCell *subtypes_cell;
if (op_strategy == BTLessStrategyNumber)
op_strategy = BTLessEqualStrategyNumber;
@@ -2561,13 +2562,13 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
expr_op = get_opclass_member(lfirst_oid(opclasses_cell),
lfirst_oid(subtypes_cell),
op_strategy);
- if (!OidIsValid(expr_op)) /* should not happen */
+ if (!OidIsValid(expr_op)) /* should not happen */
elog(ERROR, "could not find member %d of opclass %u",
op_strategy, lfirst_oid(opclasses_cell));
if (!var_on_left)
{
expr_op = get_commutator(expr_op);
- if (!OidIsValid(expr_op)) /* should not happen */
+ if (!OidIsValid(expr_op)) /* should not happen */
elog(ERROR, "could not find commutator of member %d of opclass %u",
op_strategy, lfirst_oid(opclasses_cell));
}
@@ -2596,7 +2597,7 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo,
}
else
{
- Expr *opexpr;
+ Expr *opexpr;
opexpr = make_opclause(linitial_oid(new_ops), BOOLOID, false,
copyObject(linitial(clause->largs)),
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index f5e9b1e987..6882439ca3 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.106 2006/08/17 17:06:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.107 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@ static void hash_inner_and_outer(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, JoinType jointype);
static Path *best_appendrel_indexscan(PlannerInfo *root, RelOptInfo *rel,
- RelOptInfo *outer_rel, JoinType jointype);
+ RelOptInfo *outer_rel, JoinType jointype);
static List *select_mergejoin_clauses(RelOptInfo *joinrel,
RelOptInfo *outerrel,
RelOptInfo *innerrel,
@@ -544,9 +544,9 @@ match_unsorted_outer(PlannerInfo *root,
* mergejoin using a subset of the merge clauses. Here, we consider
* both cheap startup cost and cheap total cost. We can ignore
* inner_cheapest_total on the first iteration, since we already made
- * a path with it --- but not on later iterations with shorter
- * sort keys, because then we are considering a different situation,
- * viz using a simpler mergejoin to avoid a sort of the inner rel.
+ * a path with it --- but not on later iterations with shorter sort
+ * keys, because then we are considering a different situation, viz
+ * using a simpler mergejoin to avoid a sort of the inner rel.
*/
num_sortkeys = list_length(innersortkeys);
if (num_sortkeys > 1 && !useallclauses)
@@ -792,7 +792,7 @@ hash_inner_and_outer(PlannerInfo *root,
* best_appendrel_indexscan
* Finds the best available set of inner indexscans for a nestloop join
* with the given append relation on the inside and the given outer_rel
- * outside. Returns an AppendPath comprising the best inner scans, or
+ * outside. Returns an AppendPath comprising the best inner scans, or
* NULL if there are no possible inner indexscans.
*/
static Path *
@@ -820,9 +820,9 @@ best_appendrel_indexscan(PlannerInfo *root, RelOptInfo *rel,
Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
/*
- * Check to see if child was rejected by constraint exclusion.
- * If so, it will have a cheapest_total_path that's an Append path
- * with no members (see set_plain_rel_pathlist).
+ * Check to see if child was rejected by constraint exclusion. If so,
+ * it will have a cheapest_total_path that's an Append path with no
+ * members (see set_plain_rel_pathlist).
*/
if (IsA(childrel->cheapest_total_path, AppendPath) &&
((AppendPath *) childrel->cheapest_total_path)->subpaths == NIL)
@@ -835,10 +835,10 @@ best_appendrel_indexscan(PlannerInfo *root, RelOptInfo *rel,
outer_rel, jointype);
/*
- * If no luck on an indexpath for this rel, we'll still consider
- * an Append substituting the cheapest-total inner path. However
- * we must find at least one indexpath, else there's not going to
- * be any improvement over the base path for the appendrel.
+ * If no luck on an indexpath for this rel, we'll still consider an
+ * Append substituting the cheapest-total inner path. However we must
+ * find at least one indexpath, else there's not going to be any
+ * improvement over the base path for the appendrel.
*/
if (bestinnerjoin)
found_indexscan = true;
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index ac12bbd5f7..bed9db5043 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.79 2006/03/05 15:58:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.80 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -87,11 +87,11 @@ make_rels_by_joins(PlannerInfo *root, int level, List **joinrels)
/*
* An exception occurs when there is a clauseless join inside a
- * construct that restricts join order, i.e., an outer join RHS
- * or an IN (sub-SELECT) construct. Here, the rel may well have
- * join clauses against stuff outside the OJ RHS or IN sub-SELECT,
- * but the clauseless join *must* be done before we can make use
- * of those join clauses. So do the clauseless join bit.
+ * construct that restricts join order, i.e., an outer join RHS or
+ * an IN (sub-SELECT) construct. Here, the rel may well have join
+ * clauses against stuff outside the OJ RHS or IN sub-SELECT, but
+ * the clauseless join *must* be done before we can make use of
+ * those join clauses. So do the clauseless join bit.
*
* See also the last-ditch case below.
*/
@@ -386,9 +386,9 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
joinrelids = bms_union(rel1->relids, rel2->relids);
/*
- * If we have any outer joins, the proposed join might be illegal; and
- * in any case we have to determine its join type. Scan the OJ list
- * for conflicts.
+ * If we have any outer joins, the proposed join might be illegal; and in
+ * any case we have to determine its join type. Scan the OJ list for
+ * conflicts.
*/
jointype = JOIN_INNER; /* default if no match to an OJ */
is_valid_inner = true;
@@ -485,16 +485,16 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);
/*
- * This IN clause is not relevant unless its RHS overlaps the
- * proposed join. (Check this first as a fast path for dismissing
- * most irrelevant INs quickly.)
+ * This IN clause is not relevant unless its RHS overlaps the proposed
+ * join. (Check this first as a fast path for dismissing most
+ * irrelevant INs quickly.)
*/
if (!bms_overlap(ininfo->righthand, joinrelids))
continue;
/*
- * If we are still building the IN clause's RHS, then this IN
- * clause isn't relevant yet.
+ * If we are still building the IN clause's RHS, then this IN clause
+ * isn't relevant yet.
*/
if (bms_is_subset(joinrelids, ininfo->righthand))
continue;
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index 0e602a4476..56142535c1 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.80 2006/07/14 14:52:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.81 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -106,8 +106,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
- * indexquals. We pass outer_rel = NULL so that sub-clauses
- * that are actually joins will be ignored.
+ * indexquals. We pass outer_rel = NULL so that sub-clauses that
+ * are actually joins will be ignored.
*/
List *orpaths;
ListCell *k;
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index fb5d38255d..b254598fcf 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.78 2006/08/17 17:02:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.79 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -909,7 +909,7 @@ get_cheapest_fractional_path_for_pathkeys(List *paths,
* representing a backwards scan of the index. Return NIL if can't do it.
*
* If 'canonical' is TRUE, we remove duplicate pathkeys (which can occur
- * if two index columns are equijoined, eg WHERE x = 1 AND y = 1). This
+ * if two index columns are equijoined, eg WHERE x = 1 AND y = 1). This
* is required if the result is to be compared directly to a canonical query
* pathkeys list. However, some callers want a list with exactly one entry
* per index column, and they must pass FALSE.
@@ -1106,8 +1106,8 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
outer_expr = (Node *)
makeRelabelType((Expr *) outer_expr,
((RelabelType *) sub_key)->resulttype,
- ((RelabelType *) sub_key)->resulttypmod,
- ((RelabelType *) sub_key)->relabelformat);
+ ((RelabelType *) sub_key)->resulttypmod,
+ ((RelabelType *) sub_key)->relabelformat);
}
else
continue;
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index a912994987..a7dd5b262d 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -24,7 +24,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.27 2006/03/05 15:58:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.28 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,7 +125,7 @@ IsTidEqualAnyClause(ScalarArrayOpExpr *node, int varno)
/* CTID must be first argument */
if (arg1 && IsA(arg1, Var))
{
- Var *var = (Var *) arg1;
+ Var *var = (Var *) arg1;
if (var->varattno == SelfItemPointerAttributeNumber &&
var->vartype == TIDOID &&
@@ -187,7 +187,7 @@ TidQualFromExpr(Node *expr, int varno)
{
foreach(l, ((BoolExpr *) expr)->args)
{
- List *frtn = TidQualFromExpr((Node *) lfirst(l), varno);
+ List *frtn = TidQualFromExpr((Node *) lfirst(l), varno);
if (frtn)
rlst = list_concat(rlst, frtn);
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index ae51505954..14f1f1a10f 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.216 2006/08/02 01:59:45 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.217 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path
static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path,
Plan *outer_plan, Plan *inner_plan);
static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path,
@@ -98,7 +98,7 @@ static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
Index scanrelid);
static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
- Index scanrelid);
+ Index scanrelid);
static BitmapAnd *make_bitmap_and(List *bitmapplans);
static BitmapOr *make_bitmap_or(List *bitmapplans);
static NestLoop *make_nestloop(List *tlist,
@@ -216,9 +216,9 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
tlist = build_relation_tlist(rel);
/*
- * Extract the relevant restriction clauses from the parent relation.
- * The executor must apply all these restrictions during the scan,
- * except for pseudoconstants which we'll take care of below.
+ * Extract the relevant restriction clauses from the parent relation. The
+ * executor must apply all these restrictions during the scan, except for
+ * pseudoconstants which we'll take care of below.
*/
scan_clauses = rel->baserestrictinfo;
@@ -282,9 +282,9 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
}
/*
- * If there are any pseudoconstant clauses attached to this node,
- * insert a gating Result node that evaluates the pseudoconstants
- * as one-time quals.
+ * If there are any pseudoconstant clauses attached to this node, insert a
+ * gating Result node that evaluates the pseudoconstants as one-time
+ * quals.
*/
if (root->hasPseudoConstantQuals)
plan = create_gating_plan(root, plan, scan_clauses);
@@ -327,8 +327,8 @@ use_physical_tlist(RelOptInfo *rel)
int i;
/*
- * We can do this for real relation scans, subquery scans, function
- * scans, and values scans (but not for, eg, joins).
+ * We can do this for real relation scans, subquery scans, function scans,
+ * and values scans (but not for, eg, joins).
*/
if (rel->rtekind != RTE_RELATION &&
rel->rtekind != RTE_SUBQUERY &&
@@ -466,9 +466,9 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path)
}
/*
- * If there are any pseudoconstant clauses attached to this node,
- * insert a gating Result node that evaluates the pseudoconstants
- * as one-time quals.
+ * If there are any pseudoconstant clauses attached to this node, insert a
+ * gating Result node that evaluates the pseudoconstants as one-time
+ * quals.
*/
if (root->hasPseudoConstantQuals)
plan = create_gating_plan(root, plan, best_path->joinrestrictinfo);
@@ -991,9 +991,9 @@ create_bitmap_scan_plan(PlannerInfo *root,
*
* Unlike create_indexscan_plan(), we need take no special thought here
* for partial index predicates; this is because the predicate conditions
- * are already listed in bitmapqualorig and indexquals. Bitmap scans
- * have to do it that way because predicate conditions need to be rechecked
- * if the scan becomes lossy.
+ * are already listed in bitmapqualorig and indexquals. Bitmap scans have
+ * to do it that way because predicate conditions need to be rechecked if
+ * the scan becomes lossy.
*/
qpqual = NIL;
foreach(l, scan_clauses)
@@ -1137,6 +1137,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
subindexquals = lappend(subindexquals,
make_ands_explicit(subindexqual));
}
+
/*
* In the presence of ScalarArrayOpExpr quals, we might have built
* BitmapOrPaths with just one subpath; don't add an OR step.
@@ -1152,7 +1153,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
plan->total_cost = opath->path.total_cost;
plan->plan_rows =
clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
- plan->plan_width = 0; /* meaningless */
+ plan->plan_width = 0; /* meaningless */
}
/*
@@ -1202,10 +1203,10 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
Expr *pred = (Expr *) lfirst(l);
/*
- * We know that the index predicate must have been implied by
- * the query condition as a whole, but it may or may not be
- * implied by the conditions that got pushed into the
- * bitmapqual. Avoid generating redundant conditions.
+ * We know that the index predicate must have been implied by the
+ * query condition as a whole, but it may or may not be implied by
+ * the conditions that got pushed into the bitmapqual. Avoid
+ * generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
{
@@ -1244,8 +1245,8 @@ create_tidscan_plan(PlannerInfo *root, TidPath *best_path,
scan_clauses = extract_actual_clauses(scan_clauses, false);
/*
- * Remove any clauses that are TID quals. This is a bit tricky since
- * the tidquals list has implicit OR semantics.
+ * Remove any clauses that are TID quals. This is a bit tricky since the
+ * tidquals list has implicit OR semantics.
*/
ortidquals = best_path->tidquals;
if (list_length(ortidquals) > 1)
@@ -1333,7 +1334,7 @@ create_functionscan_plan(PlannerInfo *root, Path *best_path,
*/
static ValuesScan *
create_valuesscan_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses)
+ List *tlist, List *scan_clauses)
{
ValuesScan *scan_plan;
Index scan_relid = best_path->parent->relid;
@@ -1411,9 +1412,9 @@ create_nestloop_plan(PlannerInfo *root,
* join quals; failing to prove that doesn't result in an incorrect
* plan. It is the right way to proceed because adding more quals to
* the stuff we got from the original query would just make it harder
- * to detect duplication. (Also, to change this we'd have to be
- * wary of UPDATE/DELETE/SELECT FOR UPDATE target relations; see
- * notes above about EvalPlanQual.)
+ * to detect duplication. (Also, to change this we'd have to be wary
+ * of UPDATE/DELETE/SELECT FOR UPDATE target relations; see notes
+ * above about EvalPlanQual.)
*/
BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath;
@@ -1693,7 +1694,7 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
if (IsA(clause, OpExpr))
{
- OpExpr *op = (OpExpr *) clause;
+ OpExpr *op = (OpExpr *) clause;
if (list_length(op->args) != 2)
elog(ERROR, "indexqual clause is not binary opclause");
@@ -1718,7 +1719,7 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
else if (IsA(clause, RowCompareExpr))
{
RowCompareExpr *rc = (RowCompareExpr *) clause;
- ListCell *lc;
+ ListCell *lc;
/*
* Check to see if the indexkey is on the right; if so, commute
@@ -1734,13 +1735,13 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
* attribute this is and change the indexkey operand as needed.
*
* Save the index opclass for only the first column. We will
- * return the operator and opclass info for just the first
- * column of the row comparison; the executor will have to
- * look up the rest if it needs them.
+ * return the operator and opclass info for just the first column
+ * of the row comparison; the executor will have to look up the
+ * rest if it needs them.
*/
foreach(lc, rc->largs)
{
- Oid tmp_opclass;
+ Oid tmp_opclass;
lfirst(lc) = fix_indexqual_operand(lfirst(lc),
index,
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 8299f6756b..2a8e1f528e 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.122 2006/09/19 22:49:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.123 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,10 +40,10 @@ int join_collapse_limit;
static void add_vars_to_targetlist(PlannerInfo *root, List *vars,
Relids where_needed);
static List *deconstruct_recurse(PlannerInfo *root, Node *jtnode,
- bool below_outer_join, Relids *qualscope);
+ bool below_outer_join, Relids *qualscope);
static OuterJoinInfo *make_outerjoininfo(PlannerInfo *root,
- Relids left_rels, Relids right_rels,
- bool is_full_join, Node *clause);
+ Relids left_rels, Relids right_rels,
+ bool is_full_join, Node *clause);
static void distribute_qual_to_rels(PlannerInfo *root, Node *clause,
bool is_pushed_down,
bool is_deduced,
@@ -71,12 +71,12 @@ static void check_hashjoinable(RestrictInfo *restrictinfo);
* appearing in the jointree.
*
* The initial invocation must pass root->parse->jointree as the value of
- * jtnode. Internally, the function recurses through the jointree.
+ * jtnode. Internally, the function recurses through the jointree.
*
* At the end of this process, there should be one baserel RelOptInfo for
* every non-join RTE that is used in the query. Therefore, this routine
* is the only place that should call build_simple_rel with reloptkind
- * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
+ * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
* "other rel" RelOptInfos for the members of any appendrels we find here.)
*/
void
@@ -181,7 +181,7 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars, Relids where_needed)
* deconstruct_jointree
* Recursively scan the query's join tree for WHERE and JOIN/ON qual
* clauses, and add these to the appropriate restrictinfo and joininfo
- * lists belonging to base RelOptInfos. Also, add OuterJoinInfo nodes
+ * lists belonging to base RelOptInfos. Also, add OuterJoinInfo nodes
* to root->oj_info_list for any outer joins appearing in the query tree.
* Return a "joinlist" data structure showing the join order decisions
* that need to be made by make_one_rel().
@@ -198,9 +198,9 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars, Relids where_needed)
* be evaluated at the lowest level where all the variables it mentions are
* available. However, we cannot push a qual down into the nullable side(s)
* of an outer join since the qual might eliminate matching rows and cause a
- * NULL row to be incorrectly emitted by the join. Therefore, we artificially
+ * NULL row to be incorrectly emitted by the join. Therefore, we artificially
* OR the minimum-relids of such an outer join into the required_relids of
- * clauses appearing above it. This forces those clauses to be delayed until
+ * clauses appearing above it. This forces those clauses to be delayed until
* application of the outer join (or maybe even higher in the join tree).
*/
List *
@@ -258,20 +258,19 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
ListCell *l;
/*
- * First, recurse to handle child joins. We collapse subproblems
- * into a single joinlist whenever the resulting joinlist wouldn't
- * exceed from_collapse_limit members. Also, always collapse
- * one-element subproblems, since that won't lengthen the joinlist
- * anyway.
+ * First, recurse to handle child joins. We collapse subproblems into
+ * a single joinlist whenever the resulting joinlist wouldn't exceed
+ * from_collapse_limit members. Also, always collapse one-element
+ * subproblems, since that won't lengthen the joinlist anyway.
*/
*qualscope = NULL;
joinlist = NIL;
remaining = list_length(f->fromlist);
foreach(l, f->fromlist)
{
- Relids sub_qualscope;
- List *sub_joinlist;
- int sub_members;
+ Relids sub_qualscope;
+ List *sub_joinlist;
+ int sub_members;
sub_joinlist = deconstruct_recurse(root, lfirst(l),
below_outer_join,
@@ -407,7 +406,8 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join,
(list_length(leftjoinlist) + list_length(rightjoinlist) <=
join_collapse_limit))
joinlist = list_concat(leftjoinlist, rightjoinlist);
- else /* force the join order at this node */
+ else
+ /* force the join order at this node */
joinlist = list_make1(list_make2(leftjoinlist, rightjoinlist));
}
else
@@ -454,9 +454,9 @@ make_outerjoininfo(PlannerInfo *root,
* any nullable rel is FOR UPDATE/SHARE.
*
* You might be wondering why this test isn't made far upstream in the
- * parser. It's because the parser hasn't got enough info --- consider
- * FOR UPDATE applied to a view. Only after rewriting and flattening
- * do we know whether the view contains an outer join.
+ * parser. It's because the parser hasn't got enough info --- consider
+ * FOR UPDATE applied to a view. Only after rewriting and flattening do
+ * we know whether the view contains an outer join.
*/
foreach(l, root->parse->rowMarks)
{
@@ -475,7 +475,7 @@ make_outerjoininfo(PlannerInfo *root,
{
ojinfo->min_lefthand = left_rels;
ojinfo->min_righthand = right_rels;
- ojinfo->lhs_strict = false; /* don't care about this */
+ ojinfo->lhs_strict = false; /* don't care about this */
return ojinfo;
}
@@ -494,19 +494,19 @@ make_outerjoininfo(PlannerInfo *root,
ojinfo->lhs_strict = bms_overlap(strict_relids, left_rels);
/*
- * Required LHS is basically the LHS rels mentioned in the clause...
- * but if there aren't any, punt and make it the full LHS, to avoid
- * having an empty min_lefthand which will confuse later processing.
- * (We don't try to be smart about such cases, just correct.)
- * We may have to add more rels based on lower outer joins; see below.
+ * Required LHS is basically the LHS rels mentioned in the clause... but
+ * if there aren't any, punt and make it the full LHS, to avoid having an
+ * empty min_lefthand which will confuse later processing. (We don't try
+ * to be smart about such cases, just correct.) We may have to add more
+ * rels based on lower outer joins; see below.
*/
ojinfo->min_lefthand = bms_intersect(clause_relids, left_rels);
if (bms_is_empty(ojinfo->min_lefthand))
ojinfo->min_lefthand = bms_copy(left_rels);
/*
- * Required RHS is normally the full set of RHS rels. Sometimes we
- * can exclude some, see below.
+ * Required RHS is normally the full set of RHS rels. Sometimes we can
+ * exclude some, see below.
*/
ojinfo->min_righthand = bms_copy(right_rels);
@@ -532,6 +532,7 @@ make_outerjoininfo(PlannerInfo *root,
ojinfo->min_lefthand = bms_add_members(ojinfo->min_lefthand,
otherinfo->min_righthand);
}
+
/*
* For a lower OJ in our RHS, if our join condition does not use the
* lower join's RHS and the lower OJ's join condition is strict, we
@@ -630,23 +631,23 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* If the clause is an outer-join clause, we must force it to the OJ's
* semantic level to preserve semantics.
*
- * Otherwise, when the clause contains volatile functions, we force it
- * to be evaluated at its original syntactic level. This preserves the
+ * Otherwise, when the clause contains volatile functions, we force it to
+ * be evaluated at its original syntactic level. This preserves the
* expected semantics.
*
- * When the clause contains no volatile functions either, it is actually
- * a pseudoconstant clause that will not change value during any one
- * execution of the plan, and hence can be used as a one-time qual in
- * a gating Result plan node. We put such a clause into the regular
+ * When the clause contains no volatile functions either, it is actually a
+ * pseudoconstant clause that will not change value during any one
+ * execution of the plan, and hence can be used as a one-time qual in a
+ * gating Result plan node. We put such a clause into the regular
* RestrictInfo lists for the moment, but eventually createplan.c will
* pull it out and make a gating Result node immediately above whatever
- * plan node the pseudoconstant clause is assigned to. It's usually
- * best to put a gating node as high in the plan tree as possible.
- * If we are not below an outer join, we can actually push the
- * pseudoconstant qual all the way to the top of the tree. If we are
- * below an outer join, we leave the qual at its original syntactic level
- * (we could push it up to just below the outer join, but that seems more
- * complex than it's worth).
+ * plan node the pseudoconstant clause is assigned to. It's usually best
+ * to put a gating node as high in the plan tree as possible. If we are
+ * not below an outer join, we can actually push the pseudoconstant qual
+ * all the way to the top of the tree. If we are below an outer join, we
+ * leave the qual at its original syntactic level (we could push it up to
+ * just below the outer join, but that seems more complex than it's
+ * worth).
*/
if (bms_is_empty(relids))
{
@@ -793,8 +794,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* Mark the qual as "pushed down" if it can be applied at a level below
* its original syntactic level. This allows us to distinguish original
* JOIN/ON quals from higher-level quals pushed down to the same joinrel.
- * A qual originating from WHERE is always considered "pushed down".
- * Note that for an outer-join qual, we have to compare to ojscope not
+ * A qual originating from WHERE is always considered "pushed down". Note
+ * that for an outer-join qual, we have to compare to ojscope not
* qualscope.
*/
if (!is_pushed_down)
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 2baf8e391d..e64340ed21 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.21 2006/08/12 02:52:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.22 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -217,7 +217,7 @@ find_minmax_aggs_walker(Node *node, List **context)
{
Aggref *aggref = (Aggref *) node;
Oid aggsortop;
- Expr *curTarget;
+ Expr *curTarget;
MinMaxAggInfo *info;
ListCell *l;
@@ -464,7 +464,7 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info)
subparse->limitOffset = NULL;
subparse->limitCount = (Node *) makeConst(INT8OID, sizeof(int64),
Int64GetDatum(1),
- false, false /* not by val */);
+ false, false /* not by val */ );
/*
* Generate the plan for the subquery. We already have a Path for the
@@ -478,9 +478,9 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info)
* in our cost estimates. But that looks painful, and in most cases the
* fraction of NULLs isn't high enough to change the decision.
*
- * The NOT NULL qual has to go on the actual indexscan; create_plan
- * might have stuck a gating Result atop that, if there were any
- * pseudoconstant quals.
+ * The NOT NULL qual has to go on the actual indexscan; create_plan might
+ * have stuck a gating Result atop that, if there were any pseudoconstant
+ * quals.
*/
plan = create_plan(&subroot, (Path *) info->path);
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index ae44e2bc35..e01379e8e3 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.96 2006/09/19 22:49:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.97 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -126,8 +126,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
/*
* Construct RelOptInfo nodes for all base relations in query, and
* indirectly for all appendrel member relations ("other rels"). This
- * will give us a RelOptInfo for every "simple" (non-join) rel involved
- * in the query.
+ * will give us a RelOptInfo for every "simple" (non-join) rel involved in
+ * the query.
*
* Note: the reason we find the rels by searching the jointree and
* appendrel list, rather than just scanning the rangetable, is that the
@@ -137,11 +137,11 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
add_base_rels_to_query(root, (Node *) parse->jointree);
/*
- * We should now have size estimates for every actual table involved
- * in the query, so we can compute total_table_pages. Note that
- * appendrels are not double-counted here, even though we don't bother
- * to distinguish RelOptInfos for appendrel parents, because the parents
- * will still have size zero.
+ * We should now have size estimates for every actual table involved in
+ * the query, so we can compute total_table_pages. Note that appendrels
+ * are not double-counted here, even though we don't bother to distinguish
+ * RelOptInfos for appendrel parents, because the parents will still have
+ * size zero.
*
* XXX if a table is self-joined, we will count it once per appearance,
* which perhaps is the wrong thing ... but that's not completely clear,
@@ -155,7 +155,7 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
if (brel == NULL)
continue;
- Assert(brel->relid == rti); /* sanity check on array */
+ Assert(brel->relid == rti); /* sanity check on array */
total_pages += (double) brel->pages;
}
@@ -165,8 +165,8 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
* Examine the targetlist and qualifications, adding entries to baserel
* targetlists for all referenced Vars. Restrict and join clauses are
* added to appropriate lists belonging to the mentioned relations. We
- * also build lists of equijoined keys for pathkey construction, and
- * form a target joinlist for make_one_rel() to work from.
+ * also build lists of equijoined keys for pathkey construction, and form
+ * a target joinlist for make_one_rel() to work from.
*
* Note: all subplan nodes will have "flat" (var-only) tlists. This
* implies that all expression evaluations are done at the root of the
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index f8eb95baf4..da18bc5a6f 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.208 2006/08/12 02:52:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.209 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -427,9 +427,9 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
/*
* If the query has any join RTEs, replace join alias variables with
* base-relation variables. We must do this before sublink processing,
- * else sublinks expanded out from join aliases wouldn't get processed.
- * We can skip it in VALUES lists, however, since they can't contain
- * any Vars at all.
+ * else sublinks expanded out from join aliases wouldn't get processed. We
+ * can skip it in VALUES lists, however, since they can't contain any Vars
+ * at all.
*/
if (root->hasJoinRTEs && kind != EXPRKIND_VALUES)
expr = flatten_join_alias_vars(root, expr);
@@ -450,8 +450,8 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
* still must do it for quals (to get AND/OR flatness); and if we are in a
* subquery we should not assume it will be done only once.
*
- * For VALUES lists we never do this at all, again on the grounds that
- * we should optimize for one-time evaluation.
+ * For VALUES lists we never do this at all, again on the grounds that we
+ * should optimize for one-time evaluation.
*/
if (kind != EXPRKIND_VALUES &&
(root->parse->jointree->fromlist != NIL ||
@@ -593,8 +593,8 @@ inheritance_planner(PlannerInfo *root)
subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ );
/*
- * If this child rel was excluded by constraint exclusion, exclude
- * it from the plan.
+ * If this child rel was excluded by constraint exclusion, exclude it
+ * from the plan.
*/
if (is_dummy_plan(subplan))
continue;
@@ -1098,12 +1098,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* Deal with the RETURNING clause if any. It's convenient to pass the
- * returningList through setrefs.c now rather than at top level (if
- * we waited, handling inherited UPDATE/DELETE would be much harder).
+ * returningList through setrefs.c now rather than at top level (if we
+ * waited, handling inherited UPDATE/DELETE would be much harder).
*/
if (parse->returningList)
{
- List *rlist;
+ List *rlist;
rlist = set_returning_clause_references(parse->returningList,
result_plan,
@@ -1132,11 +1132,11 @@ is_dummy_plan(Plan *plan)
{
if (IsA(plan, Result))
{
- List *rcqual = (List *) ((Result *) plan)->resconstantqual;
+ List *rcqual = (List *) ((Result *) plan)->resconstantqual;
if (list_length(rcqual) == 1)
{
- Const *constqual = (Const *) linitial(rcqual);
+ Const *constqual = (Const *) linitial(rcqual);
if (constqual && IsA(constqual, Const))
{
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 3f3a719941..298c8d1cff 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.125 2006/08/28 14:32:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.126 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -453,13 +453,13 @@ trivial_subqueryscan(SubqueryScan *plan)
return false; /* tlist doesn't match junk status */
/*
- * We accept either a Var referencing the corresponding element of
- * the subplan tlist, or a Const equaling the subplan element.
- * See generate_setop_tlist() for motivation.
+ * We accept either a Var referencing the corresponding element of the
+ * subplan tlist, or a Const equaling the subplan element. See
+ * generate_setop_tlist() for motivation.
*/
if (ptle->expr && IsA(ptle->expr, Var))
{
- Var *var = (Var *) ptle->expr;
+ Var *var = (Var *) ptle->expr;
Assert(var->varno == plan->scan.scanrelid);
Assert(var->varlevelsup == 0);
@@ -793,7 +793,7 @@ set_join_references(Join *join)
*
* To handle bitmap-scan plan trees, we have to be able to recurse down
* to the bottom BitmapIndexScan nodes; likewise, appendrel indexscans
- * require recursing through Append nodes. This is split out as a separate
+ * require recursing through Append nodes. This is split out as a separate
* function so that it can recurse.
*/
static void
@@ -1339,7 +1339,7 @@ replace_vars_with_subplan_refs_mutator(Node *node,
* adjust any Vars that refer to other tables to reference junk tlist
* entries in the top plan's targetlist. Vars referencing the result
* table should be left alone, however (the executor will evaluate them
- * using the actual heap tuple, after firing triggers if any). In the
+ * using the actual heap tuple, after firing triggers if any). In the
* adjusted RETURNING list, result-table Vars will still have their
* original varno, but Vars for other rels will have varno OUTER.
*
@@ -1359,8 +1359,8 @@ set_returning_clause_references(List *rlist,
/*
* We can perform the desired Var fixup by abusing the join_references
- * machinery that normally handles inner indexscan fixup. We search
- * the top plan's targetlist for Vars of non-result relations, and use
+ * machinery that normally handles inner indexscan fixup. We search the
+ * top plan's targetlist for Vars of non-result relations, and use
* join_references to convert RETURNING Vars into references to those
* tlist entries, while leaving result-rel Vars as-is.
*/
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 95e560478d..94c97a55aa 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.111 2006/08/02 01:59:46 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.112 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -85,10 +85,10 @@ typedef struct finalize_primnode_context
static Node *convert_testexpr(Node *testexpr,
- int rtindex,
- List **righthandIds);
+ int rtindex,
+ List **righthandIds);
static Node *convert_testexpr_mutator(Node *node,
- convert_testexpr_context *context);
+ convert_testexpr_context *context);
static bool subplan_is_hashable(SubLink *slink, SubPlan *node);
static bool hash_ok_operator(OpExpr *expr);
static Node *replace_correlation_vars_mutator(Node *node, void *context);
@@ -498,13 +498,13 @@ convert_testexpr_mutator(Node *node,
return NULL;
if (IsA(node, Param))
{
- Param *param = (Param *) node;
+ Param *param = (Param *) node;
if (param->paramkind == PARAM_SUBLINK)
{
/*
- * We expect to encounter the Params in column-number sequence.
- * We could handle non-sequential order if necessary, but for now
+ * We expect to encounter the Params in column-number sequence. We
+ * could handle non-sequential order if necessary, but for now
* there's no need. (This is also a useful cross-check that we
* aren't finding any unexpected Params.)
*/
@@ -514,13 +514,14 @@ convert_testexpr_mutator(Node *node,
if (context->rtindex)
{
/* Make the Var node representing the subplan's result */
- Var *newvar;
+ Var *newvar;
newvar = makeVar(context->rtindex,
param->paramid,
param->paramtype,
-1,
0);
+
/*
* Copy it for caller. NB: we need a copy to avoid having
* doubly-linked substructure in the modified parse tree.
@@ -584,10 +585,10 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
return false;
/*
- * The estimated size of the subquery result must fit in work_mem.
- * (Note: we use sizeof(HeapTupleHeaderData) here even though the tuples
- * will actually be stored as MinimalTuples; this provides some fudge
- * factor for hashtable overhead.)
+ * The estimated size of the subquery result must fit in work_mem. (Note:
+ * we use sizeof(HeapTupleHeaderData) here even though the tuples will
+ * actually be stored as MinimalTuples; this provides some fudge factor
+ * for hashtable overhead.)
*/
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
@@ -616,7 +617,7 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
{
foreach(l, ((BoolExpr *) slink->testexpr)->args)
{
- Node *andarg = (Node *) lfirst(l);
+ Node *andarg = (Node *) lfirst(l);
if (!IsA(andarg, OpExpr))
return false; /* probably can't happen */
@@ -686,8 +687,8 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)
return NULL;
if (sublink->testexpr && IsA(sublink->testexpr, OpExpr))
{
- List *opclasses;
- List *opstrats;
+ List *opclasses;
+ List *opstrats;
get_op_btree_interpretation(((OpExpr *) sublink->testexpr)->opno,
&opclasses, &opstrats);
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index bea7c03a73..eedd1a8ff0 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.43 2006/08/19 02:48:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.44 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,20 +40,20 @@ typedef struct reduce_outer_joins_state
} reduce_outer_joins_state;
static Node *pull_up_simple_subquery(PlannerInfo *root, Node *jtnode,
- RangeTblEntry *rte,
- bool below_outer_join,
- bool append_rel_member);
+ RangeTblEntry *rte,
+ bool below_outer_join,
+ bool append_rel_member);
static Node *pull_up_simple_union_all(PlannerInfo *root, Node *jtnode,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void pull_up_union_leaf_queries(Node *setOp, PlannerInfo *root,
- int parentRTindex, Query *setOpQuery);
+ int parentRTindex, Query *setOpQuery);
static void make_setop_translation_lists(Query *query,
Index newvarno,
List **col_mappings, List **translated_vars);
static bool is_simple_subquery(Query *subquery);
static bool is_simple_union_all(Query *subquery);
static bool is_simple_union_all_recurse(Node *setOp, Query *setOpQuery,
- List *colTypes);
+ List *colTypes);
static bool has_nullable_targetlist(Query *subquery);
static bool is_safe_append_member(Query *subquery);
static void resolvenew_in_jointree(Node *jtnode, int varno,
@@ -66,7 +66,7 @@ static void reduce_outer_joins_pass2(Node *jtnode,
static void fix_in_clause_relids(List *in_info_list, int varno,
Relids subrelids);
static void fix_append_rel_relids(List *append_rel_list, int varno,
- Relids subrelids);
+ Relids subrelids);
static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
@@ -136,7 +136,7 @@ pull_up_IN_clauses(PlannerInfo *root, Node *node)
* side of an outer join. This restricts what we can do.
*
* append_rel_member is true if we are looking at a member subquery of
- * an append relation. This puts some different restrictions on what
+ * an append relation. This puts some different restrictions on what
* we can do.
*
* A tricky aspect of this code is that if we pull up a subquery we have
@@ -173,8 +173,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode,
* variables evaluated at the right place in the modified plan tree.
* Fix it someday.
*
- * If we are looking at an append-relation member, we can't pull
- * it up unless is_safe_append_member says so.
+ * If we are looking at an append-relation member, we can't pull it up
+ * unless is_safe_append_member says so.
*/
if (rte->rtekind == RTE_SUBQUERY &&
is_simple_subquery(rte->subquery) &&
@@ -186,14 +186,15 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode,
/*
* Alternatively, is it a simple UNION ALL subquery? If so, flatten
- * into an "append relation". We can do this regardless of nullability
- * considerations since this transformation does not result in
- * propagating non-Var expressions into upper levels of the query.
+ * into an "append relation". We can do this regardless of
+ * nullability considerations since this transformation does not
+ * result in propagating non-Var expressions into upper levels of the
+ * query.
*
* It's also safe to do this regardless of whether this query is
- * itself an appendrel member. (If you're thinking we should try
- * to flatten the two levels of appendrel together, you're right;
- * but we handle that in set_append_rel_pathlist, not here.)
+ * itself an appendrel member. (If you're thinking we should try to
+ * flatten the two levels of appendrel together, you're right; but we
+ * handle that in set_append_rel_pathlist, not here.)
*/
if (rte->rtekind == RTE_SUBQUERY &&
is_simple_union_all(rte->subquery))
@@ -258,7 +259,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode,
* Attempt to pull up a single simple subquery.
*
* jtnode is a RangeTblRef that has been tentatively identified as a simple
- * subquery by pull_up_subqueries. We return the replacement jointree node,
+ * subquery by pull_up_subqueries. We return the replacement jointree node,
* or jtnode itself if we determine that the subquery can't be pulled up after
* all.
*/
@@ -275,11 +276,10 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
ListCell *rt;
/*
- * Need a modifiable copy of the subquery to hack on. Even if we
- * didn't sometimes choose not to pull up below, we must do this
- * to avoid problems if the same subquery is referenced from
- * multiple jointree items (which can't happen normally, but might
- * after rule rewriting).
+ * Need a modifiable copy of the subquery to hack on. Even if we didn't
+ * sometimes choose not to pull up below, we must do this to avoid
+ * problems if the same subquery is referenced from multiple jointree
+ * items (which can't happen normally, but might after rule rewriting).
*/
subquery = copyObject(rte->subquery);
@@ -287,8 +287,8 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Create a PlannerInfo data structure for this subquery.
*
* NOTE: the next few steps should match the first processing in
- * subquery_planner(). Can we refactor to avoid code duplication,
- * or would that just make things uglier?
+ * subquery_planner(). Can we refactor to avoid code duplication, or
+ * would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
subroot->parse = subquery;
@@ -296,12 +296,12 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
subroot->append_rel_list = NIL;
/*
- * Pull up any IN clauses within the subquery's WHERE, so that we
- * don't leave unoptimized INs behind.
+ * Pull up any IN clauses within the subquery's WHERE, so that we don't
+ * leave unoptimized INs behind.
*/
if (subquery->hasSubLinks)
subquery->jointree->quals = pull_up_IN_clauses(subroot,
- subquery->jointree->quals);
+ subquery->jointree->quals);
/*
* Recursively pull up the subquery's subqueries, so that
@@ -310,19 +310,19 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
*
* Note: below_outer_join = false is correct here even if we are within an
* outer join in the upper query; the lower query starts with a clean
- * slate for outer-join semantics. Likewise, we say we aren't handling
- * an appendrel member.
+ * slate for outer-join semantics. Likewise, we say we aren't handling an
+ * appendrel member.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subroot, (Node *) subquery->jointree, false, false);
/*
- * Now we must recheck whether the subquery is still simple enough
- * to pull up. If not, abandon processing it.
+ * Now we must recheck whether the subquery is still simple enough to pull
+ * up. If not, abandon processing it.
*
- * We don't really need to recheck all the conditions involved,
- * but it's easier just to keep this "if" looking the same as the
- * one in pull_up_subqueries.
+ * We don't really need to recheck all the conditions involved, but it's
+ * easier just to keep this "if" looking the same as the one in
+ * pull_up_subqueries.
*/
if (is_simple_subquery(subquery) &&
(!below_outer_join || has_nullable_targetlist(subquery)) &&
@@ -335,18 +335,18 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
/*
* Give up, return unmodified RangeTblRef.
*
- * Note: The work we just did will be redone when the subquery
- * gets planned on its own. Perhaps we could avoid that by
- * storing the modified subquery back into the rangetable, but
- * I'm not gonna risk it now.
+ * Note: The work we just did will be redone when the subquery gets
+ * planned on its own. Perhaps we could avoid that by storing the
+ * modified subquery back into the rangetable, but I'm not gonna risk
+ * it now.
*/
return jtnode;
}
/*
- * Adjust level-0 varnos in subquery so that we can append its
- * rangetable to upper query's. We have to fix the subquery's
- * in_info_list and append_rel_list, as well.
+ * Adjust level-0 varnos in subquery so that we can append its rangetable
+ * to upper query's. We have to fix the subquery's in_info_list and
+ * append_rel_list, as well.
*/
rtoffset = list_length(parse->rtable);
OffsetVarNodes((Node *) subquery, rtoffset, 0);
@@ -354,18 +354,18 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
OffsetVarNodes((Node *) subroot->append_rel_list, rtoffset, 0);
/*
- * Upper-level vars in subquery are now one level closer to their
- * parent than before.
+ * Upper-level vars in subquery are now one level closer to their parent
+ * than before.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);
IncrementVarSublevelsUp((Node *) subroot->in_info_list, -1, 1);
IncrementVarSublevelsUp((Node *) subroot->append_rel_list, -1, 1);
/*
- * Replace all of the top query's references to the subquery's
- * outputs with copies of the adjusted subtlist items, being
- * careful not to replace any of the jointree structure. (This'd
- * be a lot cleaner if we could use query_tree_mutator.)
+ * Replace all of the top query's references to the subquery's outputs
+ * with copies of the adjusted subtlist items, being careful not to
+ * replace any of the jointree structure. (This'd be a lot cleaner if we
+ * could use query_tree_mutator.)
*/
subtlist = subquery->targetList;
parse->targetList = (List *)
@@ -404,27 +404,27 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
}
/*
- * Now append the adjusted rtable entries to upper query. (We hold
- * off until after fixing the upper rtable entries; no point in
- * running that code on the subquery ones too.)
+ * Now append the adjusted rtable entries to upper query. (We hold off
+ * until after fixing the upper rtable entries; no point in running that
+ * code on the subquery ones too.)
*/
parse->rtable = list_concat(parse->rtable, subquery->rtable);
/*
- * Pull up any FOR UPDATE/SHARE markers, too. (OffsetVarNodes
- * already adjusted the marker rtindexes, so just concat the lists.)
+ * Pull up any FOR UPDATE/SHARE markers, too. (OffsetVarNodes already
+ * adjusted the marker rtindexes, so just concat the lists.)
*/
parse->rowMarks = list_concat(parse->rowMarks, subquery->rowMarks);
/*
- * We also have to fix the relid sets of any parent InClauseInfo
- * nodes. (This could perhaps be done by ResolveNew, but it would
- * clutter that routine's API unreasonably.)
+ * We also have to fix the relid sets of any parent InClauseInfo nodes.
+ * (This could perhaps be done by ResolveNew, but it would clutter that
+ * routine's API unreasonably.)
*
- * Likewise, relids appearing in AppendRelInfo nodes have to be fixed
- * (but we took care of their translated_vars lists above). We already
- * checked that this won't require introducing multiple subrelids into
- * the single-slot AppendRelInfo structs.
+ * Likewise, relids appearing in AppendRelInfo nodes have to be fixed (but
+ * we took care of their translated_vars lists above). We already checked
+ * that this won't require introducing multiple subrelids into the
+ * single-slot AppendRelInfo structs.
*/
if (root->in_info_list || root->append_rel_list)
{
@@ -444,8 +444,8 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
subroot->append_rel_list);
/*
- * We don't have to do the equivalent bookkeeping for outer-join
- * info, because that hasn't been set up yet.
+ * We don't have to do the equivalent bookkeeping for outer-join info,
+ * because that hasn't been set up yet.
*/
Assert(root->oj_info_list == NIL);
Assert(subroot->oj_info_list == NIL);
@@ -457,8 +457,8 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
/* subquery won't be pulled up if it hasAggs, so no work there */
/*
- * Return the adjusted subquery jointree to replace the
- * RangeTblRef entry in parent's jointree.
+ * Return the adjusted subquery jointree to replace the RangeTblRef entry
+ * in parent's jointree.
*/
return (Node *) subquery->jointree;
}
@@ -468,7 +468,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
* Pull up a single simple UNION ALL subquery.
*
* jtnode is a RangeTblRef that has been identified as a simple UNION ALL
- * subquery by pull_up_subqueries. We pull up the leaf subqueries and
+ * subquery by pull_up_subqueries. We pull up the leaf subqueries and
* build an "append relation" for the union set. The result value is just
* jtnode, since we don't actually need to change the query jointree.
*/
@@ -524,9 +524,9 @@ pull_up_union_leaf_queries(Node *setOp, PlannerInfo *root, int parentRTindex,
/*
* Upper-level vars in subquery are now one level closer to their
- * parent than before. We don't have to worry about offsetting
- * varnos, though, because any such vars must refer to stuff above
- * the level of the query we are pulling into.
+ * parent than before. We don't have to worry about offsetting
+ * varnos, though, because any such vars must refer to stuff above the
+ * level of the query we are pulling into.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);
@@ -658,9 +658,9 @@ is_simple_subquery(Query *subquery)
/*
* Don't pull up a subquery that has any volatile functions in its
- * targetlist. Otherwise we might introduce multiple evaluations of
- * these functions, if they get copied to multiple places in the upper
- * query, leading to surprising results.
+ * targetlist. Otherwise we might introduce multiple evaluations of these
+ * functions, if they get copied to multiple places in the upper query,
+ * leading to surprising results.
*/
if (contain_volatile_functions((Node *) subquery->targetList))
return false;
@@ -799,16 +799,15 @@ is_safe_append_member(Query *subquery)
ListCell *l;
/*
- * It's only safe to pull up the child if its jointree contains
- * exactly one RTE, else the AppendRelInfo data structure breaks.
- * The one base RTE could be buried in several levels of FromExpr,
- * however.
+ * It's only safe to pull up the child if its jointree contains exactly
+ * one RTE, else the AppendRelInfo data structure breaks. The one base RTE
+ * could be buried in several levels of FromExpr, however.
*
- * Also, the child can't have any WHERE quals because there's no
- * place to put them in an appendrel. (This is a bit annoying...)
- * If we didn't need to check this, we'd just test whether
- * get_relids_in_jointree() yields a singleton set, to be more
- * consistent with the coding of fix_append_rel_relids().
+ * Also, the child can't have any WHERE quals because there's no place to
+ * put them in an appendrel. (This is a bit annoying...) If we didn't
+ * need to check this, we'd just test whether get_relids_in_jointree()
+ * yields a singleton set, to be more consistent with the coding of
+ * fix_append_rel_relids().
*/
jtnode = subquery->jointree;
while (IsA(jtnode, FromExpr))
@@ -825,10 +824,10 @@ is_safe_append_member(Query *subquery)
/*
* XXX For the moment we also have to insist that the subquery's tlist
* includes only simple Vars. This is pretty annoying, but fixing it
- * seems to require nontrivial changes --- mainly because joinrel
- * tlists are presently assumed to contain only Vars. Perhaps a
- * pseudo-variable mechanism similar to the one speculated about
- * in pull_up_subqueries' comments would help? FIXME someday.
+ * seems to require nontrivial changes --- mainly because joinrel tlists
+ * are presently assumed to contain only Vars. Perhaps a pseudo-variable
+ * mechanism similar to the one speculated about in pull_up_subqueries'
+ * comments would help? FIXME someday.
*/
foreach(l, subquery->targetList)
{
@@ -1190,9 +1189,9 @@ fix_append_rel_relids(List *append_rel_list, int varno, Relids subrelids)
/*
* We only want to extract the member relid once, but we mustn't fail
- * immediately if there are multiple members; it could be that none of
- * the AppendRelInfo nodes refer to it. So compute it on first use.
- * Note that bms_singleton_member will complain if set is not singleton.
+ * immediately if there are multiple members; it could be that none of the
+ * AppendRelInfo nodes refer to it. So compute it on first use. Note that
+ * bms_singleton_member will complain if set is not singleton.
*/
foreach(l, append_rel_list)
{
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index b0d2e7d18c..22e1dd07f2 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -25,7 +25,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.55 2006/07/14 14:52:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.56 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -212,7 +212,7 @@ push_nots(Expr *qual)
if (negator)
{
- OpExpr *newopexpr = makeNode(OpExpr);
+ OpExpr *newopexpr = makeNode(OpExpr);
newopexpr->opno = negator;
newopexpr->opfuncid = InvalidOid;
@@ -228,9 +228,9 @@ push_nots(Expr *qual)
{
/*
* Negate a ScalarArrayOpExpr if there is a negator for its operator;
- * for example x = ANY (list) becomes x <> ALL (list).
- * Otherwise, retain the clause as it is (the NOT can't be pushed down
- * any farther).
+ * for example x = ANY (list) becomes x <> ALL (list). Otherwise,
+ * retain the clause as it is (the NOT can't be pushed down any
+ * farther).
*/
ScalarArrayOpExpr *saopexpr = (ScalarArrayOpExpr *) qual;
Oid negator = get_negator(saopexpr->opno);
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index c4bbd9d2ee..4d44242169 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -16,7 +16,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.83 2006/08/12 02:52:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.84 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -157,13 +157,13 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* If the query has a RETURNING list, add resjunk entries for any Vars
* used in RETURNING that belong to other relations. We need to do this
- * to make these Vars available for the RETURNING calculation. Vars
- * that belong to the result rel don't need to be added, because they
- * will be made to refer to the actual heap tuple.
+ * to make these Vars available for the RETURNING calculation. Vars that
+ * belong to the result rel don't need to be added, because they will be
+ * made to refer to the actual heap tuple.
*/
if (parse->returningList && list_length(parse->rtable) > 1)
{
- List *vars;
+ List *vars;
ListCell *l;
vars = pull_var_clause((Node *) parse->returningList, false);
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 3bf7223199..44aebd9ed3 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -6,14 +6,14 @@
*
* There are two code paths in the planner for set-operation queries.
* If a subquery consists entirely of simple UNION ALL operations, it
- * is converted into an "append relation". Otherwise, it is handled
+ * is converted into an "append relation". Otherwise, it is handled
* by the general code in this module (plan_set_operations and its
* subroutines). There is some support code here for the append-relation
* case, but most of the heavy lifting for that is done elsewhere,
* notably in prepjointree.c and allpaths.c.
*
* There is also some code here to support planning of queries that use
- * inheritance (SELECT FROM foo*). Inheritance trees are converted into
+ * inheritance (SELECT FROM foo*). Inheritance trees are converted into
* append relations, and thenceforth share code with the UNION ALL case.
*
*
@@ -22,7 +22,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.133 2006/08/10 02:36:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.134 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,14 +69,14 @@ static List *generate_append_tlist(List *colTypes, bool flag,
List *input_plans,
List *refnames_tlist);
static void expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte,
- Index rti);
+ Index rti);
static void make_inh_translation_lists(Relation oldrelation,
- Relation newrelation,
- Index newvarno,
- List **col_mappings,
- List **translated_vars);
+ Relation newrelation,
+ Index newvarno,
+ List **col_mappings,
+ List **translated_vars);
static Node *adjust_appendrel_attrs_mutator(Node *node,
- AppendRelInfo *context);
+ AppendRelInfo *context);
static Relids adjust_relid_set(Relids relids, Index oldrelid, Index newrelid);
static List *adjust_inherited_tlist(List *tlist,
AppendRelInfo *context);
@@ -713,21 +713,21 @@ find_all_inheritors(Oid parentrel)
/*
* expand_inherited_tables
* Expand each rangetable entry that represents an inheritance set
- * into an "append relation". At the conclusion of this process,
+ * into an "append relation". At the conclusion of this process,
* the "inh" flag is set in all and only those RTEs that are append
* relation parents.
*/
void
expand_inherited_tables(PlannerInfo *root)
{
- Index nrtes;
- Index rti;
- ListCell *rl;
+ Index nrtes;
+ Index rti;
+ ListCell *rl;
/*
- * expand_inherited_rtentry may add RTEs to parse->rtable; there is
- * no need to scan them since they can't have inh=true. So just
- * scan as far as the original end of the rtable list.
+ * expand_inherited_rtentry may add RTEs to parse->rtable; there is no
+ * need to scan them since they can't have inh=true. So just scan as far
+ * as the original end of the rtable list.
*/
nrtes = list_length(root->parse->rtable);
rl = list_head(root->parse->rtable);
@@ -745,7 +745,7 @@ expand_inherited_tables(PlannerInfo *root)
* Check whether a rangetable entry represents an inheritance set.
* If so, add entries for all the child tables to the query's
* rangetable, and build AppendRelInfo nodes for all the child tables
- * and add them to root->append_rel_list. If not, clear the entry's
+ * and add them to root->append_rel_list. If not, clear the entry's
* "inh" flag to prevent later code from looking for AppendRelInfos.
*
* Note that the original RTE is considered to represent the whole
@@ -801,22 +801,22 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
}
/*
- * Must open the parent relation to examine its tupdesc. We need not
- * lock it since the rewriter already obtained at least AccessShareLock
- * on each relation used in the query.
+ * Must open the parent relation to examine its tupdesc. We need not lock
+ * it since the rewriter already obtained at least AccessShareLock on each
+ * relation used in the query.
*/
oldrelation = heap_open(parentOID, NoLock);
/*
- * However, for each child relation we add to the query, we must obtain
- * an appropriate lock, because this will be the first use of those
- * relations in the parse/rewrite/plan pipeline.
+ * However, for each child relation we add to the query, we must obtain an
+ * appropriate lock, because this will be the first use of those relations
+ * in the parse/rewrite/plan pipeline.
*
* If the parent relation is the query's result relation, then we need
* RowExclusiveLock. Otherwise, check to see if the relation is accessed
* FOR UPDATE/SHARE or not. We can't just grab AccessShareLock because
* then the executor would be trying to upgrade the lock, leading to
- * possible deadlocks. (This code should match the parser and rewriter.)
+ * possible deadlocks. (This code should match the parser and rewriter.)
*/
if (rti == parse->resultRelation)
lockmode = RowExclusiveLock;
@@ -900,8 +900,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
/*
* The executor will check the parent table's access permissions when it
- * examines the parent's added RTE entry. There's no need to check
- * twice, so turn off access check bits in the original RTE.
+ * examines the parent's added RTE entry. There's no need to check twice,
+ * so turn off access check bits in the original RTE.
*/
rte->requiredPerms = 0;
}
@@ -948,8 +948,8 @@ make_inh_translation_lists(Relation oldrelation, Relation newrelation,
atttypmod = att->atttypmod;
/*
- * When we are generating the "translation list" for the parent
- * table of an inheritance set, no need to search for matches.
+ * When we are generating the "translation list" for the parent table
+ * of an inheritance set, no need to search for matches.
*/
if (oldrelation == newrelation)
{
@@ -964,9 +964,8 @@ make_inh_translation_lists(Relation oldrelation, Relation newrelation,
/*
* Otherwise we have to search for the matching column by name.
- * There's no guarantee it'll have the same column position,
- * because of cases like ALTER TABLE ADD COLUMN and multiple
- * inheritance.
+ * There's no guarantee it'll have the same column position, because
+ * of cases like ALTER TABLE ADD COLUMN and multiple inheritance.
*/
for (new_attno = 0; new_attno < newnatts; new_attno++)
{
@@ -979,7 +978,7 @@ make_inh_translation_lists(Relation oldrelation, Relation newrelation,
if (atttypid != att->atttypid || atttypmod != att->atttypmod)
elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type",
attname, RelationGetRelationName(newrelation));
-
+
numbers = lappend_int(numbers, new_attno + 1);
vars = lappend(vars, makeVar(newvarno,
(AttrNumber) (new_attno + 1),
@@ -1060,7 +1059,7 @@ adjust_appendrel_attrs_mutator(Node *node, AppendRelInfo *context)
var->varnoold = context->child_relid;
if (var->varattno > 0)
{
- Node *newnode;
+ Node *newnode;
if (var->varattno > list_length(context->translated_vars))
elog(ERROR, "attribute %d of relation \"%s\" does not exist",
@@ -1075,10 +1074,10 @@ adjust_appendrel_attrs_mutator(Node *node, AppendRelInfo *context)
else if (var->varattno == 0)
{
/*
- * Whole-row Var: if we are dealing with named rowtypes,
- * we can use a whole-row Var for the child table plus a
- * coercion step to convert the tuple layout to the parent's
- * rowtype. Otherwise we have to generate a RowExpr.
+ * Whole-row Var: if we are dealing with named rowtypes, we
+ * can use a whole-row Var for the child table plus a coercion
+ * step to convert the tuple layout to the parent's rowtype.
+ * Otherwise we have to generate a RowExpr.
*/
if (OidIsValid(context->child_reltype))
{
@@ -1217,9 +1216,9 @@ adjust_appendrel_attrs_mutator(Node *node, AppendRelInfo *context)
* BUT: although we don't need to recurse into subplans, we do need to
* make sure that they are copied, not just referenced as
* expression_tree_mutator will do by default. Otherwise we'll have the
- * same subplan node referenced from each arm of the finished APPEND
- * plan, which will cause trouble in the executor. This is a kluge that
- * should go away when we redesign querytrees.
+ * same subplan node referenced from each arm of the finished APPEND plan,
+ * which will cause trouble in the executor. This is a kluge that should
+ * go away when we redesign querytrees.
*/
if (is_subplan(node))
{
@@ -1267,7 +1266,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid)
*
* The relid sets are adjusted by substituting child_relid for parent_relid.
* (NOTE: oldrel is not necessarily the parent_relid relation!) We are also
- * careful to map attribute numbers within the array properly. User
+ * careful to map attribute numbers within the array properly. User
* attributes have to be mapped through col_mappings, but system attributes
* and whole-row references always have the same attno.
*
@@ -1353,7 +1352,7 @@ adjust_inherited_tlist(List *tlist, AppendRelInfo *context)
foreach(tl, tlist)
{
TargetEntry *tle = (TargetEntry *) lfirst(tl);
- int newattno;
+ int newattno;
if (tle->resjunk)
continue; /* ignore junk items */
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 99d3147aeb..c9d9512b2f 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.221 2006/09/28 20:51:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.222 2006/10/04 00:29:55 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -403,7 +403,7 @@ count_agg_clauses_walker(Node *node, AggClauseCounts *counts)
Form_pg_aggregate aggform;
Oid aggtranstype;
int i;
- ListCell *l;
+ ListCell *l;
Assert(aggref->agglevelsup == 0);
counts->numAggs++;
@@ -887,7 +887,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
*
* Returns the set of all Relids that are referenced in the clause in such
* a way that the clause cannot possibly return TRUE if any of these Relids
- * is an all-NULL row. (It is OK to err on the side of conservatism; hence
+ * is an all-NULL row. (It is OK to err on the side of conservatism; hence
* the analysis here is simplistic.)
*
* The semantics here are subtly different from contain_nonstrict_functions:
@@ -1020,7 +1020,7 @@ find_nonnullable_rels_walker(Node *node, bool top_level)
static bool
is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK)
{
- Node *rightop;
+ Node *rightop;
/* The contained operator must be strict. */
if (!op_strict(expr->opno))
@@ -1288,12 +1288,13 @@ CommuteRowCompareExpr(RowCompareExpr *clause)
}
clause->opnos = newops;
+
/*
- * Note: we don't bother to update the opclasses list, but just set
- * it to empty. This is OK since this routine is currently only used
- * for index quals, and the index machinery won't use the opclass
- * information. The original opclass list is NOT valid if we have
- * commuted any cross-type comparisons, so don't leave it in place.
+ * Note: we don't bother to update the opclasses list, but just set it to
+ * empty. This is OK since this routine is currently only used for index
+ * quals, and the index machinery won't use the opclass information. The
+ * original opclass list is NOT valid if we have commuted any cross-type
+ * comparisons, so don't leave it in place.
*/
clause->opclasses = NIL; /* XXX */
@@ -2109,9 +2110,9 @@ eval_const_expressions_mutator(Node *node,
context);
if (arg && IsA(arg, RowExpr))
{
- RowExpr *rarg = (RowExpr *) arg;
- List *newargs = NIL;
- ListCell *l;
+ RowExpr *rarg = (RowExpr *) arg;
+ List *newargs = NIL;
+ ListCell *l;
/*
* We break ROW(...) IS [NOT] NULL into separate tests on its
@@ -2120,15 +2121,15 @@ eval_const_expressions_mutator(Node *node,
*/
foreach(l, rarg->args)
{
- Node *relem = (Node *) lfirst(l);
+ Node *relem = (Node *) lfirst(l);
/*
- * A constant field refutes the whole NullTest if it's of
- * the wrong nullness; else we can discard it.
+ * A constant field refutes the whole NullTest if it's of the
+ * wrong nullness; else we can discard it.
*/
if (relem && IsA(relem, Const))
{
- Const *carg = (Const *) relem;
+ Const *carg = (Const *) relem;
if (carg->constisnull ?
(ntest->nulltesttype == IS_NOT_NULL) :
@@ -2152,8 +2153,8 @@ eval_const_expressions_mutator(Node *node,
}
if (arg && IsA(arg, Const))
{
- Const *carg = (Const *) arg;
- bool result;
+ Const *carg = (Const *) arg;
+ bool result;
switch (ntest->nulltesttype)
{
@@ -2166,7 +2167,7 @@ eval_const_expressions_mutator(Node *node,
default:
elog(ERROR, "unrecognized nulltesttype: %d",
(int) ntest->nulltesttype);
- result = false; /* keep compiler quiet */
+ result = false; /* keep compiler quiet */
break;
}
@@ -2188,8 +2189,8 @@ eval_const_expressions_mutator(Node *node,
context);
if (arg && IsA(arg, Const))
{
- Const *carg = (Const *) arg;
- bool result;
+ Const *carg = (Const *) arg;
+ bool result;
switch (btest->booltesttype)
{
@@ -2218,7 +2219,7 @@ eval_const_expressions_mutator(Node *node,
default:
elog(ERROR, "unrecognized booltesttype: %d",
(int) btest->booltesttype);
- result = false; /* keep compiler quiet */
+ result = false; /* keep compiler quiet */
break;
}
@@ -3174,7 +3175,7 @@ expression_tree_walker(Node *node,
break;
case T_Aggref:
{
- Aggref *expr = (Aggref *) node;
+ Aggref *expr = (Aggref *) node;
if (expression_tree_walker((Node *) expr->args,
walker, context))
@@ -3452,8 +3453,8 @@ query_tree_walker(Query *query,
if (query->utilityStmt)
{
/*
- * Certain utility commands contain general-purpose Querys embedded
- * in them --- if this is one, invoke the walker on the sub-Query.
+ * Certain utility commands contain general-purpose Querys embedded in
+ * them --- if this is one, invoke the walker on the sub-Query.
*/
if (IsA(query->utilityStmt, CopyStmt))
{
@@ -3828,8 +3829,8 @@ expression_tree_mutator(Node *node,
break;
case T_RowCompareExpr:
{
- RowCompareExpr *rcexpr = (RowCompareExpr *) node;
- RowCompareExpr *newnode;
+ RowCompareExpr *rcexpr = (RowCompareExpr *) node;
+ RowCompareExpr *newnode;
FLATCOPY(newnode, rcexpr, RowCompareExpr);
MUTATE(newnode->largs, rcexpr->largs, List *);
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 2cc79ed239..01f3151bee 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.132 2006/08/02 01:59:46 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.133 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -418,7 +418,7 @@ create_seqscan_path(PlannerInfo *root, RelOptInfo *rel)
* for an ordered index, or NoMovementScanDirection for
* an unordered index.
* 'outer_rel' is the outer relation if this is a join inner indexscan path.
- * (pathkeys and indexscandir are ignored if so.) NULL if not.
+ * (pathkeys and indexscandir are ignored if so.) NULL if not.
*
* Returns the new path node.
*/
@@ -680,12 +680,12 @@ create_result_path(List *quals)
/* Ideally should define cost_result(), but I'm too lazy */
pathnode->path.startup_cost = 0;
pathnode->path.total_cost = cpu_tuple_cost;
+
/*
- * In theory we should include the qual eval cost as well, but
- * at present that doesn't accomplish much except duplicate work that
- * will be done again in make_result; since this is only used for
- * degenerate cases, nothing interesting will be done with the path
- * cost values...
+ * In theory we should include the qual eval cost as well, but at present
+ * that doesn't accomplish much except duplicate work that will be done
+ * again in make_result; since this is only used for degenerate cases,
+ * nothing interesting will be done with the path cost values...
*/
return pathnode;
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index aba4b88157..de14ddd2dc 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.126 2006/09/19 22:49:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.127 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,9 +78,9 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
List *indexinfos = NIL;
/*
- * We need not lock the relation since it was already locked, either
- * by the rewriter or when expand_inherited_rtentry() added it to the
- * query's rangetable.
+ * We need not lock the relation since it was already locked, either by
+ * the rewriter or when expand_inherited_rtentry() added it to the query's
+ * rangetable.
*/
relation = heap_open(relationObjectId, NoLock);
@@ -95,8 +95,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
/*
* Estimate relation size --- unless it's an inheritance parent, in which
- * case the size will be computed later in set_append_rel_pathlist, and
- * we must leave it zero for now to avoid bollixing the total_table_pages
+ * case the size will be computed later in set_append_rel_pathlist, and we
+ * must leave it zero for now to avoid bollixing the total_table_pages
* calculation.
*/
if (!inhparent)
@@ -152,9 +152,9 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
/*
* Ignore invalid indexes, since they can't safely be used for
- * queries. Note that this is OK because the data structure
- * we are constructing is only used by the planner --- the
- * executor still needs to insert into "invalid" indexes!
+ * queries. Note that this is OK because the data structure we
+ * are constructing is only used by the planner --- the executor
+ * still needs to insert into "invalid" indexes!
*/
if (!index->indisvalid)
{
@@ -508,14 +508,14 @@ relation_excluded_by_constraints(RelOptInfo *rel, RangeTblEntry *rte)
/*
* We do not currently enforce that CHECK constraints contain only
* immutable functions, so it's necessary to check here. We daren't draw
- * conclusions from plan-time evaluation of non-immutable functions.
- * Since they're ANDed, we can just ignore any mutable constraints in
- * the list, and reason about the rest.
+ * conclusions from plan-time evaluation of non-immutable functions. Since
+ * they're ANDed, we can just ignore any mutable constraints in the list,
+ * and reason about the rest.
*/
safe_constraints = NIL;
foreach(lc, constraint_pred)
{
- Node *pred = (Node *) lfirst(lc);
+ Node *pred = (Node *) lfirst(lc);
if (!contain_mutable_functions(pred))
safe_constraints = lappend(safe_constraints, pred);
@@ -526,9 +526,9 @@ relation_excluded_by_constraints(RelOptInfo *rel, RangeTblEntry *rte)
* refute the entire collection at once. This may allow us to make proofs
* that would fail if we took them individually.
*
- * Note: we use rel->baserestrictinfo, not safe_restrictions as might
- * seem an obvious optimization. Some of the clauses might be OR clauses
- * that have volatile and nonvolatile subclauses, and it's OK to make
+ * Note: we use rel->baserestrictinfo, not safe_restrictions as might seem
+ * an obvious optimization. Some of the clauses might be OR clauses that
+ * have volatile and nonvolatile subclauses, and it's OK to make
* deductions with the nonvolatile parts.
*/
if (predicate_refuted_by(safe_constraints, rel->baserestrictinfo))
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index b909e6d4bf..4a2609a4ab 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.9 2006/09/28 20:51:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.10 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -208,6 +208,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
switch (pclass)
{
case CLASS_AND:
+
/*
* AND-clause => AND-clause if A implies each of B's items
*/
@@ -224,6 +225,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_OR:
+
/*
* AND-clause => OR-clause if A implies any of B's items
*
@@ -241,6 +243,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
iterate_end(pred_info);
if (result)
return result;
+
/*
* Also check if any of A's items implies B
*
@@ -258,6 +261,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_ATOM:
+
/*
* AND-clause => atom if any of A's items implies B
*/
@@ -279,6 +283,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
switch (pclass)
{
case CLASS_OR:
+
/*
* OR-clause => OR-clause if each of A's items implies any
* of B's items. Messy but can't do it any more simply.
@@ -286,7 +291,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(citem, clause, clause_info)
{
- bool presult = false;
+ bool presult = false;
iterate_begin(pitem, predicate, pred_info)
{
@@ -308,6 +313,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
case CLASS_AND:
case CLASS_ATOM:
+
/*
* OR-clause => AND-clause if each of A's items implies B
*
@@ -331,6 +337,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
switch (pclass)
{
case CLASS_AND:
+
/*
* atom => AND-clause if A implies each of B's items
*/
@@ -347,6 +354,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_OR:
+
/*
* atom => OR-clause if A implies any of B's items
*/
@@ -363,6 +371,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_ATOM:
+
/*
* atom => atom is the base case
*/
@@ -427,6 +436,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
switch (pclass)
{
case CLASS_AND:
+
/*
* AND-clause R=> AND-clause if A refutes any of B's items
*
@@ -444,6 +454,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
iterate_end(pred_info);
if (result)
return result;
+
/*
* Also check if any of A's items refutes B
*
@@ -461,6 +472,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_OR:
+
/*
* AND-clause R=> OR-clause if A refutes each of B's items
*/
@@ -477,6 +489,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_ATOM:
+
/*
* If B is a NOT-clause, A R=> B if A => B's arg
*/
@@ -484,6 +497,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
if (not_arg &&
predicate_implied_by_recurse(clause, not_arg))
return true;
+
/*
* AND-clause R=> atom if any of A's items refutes B
*/
@@ -505,6 +519,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
switch (pclass)
{
case CLASS_OR:
+
/*
* OR-clause R=> OR-clause if A refutes each of B's items
*/
@@ -521,6 +536,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_AND:
+
/*
* OR-clause R=> AND-clause if each of A's items refutes
* any of B's items.
@@ -528,7 +544,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
result = true;
iterate_begin(citem, clause, clause_info)
{
- bool presult = false;
+ bool presult = false;
iterate_begin(pitem, predicate, pred_info)
{
@@ -549,6 +565,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_ATOM:
+
/*
* If B is a NOT-clause, A R=> B if A => B's arg
*/
@@ -556,6 +573,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
if (not_arg &&
predicate_implied_by_recurse(clause, not_arg))
return true;
+
/*
* OR-clause R=> atom if each of A's items refutes B
*/
@@ -574,6 +592,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
break;
case CLASS_ATOM:
+
/*
* If A is a NOT-clause, A R=> B if B => A's arg
*/
@@ -584,6 +603,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
switch (pclass)
{
case CLASS_AND:
+
/*
* atom R=> AND-clause if A refutes any of B's items
*/
@@ -600,6 +620,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_OR:
+
/*
* atom R=> OR-clause if A refutes each of B's items
*/
@@ -616,6 +637,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
return result;
case CLASS_ATOM:
+
/*
* If B is a NOT-clause, A R=> B if A => B's arg
*/
@@ -623,6 +645,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
if (not_arg &&
predicate_implied_by_recurse(clause, not_arg))
return true;
+
/*
* atom R=> atom is the base case
*/
@@ -654,8 +677,8 @@ predicate_classify(Node *clause, PredIterInfo info)
Assert(!IsA(clause, RestrictInfo));
/*
- * If we see a List, assume it's an implicit-AND list; this is the
- * correct semantics for lists of RestrictInfo nodes.
+ * If we see a List, assume it's an implicit-AND list; this is the correct
+ * semantics for lists of RestrictInfo nodes.
*/
if (IsA(clause, List))
{
@@ -685,13 +708,13 @@ predicate_classify(Node *clause, PredIterInfo info)
if (IsA(clause, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
- Node *arraynode = (Node *) lsecond(saop->args);
+ Node *arraynode = (Node *) lsecond(saop->args);
/*
- * We can break this down into an AND or OR structure, but only if
- * we know how to iterate through expressions for the array's
- * elements. We can do that if the array operand is a non-null
- * constant or a simple ArrayExpr.
+ * We can break this down into an AND or OR structure, but only if we
+ * know how to iterate through expressions for the array's elements.
+ * We can do that if the array operand is a non-null constant or a
+ * simple ArrayExpr.
*/
if (arraynode && IsA(arraynode, Const) &&
!((Const *) arraynode)->constisnull)
@@ -716,7 +739,7 @@ predicate_classify(Node *clause, PredIterInfo info)
}
/*
- * PredIterInfo routines for iterating over regular Lists. The iteration
+ * PredIterInfo routines for iterating over regular Lists. The iteration
* state variable is the next ListCell to visit.
*/
static void
@@ -852,7 +875,7 @@ arrayexpr_startup_fn(Node *clause, PredIterInfo info)
{
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
ArrayExprIterState *state;
- ArrayExpr *arrayexpr;
+ ArrayExpr *arrayexpr;
/* Create working state struct */
state = (ArrayExprIterState *) palloc(sizeof(ArrayExprIterState));
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 331855f8e9..4d5bffdb7b 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.82 2006/09/19 22:49:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.83 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,9 +97,10 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind)
case RTE_SUBQUERY:
case RTE_FUNCTION:
case RTE_VALUES:
+
/*
- * Subquery, function, or values list --- set up attr range
- * and arrays
+ * Subquery, function, or values list --- set up attr range and
+ * arrays
*
* Note: 0 is included in range to support whole-row Vars
*/
@@ -417,8 +418,8 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
int ndx;
/*
- * We can't run into any child RowExprs here, but we could find
- * a whole-row Var with a ConvertRowtypeExpr atop it.
+ * We can't run into any child RowExprs here, but we could find a
+ * whole-row Var with a ConvertRowtypeExpr atop it.
*/
var = origvar;
while (!IsA(var, Var))
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index ec43ee39f0..9176ae1680 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.48 2006/07/01 18:38:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.49 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -169,14 +169,15 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
*/
return NIL;
}
+
/*
* If the sublist contains multiple RestrictInfos, we create an
* AND subclause. If there's just one, we have to check if it's
* an OR clause, and if so flatten it to preserve AND/OR flatness
* of our output.
*
- * We construct lists with and without sub-RestrictInfos, so
- * as not to have to regenerate duplicate RestrictInfos below.
+ * We construct lists with and without sub-RestrictInfos, so as
+ * not to have to regenerate duplicate RestrictInfos below.
*/
if (list_length(sublist) > 1)
{
@@ -186,7 +187,7 @@ make_restrictinfo_from_bitmapqual(Path *bitmapqual,
}
else
{
- RestrictInfo *subri = (RestrictInfo *) linitial(sublist);
+ RestrictInfo *subri = (RestrictInfo *) linitial(sublist);
Assert(IsA(subri, RestrictInfo));
if (restriction_is_or_clause(subri))
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 582747a23f..f4e58a3b8d 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.351 2006/09/18 16:04:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.352 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,7 +99,7 @@ static Query *transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt);
static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
List **extras_before, List **extras_after);
static List *transformInsertRow(ParseState *pstate, List *exprlist,
- List *stmtcols, List *icolumns, List *attrnos);
+ List *stmtcols, List *icolumns, List *attrnos);
static List *transformReturningList(ParseState *pstate, List *returningList);
static Query *transformIndexStmt(ParseState *pstate, IndexStmt *stmt);
static Query *transformRuleStmt(ParseState *query, RuleStmt *stmt,
@@ -133,7 +133,7 @@ static void transformFKConstraints(ParseState *pstate,
bool isAddConstraint);
static void applyColumnNames(List *dst, List *src);
static void getSetColTypes(ParseState *pstate, Node *node,
- List **colTypes, List **colTypmods);
+ List **colTypes, List **colTypmods);
static void transformLockingClause(Query *qry, LockingClause *lc);
static void transformConstraintAttrs(List *constraintList);
static void transformColumnType(ParseState *pstate, ColumnDef *column);
@@ -343,7 +343,7 @@ transformStmt(ParseState *pstate, Node *parseTree,
case T_CopyStmt:
{
- CopyStmt *n = (CopyStmt *) parseTree;
+ CopyStmt *n = (CopyStmt *) parseTree;
result = makeNode(Query);
result->commandType = CMD_UTILITY;
@@ -552,8 +552,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
/*
* We have three cases to deal with: DEFAULT VALUES (selectStmt == NULL),
- * VALUES list, or general SELECT input. We special-case VALUES, both
- * for efficiency and so we can handle DEFAULT specifications.
+ * VALUES list, or general SELECT input. We special-case VALUES, both for
+ * efficiency and so we can handle DEFAULT specifications.
*/
isGeneralSelect = (selectStmt && selectStmt->valuesLists == NIL);
@@ -602,8 +602,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
{
/*
* We have INSERT ... DEFAULT VALUES. We can handle this case by
- * emitting an empty targetlist --- all columns will be defaulted
- * when the planner expands the targetlist.
+ * emitting an empty targetlist --- all columns will be defaulted when
+ * the planner expands the targetlist.
*/
exprList = NIL;
}
@@ -705,25 +705,25 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
else if (list_length(selectStmt->valuesLists) > 1)
{
/*
- * Process INSERT ... VALUES with multiple VALUES sublists.
- * We generate a VALUES RTE holding the transformed expression
- * lists, and build up a targetlist containing Vars that reference
- * the VALUES RTE.
+ * Process INSERT ... VALUES with multiple VALUES sublists. We
+ * generate a VALUES RTE holding the transformed expression lists, and
+ * build up a targetlist containing Vars that reference the VALUES
+ * RTE.
*/
List *exprsLists = NIL;
int sublist_length = -1;
foreach(lc, selectStmt->valuesLists)
{
- List *sublist = (List *) lfirst(lc);
+ List *sublist = (List *) lfirst(lc);
/* Do basic expression transformation (same as a ROW() expr) */
sublist = transformExpressionList(pstate, sublist);
/*
- * All the sublists must be the same length, *after* transformation
- * (which might expand '*' into multiple items). The VALUES RTE
- * can't handle anything different.
+ * All the sublists must be the same length, *after*
+ * transformation (which might expand '*' into multiple items).
+ * The VALUES RTE can't handle anything different.
*/
if (sublist_length < 0)
{
@@ -747,8 +747,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
/*
* There mustn't have been any table references in the expressions,
- * else strange things would happen, like Cartesian products of
- * those tables with the VALUES list ...
+ * else strange things would happen, like Cartesian products of those
+ * tables with the VALUES list ...
*/
if (pstate->p_joinlist != NIL)
ereport(ERROR,
@@ -756,10 +756,10 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
errmsg("VALUES must not contain table references")));
/*
- * Another thing we can't currently support is NEW/OLD references
- * in rules --- seems we'd need something like SQL99's LATERAL
- * construct to ensure that the values would be available while
- * evaluating the VALUES RTE. This is a shame. FIXME
+ * Another thing we can't currently support is NEW/OLD references in
+ * rules --- seems we'd need something like SQL99's LATERAL construct
+ * to ensure that the values would be available while evaluating the
+ * VALUES RTE. This is a shame. FIXME
*/
if (list_length(pstate->p_rtable) != 1 &&
contain_vars_of_level((Node *) exprsLists, 0))
@@ -793,7 +793,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
* INSERT INTO foo VALUES(bar.*)
*
* The sublist is just computed directly as the Query's targetlist,
- * with no VALUES RTE. So it works just like SELECT without FROM.
+ * with no VALUES RTE. So it works just like SELECT without FROM.
*----------
*/
List *valuesLists = selectStmt->valuesLists;
@@ -818,7 +818,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
attnos = list_head(attrnos);
foreach(lc, exprList)
{
- Expr *expr = (Expr *) lfirst(lc);
+ Expr *expr = (Expr *) lfirst(lc);
ResTarget *col;
TargetEntry *tle;
@@ -836,10 +836,10 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
}
/*
- * If we have a RETURNING clause, we need to add the target relation
- * to the query namespace before processing it, so that Var references
- * in RETURNING will work. Also, remove any namespace entries added
- * in a sub-SELECT or VALUES list.
+ * If we have a RETURNING clause, we need to add the target relation to
+ * the query namespace before processing it, so that Var references in
+ * RETURNING will work. Also, remove any namespace entries added in a
+ * sub-SELECT or VALUES list.
*/
if (stmt->returningList)
{
@@ -875,7 +875,7 @@ static List *
transformInsertRow(ParseState *pstate, List *exprlist,
List *stmtcols, List *icolumns, List *attrnos)
{
- List *result;
+ List *result;
ListCell *lc;
ListCell *icols;
ListCell *attnos;
@@ -884,7 +884,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
* Check length of expr list. It must not have more expressions than
* there are target columns. We allow fewer, but only if no explicit
* columns list was given (the remaining columns are implicitly
- * defaulted). Note we must check this *after* transformation because
+ * defaulted). Note we must check this *after* transformation because
* that could expand '*' into multiple items.
*/
if (list_length(exprlist) > list_length(icolumns))
@@ -905,7 +905,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
attnos = list_head(attrnos);
foreach(lc, exprlist)
{
- Expr *expr = (Expr *) lfirst(lc);
+ Expr *expr = (Expr *) lfirst(lc);
ResTarget *col;
col = (ResTarget *) lfirst(icols);
@@ -1292,10 +1292,10 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
TupleConstr *constr;
AclResult aclresult;
- bool including_defaults = false;
- bool including_constraints = false;
- bool including_indexes = false;
- ListCell *elem;
+ bool including_defaults = false;
+ bool including_constraints = false;
+ bool including_indexes = false;
+ ListCell *elem;
relation = heap_openrv(inhRelation->relation, AccessShareLock);
@@ -1318,32 +1318,33 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
constr = tupleDesc->constr;
foreach(elem, inhRelation->options)
+ {
+ int option = lfirst_int(elem);
+
+ switch (option)
{
- int option = lfirst_int(elem);
- switch (option)
- {
- case CREATE_TABLE_LIKE_INCLUDING_DEFAULTS:
- including_defaults = true;
- break;
- case CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS:
- including_defaults = false;
- break;
- case CREATE_TABLE_LIKE_INCLUDING_CONSTRAINTS:
- including_constraints = true;
- break;
- case CREATE_TABLE_LIKE_EXCLUDING_CONSTRAINTS:
- including_constraints = false;
- break;
- case CREATE_TABLE_LIKE_INCLUDING_INDEXES:
- including_indexes = true;
- break;
- case CREATE_TABLE_LIKE_EXCLUDING_INDEXES:
- including_indexes = false;
- break;
- default:
- elog(ERROR, "unrecognized CREATE TABLE LIKE option: %d", option);
- }
+ case CREATE_TABLE_LIKE_INCLUDING_DEFAULTS:
+ including_defaults = true;
+ break;
+ case CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS:
+ including_defaults = false;
+ break;
+ case CREATE_TABLE_LIKE_INCLUDING_CONSTRAINTS:
+ including_constraints = true;
+ break;
+ case CREATE_TABLE_LIKE_EXCLUDING_CONSTRAINTS:
+ including_constraints = false;
+ break;
+ case CREATE_TABLE_LIKE_INCLUDING_INDEXES:
+ including_indexes = true;
+ break;
+ case CREATE_TABLE_LIKE_EXCLUDING_INDEXES:
+ including_indexes = false;
+ break;
+ default:
+ elog(ERROR, "unrecognized CREATE TABLE LIKE option: %d", option);
}
+ }
if (including_indexes)
elog(ERROR, "TODO");
@@ -1418,14 +1419,16 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
}
}
- if (including_constraints && tupleDesc->constr) {
- int ccnum;
+ if (including_constraints && tupleDesc->constr)
+ {
+ int ccnum;
AttrNumber *attmap = varattnos_map_schema(tupleDesc, cxt->columns);
- for(ccnum = 0; ccnum < tupleDesc->constr->num_check; ccnum++) {
- char *ccname = tupleDesc->constr->check[ccnum].ccname;
- char *ccbin = tupleDesc->constr->check[ccnum].ccbin;
- Node *ccbin_node = stringToNode(ccbin);
+ for (ccnum = 0; ccnum < tupleDesc->constr->num_check; ccnum++)
+ {
+ char *ccname = tupleDesc->constr->check[ccnum].ccname;
+ char *ccbin = tupleDesc->constr->check[ccnum].ccbin;
+ Node *ccbin_node = stringToNode(ccbin);
Constraint *n = makeNode(Constraint);
change_varattnos_of_a_node(ccbin_node, attmap);
@@ -1435,7 +1438,7 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
n->raw_expr = ccbin_node;
n->cooked_expr = NULL;
n->indexspace = NULL;
- cxt->ckconstraints = lappend(cxt->ckconstraints, (Node*)n);
+ cxt->ckconstraints = lappend(cxt->ckconstraints, (Node *) n);
}
}
@@ -1888,7 +1891,7 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in rule WHERE condition")));
+ errmsg("cannot use aggregate function in rule WHERE condition")));
/* save info about sublinks in where clause */
qry->hasSubLinks = pstate->p_hasSubLinks;
@@ -2175,8 +2178,8 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
RangeTblEntry *rte;
RangeTblRef *rtr;
ListCell *lc;
- ListCell *lc2;
- int i;
+ ListCell *lc2;
+ int i;
qry->commandType = CMD_SELECT;
@@ -2190,21 +2193,21 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
Assert(stmt->op == SETOP_NONE);
/*
- * For each row of VALUES, transform the raw expressions and gather
- * type information. This is also a handy place to reject DEFAULT
- * nodes, which the grammar allows for simplicity.
+ * For each row of VALUES, transform the raw expressions and gather type
+ * information. This is also a handy place to reject DEFAULT nodes, which
+ * the grammar allows for simplicity.
*/
foreach(lc, stmt->valuesLists)
{
- List *sublist = (List *) lfirst(lc);
+ List *sublist = (List *) lfirst(lc);
/* Do basic expression transformation (same as a ROW() expr) */
sublist = transformExpressionList(pstate, sublist);
/*
* All the sublists must be the same length, *after* transformation
- * (which might expand '*' into multiple items). The VALUES RTE
- * can't handle anything different.
+ * (which might expand '*' into multiple items). The VALUES RTE can't
+ * handle anything different.
*/
if (sublist_length < 0)
{
@@ -2226,7 +2229,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
i = 0;
foreach(lc2, sublist)
{
- Node *col = (Node *) lfirst(lc2);
+ Node *col = (Node *) lfirst(lc2);
if (IsA(col, SetToDefault))
ereport(ERROR,
@@ -2238,8 +2241,8 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
}
/*
- * Now resolve the common types of the columns, and coerce everything
- * to those types.
+ * Now resolve the common types of the columns, and coerce everything to
+ * those types.
*/
for (i = 0; i < sublist_length; i++)
{
@@ -2249,13 +2252,13 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
newExprsLists = NIL;
foreach(lc, exprsLists)
{
- List *sublist = (List *) lfirst(lc);
- List *newsublist = NIL;
+ List *sublist = (List *) lfirst(lc);
+ List *newsublist = NIL;
i = 0;
foreach(lc2, sublist)
{
- Node *col = (Node *) lfirst(lc2);
+ Node *col = (Node *) lfirst(lc2);
col = coerce_to_common_type(pstate, col, coltypes[i], "VALUES");
newsublist = lappend(newsublist, col);
@@ -2283,8 +2286,8 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
qry->targetList = expandRelAttrs(pstate, rte, rtr->rtindex, 0);
/*
- * The grammar allows attaching ORDER BY, LIMIT, and FOR UPDATE
- * to a VALUES, so cope.
+ * The grammar allows attaching ORDER BY, LIMIT, and FOR UPDATE to a
+ * VALUES, so cope.
*/
qry->sortClause = transformSortClause(pstate,
stmt->sortClause,
@@ -2299,7 +2302,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
if (stmt->lockingClause)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be applied to VALUES")));
+ errmsg("SELECT FOR UPDATE/SHARE cannot be applied to VALUES")));
/* handle any CREATE TABLE AS spec */
if (stmt->into)
@@ -2313,10 +2316,10 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
}
/*
- * There mustn't have been any table references in the expressions,
- * else strange things would happen, like Cartesian products of
- * those tables with the VALUES list. We have to check this after
- * parsing ORDER BY et al since those could insert more junk.
+ * There mustn't have been any table references in the expressions, else
+ * strange things would happen, like Cartesian products of those tables
+ * with the VALUES list. We have to check this after parsing ORDER BY et
+ * al since those could insert more junk.
*/
if (list_length(pstate->p_joinlist) != 1)
ereport(ERROR,
@@ -2324,10 +2327,10 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
errmsg("VALUES must not contain table references")));
/*
- * Another thing we can't currently support is NEW/OLD references
- * in rules --- seems we'd need something like SQL99's LATERAL
- * construct to ensure that the values would be available while
- * evaluating the VALUES RTE. This is a shame. FIXME
+ * Another thing we can't currently support is NEW/OLD references in rules
+ * --- seems we'd need something like SQL99's LATERAL construct to ensure
+ * that the values would be available while evaluating the VALUES RTE.
+ * This is a shame. FIXME
*/
if (list_length(pstate->p_rtable) != 1 &&
contain_vars_of_level((Node *) newExprsLists, 0))
@@ -2390,8 +2393,8 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* Find leftmost leaf SelectStmt; extract the one-time-only items from it
* and from the top-level node. (Most of the INTO options can be
- * transferred to the Query immediately, but intoColNames has to be
- * saved to apply below.)
+ * transferred to the Query immediately, but intoColNames has to be saved
+ * to apply below.)
*/
leftmostSelect = stmt->larg;
while (leftmostSelect && leftmostSelect->op != SETOP_NONE)
@@ -2865,9 +2868,9 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
qry->hasSubLinks = pstate->p_hasSubLinks;
/*
- * Top-level aggregates are simply disallowed in UPDATE, per spec.
- * (From an implementation point of view, this is forced because the
- * implicit ctid reference would otherwise be an ungrouped variable.)
+ * Top-level aggregates are simply disallowed in UPDATE, per spec. (From
+ * an implementation point of view, this is forced because the implicit
+ * ctid reference would otherwise be an ungrouped variable.)
*/
if (pstate->p_hasAggs)
ereport(ERROR,
@@ -2890,7 +2893,7 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
{
TargetEntry *tle = (TargetEntry *) lfirst(tl);
ResTarget *origTarget;
- int attrno;
+ int attrno;
if (tle->resjunk)
{
@@ -2916,7 +2919,7 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" of relation \"%s\" does not exist",
origTarget->name,
- RelationGetRelationName(pstate->p_target_relation)),
+ RelationGetRelationName(pstate->p_target_relation)),
parser_errposition(pstate, origTarget->location)));
updateTargetListEntry(pstate, tle, origTarget->name,
@@ -2948,9 +2951,9 @@ transformReturningList(ParseState *pstate, List *returningList)
return NIL; /* nothing to do */
/*
- * We need to assign resnos starting at one in the RETURNING list.
- * Save and restore the main tlist's value of p_next_resno, just in
- * case someone looks at it later (probably won't happen).
+ * We need to assign resnos starting at one in the RETURNING list. Save
+ * and restore the main tlist's value of p_next_resno, just in case
+ * someone looks at it later (probably won't happen).
*/
save_next_resno = pstate->p_next_resno;
pstate->p_next_resno = 1;
@@ -2975,7 +2978,7 @@ transformReturningList(ParseState *pstate, List *returningList)
if (list_length(pstate->p_rtable) != length_rtable)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("RETURNING may not contain references to other relations")));
+ errmsg("RETURNING may not contain references to other relations")));
/* mark column origins */
markTargetListOrigins(pstate, rlist);
@@ -3204,7 +3207,7 @@ static Query *
transformPrepareStmt(ParseState *pstate, PrepareStmt *stmt)
{
Query *result = makeNode(Query);
- List *argtype_oids; /* argtype OIDs in a list */
+ List *argtype_oids; /* argtype OIDs in a list */
Oid *argtoids = NULL; /* and as an array */
int nargs;
List *queries;
@@ -3233,10 +3236,9 @@ transformPrepareStmt(ParseState *pstate, PrepareStmt *stmt)
}
/*
- * Analyze the statement using these parameter types (any
- * parameters passed in from above us will not be visible to it),
- * allowing information about unknown parameters to be deduced
- * from context.
+ * Analyze the statement using these parameter types (any parameters
+ * passed in from above us will not be visible to it), allowing
+ * information about unknown parameters to be deduced from context.
*/
queries = parse_analyze_varparams((Node *) stmt->query,
pstate->p_sourcetext,
@@ -3250,8 +3252,8 @@ transformPrepareStmt(ParseState *pstate, PrepareStmt *stmt)
elog(ERROR, "unexpected extra stuff in prepared statement");
/*
- * Check that all parameter types were determined, and convert the
- * array of OIDs into a list for storage.
+ * Check that all parameter types were determined, and convert the array
+ * of OIDs into a list for storage.
*/
argtype_oids = NIL;
for (i = 0; i < nargs; i++)
@@ -3360,7 +3362,7 @@ CheckSelectLocking(Query *qry)
if (qry->havingQual != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE is not allowed with HAVING clause")));
+ errmsg("SELECT FOR UPDATE/SHARE is not allowed with HAVING clause")));
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -3500,15 +3502,15 @@ applyLockingClause(Query *qry, Index rtindex, bool forUpdate, bool noWait)
if ((rc = get_rowmark(qry, rtindex)) != NULL)
{
/*
- * If the same RTE is specified both FOR UPDATE and FOR SHARE,
- * treat it as FOR UPDATE. (Reasonable, since you can't take
- * both a shared and exclusive lock at the same time; it'll
- * end up being exclusive anyway.)
+ * If the same RTE is specified both FOR UPDATE and FOR SHARE, treat
+ * it as FOR UPDATE. (Reasonable, since you can't take both a shared
+ * and exclusive lock at the same time; it'll end up being exclusive
+ * anyway.)
*
- * We also consider that NOWAIT wins if it's specified both ways.
- * This is a bit more debatable but raising an error doesn't
- * seem helpful. (Consider for instance SELECT FOR UPDATE NOWAIT
- * from a view that internally contains a plain FOR UPDATE spec.)
+ * We also consider that NOWAIT wins if it's specified both ways. This
+ * is a bit more debatable but raising an error doesn't seem helpful.
+ * (Consider for instance SELECT FOR UPDATE NOWAIT from a view that
+ * internally contains a plain FOR UPDATE spec.)
*/
rc->forUpdate |= forUpdate;
rc->noWait |= noWait;
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 70b6946d5f..be584b514c 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.157 2006/08/14 23:39:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.158 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -544,8 +544,8 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* If a coldeflist was supplied, ensure it defines a legal set of names
* (no duplicates) and datatypes (no pseudo-types, for instance).
- * addRangeTableEntryForFunction looked up the type names but didn't
- * check them further than that.
+ * addRangeTableEntryForFunction looked up the type names but didn't check
+ * them further than that.
*/
if (r->coldeflist)
{
@@ -1338,10 +1338,10 @@ transformGroupClause(ParseState *pstate, List *grouplist,
ListCell *l;
/* Preprocess the grouping clause, lookup TLEs */
- foreach (l, grouplist)
+ foreach(l, grouplist)
{
TargetEntry *tle;
- Oid restype;
+ Oid restype;
tle = findTargetlistEntry(pstate, lfirst(l),
targetlist, GROUP_CLAUSE);
@@ -1359,21 +1359,20 @@ transformGroupClause(ParseState *pstate, List *grouplist,
}
/*
- * Now iterate through the ORDER BY clause. If we find a grouping
- * element that matches the ORDER BY element, append the grouping
- * element to the result set immediately. Otherwise, stop
- * iterating. The effect of this is to look for a prefix of the
- * ORDER BY list in the grouping clauses, and to move that prefix
- * to the front of the GROUP BY.
+ * Now iterate through the ORDER BY clause. If we find a grouping element
+ * that matches the ORDER BY element, append the grouping element to the
+ * result set immediately. Otherwise, stop iterating. The effect of this
+ * is to look for a prefix of the ORDER BY list in the grouping clauses,
+ * and to move that prefix to the front of the GROUP BY.
*/
- foreach (l, sortClause)
+ foreach(l, sortClause)
{
- SortClause *sc = (SortClause *) lfirst(l);
- ListCell *prev = NULL;
- ListCell *tl;
- bool found = false;
+ SortClause *sc = (SortClause *) lfirst(l);
+ ListCell *prev = NULL;
+ ListCell *tl;
+ bool found = false;
- foreach (tl, tle_list)
+ foreach(tl, tle_list)
{
TargetEntry *tle = (TargetEntry *) lfirst(tl);
@@ -1399,17 +1398,17 @@ transformGroupClause(ParseState *pstate, List *grouplist,
}
/*
- * Now add any remaining elements of the GROUP BY list in the
- * order we received them.
+ * Now add any remaining elements of the GROUP BY list in the order we
+ * received them.
*
- * XXX: are there any additional criteria to consider when
- * ordering grouping clauses?
+ * XXX: are there any additional criteria to consider when ordering
+ * grouping clauses?
*/
foreach(l, tle_list)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
GroupClause *gc;
- Oid sort_op;
+ Oid sort_op;
/* avoid making duplicate grouplist entries */
if (targetIsInSortList(tle, result))
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 010c704c68..9cfe4391bd 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.143 2006/07/26 19:31:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.144 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -163,11 +163,11 @@ coerce_type(ParseState *pstate, Node *node,
/*
* If the target type is a domain, we want to call its base type's
- * input routine, not domain_in(). This is to avoid premature
- * failure when the domain applies a typmod: existing input
- * routines follow implicit-coercion semantics for length checks,
- * which is not always what we want here. The needed check will
- * be applied properly inside coerce_to_domain().
+ * input routine, not domain_in(). This is to avoid premature failure
+ * when the domain applies a typmod: existing input routines follow
+ * implicit-coercion semantics for length checks, which is not always
+ * what we want here. The needed check will be applied properly
+ * inside coerce_to_domain().
*/
baseTypeMod = -1;
baseTypeId = getBaseTypeAndTypmod(targetTypeId, &baseTypeMod);
@@ -180,13 +180,13 @@ coerce_type(ParseState *pstate, Node *node,
newcon->constisnull = con->constisnull;
/*
- * We pass typmod -1 to the input routine, primarily because
- * existing input routines follow implicit-coercion semantics for
- * length checks, which is not always what we want here. Any
- * length constraint will be applied later by our caller.
+ * We pass typmod -1 to the input routine, primarily because existing
+ * input routines follow implicit-coercion semantics for length
+ * checks, which is not always what we want here. Any length
+ * constraint will be applied later by our caller.
*
- * We assume here that UNKNOWN's internal representation is the
- * same as CSTRING.
+ * We assume here that UNKNOWN's internal representation is the same
+ * as CSTRING.
*/
if (!con->constisnull)
newcon->constvalue = stringTypeDatum(targetType,
@@ -886,8 +886,8 @@ coerce_to_bigint(ParseState *pstate, Node *node,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/* translator: first %s is name of a SQL construct, eg LIMIT */
- errmsg("argument of %s must be type bigint, not type %s",
- constructName, format_type_be(inputTypeId))));
+ errmsg("argument of %s must be type bigint, not type %s",
+ constructName, format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 0812c3d441..7c72ae9e22 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.197 2006/08/12 20:05:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.198 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,7 @@ static Node *transformIndirection(ParseState *pstate, Node *basenode,
static Node *typecast_expression(ParseState *pstate, Node *expr,
TypeName *typename);
static Node *make_row_comparison_op(ParseState *pstate, List *opname,
- List *largs, List *rargs, int location);
+ List *largs, List *rargs, int location);
static Node *make_row_distinct_op(ParseState *pstate, List *opname,
RowExpr *lrow, RowExpr *rrow, int location);
static Expr *make_distinct_op(ParseState *pstate, List *opname,
@@ -772,8 +772,8 @@ static Node *
transformAExprOf(ParseState *pstate, A_Expr *a)
{
/*
- * Checking an expression for match to a list of type names.
- * Will result in a boolean constant node.
+ * Checking an expression for match to a list of type names. Will result
+ * in a boolean constant node.
*/
Node *lexpr = transformExpr(pstate, a->lexpr);
ListCell *telem;
@@ -791,7 +791,7 @@ transformAExprOf(ParseState *pstate, A_Expr *a)
}
/*
- * We have two forms: equals or not equals. Flip the sense of the result
+ * We have two forms: equals or not equals. Flip the sense of the result
* for not equals.
*/
if (strcmp(strVal(linitial(a->name)), "<>") == 0)
@@ -820,10 +820,10 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
useOr = true;
/*
- * We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is
- * only possible if the inputs are all scalars (no RowExprs) and there
- * is a suitable array type available. If not, we fall back to a
- * boolean condition tree with multiple copies of the lefthand expression.
+ * We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is only
+ * possible if the inputs are all scalars (no RowExprs) and there is a
+ * suitable array type available. If not, we fall back to a boolean
+ * condition tree with multiple copies of the lefthand expression.
*
* First step: transform all the inputs, and detect whether any are
* RowExprs.
@@ -834,7 +834,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
rexprs = NIL;
foreach(l, (List *) a->rexpr)
{
- Node *rexpr = transformExpr(pstate, lfirst(l));
+ Node *rexpr = transformExpr(pstate, lfirst(l));
haveRowExpr |= (rexpr && IsA(rexpr, RowExpr));
rexprs = lappend(rexprs, rexpr);
@@ -842,10 +842,10 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
}
/*
- * If not forced by presence of RowExpr, try to resolve a common
- * scalar type for all the expressions, and see if it has an array type.
- * (But if there's only one righthand expression, we may as well just
- * fall through and generate a simple = comparison.)
+ * If not forced by presence of RowExpr, try to resolve a common scalar
+ * type for all the expressions, and see if it has an array type. (But if
+ * there's only one righthand expression, we may as well just fall through
+ * and generate a simple = comparison.)
*/
if (!haveRowExpr && list_length(rexprs) != 1)
{
@@ -853,9 +853,9 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
Oid array_type;
/*
- * Select a common type for the array elements. Note that since
- * the LHS' type is first in the list, it will be preferred when
- * there is doubt (eg, when all the RHS items are unknown literals).
+ * Select a common type for the array elements. Note that since the
+ * LHS' type is first in the list, it will be preferred when there is
+ * doubt (eg, when all the RHS items are unknown literals).
*/
scalar_type = select_common_type(typeids, "IN");
@@ -864,8 +864,8 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
if (array_type != InvalidOid)
{
/*
- * OK: coerce all the right-hand inputs to the common type
- * and build an ArrayExpr for them.
+ * OK: coerce all the right-hand inputs to the common type and
+ * build an ArrayExpr for them.
*/
List *aexprs;
ArrayExpr *newa;
@@ -910,11 +910,11 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
!IsA(rexpr, RowExpr))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("arguments of row IN must all be row expressions"),
+ errmsg("arguments of row IN must all be row expressions"),
parser_errposition(pstate, a->location)));
cmp = make_row_comparison_op(pstate,
a->name,
- (List *) copyObject(((RowExpr *) lexpr)->args),
+ (List *) copyObject(((RowExpr *) lexpr)->args),
((RowExpr *) rexpr)->args,
a->location);
}
@@ -1111,8 +1111,8 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
if (sublink->subLinkType == EXISTS_SUBLINK)
{
/*
- * EXISTS needs no test expression or combining operator.
- * These fields should be null already, but make sure.
+ * EXISTS needs no test expression or combining operator. These fields
+ * should be null already, but make sure.
*/
sublink->testexpr = NULL;
sublink->operName = NIL;
@@ -1140,8 +1140,8 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
}
/*
- * EXPR and ARRAY need no test expression or combining operator.
- * These fields should be null already, but make sure.
+ * EXPR and ARRAY need no test expression or combining operator. These
+ * fields should be null already, but make sure.
*/
sublink->testexpr = NULL;
sublink->operName = NIL;
@@ -1164,8 +1164,8 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
left_list = list_make1(lefthand);
/*
- * Build a list of PARAM_SUBLINK nodes representing the
- * output columns of the subquery.
+ * Build a list of PARAM_SUBLINK nodes representing the output columns
+ * of the subquery.
*/
right_list = NIL;
foreach(l, qtree->targetList)
@@ -1185,9 +1185,9 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
}
/*
- * We could rely on make_row_comparison_op to complain if the
- * list lengths differ, but we prefer to generate a more specific
- * error message.
+ * We could rely on make_row_comparison_op to complain if the list
+ * lengths differ, but we prefer to generate a more specific error
+ * message.
*/
if (list_length(left_list) < list_length(right_list))
ereport(ERROR,
@@ -1968,8 +1968,8 @@ make_row_comparison_op(ParseState *pstate, List *opname,
parser_errposition(pstate, location)));
/*
- * We can't compare zero-length rows because there is no principled
- * basis for figuring out what the operator is.
+ * We can't compare zero-length rows because there is no principled basis
+ * for figuring out what the operator is.
*/
if (nopers == 0)
ereport(ERROR,
@@ -1978,8 +1978,8 @@ make_row_comparison_op(ParseState *pstate, List *opname,
parser_errposition(pstate, location)));
/*
- * Identify all the pairwise operators, using make_op so that
- * behavior is the same as in the simple scalar case.
+ * Identify all the pairwise operators, using make_op so that behavior is
+ * the same as in the simple scalar case.
*/
opexprs = NIL;
forboth(l, largs, r, rargs)
@@ -1999,9 +1999,9 @@ make_row_comparison_op(ParseState *pstate, List *opname,
if (cmp->opresulttype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("row comparison operator must yield type boolean, "
- "not type %s",
- format_type_be(cmp->opresulttype)),
+ errmsg("row comparison operator must yield type boolean, "
+ "not type %s",
+ format_type_be(cmp->opresulttype)),
parser_errposition(pstate, location)));
if (expression_returns_set((Node *) cmp))
ereport(ERROR,
@@ -2012,16 +2012,16 @@ make_row_comparison_op(ParseState *pstate, List *opname,
}
/*
- * If rows are length 1, just return the single operator. In this
- * case we don't insist on identifying btree semantics for the operator
- * (but we still require it to return boolean).
+ * If rows are length 1, just return the single operator. In this case we
+ * don't insist on identifying btree semantics for the operator (but we
+ * still require it to return boolean).
*/
if (nopers == 1)
return (Node *) linitial(opexprs);
/*
* Now we must determine which row comparison semantics (= <> < <= > >=)
- * apply to this set of operators. We look for btree opclasses containing
+ * apply to this set of operators. We look for btree opclasses containing
* the operators, and see which interpretations (strategy numbers) exist
* for each operator.
*/
@@ -2031,14 +2031,15 @@ make_row_comparison_op(ParseState *pstate, List *opname,
i = 0;
foreach(l, opexprs)
{
- Bitmapset *this_strats;
+ Bitmapset *this_strats;
ListCell *j;
get_op_btree_interpretation(((OpExpr *) lfirst(l))->opno,
&opclass_lists[i], &opstrat_lists[i]);
+
/*
- * convert strategy number list to a Bitmapset to make the intersection
- * calculation easy.
+ * convert strategy number list to a Bitmapset to make the
+ * intersection calculation easy.
*/
this_strats = NULL;
foreach(j, opstrat_lists[i])
@@ -2074,21 +2075,21 @@ make_row_comparison_op(ParseState *pstate, List *opname,
/*
* Prefer the interpretation with the most default opclasses.
*/
- int best_defaults = 0;
- bool multiple_best = false;
- int this_rctype;
+ int best_defaults = 0;
+ bool multiple_best = false;
+ int this_rctype;
rctype = 0; /* keep compiler quiet */
while ((this_rctype = bms_first_member(strats)) >= 0)
{
- int ndefaults = 0;
+ int ndefaults = 0;
for (i = 0; i < nopers; i++)
{
forboth(l, opclass_lists[i], r, opstrat_lists[i])
{
- Oid opclass = lfirst_oid(l);
- int opstrat = lfirst_int(r);
+ Oid opclass = lfirst_oid(l);
+ int opstrat = lfirst_int(r);
if (opstrat == this_rctype &&
opclass_is_default(opclass))
@@ -2116,12 +2117,12 @@ make_row_comparison_op(ParseState *pstate, List *opname,
}
/*
- * For = and <> cases, we just combine the pairwise operators with
- * AND or OR respectively.
+ * For = and <> cases, we just combine the pairwise operators with AND or
+ * OR respectively.
*
* Note: this is presently the only place where the parser generates
- * BoolExpr with more than two arguments. Should be OK since the
- * rest of the system thinks BoolExpr is N-argument anyway.
+ * BoolExpr with more than two arguments. Should be OK since the rest of
+ * the system thinks BoolExpr is N-argument anyway.
*/
if (rctype == ROWCOMPARE_EQ)
return (Node *) makeBoolExpr(AND_EXPR, opexprs);
@@ -2129,20 +2130,20 @@ make_row_comparison_op(ParseState *pstate, List *opname,
return (Node *) makeBoolExpr(OR_EXPR, opexprs);
/*
- * Otherwise we need to determine exactly which opclass to associate
- * with each operator.
+ * Otherwise we need to determine exactly which opclass to associate with
+ * each operator.
*/
opclasses = NIL;
for (i = 0; i < nopers; i++)
{
- Oid best_opclass = 0;
- int ndefault = 0;
- int nmatch = 0;
+ Oid best_opclass = 0;
+ int ndefault = 0;
+ int nmatch = 0;
forboth(l, opclass_lists[i], r, opstrat_lists[i])
{
- Oid opclass = lfirst_oid(l);
- int opstrat = lfirst_int(r);
+ Oid opclass = lfirst_oid(l);
+ int opstrat = lfirst_int(r);
if (opstrat == rctype)
{
@@ -2161,7 +2162,7 @@ make_row_comparison_op(ParseState *pstate, List *opname,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("could not determine interpretation of row comparison operator %s",
strVal(llast(opname))),
- errdetail("There are multiple equally-plausible candidates."),
+ errdetail("There are multiple equally-plausible candidates."),
parser_errposition(pstate, location)));
}
@@ -2251,7 +2252,7 @@ make_distinct_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree,
if (((OpExpr *) result)->opresulttype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM requires = operator to yield boolean"),
+ errmsg("IS DISTINCT FROM requires = operator to yield boolean"),
parser_errposition(pstate, location)));
/*
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index b1b53164f8..a61099766b 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.189 2006/07/27 19:52:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.190 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,7 @@
static Node *ParseComplexProjection(ParseState *pstate, char *funcname,
Node *first_arg, int location);
static void unknown_attribute(ParseState *pstate, Node *relref, char *attname,
- int location);
+ int location);
/*
@@ -265,13 +265,13 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (fargs == NIL && !agg_star)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("%s(*) must be used to call a parameterless aggregate function",
- NameListToString(funcname)),
+ errmsg("%s(*) must be used to call a parameterless aggregate function",
+ NameListToString(funcname)),
parser_errposition(pstate, location)));
/* parse_agg.c does additional aggregate-specific processing */
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index 9c1b58704e..8cad187550 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.94 2006/08/02 01:59:47 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.95 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,15 +61,15 @@ make_parsestate(ParseState *parentParseState)
* is a dummy (always 0, in fact).
*
* The locations stored in raw parsetrees are byte offsets into the source
- * string. We have to convert them to 1-based character indexes for reporting
- * to clients. (We do things this way to avoid unnecessary overhead in the
+ * string. We have to convert them to 1-based character indexes for reporting
+ * to clients. (We do things this way to avoid unnecessary overhead in the
* normal non-error case: computing character indexes would be much more
* expensive than storing token offsets.)
*/
int
parser_errposition(ParseState *pstate, int location)
{
- int pos;
+ int pos;
/* No-op if location was not provided */
if (location < 0)
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index be53472d04..d8ac0d0ff8 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.89 2006/07/14 14:52:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.90 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,7 +29,7 @@
#include "utils/typcache.h"
-static Oid binary_oper_exact(List *opname, Oid arg1, Oid arg2);
+static Oid binary_oper_exact(List *opname, Oid arg1, Oid arg2);
static FuncDetailCode oper_select_candidate(int nargs,
Oid *input_typeids,
FuncCandidateList candidates,
@@ -37,8 +37,8 @@ static FuncDetailCode oper_select_candidate(int nargs,
static const char *op_signature_string(List *op, char oprkind,
Oid arg1, Oid arg2);
static void op_error(ParseState *pstate, List *op, char oprkind,
- Oid arg1, Oid arg2,
- FuncDetailCode fdresult, int location);
+ Oid arg1, Oid arg2,
+ FuncDetailCode fdresult, int location);
static Expr *make_op_expr(ParseState *pstate, Operator op,
Node *ltree, Node *rtree,
Oid ltypeId, Oid rtypeId);
@@ -701,10 +701,9 @@ left_oper(ParseState *pstate, List *op, Oid arg, bool noError, int location)
if (clist != NULL)
{
/*
- * The returned list has args in the form (0, oprright).
- * Move the useful data into args[0] to keep oper_select_candidate
- * simple. XXX we are assuming here that we may scribble on the
- * list!
+ * The returned list has args in the form (0, oprright). Move the
+ * useful data into args[0] to keep oper_select_candidate simple.
+ * XXX we are assuming here that we may scribble on the list!
*/
FuncCandidateList clisti;
@@ -872,7 +871,7 @@ make_scalar_array_op(ParseState *pstate, List *opname,
if (!OidIsValid(rtypeId))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires array on right side"),
+ errmsg("op ANY/ALL (array) requires array on right side"),
parser_errposition(pstate, location)));
}
@@ -902,12 +901,12 @@ make_scalar_array_op(ParseState *pstate, List *opname,
if (rettype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator to yield boolean"),
+ errmsg("op ANY/ALL (array) requires operator to yield boolean"),
parser_errposition(pstate, location)));
if (get_func_retset(opform->oprcode))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator not to return a set"),
+ errmsg("op ANY/ALL (array) requires operator not to return a set"),
parser_errposition(pstate, location)));
/*
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index e9896be634..75d5a50702 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.124 2006/08/02 01:59:47 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.125 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,7 +48,7 @@ static void expandTupleDesc(TupleDesc tupdesc, Alias *eref,
List **colnames, List **colvars);
static int specialAttNum(const char *attname);
static void warnAutoRange(ParseState *pstate, RangeVar *relation,
- int location);
+ int location);
/*
@@ -970,7 +970,7 @@ addRangeTableEntryForValues(ParseState *pstate,
numaliases = list_length(eref->colnames);
while (numaliases < numcolumns)
{
- char attrname[64];
+ char attrname[64];
numaliases++;
snprintf(attrname, sizeof(attrname), "column%d", numaliases);
@@ -1146,6 +1146,7 @@ addImplicitRTE(ParseState *pstate, RangeVar *relation, int location)
/* issue warning or error as needed */
warnAutoRange(pstate, relation, location);
+
/*
* Note that we set inFromCl true, so that the RTE will be listed
* explicitly if the parsetree is ever decompiled by ruleutils.c. This
@@ -1311,7 +1312,7 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
varattno = 0;
foreach(lc, (List *) linitial(rte->values_lists))
{
- Node *col = (Node *) lfirst(lc);
+ Node *col = (Node *) lfirst(lc);
varattno++;
if (colnames)
@@ -1676,13 +1677,13 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
case RTE_VALUES:
{
/* Values RTE --- get type info from first sublist */
- List *collist = (List *) linitial(rte->values_lists);
+ List *collist = (List *) linitial(rte->values_lists);
Node *col;
if (attnum < 1 || attnum > list_length(collist))
elog(ERROR, "values list %s does not have attribute %d",
rte->eref->aliasname, attnum);
- col = (Node *) list_nth(collist, attnum-1);
+ col = (Node *) list_nth(collist, attnum - 1);
*vartype = exprType(col);
*vartypmod = exprTypmod(col);
}
@@ -1963,15 +1964,15 @@ warnAutoRange(ParseState *pstate, RangeVar *relation, int location)
/*
* Check to see if there are any potential matches in the query's
- * rangetable. This affects the message we provide.
+ * rangetable. This affects the message we provide.
*/
rte = searchRangeTable(pstate, relation);
/*
- * If we found a match that has an alias and the alias is visible in
- * the namespace, then the problem is probably use of the relation's
- * real name instead of its alias, ie "SELECT foo.* FROM foo f".
- * This mistake is common enough to justify a specific hint.
+ * If we found a match that has an alias and the alias is visible in the
+ * namespace, then the problem is probably use of the relation's real name
+ * instead of its alias, ie "SELECT foo.* FROM foo f". This mistake is
+ * common enough to justify a specific hint.
*
* If we found a match that doesn't meet those criteria, assume the
* problem is illegal use of a relation outside its scope, as in the
@@ -1988,11 +1989,11 @@ warnAutoRange(ParseState *pstate, RangeVar *relation, int location)
if (rte)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("invalid reference to FROM-clause entry for table \"%s\"",
- relation->relname),
+ errmsg("invalid reference to FROM-clause entry for table \"%s\"",
+ relation->relname),
(badAlias ?
- errhint("Perhaps you meant to reference the table alias \"%s\".",
- badAlias) :
+ errhint("Perhaps you meant to reference the table alias \"%s\".",
+ badAlias) :
errhint("There is an entry for table \"%s\", but it cannot be referenced from this part of the query.",
rte->eref->aliasname)),
parser_errposition(pstate, location)));
@@ -2000,8 +2001,8 @@ warnAutoRange(ParseState *pstate, RangeVar *relation, int location)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
(pstate->parentParseState ?
- errmsg("missing FROM-clause entry in subquery for table \"%s\"",
- relation->relname) :
+ errmsg("missing FROM-clause entry in subquery for table \"%s\"",
+ relation->relname) :
errmsg("missing FROM-clause entry for table \"%s\"",
relation->relname)),
parser_errposition(pstate, location)));
@@ -2017,8 +2018,8 @@ warnAutoRange(ParseState *pstate, RangeVar *relation, int location)
errmsg("adding missing FROM-clause entry for table \"%s\"",
relation->relname)),
(badAlias ?
- errhint("Perhaps you meant to reference the table alias \"%s\".",
- badAlias) :
+ errhint("Perhaps you meant to reference the table alias \"%s\".",
+ badAlias) :
(rte ?
errhint("There is an entry for table \"%s\", but it cannot be referenced from this part of the query.",
rte->eref->aliasname) : 0)),
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index 961e320543..bb4b065eeb 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.148 2006/08/14 23:39:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.149 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -161,7 +161,7 @@ transformTargetList(ParseState *pstate, List *targetlist)
* This is the identical transformation to transformTargetList, except that
* the input list elements are bare expressions without ResTarget decoration,
* and the output elements are likewise just expressions without TargetEntry
- * decoration. We use this for ROW() and VALUES() constructs.
+ * decoration. We use this for ROW() and VALUES() constructs.
*/
List *
transformExpressionList(ParseState *pstate, List *exprlist)
@@ -436,7 +436,7 @@ transformAssignedExpr(ParseState *pstate,
colname,
format_type_be(attrtype),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression."),
+ errhint("You will need to rewrite or cast the expression."),
parser_errposition(pstate, location)));
}
@@ -446,7 +446,7 @@ transformAssignedExpr(ParseState *pstate,
/*
* updateTargetListEntry()
- * This is used in UPDATE statements only. It prepares an UPDATE
+ * This is used in UPDATE statements only. It prepares an UPDATE
* TargetEntry for assignment to a column of the target table.
* This includes coercing the given value to the target column's type
* (if necessary), and dealing with any subfield names or subscripts
@@ -687,7 +687,7 @@ transformAssignmentIndirection(ParseState *pstate,
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression."),
+ errhint("You will need to rewrite or cast the expression."),
parser_errposition(pstate, location)));
else
ereport(ERROR,
@@ -697,7 +697,7 @@ transformAssignmentIndirection(ParseState *pstate,
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression."),
+ errhint("You will need to rewrite or cast the expression."),
parser_errposition(pstate, location)));
}
@@ -761,9 +761,9 @@ checkInsertTargets(ParseState *pstate, List *cols, List **attrnos)
if (attrno == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- name,
- RelationGetRelationName(pstate->p_target_relation)),
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ name,
+ RelationGetRelationName(pstate->p_target_relation)),
parser_errposition(pstate, col->location)));
/*
@@ -825,8 +825,8 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
*
* (e.g., SELECT * FROM emp, dept)
*
- * Since the grammar only accepts bare '*' at top level of SELECT,
- * we need not handle the targetlist==false case here.
+ * Since the grammar only accepts bare '*' at top level of SELECT, we
+ * need not handle the targetlist==false case here.
*/
Assert(targetlist);
@@ -898,7 +898,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
return expandRelAttrs(pstate, rte, rtindex, sublevels_up);
else
{
- List *vars;
+ List *vars;
expandRTE(rte, rtindex, sublevels_up, false,
NULL, &vars);
@@ -1114,8 +1114,8 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup)
/*
* This case should not occur: a column of a table or values list
- * shouldn't have type RECORD. Fall through and fail
- * (most likely) at the bottom.
+ * shouldn't have type RECORD. Fall through and fail (most
+ * likely) at the bottom.
*/
break;
case RTE_SUBQUERY:
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 45666d8880..93c7db6b52 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.84 2006/09/25 15:17:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.85 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -210,7 +210,7 @@ TypeNameListToString(List *typenames)
initStringInfo(&string);
foreach(l, typenames)
{
- TypeName *typename = (TypeName *) lfirst(l);
+ TypeName *typename = (TypeName *) lfirst(l);
Assert(IsA(typename, TypeName));
if (l != list_head(typenames))
@@ -358,7 +358,7 @@ typeTypeRelid(Type typ)
/*
* Given a type structure and a string, returns the internal representation
- * of that string. The "string" can be NULL to perform conversion of a NULL
+ * of that string. The "string" can be NULL to perform conversion of a NULL
* (which might result in failure, if the input function rejects NULLs).
*/
Datum
diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c
index 7f1c926f67..674d4d497c 100644
--- a/src/backend/parser/parser.c
+++ b/src/backend/parser/parser.c
@@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parser.c,v 1.67 2006/07/15 03:35:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parser.c,v 1.68 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,7 +64,7 @@ raw_parser(const char *str)
* Intermediate filter between parser and base lexer (base_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
@@ -91,15 +91,16 @@ filtered_base_yylex(void)
switch (cur_token)
{
case WITH:
+
/*
* WITH CASCADED, LOCAL, or CHECK must be reduced to one token
*
- * XXX an alternative way is to recognize just WITH_TIME and
- * put the ugliness into the datetime datatype productions
- * instead of WITH CHECK OPTION. However that requires promoting
- * WITH to a fully reserved word. If we ever have to do that
- * anyway (perhaps for SQL99 recursive queries), come back and
- * simplify this code.
+ * XXX an alternative way is to recognize just WITH_TIME and put
+ * the ugliness into the datetime datatype productions instead of
+ * WITH CHECK OPTION. However that requires promoting WITH to a
+ * fully reserved word. If we ever have to do that anyway
+ * (perhaps for SQL99 recursive queries), come back and simplify
+ * this code.
*/
lookahead_token = base_yylex();
switch (lookahead_token)
diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c
index 1b69e32d8e..47bb0d9cb8 100644
--- a/src/backend/port/win32/socket.c
+++ b/src/backend/port/win32/socket.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.12 2006/07/29 19:55:18 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.13 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -123,7 +123,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what)
(errmsg_internal("Failed to reset socket waiting event: %i", (int) GetLastError())));
/*
- * make sure we don't multiplex this kernel event object with a different
+ * make sure we don't multiplex this kernel event object with a different
* socket from a previous call
*/
diff --git a/src/backend/port/win32/timer.c b/src/backend/port/win32/timer.c
index 384ca9d451..5a764053f8 100644
--- a/src/backend/port/win32/timer.c
+++ b/src/backend/port/win32/timer.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.12 2006/08/09 21:18:13 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.13 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,7 +59,7 @@ pg_timer_thread(LPVOID param)
{
/* WaitForSingleObjectEx() uses milliseconds, round up */
waittime = (timerCommArea.value.it_value.tv_usec + 999) / 1000 +
- timerCommArea.value.it_value.tv_sec * 1000;
+ timerCommArea.value.it_value.tv_sec * 1000;
}
ResetEvent(timerCommArea.event);
LeaveCriticalSection(&timerCommArea.crit_sec);
@@ -85,7 +85,7 @@ pg_timer_thread(LPVOID param)
* to handle the timer setting and notification upon timeout.
*/
int
-setitimer(int which, const struct itimerval *value, struct itimerval *ovalue)
+setitimer(int which, const struct itimerval * value, struct itimerval * ovalue)
{
Assert(value != NULL);
Assert(value->it_interval.tv_sec == 0 && value->it_interval.tv_usec == 0);
diff --git a/src/backend/port/win32_sema.c b/src/backend/port/win32_sema.c
index 3af18df634..0d99a9a0f5 100644
--- a/src/backend/port/win32_sema.c
+++ b/src/backend/port/win32_sema.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32_sema.c,v 1.2 2006/07/16 02:44:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32_sema.c,v 1.3 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,9 +32,10 @@ static void ReleaseSemaphores(int code, Datum arg);
* so the semaphores are automatically freed when the last referencing
* process exits.
*/
-void PGReserveSemaphores(int maxSemas, int port)
+void
+PGReserveSemaphores(int maxSemas, int port)
{
- mySemSet = (HANDLE *)malloc(maxSemas * sizeof(HANDLE));
+ mySemSet = (HANDLE *) malloc(maxSemas * sizeof(HANDLE));
if (mySemSet == NULL)
elog(PANIC, "out of memory");
numSems = 0;
@@ -63,7 +64,8 @@ ReleaseSemaphores(int code, Datum arg)
*
* Initialize a PGSemaphore structure to represent a sema with count 1
*/
-void PGSemaphoreCreate(PGSemaphore sema)
+void
+PGSemaphoreCreate(PGSemaphore sema)
{
HANDLE cur_handle;
SECURITY_ATTRIBUTES sec_attrs;
@@ -89,7 +91,7 @@ void PGSemaphoreCreate(PGSemaphore sema)
}
else
ereport(PANIC,
- (errmsg("could not create semaphore: error code %d", (int)GetLastError())));
+ (errmsg("could not create semaphore: error code %d", (int) GetLastError())));
}
/*
@@ -97,7 +99,8 @@ void PGSemaphoreCreate(PGSemaphore sema)
*
* Reset a previously-initialized PGSemaphore to have count 0
*/
-void PGSemaphoreReset(PGSemaphore sema)
+void
+PGSemaphoreReset(PGSemaphore sema)
{
/*
* There's no direct API for this in Win32, so we have to ratchet the
@@ -112,7 +115,8 @@ void PGSemaphoreReset(PGSemaphore sema)
* Lock a semaphore (decrement count), blocking if count would be < 0.
* Serve the interrupt if interruptOK is true.
*/
-void PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
+void
+PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
{
DWORD ret;
HANDLE wh[2];
@@ -156,7 +160,8 @@ void PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
*
* Unlock a semaphore (increment count)
*/
-void PGSemaphoreUnlock(PGSemaphore sema)
+void
+PGSemaphoreUnlock(PGSemaphore sema)
{
if (!ReleaseSemaphore(*sema, 1, NULL))
ereport(FATAL,
@@ -168,7 +173,8 @@ void PGSemaphoreUnlock(PGSemaphore sema)
*
* Lock a semaphore only if able to do so without blocking
*/
-bool PGSemaphoreTryLock(PGSemaphore sema)
+bool
+PGSemaphoreTryLock(PGSemaphore sema)
{
DWORD ret;
@@ -189,7 +195,7 @@ bool PGSemaphoreTryLock(PGSemaphore sema)
/* Otherwise we are in trouble */
ereport(FATAL,
(errmsg("could not try-lock semaphore: error code %d", (int) GetLastError())));
-
+
/* keep compiler quiet */
return false;
}
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 25a19b2b24..2ba12c2f9e 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.26 2006/07/31 20:09:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.27 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -269,10 +269,10 @@ AutoVacMain(int argc, char *argv[])
BaseInit();
/*
- * Create a per-backend PGPROC struct in shared memory, except in
- * the EXEC_BACKEND case where this was done in SubPostmasterMain.
- * We must do this before we can use LWLocks (and in the EXEC_BACKEND
- * case we already had to do some stuff with LWLocks).
+ * Create a per-backend PGPROC struct in shared memory, except in the
+ * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
+ * this before we can use LWLocks (and in the EXEC_BACKEND case we already
+ * had to do some stuff with LWLocks).
*/
#ifndef EXEC_BACKEND
InitProcess();
@@ -305,9 +305,9 @@ AutoVacMain(int argc, char *argv[])
PG_SETMASK(&UnBlockSig);
/*
- * Force zero_damaged_pages OFF in the autovac process, even if it is
- * set in postgresql.conf. We don't really want such a dangerous option
- * being applied non-interactively.
+ * Force zero_damaged_pages OFF in the autovac process, even if it is set
+ * in postgresql.conf. We don't really want such a dangerous option being
+ * applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
@@ -387,8 +387,8 @@ AutoVacMain(int argc, char *argv[])
}
/*
- * Otherwise, skip a database with no pgstat entry; it means it hasn't
- * seen any activity.
+ * Otherwise, skip a database with no pgstat entry; it means it
+ * hasn't seen any activity.
*/
tmp->entry = pgstat_fetch_stat_dbentry(tmp->oid);
if (!tmp->entry)
@@ -409,7 +409,7 @@ AutoVacMain(int argc, char *argv[])
* backend signalled the postmaster. Pick up the database with the
* greatest age, and apply a database-wide vacuum on it.
*/
- int32 oldest = 0;
+ int32 oldest = 0;
whole_db = true;
foreach(cell, dblist)
@@ -535,7 +535,7 @@ process_whole_db(void)
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
- /* functions in indexes may want a snapshot set */
+ /* functions in indexes may want a snapshot set */
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
/*
@@ -593,13 +593,13 @@ do_autovacuum(PgStat_StatDBEntry *dbentry)
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
- /* functions in indexes may want a snapshot set */
+ /* functions in indexes may want a snapshot set */
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
/*
- * Clean up any dead statistics collector entries for this DB.
- * We always want to do this exactly once per DB-processing cycle,
- * even if we find nothing worth vacuuming in the database.
+ * Clean up any dead statistics collector entries for this DB. We always
+ * want to do this exactly once per DB-processing cycle, even if we find
+ * nothing worth vacuuming in the database.
*/
pgstat_vacuum_tabstat();
@@ -948,7 +948,7 @@ autovacuum_do_vac_analyze(List *relids, bool dovacuum, bool doanalyze,
/*
* autovac_report_activity
- * Report to pgstat what autovacuum is doing
+ * Report to pgstat what autovacuum is doing
*
* We send a SQL string corresponding to what the user would see if the
* equivalent command was to be issued manually.
@@ -989,13 +989,13 @@ autovac_report_activity(VacuumStmt *vacstmt, List *relids)
char *nspname = get_namespace_name(get_rel_namespace(relid));
/*
- * Paranoia is appropriate here in case relation was recently
- * dropped --- the lsyscache routines we just invoked will return
- * NULL rather than failing.
+ * Paranoia is appropriate here in case relation was recently dropped
+ * --- the lsyscache routines we just invoked will return NULL rather
+ * than failing.
*/
if (relname && nspname)
{
- int len = strlen(activity);
+ int len = strlen(activity);
snprintf(activity + len, MAX_AUTOVAC_ACTIV_LEN - len,
" %s.%s", nspname, relname);
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index 679b8a5c72..69a6734bc5 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.27 2006/08/17 23:04:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.28 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -207,14 +207,13 @@ BackgroundWriterMain(void)
#endif
/*
- * Initialize so that first time-driven event happens at the correct
- * time.
+ * Initialize so that first time-driven event happens at the correct time.
*/
last_checkpoint_time = last_xlog_switch_time = time(NULL);
/*
- * Create a resource owner to keep track of our resources (currently
- * only buffer pins).
+ * Create a resource owner to keep track of our resources (currently only
+ * buffer pins).
*/
CurrentResourceOwner = ResourceOwnerCreate(NULL, "Background Writer");
@@ -406,17 +405,17 @@ BackgroundWriterMain(void)
BgBufferSync();
/*
- * Check for archive_timeout, if so, switch xlog files. First
- * we do a quick check using possibly-stale local state.
+ * Check for archive_timeout, if so, switch xlog files. First we do a
+ * quick check using possibly-stale local state.
*/
if (XLogArchiveTimeout > 0 &&
(int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
{
/*
- * Update local state ... note that last_xlog_switch_time is
- * the last time a switch was performed *or requested*.
+ * Update local state ... note that last_xlog_switch_time is the
+ * last time a switch was performed *or requested*.
*/
- time_t last_time = GetLastSegSwitchTime();
+ time_t last_time = GetLastSegSwitchTime();
last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
@@ -427,7 +426,7 @@ BackgroundWriterMain(void)
/* Now we can do the real check */
if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
{
- XLogRecPtr switchpoint;
+ XLogRecPtr switchpoint;
/* OK, it's time to switch */
switchpoint = RequestXLogSwitch();
@@ -440,9 +439,10 @@ BackgroundWriterMain(void)
ereport(DEBUG1,
(errmsg("xlog switch forced (archive_timeout=%d)",
XLogArchiveTimeout)));
+
/*
- * Update state in any case, so we don't retry constantly
- * when the system is idle.
+ * Update state in any case, so we don't retry constantly when
+ * the system is idle.
*/
last_xlog_switch_time = now;
}
@@ -463,9 +463,9 @@ BackgroundWriterMain(void)
(bgwriter_lru_percent > 0.0 && bgwriter_lru_maxpages > 0))
udelay = BgWriterDelay * 1000L;
else if (XLogArchiveTimeout > 0)
- udelay = 1000000L; /* One second */
+ udelay = 1000000L; /* One second */
else
- udelay = 10000000L; /* Ten seconds */
+ udelay = 10000000L; /* Ten seconds */
while (udelay > 999999L)
{
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 715de0071c..db4a57b017 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -13,7 +13,7 @@
*
* Copyright (c) 2001-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.138 2006/08/28 19:38:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.139 2006/10/04 00:29:56 momjian Exp $
* ----------
*/
#include "postgres.h"
@@ -136,8 +136,8 @@ static TransactionId pgStatLocalStatusXact = InvalidTransactionId;
static PgBackendStatus *localBackendStatusTable = NULL;
static int localNumBackends = 0;
-static volatile bool need_exit = false;
-static volatile bool need_statwrite = false;
+static volatile bool need_exit = false;
+static volatile bool need_statwrite = false;
/* ----------
@@ -199,13 +199,13 @@ pgstat_init(void)
char test_byte;
int sel_res;
int tries = 0;
-
+
#define TESTBYTEVAL ((char) 199)
/*
* Force start of collector daemon if something to collect. Note that
- * pgstat_collect_querystring is now an independent facility that does
- * not require the collector daemon.
+ * pgstat_collect_querystring is now an independent facility that does not
+ * require the collector daemon.
*/
if (pgstat_collect_tuplelevel ||
pgstat_collect_blocklevel)
@@ -262,8 +262,8 @@ pgstat_init(void)
if (++tries > 1)
ereport(LOG,
- (errmsg("trying another address for the statistics collector")));
-
+ (errmsg("trying another address for the statistics collector")));
+
/*
* Create the socket.
*/
@@ -479,7 +479,6 @@ pgstat_forkexec(void)
return postmaster_forkexec(ac, av);
}
-
#endif /* EXEC_BACKEND */
@@ -823,7 +822,7 @@ pgstat_drop_relation(Oid relid)
msg.m_tableid[0] = relid;
msg.m_nentries = 1;
- len = offsetof(PgStat_MsgTabpurge, m_tableid[0]) + sizeof(Oid);
+ len = offsetof(PgStat_MsgTabpurge, m_tableid[0]) +sizeof(Oid);
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_TABPURGE);
msg.m_databaseid = MyDatabaseId;
@@ -900,7 +899,7 @@ pgstat_report_vacuum(Oid tableoid, bool shared,
msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
msg.m_tableoid = tableoid;
msg.m_analyze = analyze;
- msg.m_autovacuum = IsAutoVacuumProcess(); /* is this autovacuum? */
+ msg.m_autovacuum = IsAutoVacuumProcess(); /* is this autovacuum? */
msg.m_vacuumtime = GetCurrentTimestamp();
msg.m_tuples = tuples;
pgstat_send(&msg, sizeof(msg));
@@ -925,7 +924,7 @@ pgstat_report_analyze(Oid tableoid, bool shared, PgStat_Counter livetuples,
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_ANALYZE);
msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
msg.m_tableoid = tableoid;
- msg.m_autovacuum = IsAutoVacuumProcess(); /* is this autovacuum? */
+ msg.m_autovacuum = IsAutoVacuumProcess(); /* is this autovacuum? */
msg.m_analyzetime = GetCurrentTimestamp();
msg.m_live_tuples = livetuples;
msg.m_dead_tuples = deadtuples;
@@ -1079,8 +1078,8 @@ pgstat_initstats(PgStat_Info *stats, Relation rel)
void
pgstat_count_xact_commit(void)
{
- if (!pgstat_collect_tuplelevel &&
- !pgstat_collect_blocklevel)
+ if (!pgstat_collect_tuplelevel &&
+ !pgstat_collect_blocklevel)
return;
pgStatXactCommit++;
@@ -1110,8 +1109,8 @@ pgstat_count_xact_commit(void)
void
pgstat_count_xact_rollback(void)
{
- if (!pgstat_collect_tuplelevel &&
- !pgstat_collect_blocklevel)
+ if (!pgstat_collect_tuplelevel &&
+ !pgstat_collect_blocklevel)
return;
pgStatXactRollback++;
@@ -1319,8 +1318,8 @@ pgstat_bestart(void)
MyBEEntry = &BackendStatusArray[MyBackendId - 1];
/*
- * To minimize the time spent modifying the entry, fetch all the
- * needed data first.
+ * To minimize the time spent modifying the entry, fetch all the needed
+ * data first.
*
* If we have a MyProcPort, use its session start time (for consistency,
* and to save a kernel call).
@@ -1343,12 +1342,13 @@ pgstat_bestart(void)
/*
* Initialize my status entry, following the protocol of bumping
- * st_changecount before and after; and make sure it's even afterwards.
- * We use a volatile pointer here to ensure the compiler doesn't try to
- * get cute.
+ * st_changecount before and after; and make sure it's even afterwards. We
+ * use a volatile pointer here to ensure the compiler doesn't try to get
+ * cute.
*/
beentry = MyBEEntry;
- do {
+ do
+ {
beentry->st_changecount++;
} while ((beentry->st_changecount & 1) == 0);
@@ -1389,9 +1389,9 @@ pgstat_beshutdown_hook(int code, Datum arg)
pgstat_report_tabstat();
/*
- * Clear my status entry, following the protocol of bumping
- * st_changecount before and after. We use a volatile pointer here
- * to ensure the compiler doesn't try to get cute.
+ * Clear my status entry, following the protocol of bumping st_changecount
+ * before and after. We use a volatile pointer here to ensure the
+ * compiler doesn't try to get cute.
*/
beentry->st_changecount++;
@@ -1420,8 +1420,8 @@ pgstat_report_activity(const char *cmd_str)
return;
/*
- * To minimize the time spent modifying the entry, fetch all the
- * needed data first.
+ * To minimize the time spent modifying the entry, fetch all the needed
+ * data first.
*/
start_timestamp = GetCurrentStatementStartTimestamp();
@@ -1430,8 +1430,8 @@ pgstat_report_activity(const char *cmd_str)
/*
* Update my status entry, following the protocol of bumping
- * st_changecount before and after. We use a volatile pointer here
- * to ensure the compiler doesn't try to get cute.
+ * st_changecount before and after. We use a volatile pointer here to
+ * ensure the compiler doesn't try to get cute.
*/
beentry->st_changecount++;
@@ -1499,20 +1499,19 @@ pgstat_read_current_status(void)
for (i = 1; i <= MaxBackends; i++)
{
/*
- * Follow the protocol of retrying if st_changecount changes while
- * we copy the entry, or if it's odd. (The check for odd is needed
- * to cover the case where we are able to completely copy the entry
- * while the source backend is between increment steps.) We use a
- * volatile pointer here to ensure the compiler doesn't try to get
- * cute.
+ * Follow the protocol of retrying if st_changecount changes while we
+ * copy the entry, or if it's odd. (The check for odd is needed to
+ * cover the case where we are able to completely copy the entry while
+ * the source backend is between increment steps.) We use a volatile
+ * pointer here to ensure the compiler doesn't try to get cute.
*/
for (;;)
{
- int save_changecount = beentry->st_changecount;
+ int save_changecount = beentry->st_changecount;
/*
- * XXX if PGBE_ACTIVITY_SIZE is really large, it might be best
- * to use strcpy not memcpy for copying the activity string?
+ * XXX if PGBE_ACTIVITY_SIZE is really large, it might be best to
+ * use strcpy not memcpy for copying the activity string?
*/
memcpy(localentry, (char *) beentry, sizeof(PgBackendStatus));
@@ -1589,7 +1588,7 @@ pgstat_send(void *msg, int len)
/* ----------
* PgstatCollectorMain() -
*
- * Start up the statistics collector process. This is the body of the
+ * Start up the statistics collector process. This is the body of the
* postmaster child process.
*
* The argc/argv parameters are valid only in EXEC_BACKEND case.
@@ -1602,6 +1601,7 @@ PgstatCollectorMain(int argc, char *argv[])
bool need_timer = false;
int len;
PgStat_Msg msg;
+
#ifdef HAVE_POLL
struct pollfd input_fd;
#else
@@ -1655,8 +1655,8 @@ PgstatCollectorMain(int argc, char *argv[])
pgstat_read_statsfile(&pgStatDBHash, InvalidOid);
/*
- * Setup the descriptor set for select(2). Since only one bit in the
- * set ever changes, we need not repeat FD_ZERO each time.
+ * Setup the descriptor set for select(2). Since only one bit in the set
+ * ever changes, we need not repeat FD_ZERO each time.
*/
#ifndef HAVE_POLL
FD_ZERO(&rfds);
@@ -1666,13 +1666,13 @@ PgstatCollectorMain(int argc, char *argv[])
* Loop to process messages until we get SIGQUIT or detect ungraceful
* death of our parent postmaster.
*
- * For performance reasons, we don't want to do a PostmasterIsAlive()
- * test after every message; instead, do it at statwrite time and if
+ * For performance reasons, we don't want to do a PostmasterIsAlive() test
+ * after every message; instead, do it at statwrite time and if
* select()/poll() is interrupted by timeout.
*/
for (;;)
{
- int got_data;
+ int got_data;
/*
* Quit if we get SIGQUIT from the postmaster.
@@ -1681,7 +1681,7 @@ PgstatCollectorMain(int argc, char *argv[])
break;
/*
- * If time to write the stats file, do so. Note that the alarm
+ * If time to write the stats file, do so. Note that the alarm
* interrupt isn't re-enabled immediately, but only after we next
* receive a stats message; so no cycles are wasted when there is
* nothing going on.
@@ -1701,9 +1701,9 @@ PgstatCollectorMain(int argc, char *argv[])
* Wait for a message to arrive; but not for more than
* PGSTAT_SELECT_TIMEOUT seconds. (This determines how quickly we will
* shut down after an ungraceful postmaster termination; so it needn't
- * be very fast. However, on some systems SIGQUIT won't interrupt
- * the poll/select call, so this also limits speed of response to
- * SIGQUIT, which is more important.)
+ * be very fast. However, on some systems SIGQUIT won't interrupt the
+ * poll/select call, so this also limits speed of response to SIGQUIT,
+ * which is more important.)
*
* We use poll(2) if available, otherwise select(2)
*/
@@ -1722,7 +1722,6 @@ PgstatCollectorMain(int argc, char *argv[])
}
got_data = (input_fd.revents != 0);
-
#else /* !HAVE_POLL */
FD_SET(pgStatSock, &rfds);
@@ -1744,7 +1743,6 @@ PgstatCollectorMain(int argc, char *argv[])
}
got_data = FD_ISSET(pgStatSock, &rfds);
-
#endif /* HAVE_POLL */
/*
@@ -1826,20 +1824,20 @@ PgstatCollectorMain(int argc, char *argv[])
{
if (setitimer(ITIMER_REAL, &write_timeout, NULL))
ereport(ERROR,
- (errmsg("could not set statistics collector timer: %m")));
+ (errmsg("could not set statistics collector timer: %m")));
need_timer = false;
}
}
else
{
/*
- * We can only get here if the select/poll timeout elapsed.
- * Check for postmaster death.
+ * We can only get here if the select/poll timeout elapsed. Check
+ * for postmaster death.
*/
if (!PostmasterIsAlive(true))
break;
}
- } /* end of message-processing loop */
+ } /* end of message-processing loop */
/*
* Save the final stats to reuse at next startup.
@@ -1953,9 +1951,9 @@ pgstat_write_statsfile(void)
while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * Write out the DB entry including the number of live backends.
- * We don't write the tables pointer since it's of no use to any
- * other process.
+ * Write out the DB entry including the number of live backends. We
+ * don't write the tables pointer since it's of no use to any other
+ * process.
*/
fputc('D', fpout);
fwrite(dbentry, offsetof(PgStat_StatDBEntry, tables), 1, fpout);
@@ -1987,8 +1985,8 @@ pgstat_write_statsfile(void)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write temporary statistics file \"%s\": %m",
- PGSTAT_STAT_TMPFILE)));
+ errmsg("could not write temporary statistics file \"%s\": %m",
+ PGSTAT_STAT_TMPFILE)));
fclose(fpout);
unlink(PGSTAT_STAT_TMPFILE);
}
@@ -2491,10 +2489,10 @@ pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len)
if (tabentry == NULL)
return;
- if (msg->m_autovacuum)
+ if (msg->m_autovacuum)
tabentry->autovac_vacuum_timestamp = msg->m_vacuumtime;
- else
- tabentry->vacuum_timestamp = msg->m_vacuumtime;
+ else
+ tabentry->vacuum_timestamp = msg->m_vacuumtime;
tabentry->n_live_tuples = msg->m_tuples;
tabentry->n_dead_tuples = 0;
if (msg->m_analyze)
@@ -2539,9 +2537,9 @@ pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len)
if (tabentry == NULL)
return;
- if (msg->m_autovacuum)
+ if (msg->m_autovacuum)
tabentry->autovac_analyze_timestamp = msg->m_analyzetime;
- else
+ else
tabentry->analyze_timestamp = msg->m_analyzetime;
tabentry->n_live_tuples = msg->m_live_tuples;
tabentry->n_dead_tuples = msg->m_dead_tuples;
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 130415c74c..ed4de72698 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.499 2006/08/15 18:26:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.500 2006/10/04 00:29:56 momjian Exp $
*
* NOTES
*
@@ -141,9 +141,9 @@ typedef struct bkend
static Dllist *BackendList;
#ifdef EXEC_BACKEND
-/*
+/*
* Number of entries in the backend table. Twice the number of backends,
- * plus four other subprocesses (stats, bgwriter, autovac, logger).
+ * plus four other subprocesses (stats, bgwriter, autovac, logger).
*/
#define NUM_BACKENDARRAY_ELEMS (2*MaxBackends + 4)
static Backend *ShmemBackendArray;
@@ -510,6 +510,7 @@ PostmasterMain(int argc, char *argv[])
break;
case 'T':
+
/*
* In the event that some backend dumps core, send SIGSTOP,
* rather than SIGQUIT, to all its peers. This lets the wily
@@ -519,21 +520,21 @@ PostmasterMain(int argc, char *argv[])
break;
case 't':
- {
- const char *tmp = get_stats_option_name(optarg);
-
- if (tmp)
{
- SetConfigOption(tmp, "true", PGC_POSTMASTER, PGC_S_ARGV);
- }
- else
- {
- write_stderr("%s: invalid argument for option -t: \"%s\"\n",
- progname, optarg);
- ExitPostmaster(1);
+ const char *tmp = get_stats_option_name(optarg);
+
+ if (tmp)
+ {
+ SetConfigOption(tmp, "true", PGC_POSTMASTER, PGC_S_ARGV);
+ }
+ else
+ {
+ write_stderr("%s: invalid argument for option -t: \"%s\"\n",
+ progname, optarg);
+ ExitPostmaster(1);
+ }
+ break;
}
- break;
- }
case 'W':
SetConfigOption("post_auth_delay", optarg, PGC_POSTMASTER, PGC_S_ARGV);
@@ -2468,9 +2469,9 @@ BackendStartup(Port *port)
* postmaster's listen sockets. (In EXEC_BACKEND case this is all
* done in SubPostmasterMain.)
*/
- IsUnderPostmaster = true; /* we are a postmaster subprocess now */
+ IsUnderPostmaster = true; /* we are a postmaster subprocess now */
- MyProcPid = getpid(); /* reset MyProcPid */
+ MyProcPid = getpid(); /* reset MyProcPid */
/* We don't want the postmaster's proc_exit() handlers */
on_exit_reset();
@@ -2718,8 +2719,8 @@ BackendInitialize(Port *port)
* title for ps. It's good to do this as early as possible in startup.
*/
init_ps_display(port->user_name, port->database_name, remote_ps_data,
- update_process_title ? "authentication" : "");
-
+ update_process_title ? "authentication" : "");
+
/*
* Now perform authentication exchange.
*/
@@ -3252,12 +3253,11 @@ SubPostmasterMain(int argc, char *argv[])
/*
* Perform additional initialization and client authentication.
*
- * We want to do this before InitProcess() for a couple of reasons:
- * 1. so that we aren't eating up a PGPROC slot while waiting on the
- * client.
- * 2. so that if InitProcess() fails due to being out of PGPROC slots,
- * we have already initialized libpq and are able to report the error
- * to the client.
+ * We want to do this before InitProcess() for a couple of reasons: 1.
+ * so that we aren't eating up a PGPROC slot while waiting on the
+ * client. 2. so that if InitProcess() fails due to being out of
+ * PGPROC slots, we have already initialized libpq and are able to
+ * report the error to the client.
*/
BackendInitialize(&port);
@@ -3268,16 +3268,15 @@ SubPostmasterMain(int argc, char *argv[])
InitProcess();
/*
- * Attach process to shared data structures. If testing
- * EXEC_BACKEND on Linux, you must run this as root
- * before starting the postmaster:
+ * Attach process to shared data structures. If testing EXEC_BACKEND
+ * on Linux, you must run this as root before starting the postmaster:
*
- * echo 0 >/proc/sys/kernel/randomize_va_space
+ * echo 0 >/proc/sys/kernel/randomize_va_space
*
- * This prevents a randomized stack base address that causes
- * child shared memory to be at a different address than
- * the parent, making it impossible to attached to shared
- * memory. Return the value to '1' when finished.
+ * This prevents a randomized stack base address that causes child
+ * shared memory to be at a different address than the parent, making
+ * it impossible to attached to shared memory. Return the value to
+ * '1' when finished.
*/
CreateSharedMemoryAndSemaphores(false, 0);
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 17f437c385..d4f9c7f1ff 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.114 2006/09/05 21:08:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.115 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,7 @@
static void checkRuleResultList(List *targetList, TupleDesc resultDesc,
- bool isSelect);
+ bool isSelect);
static bool setRuleCheckAsUser_walker(Node *node, Oid *context);
static void setRuleCheckAsUser_Query(Query *qry, Oid userid);
@@ -286,7 +286,7 @@ DefineQueryRewrite(RuleStmt *stmt)
*/
if (!replace && event_relation->rd_rules != NULL)
{
- int i;
+ int i;
for (i = 0; i < event_relation->rd_rules->numLocks; i++)
{
@@ -369,14 +369,14 @@ DefineQueryRewrite(RuleStmt *stmt)
else
{
/*
- * For non-SELECT rules, a RETURNING list can appear in at most one
- * of the actions ... and there can't be any RETURNING list at all
- * in a conditional or non-INSTEAD rule. (Actually, there can be
- * at most one RETURNING list across all rules on the same event,
- * but it seems best to enforce that at rule expansion time.) If
- * there is a RETURNING list, it must match the event relation.
+ * For non-SELECT rules, a RETURNING list can appear in at most one of
+ * the actions ... and there can't be any RETURNING list at all in a
+ * conditional or non-INSTEAD rule. (Actually, there can be at most
+ * one RETURNING list across all rules on the same event, but it seems
+ * best to enforce that at rule expansion time.) If there is a
+ * RETURNING list, it must match the event relation.
*/
- bool haveReturning = false;
+ bool haveReturning = false;
foreach(l, action)
{
@@ -387,7 +387,7 @@ DefineQueryRewrite(RuleStmt *stmt)
if (haveReturning)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot have multiple RETURNING lists in a rule")));
+ errmsg("cannot have multiple RETURNING lists in a rule")));
haveReturning = true;
if (event_qual != NULL)
ereport(ERROR,
@@ -478,7 +478,7 @@ checkRuleResultList(List *targetList, TupleDesc resultDesc, bool isSelect)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
isSelect ?
- errmsg("SELECT rule's target list has too many entries") :
+ errmsg("SELECT rule's target list has too many entries") :
errmsg("RETURNING list has too many entries")));
attr = resultDesc->attrs[i - 1];
@@ -513,9 +513,9 @@ checkRuleResultList(List *targetList, TupleDesc resultDesc, bool isSelect)
/*
* Allow typmods to be different only if one of them is -1, ie,
- * "unspecified". This is necessary for cases like "numeric",
- * where the table will have a filled-in default length but the
- * select rule's expression will probably have typmod = -1.
+ * "unspecified". This is necessary for cases like "numeric", where
+ * the table will have a filled-in default length but the select
+ * rule's expression will probably have typmod = -1.
*/
tletypmod = exprTypmod((Node *) tle->expr);
if (attr->atttypmod != tletypmod &&
@@ -638,4 +638,5 @@ RenameRewriteRule(Oid owningRel, const char *oldName,
heap_freetuple(ruletup);
heap_close(pg_rewrite_desc, RowExclusiveLock);
}
+
#endif
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 1af5bd7e7f..81af9eebad 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.166 2006/09/02 17:06:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.167 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,13 +43,13 @@ static Query *rewriteRuleAction(Query *parsetree,
bool *returning_flag);
static List *adjustJoinTreeList(Query *parsetree, bool removert, int rt_index);
static void rewriteTargetList(Query *parsetree, Relation target_relation,
- List **attrno_list);
+ List **attrno_list);
static TargetEntry *process_matched_tle(TargetEntry *src_tle,
TargetEntry *prior_tle,
const char *attrName);
static Node *get_assignment_input(Node *node);
static void rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation,
- List *attrnos);
+ List *attrnos);
static void markQueryForLocking(Query *qry, bool forUpdate, bool noWait,
bool skipOldNew);
static List *matchLocks(CmdType event, RuleLock *rulelocks,
@@ -431,10 +431,10 @@ rewriteRuleAction(Query *parsetree,
}
/*
- * If rule_action has a RETURNING clause, then either throw it away
- * if the triggering query has no RETURNING clause, or rewrite it to
- * emit what the triggering query's RETURNING clause asks for. Throw
- * an error if more than one rule has a RETURNING clause.
+ * If rule_action has a RETURNING clause, then either throw it away if the
+ * triggering query has no RETURNING clause, or rewrite it to emit what
+ * the triggering query's RETURNING clause asks for. Throw an error if
+ * more than one rule has a RETURNING clause.
*/
if (!parsetree->returningList)
rule_action->returningList = NIL;
@@ -443,7 +443,7 @@ rewriteRuleAction(Query *parsetree,
if (*returning_flag)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot have RETURNING lists in multiple rules")));
+ errmsg("cannot have RETURNING lists in multiple rules")));
*returning_flag = true;
rule_action->returningList = (List *)
ResolveNew((Node *) parsetree->returningList,
@@ -907,12 +907,12 @@ searchForDefault(RangeTblEntry *rte)
foreach(lc, rte->values_lists)
{
- List *sublist = (List *) lfirst(lc);
- ListCell *lc2;
+ List *sublist = (List *) lfirst(lc);
+ ListCell *lc2;
foreach(lc2, sublist)
{
- Node *col = (Node *) lfirst(lc2);
+ Node *col = (Node *) lfirst(lc2);
if (IsA(col, SetToDefault))
return true;
@@ -952,15 +952,15 @@ rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation, List *attrnos)
newValues = NIL;
foreach(lc, rte->values_lists)
{
- List *sublist = (List *) lfirst(lc);
- List *newList = NIL;
- ListCell *lc2;
- ListCell *lc3;
+ List *sublist = (List *) lfirst(lc);
+ List *newList = NIL;
+ ListCell *lc2;
+ ListCell *lc3;
forboth(lc2, sublist, lc3, attrnos)
{
- Node *col = (Node *) lfirst(lc2);
- int attrno = lfirst_int(lc3);
+ Node *col = (Node *) lfirst(lc2);
+ int attrno = lfirst_int(lc3);
if (IsA(col, SetToDefault))
{
@@ -972,7 +972,7 @@ rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation, List *attrnos)
if (!att_tup->attisdropped)
new_expr = build_column_default(target_relation, attrno);
else
- new_expr = NULL; /* force a NULL if dropped */
+ new_expr = NULL; /* force a NULL if dropped */
/*
* If there is no default (ie, default is effectively NULL),
@@ -1548,8 +1548,8 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
RangeTblEntry *values_rte = NULL;
/*
- * If it's an INSERT ... VALUES (...), (...), ...
- * there will be a single RTE for the VALUES targetlists.
+ * If it's an INSERT ... VALUES (...), (...), ... there will be a
+ * single RTE for the VALUES targetlists.
*/
if (list_length(parsetree->jointree->fromlist) == 1)
{
@@ -1567,7 +1567,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
if (values_rte)
{
- List *attrnos;
+ List *attrnos;
/* Process the main targetlist ... */
rewriteTargetList(parsetree, rt_entry_relation, &attrnos);
@@ -1638,11 +1638,11 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
}
/*
- * If there is an INSTEAD, and the original query has a RETURNING,
- * we have to have found a RETURNING in the rule(s), else fail.
- * (Because DefineQueryRewrite only allows RETURNING in unconditional
- * INSTEAD rules, there's no need to worry whether the substituted
- * RETURNING will actually be executed --- it must be.)
+ * If there is an INSTEAD, and the original query has a RETURNING, we
+ * have to have found a RETURNING in the rule(s), else fail. (Because
+ * DefineQueryRewrite only allows RETURNING in unconditional INSTEAD
+ * rules, there's no need to worry whether the substituted RETURNING
+ * will actually be executed --- it must be.)
*/
if ((instead || qual_product != NULL) &&
parsetree->returningList &&
@@ -1653,22 +1653,22 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
case CMD_INSERT:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot INSERT RETURNING on relation \"%s\"",
- RelationGetRelationName(rt_entry_relation)),
+ errmsg("cannot INSERT RETURNING on relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation)),
errhint("You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause.")));
break;
case CMD_UPDATE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot UPDATE RETURNING on relation \"%s\"",
- RelationGetRelationName(rt_entry_relation)),
+ errmsg("cannot UPDATE RETURNING on relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation)),
errhint("You need an unconditional ON UPDATE DO INSTEAD rule with a RETURNING clause.")));
break;
case CMD_DELETE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot DELETE RETURNING on relation \"%s\"",
- RelationGetRelationName(rt_entry_relation)),
+ errmsg("cannot DELETE RETURNING on relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation)),
errhint("You need an unconditional ON DELETE DO INSTEAD rule with a RETURNING clause.")));
break;
default:
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index ce951a4601..96bfdf5f3a 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.101 2006/07/14 14:52:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.102 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -984,10 +984,11 @@ ResolveNew(Node *node, int target_varno, int sublevels_up,
{
if (IsA(result, Query))
((Query *) result)->hasSubLinks = true;
+
/*
* Note: if we're called on a non-Query node then it's the caller's
- * responsibility to update hasSubLinks in the ancestor Query.
- * This is pretty fragile and perhaps should be rethought ...
+ * responsibility to update hasSubLinks in the ancestor Query. This is
+ * pretty fragile and perhaps should be rethought ...
*/
}
diff --git a/src/backend/rewrite/rewriteRemove.c b/src/backend/rewrite/rewriteRemove.c
index f08001ce03..d09b5d2c42 100644
--- a/src/backend/rewrite/rewriteRemove.c
+++ b/src/backend/rewrite/rewriteRemove.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteRemove.c,v 1.67 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteRemove.c,v 1.68 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@
*/
void
RemoveRewriteRule(Oid owningRel, const char *ruleName, DropBehavior behavior,
- bool missing_ok)
+ bool missing_ok)
{
HeapTuple tuple;
Oid eventRelationOid;
@@ -54,7 +54,7 @@ RemoveRewriteRule(Oid owningRel, const char *ruleName, DropBehavior behavior,
*/
if (!HeapTupleIsValid(tuple))
{
- if (! missing_ok)
+ if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("rule \"%s\" for relation \"%s\" does not exist",
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index aaa0dff684..5efd9cc7b1 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.211 2006/09/25 22:01:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.212 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -178,10 +178,10 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* This can happen because mdread doesn't complain about reads beyond
* EOF --- which is arguably bogus, but changing it seems tricky ---
* and so a previous attempt to read a block just beyond EOF could
- * have left a "valid" zero-filled buffer. Unfortunately, we have
+ * have left a "valid" zero-filled buffer. Unfortunately, we have
* also seen this case occurring because of buggy Linux kernels that
* sometimes return an lseek(SEEK_END) result that doesn't account for
- * a recent write. In that situation, the pre-existing buffer would
+ * a recent write. In that situation, the pre-existing buffer would
* contain valid data that we don't want to overwrite. Since the
* legitimate cases should always have left a zero-filled buffer,
* complain if not PageIsNew.
@@ -194,10 +194,10 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
- * We *must* do smgrextend before succeeding, else the
- * page will not be reserved by the kernel, and the next P_NEW call
- * will decide to return the same page. Clear the BM_VALID bit,
- * do the StartBufferIO call that BufferAlloc didn't, and proceed.
+ * We *must* do smgrextend before succeeding, else the page will not
+ * be reserved by the kernel, and the next P_NEW call will decide to
+ * return the same page. Clear the BM_VALID bit, do the StartBufferIO
+ * call that BufferAlloc didn't, and proceed.
*/
if (isLocalBuf)
{
@@ -208,11 +208,12 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
else
{
/*
- * Loop to handle the very small possibility that someone
- * re-sets BM_VALID between our clearing it and StartBufferIO
- * inspecting it.
+ * Loop to handle the very small possibility that someone re-sets
+ * BM_VALID between our clearing it and StartBufferIO inspecting
+ * it.
*/
- do {
+ do
+ {
LockBufHdr(bufHdr);
Assert(bufHdr->flags & BM_VALID);
bufHdr->flags &= ~BM_VALID;
@@ -311,10 +312,10 @@ BufferAlloc(Relation reln,
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
- LWLockId newPartitionLock; /* buffer partition lock for it */
+ LWLockId newPartitionLock; /* buffer partition lock for it */
BufferTag oldTag; /* previous identity of selected buffer */
uint32 oldHash; /* hash value for oldTag */
- LWLockId oldPartitionLock; /* buffer partition lock for it */
+ LWLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
int buf_id;
volatile BufferDesc *buf;
@@ -620,7 +621,7 @@ InvalidateBuffer(volatile BufferDesc *buf)
{
BufferTag oldTag;
uint32 oldHash; /* hash value for oldTag */
- LWLockId oldPartitionLock; /* buffer partition lock for it */
+ LWLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
/* Save the original buffer tag before dropping the spinlock */
@@ -629,9 +630,9 @@ InvalidateBuffer(volatile BufferDesc *buf)
UnlockBufHdr(buf);
/*
- * Need to compute the old tag's hashcode and partition lock ID.
- * XXX is it worth storing the hashcode in BufferDesc so we need
- * not recompute it here? Probably not.
+ * Need to compute the old tag's hashcode and partition lock ID. XXX is it
+ * worth storing the hashcode in BufferDesc so we need not recompute it
+ * here? Probably not.
*/
oldHash = BufTableHashCode(&oldTag);
oldPartitionLock = BufMappingPartitionLock(oldHash);
@@ -715,7 +716,7 @@ retry:
*
* Marks buffer contents as dirty (actual write happens later).
*
- * Buffer must be pinned and exclusive-locked. (If caller does not hold
+ * Buffer must be pinned and exclusive-locked. (If caller does not hold
* exclusive lock, then somebody could be in process of writing the buffer,
* leading to risk of bad data written to disk.)
*/
@@ -972,9 +973,9 @@ BufferSync(void)
{
/*
* If in bgwriter, absorb pending fsync requests after each
- * WRITES_PER_ABSORB write operations, to prevent overflow of
- * the fsync request queue. If not in bgwriter process, this is
- * a no-op.
+ * WRITES_PER_ABSORB write operations, to prevent overflow of the
+ * fsync request queue. If not in bgwriter process, this is a
+ * no-op.
*/
if (--absorb_counter <= 0)
{
@@ -1770,9 +1771,9 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
/*
* This routine might get called many times on the same page, if we are
* making the first scan after commit of an xact that added/deleted many
- * tuples. So, be as quick as we can if the buffer is already dirty. We
+ * tuples. So, be as quick as we can if the buffer is already dirty. We
* do this by not acquiring spinlock if it looks like the status bits are
- * already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
+ * already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
* immediately after we look, because the buffer content update is already
* done and will be reflected in the I/O.)
*/
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 333053c1c9..92c2cfce62 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.56 2006/07/23 18:34:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.57 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -237,12 +237,12 @@ StrategyInitialize(bool init)
/*
* Initialize the shared buffer lookup hashtable.
*
- * Since we can't tolerate running out of lookup table entries, we
- * must be sure to specify an adequate table size here. The maximum
- * steady-state usage is of course NBuffers entries, but BufferAlloc()
- * tries to insert a new entry before deleting the old. In principle
- * this could be happening in each partition concurrently, so we
- * could need as many as NBuffers + NUM_BUFFER_PARTITIONS entries.
+ * Since we can't tolerate running out of lookup table entries, we must be
+ * sure to specify an adequate table size here. The maximum steady-state
+ * usage is of course NBuffers entries, but BufferAlloc() tries to insert
+ * a new entry before deleting the old. In principle this could be
+ * happening in each partition concurrently, so we could need as many as
+ * NBuffers + NUM_BUFFER_PARTITIONS entries.
*/
InitBufTable(NBuffers + NUM_BUFFER_PARTITIONS);
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 04cdd95a69..a594b16edf 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.129 2006/08/24 03:15:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.130 2006/10/04 00:29:57 momjian Exp $
*
* NOTES:
*
@@ -1026,7 +1026,7 @@ retry:
* in which case immediate retry is indicated.
*/
#ifdef WIN32
- DWORD error = GetLastError();
+ DWORD error = GetLastError();
switch (error)
{
@@ -1081,7 +1081,7 @@ retry:
* See comments in FileRead()
*/
#ifdef WIN32
- DWORD error = GetLastError();
+ DWORD error = GetLastError();
switch (error)
{
@@ -1279,8 +1279,8 @@ TryAgain:
}
/*
- * TEMPORARY hack to log the Windows error code on fopen failures,
- * in hopes of diagnosing some hard-to-reproduce problems.
+ * TEMPORARY hack to log the Windows error code on fopen failures, in
+ * hopes of diagnosing some hard-to-reproduce problems.
*/
#ifdef WIN32
{
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 3309ba11da..7c85b44221 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.55 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.56 2006/10/04 00:29:57 momjian Exp $
*
*
* NOTES:
@@ -111,7 +111,7 @@ typedef struct FsmCacheRelHeader
RelFileNode key; /* hash key (must be first) */
bool isIndex; /* if true, we store only page numbers */
uint32 avgRequest; /* moving average of space requests */
- BlockNumber interestingPages; /* # of pages with useful free space */
+ BlockNumber interestingPages; /* # of pages with useful free space */
int32 storedPages; /* # of pages stored in arena */
} FsmCacheRelHeader;
@@ -128,8 +128,8 @@ static void CheckFreeSpaceMapStatistics(int elevel, int numRels,
static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
static FSMRelation *create_fsm_rel(RelFileNode *rel);
static void delete_fsm_rel(FSMRelation *fsmrel);
-static int realloc_fsm_rel(FSMRelation *fsmrel, BlockNumber interestingPages,
- bool isIndex);
+static int realloc_fsm_rel(FSMRelation *fsmrel, BlockNumber interestingPages,
+ bool isIndex);
static void link_fsm_rel_usage(FSMRelation *fsmrel);
static void unlink_fsm_rel_usage(FSMRelation *fsmrel);
static void link_fsm_rel_storage(FSMRelation *fsmrel);
@@ -601,6 +601,7 @@ PrintFreeSpaceMapStatistics(int elevel)
double needed;
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
+
/*
* Count total space actually used, as well as the unclamped request total
*/
@@ -1688,9 +1689,9 @@ fsm_calc_request(FSMRelation *fsmrel)
}
/*
- * We clamp the per-relation requests to at most half the arena size;
- * this is intended to prevent a single bloated relation from crowding
- * out FSM service for every other rel.
+ * We clamp the per-relation requests to at most half the arena size; this
+ * is intended to prevent a single bloated relation from crowding out FSM
+ * service for every other rel.
*/
req = Min(req, FreeSpaceMap->totalChunks / 2);
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index a4d36d157b..716963f448 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.87 2006/08/01 19:03:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.88 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,8 +95,8 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
size = add_size(size, 8192 - (size % 8192));
/*
- * The shared memory for add-ins is treated as a separate
- * segment, but in reality it is not.
+ * The shared memory for add-ins is treated as a separate segment, but
+ * in reality it is not.
*/
size_b4addins = size;
size = add_size(size, AddinShmemSize());
@@ -115,8 +115,8 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
* Modify hdr to show segment size before add-ins
*/
seghdr->totalsize = size_b4addins;
-
- /*
+
+ /*
* Set up segment header sections in each Addin context
*/
InitAddinContexts((void *) ((char *) seghdr + size_b4addins));
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 7b4ea9077e..39de167fe5 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -23,7 +23,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.17 2006/09/03 15:59:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.18 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -393,7 +393,7 @@ TransactionIdIsActive(TransactionId xid)
* This is used by VACUUM to decide which deleted tuples must be preserved
* in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* FALSE is sufficient for non-shared relations, since only backends in my
- * own database could ever see the tuples in them. Also, we can ignore
+ * own database could ever see the tuples in them. Also, we can ignore
* concurrently running lazy VACUUMs because (a) they must be working on other
* tables, and (b) they don't need to do snapshot-based lookups.
*
@@ -545,13 +545,13 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
globalxmin = xmin = GetTopTransactionId();
/*
- * It is sufficient to get shared lock on ProcArrayLock, even if we
- * are computing a serializable snapshot and therefore will be setting
+ * It is sufficient to get shared lock on ProcArrayLock, even if we are
+ * computing a serializable snapshot and therefore will be setting
* MyProc->xmin. This is because any two backends that have overlapping
* shared holds on ProcArrayLock will certainly compute the same xmin
* (since no xact, in particular not the oldest, can exit the set of
* running transactions while we hold ProcArrayLock --- see further
- * discussion just below). So it doesn't matter whether another backend
+ * discussion just below). So it doesn't matter whether another backend
* concurrently doing GetSnapshotData or GetOldestXmin sees our xmin as
* set or not; he'd compute the same xmin for himself either way.
*/
@@ -595,8 +595,8 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
/*
* Ignore my own proc (dealt with my xid above), procs not running a
- * transaction, xacts started since we read the next transaction
- * ID, and xacts executing LAZY VACUUM. There's no need to store XIDs
+ * transaction, xacts started since we read the next transaction ID,
+ * and xacts executing LAZY VACUUM. There's no need to store XIDs
* above what we got from ReadNewTransactionId, since we'll treat them
* as running anyway. We also assume that such xacts can't compute an
* xmin older than ours, so they needn't be considered in computing
@@ -625,18 +625,17 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
* their parent, so no need to check them against xmin.
*
* The other backend can add more subxids concurrently, but cannot
- * remove any. Hence it's important to fetch nxids just once.
- * Should be safe to use memcpy, though. (We needn't worry about
- * missing any xids added concurrently, because they must postdate
- * xmax.)
+ * remove any. Hence it's important to fetch nxids just once. Should
+ * be safe to use memcpy, though. (We needn't worry about missing any
+ * xids added concurrently, because they must postdate xmax.)
*/
if (subcount >= 0)
{
if (proc->subxids.overflowed)
- subcount = -1; /* overflowed */
+ subcount = -1; /* overflowed */
else
{
- int nxids = proc->subxids.nxids;
+ int nxids = proc->subxids.nxids;
if (nxids > 0)
{
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 4cff4c19f3..685912e157 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.96 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.97 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,14 +62,14 @@
* hash bucket garbage collector if need be. Right now, it seems
* unnecessary.
*
- * (e) Add-ins can request their own logical shared memory segments
- * by calling RegisterAddinContext() from the preload-libraries hook.
- * Each call establishes a uniquely named add-in shared memopry
- * context which will be set up as part of postgres intialisation.
- * Memory can be allocated from these contexts using
- * ShmemAllocFromContext(), and can be reset to its initial condition
- * using ShmemResetContext(). Also, RegisterAddinLWLock(LWLockid *lock_ptr)
- * can be used to request that a LWLock be allocated, placed into *lock_ptr.
+ * (e) Add-ins can request their own logical shared memory segments
+ * by calling RegisterAddinContext() from the preload-libraries hook.
 + *		Each call establishes a uniquely named add-in shared memory
 + *		context which will be set up as part of postgres initialisation.
+ * Memory can be allocated from these contexts using
+ * ShmemAllocFromContext(), and can be reset to its initial condition
+ * using ShmemResetContext(). Also, RegisterAddinLWLock(LWLockid *lock_ptr)
+ * can be used to request that a LWLock be allocated, placed into *lock_ptr.
*/
#include "postgres.h"
@@ -98,9 +98,9 @@ static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
/* Structures and globals for managing add-in shared memory contexts */
typedef struct context
{
- char *name;
- Size size;
- PGShmemHeader *seg_hdr;
+ char *name;
+ Size size;
+ PGShmemHeader *seg_hdr;
struct context *next;
} ContextNode;
@@ -138,9 +138,9 @@ InitShmemAllocation(void)
Assert(shmhdr != NULL);
/*
- * Initialize the spinlock used by ShmemAlloc. We have to do the
- * space allocation the hard way, since obviously ShmemAlloc can't
- * be called yet.
+ * Initialize the spinlock used by ShmemAlloc. We have to do the space
+ * allocation the hard way, since obviously ShmemAlloc can't be called
+ * yet.
*/
ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
@@ -153,22 +153,22 @@ InitShmemAllocation(void)
ShmemIndex = (HTAB *) NULL;
/*
- * Initialize ShmemVariableCache for transaction manager.
- * (This doesn't really belong here, but not worth moving.)
+ * Initialize ShmemVariableCache for transaction manager. (This doesn't
+ * really belong here, but not worth moving.)
*/
ShmemVariableCache = (VariableCache)
- ShmemAlloc(sizeof(*ShmemVariableCache));
+ ShmemAlloc(sizeof(*ShmemVariableCache));
memset(ShmemVariableCache, 0, sizeof(*ShmemVariableCache));
}
/*
* RegisterAddinContext -- Register the requirement for a named shared
- * memory context.
+ * memory context.
*/
void
RegisterAddinContext(const char *name, Size size)
{
- char *newstr = malloc(strlen(name) + 1);
+ char *newstr = malloc(strlen(name) + 1);
ContextNode *node = malloc(sizeof(ContextNode));
strcpy(newstr, name);
@@ -185,7 +185,7 @@ RegisterAddinContext(const char *name, Size size)
/*
* ContextFromName -- Return the ContextNode for the given named
- * context, or NULL if not found.
+ * context, or NULL if not found.
*/
static ContextNode *
ContextFromName(const char *name)
@@ -203,7 +203,7 @@ ContextFromName(const char *name)
/*
* InitAddinContexts -- Initialise the registered addin shared memory
- * contexts.
+ * contexts.
*/
void
InitAddinContexts(void *start)
@@ -218,7 +218,7 @@ InitAddinContexts(void *start)
next_segment->totalsize = context->size;
next_segment->freeoffset = MAXALIGN(sizeof(PGShmemHeader));
- next_segment = (PGShmemHeader *)
+ next_segment = (PGShmemHeader *)
((char *) next_segment + context->size);
context = context->next;
}
@@ -245,7 +245,7 @@ ShmemResetContext(const char *name)
/*
* AddinShmemSize -- Report how much shared memory has been registered
- * for add-ins.
+ * for add-ins.
*/
Size
AddinShmemSize(void)
@@ -265,15 +265,15 @@ AddinShmemSize(void)
void *
ShmemAllocFromContext(Size size, const char *context_name)
{
- Size newStart;
- Size newFree;
- void *newSpace;
- ContextNode *context;
+ Size newStart;
+ Size newFree;
+ void *newSpace;
+ ContextNode *context;
/* use volatile pointer to prevent code rearrangement */
volatile PGShmemHeader *shmemseghdr = ShmemSegHdr;
- /*
+ /*
* if context_name is provided, allocate from the named context
*/
if (context_name)
@@ -480,8 +480,8 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
- * index has been initialized. This should be OK because no
- * other process can be accessing shared memory yet.
+ * index has been initialized. This should be OK because no other
+ * process can be accessing shared memory yet.
*/
Assert(shmemseghdr->indexoffset == 0);
structPtr = ShmemAlloc(size);
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index 28e862533d..4a4c0990ad 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.88 2006/09/22 23:20:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.89 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,7 +54,7 @@ RelationInitLockInfo(Relation relation)
static inline void
SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
{
- Oid dbid;
+ Oid dbid;
if (IsSharedRelation(relid))
dbid = InvalidOid;
@@ -67,7 +67,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
/*
* LockRelationOid
*
- * Lock a relation given only its OID. This should generally be used
+ * Lock a relation given only its OID. This should generally be used
* before attempting to open the relation's relcache entry.
*/
void
@@ -81,13 +81,13 @@ LockRelationOid(Oid relid, LOCKMODE lockmode)
res = LockAcquire(&tag, lockmode, false, false);
/*
- * Now that we have the lock, check for invalidation messages, so that
- * we will update or flush any stale relcache entry before we try to use
- * it. We can skip this in the not-uncommon case that we already had
- * the same type of lock being requested, since then no one else could
- * have modified the relcache entry in an undesirable way. (In the
- * case where our own xact modifies the rel, the relcache update happens
- * via CommandCounterIncrement, not here.)
+ * Now that we have the lock, check for invalidation messages, so that we
+ * will update or flush any stale relcache entry before we try to use it.
+ * We can skip this in the not-uncommon case that we already had the same
+ * type of lock being requested, since then no one else could have
+ * modified the relcache entry in an undesirable way. (In the case where
+ * our own xact modifies the rel, the relcache update happens via
+ * CommandCounterIncrement, not here.)
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
@@ -116,8 +116,8 @@ ConditionalLockRelationOid(Oid relid, LOCKMODE lockmode)
return false;
/*
- * Now that we have the lock, check for invalidation messages; see
- * notes in LockRelationOid.
+ * Now that we have the lock, check for invalidation messages; see notes
+ * in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
@@ -176,8 +176,8 @@ LockRelation(Relation relation, LOCKMODE lockmode)
res = LockAcquire(&tag, lockmode, false, false);
/*
- * Now that we have the lock, check for invalidation messages; see
- * notes in LockRelationOid.
+ * Now that we have the lock, check for invalidation messages; see notes
+ * in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
@@ -206,8 +206,8 @@ ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
return false;
/*
- * Now that we have the lock, check for invalidation messages; see
- * notes in LockRelationOid.
+ * Now that we have the lock, check for invalidation messages; see notes
+ * in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 6300dc2eeb..939528e754 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.173 2006/09/18 22:40:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.174 2006/10/04 00:29:57 momjian Exp $
*
* NOTES
* A lock table is a shared memory hash table. When
@@ -112,7 +112,7 @@ static const char *const lock_mode_names[] =
};
#ifndef LOCK_DEBUG
-static bool Dummy_trace = false;
+static bool Dummy_trace = false;
#endif
static const LockMethodData default_lockmethod = {
@@ -290,8 +290,8 @@ InitLocks(void)
init_table_size = max_table_size / 2;
/*
- * Allocate hash table for LOCK structs. This stores
- * per-locked-object information.
+ * Allocate hash table for LOCK structs. This stores per-locked-object
+ * information.
*/
MemSet(&info, 0, sizeof(info));
info.keysize = sizeof(LOCKTAG);
@@ -331,8 +331,8 @@ InitLocks(void)
elog(FATAL, "could not initialize proclock hash table");
/*
- * Allocate non-shared hash table for LOCALLOCK structs. This stores
- * lock counts and resource owner information.
+ * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
+ * counts and resource owner information.
*
* The non-shared table could already exist in this process (this occurs
* when the postmaster is recreating shared memory after a backend crash).
@@ -396,8 +396,8 @@ static uint32
proclock_hash(const void *key, Size keysize)
{
const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
- uint32 lockhash;
- Datum procptr;
+ uint32 lockhash;
+ Datum procptr;
Assert(keysize == sizeof(PROCLOCKTAG));
@@ -407,9 +407,9 @@ proclock_hash(const void *key, Size keysize)
/*
* To make the hash code also depend on the PGPROC, we xor the proc
* struct's address into the hash code, left-shifted so that the
- * partition-number bits don't change. Since this is only a hash,
- * we don't care if we lose high-order bits of the address; use
- * an intermediate variable to suppress cast-pointer-to-int warnings.
+ * partition-number bits don't change. Since this is only a hash, we
+ * don't care if we lose high-order bits of the address; use an
+ * intermediate variable to suppress cast-pointer-to-int warnings.
*/
procptr = PointerGetDatum(proclocktag->myProc);
lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
@@ -426,8 +426,8 @@ proclock_hash(const void *key, Size keysize)
static inline uint32
ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
{
- uint32 lockhash = hashcode;
- Datum procptr;
+ uint32 lockhash = hashcode;
+ Datum procptr;
/*
* This must match proclock_hash()!
@@ -1117,7 +1117,7 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
memcpy(new_status, old_status, len);
strcpy(new_status + len, " waiting");
set_ps_display(new_status, false);
- new_status[len] = '\0'; /* truncate off " waiting" */
+ new_status[len] = '\0'; /* truncate off " waiting" */
}
pgstat_report_waiting(true);
@@ -1549,12 +1549,12 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
LockTagHashCode(&lock->tag),
wakeupNeeded);
- next_item:
+ next_item:
proclock = nextplock;
- } /* loop over PROCLOCKs within this partition */
+ } /* loop over PROCLOCKs within this partition */
LWLockRelease(partitionLock);
- } /* loop over partitions */
+ } /* loop over partitions */
#ifdef LOCK_DEBUG
if (*(lockMethodTable->trace_flag))
@@ -1726,8 +1726,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
if (!lock)
{
/*
- * If the lock object doesn't exist, there is nothing holding a
- * lock on this lockable object.
+ * If the lock object doesn't exist, there is nothing holding a lock
+ * on this lockable object.
*/
LWLockRelease(partitionLock);
return NIL;
@@ -1747,7 +1747,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
{
if (conflictMask & proclock->holdMask)
{
- PGPROC *proc = proclock->tag.myProc;
+ PGPROC *proc = proclock->tag.myProc;
/* A backend never blocks itself */
if (proc != MyProc)
@@ -1963,7 +1963,7 @@ PostPrepare_Locks(TransactionId xid)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
- * the proclock would then be in the wrong hash chain. So, unlink
+ * the proclock would then be in the wrong hash chain. So, unlink
* and delete the old proclock; create a new one with the right
* contents; and link it into place. We do it in this order to be
* certain we won't run out of shared memory (the way dynahash.c
@@ -1987,7 +1987,7 @@ PostPrepare_Locks(TransactionId xid)
(void *) &proclocktag,
HASH_ENTER_NULL, &found);
if (!newproclock)
- ereport(PANIC, /* should not happen */
+ ereport(PANIC, /* should not happen */
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errdetail("Not enough memory for reassigning the prepared transaction's locks.")));
@@ -2017,12 +2017,12 @@ PostPrepare_Locks(TransactionId xid)
Assert((newproclock->holdMask & holdMask) == 0);
newproclock->holdMask |= holdMask;
- next_item:
+ next_item:
proclock = nextplock;
- } /* loop over PROCLOCKs within this partition */
+ } /* loop over PROCLOCKs within this partition */
LWLockRelease(partitionLock);
- } /* loop over partitions */
+ } /* loop over partitions */
END_CRIT_SECTION();
}
@@ -2084,10 +2084,11 @@ GetLockStatusData(void)
* operate one partition at a time if we want to deliver a self-consistent
* view of the state.
*
- * Since this is a read-only operation, we take shared instead of exclusive
- * lock. There's not a whole lot of point to this, because all the normal
- * operations require exclusive lock, but it doesn't hurt anything either.
- * It will at least allow two backends to do GetLockStatusData in parallel.
+ * Since this is a read-only operation, we take shared instead of
+ * exclusive lock. There's not a whole lot of point to this, because all
+ * the normal operations require exclusive lock, but it doesn't hurt
+ * anything either. It will at least allow two backends to do
+ * GetLockStatusData in parallel.
*
* Must grab LWLocks in partition-number order to avoid LWLock deadlock.
*/
@@ -2119,7 +2120,7 @@ GetLockStatusData(void)
}
/* And release locks */
- for (i = NUM_LOCK_PARTITIONS; --i >= 0; )
+ for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
LWLockRelease(FirstLockMgrLock + i);
Assert(el == data->nelements);
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 28c73a8a9b..c5b0d2af4d 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.45 2006/08/07 21:56:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.46 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,7 @@
#include "storage/spin.h"
-static int NumAddinLWLocks(void);
+static int NumAddinLWLocks(void);
static void AssignAddinLWLocks(void);
@@ -95,23 +95,23 @@ static int *ex_acquire_counts;
static int *block_counts;
#endif
-/*
+/*
* Structures and globals to allow add-ins to register for their own
* lwlocks from the preload-libraries hook.
*/
typedef struct LWLockNode
{
- LWLockId *lock;
+ LWLockId *lock;
struct LWLockNode *next;
} LWLockNode;
static LWLockNode *addin_locks = NULL;
-static int num_addin_locks = 0;
+static int num_addin_locks = 0;
/*
* RegisterAddinLWLock() --- Allow an andd-in to request a LWLock
- * from the preload-libraries hook.
+ * from the preload-libraries hook.
*/
void
RegisterAddinLWLock(LWLockId *lock)
@@ -198,8 +198,7 @@ print_lwlock_stats(int code, Datum arg)
LWLockRelease(0);
}
-
-#endif /* LWLOCK_STATS */
+#endif /* LWLOCK_STATS */
/*
@@ -306,9 +305,8 @@ CreateLWLocks(void)
LWLockCounter[0] = (int) NumFixedLWLocks;
LWLockCounter[1] = numLocks;
- /*
- * Allocate LWLocks for those add-ins that have explicitly requested
- * them.
+ /*
+ * Allocate LWLocks for those add-ins that have explicitly requested them.
*/
AssignAddinLWLocks();
}
@@ -364,8 +362,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
/* Set up local count state first time through in a given process */
if (counts_for_pid != MyProcPid)
{
- int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- int numLocks = LWLockCounter[1];
+ int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
+ int numLocks = LWLockCounter[1];
sh_acquire_counts = calloc(numLocks, sizeof(int));
ex_acquire_counts = calloc(numLocks, sizeof(int));
@@ -378,7 +376,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
ex_acquire_counts[lockid]++;
else
sh_acquire_counts[lockid]++;
-#endif /* LWLOCK_STATS */
+#endif /* LWLOCK_STATS */
/*
* We can't wait if we haven't got a PGPROC. This should only occur
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 07fc3e3d25..cf71b83eab 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.179 2006/07/30 02:07:18 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.180 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -149,8 +149,8 @@ InitProcGlobal(void)
Assert(!found);
/*
- * Create the PGPROC structures for dummy (bgwriter) processes, too.
- * These do not get linked into the freeProcs list.
+ * Create the PGPROC structures for dummy (bgwriter) processes, too. These
+ * do not get linked into the freeProcs list.
*/
DummyProcs = (PGPROC *)
ShmemInitStruct("DummyProcs", NUM_DUMMY_PROCS * sizeof(PGPROC),
@@ -183,7 +183,7 @@ InitProcGlobal(void)
MemSet(DummyProcs, 0, NUM_DUMMY_PROCS * sizeof(PGPROC));
for (i = 0; i < NUM_DUMMY_PROCS; i++)
{
- DummyProcs[i].pid = 0; /* marks dummy proc as not in use */
+ DummyProcs[i].pid = 0; /* marks dummy proc as not in use */
PGSemaphoreCreate(&(DummyProcs[i].sem));
}
@@ -268,7 +268,7 @@ InitProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -298,9 +298,9 @@ InitProcessPhase2(void)
Assert(MyProc != NULL);
/*
- * We should now know what database we're in, so advertise that. (We
- * need not do any locking here, since no other backend can yet see
- * our PGPROC.)
+ * We should now know what database we're in, so advertise that. (We need
+ * not do any locking here, since no other backend can yet see our
+ * PGPROC.)
*/
Assert(OidIsValid(MyDatabaseId));
MyProc->databaseId = MyDatabaseId;
@@ -400,7 +400,7 @@ InitDummyProcess(void)
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
@@ -489,12 +489,12 @@ LockWaitCancel(void)
/*
* We used to do PGSemaphoreReset() here to ensure that our proc's wait
- * semaphore is reset to zero. This prevented a leftover wakeup signal
- * from remaining in the semaphore if someone else had granted us the
- * lock we wanted before we were able to remove ourselves from the
- * wait-list. However, now that ProcSleep loops until waitStatus changes,
- * a leftover wakeup signal isn't harmful, and it seems not worth
- * expending cycles to get rid of a signal that most likely isn't there.
+ * semaphore is reset to zero. This prevented a leftover wakeup signal
+ * from remaining in the semaphore if someone else had granted us the lock
+ * we wanted before we were able to remove ourselves from the wait-list.
+ * However, now that ProcSleep loops until waitStatus changes, a leftover
+ * wakeup signal isn't harmful, and it seems not worth expending cycles to
+ * get rid of a signal that most likely isn't there.
*/
/*
@@ -810,11 +810,11 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
* PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
- * implementation. While this is normally good, there are cases where
- * a saved wakeup might be leftover from a previous operation (for
- * example, we aborted ProcWaitForSignal just before someone did
- * ProcSendSignal). So, loop to wait again if the waitStatus shows
- * we haven't been granted nor denied the lock yet.
+ * implementation. While this is normally good, there are cases where a
+ * saved wakeup might be leftover from a previous operation (for example,
+ * we aborted ProcWaitForSignal just before someone did ProcSendSignal).
+ * So, loop to wait again if the waitStatus shows we haven't been granted
+ * nor denied the lock yet.
*
* We pass interruptOK = true, which eliminates a window in which
* cancel/die interrupts would be held off undesirably. This is a promise
@@ -824,7 +824,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
* updating the locallock table, but if we lose control to an error,
* LockWaitCancel will fix that up.
*/
- do {
+ do
+ {
PGSemaphoreLock(&MyProc->sem, true);
} while (MyProc->waitStatus == STATUS_WAITING);
@@ -835,9 +836,9 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
elog(FATAL, "could not disable timer for process wakeup");
/*
- * Re-acquire the lock table's partition lock. We have to do this to
- * hold off cancel/die interrupts before we can mess with lockAwaited
- * (else we might have a missed or duplicated locallock update).
+ * Re-acquire the lock table's partition lock. We have to do this to hold
+ * off cancel/die interrupts before we can mess with lockAwaited (else we
+ * might have a missed or duplicated locallock update).
*/
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
@@ -977,8 +978,8 @@ CheckDeadLock(void)
int i;
/*
- * Acquire exclusive lock on the entire shared lock data structures.
- * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
+ * Acquire exclusive lock on the entire shared lock data structures. Must
+ * grab LWLocks in partition-number order to avoid LWLock deadlock.
*
* Note that the deadlock check interrupt had better not be enabled
* anywhere that this process itself holds lock partition locks, else this
@@ -1018,7 +1019,7 @@ CheckDeadLock(void)
/*
* Oops. We have a deadlock.
*
- * Get this process out of wait state. (Note: we could do this more
+ * Get this process out of wait state. (Note: we could do this more
* efficiently by relying on lockAwaited, but use this coding to preserve
* the flexibility to kill some other transaction than the one detecting
* the deadlock.)
@@ -1047,12 +1048,12 @@ CheckDeadLock(void)
*/
/*
- * Release locks acquired at head of routine. Order is not critical,
- * so do it back-to-front to avoid waking another CheckDeadLock instance
+ * Release locks acquired at head of routine. Order is not critical, so
+ * do it back-to-front to avoid waking another CheckDeadLock instance
* before it can get all the locks.
*/
check_done:
- for (i = NUM_LOCK_PARTITIONS; --i >= 0; )
+ for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
LWLockRelease(FirstLockMgrLock + i);
}
@@ -1063,10 +1064,10 @@ check_done:
* This can share the semaphore normally used for waiting for locks,
* since a backend could never be waiting for a lock and a signal at
* the same time. As with locks, it's OK if the signal arrives just
- * before we actually reach the waiting state. Also as with locks,
+ * before we actually reach the waiting state. Also as with locks,
* it's necessary that the caller be robust against bogus wakeups:
* always check that the desired state has occurred, and wait again
- * if not. This copes with possible "leftover" wakeups.
+ * if not. This copes with possible "leftover" wakeups.
*/
void
ProcWaitForSignal(void)
@@ -1122,10 +1123,10 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
* interval will have elapsed and so this doesn't matter, but there
* are corner cases (involving multi-statement query strings with
* embedded COMMIT or ROLLBACK) where we might re-initialize the
- * statement timeout long after initial receipt of the message.
- * In such cases the enforcement of the statement timeout will be
- * a bit inconsistent. This annoyance is judged not worth the cost
- * of performing an additional gettimeofday() here.
+ * statement timeout long after initial receipt of the message. In
+ * such cases the enforcement of the statement timeout will be a bit
+ * inconsistent. This annoyance is judged not worth the cost of
+ * performing an additional gettimeofday() here.
*/
Assert(!deadlock_timeout_active);
fin_time = GetCurrentStatementStartTimestamp();
@@ -1253,6 +1254,7 @@ CheckStatementTimeout(void)
TimestampDifference(now, statement_fin_time,
&secs, &usecs);
+
/*
* It's possible that the difference is less than a microsecond;
* ensure we don't cancel, rather than set, the interrupt.
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index 8c4be2364f..c90545e105 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.46 2006/07/14 14:52:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.47 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,7 +119,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
/* increase delay by a random fraction between 1X and 2X */
cur_delay += (int) (cur_delay *
- ((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);
+ ((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);
/* wrap back to minimum delay when max is exceeded */
if (cur_delay > MAX_DELAY_MSEC)
cur_delay = MIN_DELAY_MSEC;
@@ -280,7 +280,6 @@ tas_dummy() /* really means: extern int tas(slock_t
asm(" .data");
}
#endif /* sun3 */
-
#endif /* not __GNUC__ */
#endif /* HAVE_SPINLOCKS */
diff --git a/src/backend/storage/page/itemptr.c b/src/backend/storage/page/itemptr.c
index 08f2273ab9..d693ded4a2 100644
--- a/src/backend/storage/page/itemptr.c
+++ b/src/backend/storage/page/itemptr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/page/itemptr.c,v 1.18 2006/08/25 04:06:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/page/itemptr.c,v 1.19 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
*/
BlockNumber b1 = BlockIdGetBlockNumber(&(arg1->ip_blkid));
BlockNumber b2 = BlockIdGetBlockNumber(&(arg2->ip_blkid));
-
+
if (b1 < b2)
return -1;
else if (b1 > b2)
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 928b64410e..ca1c34fecf 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.121 2006/07/14 05:28:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.122 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -735,8 +735,8 @@ mdsync(void)
* If in bgwriter, absorb pending requests every so often to
* prevent overflow of the fsync request queue. The hashtable
* code does not specify whether entries added by this will be
- * visited by our search, but we don't really care: it's OK if
- * we do, and OK if we don't.
+ * visited by our search, but we don't really care: it's OK if we
+ * do, and OK if we don't.
*/
if (--absorb_counter <= 0)
{
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 28aaca716b..0ceb800b36 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.100 2006/07/14 14:52:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.101 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -471,10 +471,10 @@ smgr_internal_unlink(RelFileNode rnode, int which, bool isTemp, bool isRedo)
FreeSpaceMapForgetRel(&rnode);
/*
- * Tell the stats collector to forget it immediately, too. Skip this
- * in recovery mode, since the stats collector likely isn't running
- * (and if it is, pgstat.c will get confused because we aren't a real
- * backend process).
+ * Tell the stats collector to forget it immediately, too. Skip this in
+ * recovery mode, since the stats collector likely isn't running (and if
+ * it is, pgstat.c will get confused because we aren't a real backend
+ * process).
*/
if (!InRecovery)
pgstat_drop_relation(rnode.relNode);
@@ -960,16 +960,16 @@ smgr_desc(StringInfo buf, uint8 xl_info, char *rec)
xl_smgr_create *xlrec = (xl_smgr_create *) rec;
appendStringInfo(buf, "file create: %u/%u/%u",
- xlrec->rnode.spcNode, xlrec->rnode.dbNode,
- xlrec->rnode.relNode);
+ xlrec->rnode.spcNode, xlrec->rnode.dbNode,
+ xlrec->rnode.relNode);
}
else if (info == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) rec;
appendStringInfo(buf, "file truncate: %u/%u/%u to %u blocks",
- xlrec->rnode.spcNode, xlrec->rnode.dbNode,
- xlrec->rnode.relNode, xlrec->blkno);
+ xlrec->rnode.spcNode, xlrec->rnode.dbNode,
+ xlrec->rnode.relNode, xlrec->blkno);
}
else
appendStringInfo(buf, "UNKNOWN");
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index 26931e0a6d..8a9d51a551 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.92 2006/09/08 15:55:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.93 2006/10/04 00:29:58 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
@@ -303,8 +303,8 @@ HandleFunctionRequest(StringInfo msgBuf)
"commands ignored until end of transaction block")));
/*
- * Now that we know we are in a valid transaction, set snapshot in
- * case needed by function itself or one of the datatype I/O routines.
+ * Now that we know we are in a valid transaction, set snapshot in case
+ * needed by function itself or one of the datatype I/O routines.
*/
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
@@ -312,7 +312,7 @@ HandleFunctionRequest(StringInfo msgBuf)
* Begin parsing the buffer contents.
*/
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
- (void) pq_getmsgstring(msgBuf); /* dummy string */
+ (void) pq_getmsgstring(msgBuf); /* dummy string */
fid = (Oid) pq_getmsgint(msgBuf, 4); /* function oid */
@@ -474,8 +474,8 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
@@ -607,8 +607,8 @@ parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info * fip,
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 6e83a23293..835b5ab36e 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.509 2006/09/13 21:59:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.510 2006/10/04 00:29:58 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
@@ -1340,8 +1340,8 @@ exec_bind_message(StringInfo input_message)
numPFormats = pq_getmsgint(input_message, 2);
if (numPFormats > 0)
{
- int i;
-
+ int i;
+
pformats = (int16 *) palloc(numPFormats * sizeof(int16));
for (i = 0; i < numPFormats; i++)
pformats[i] = pq_getmsgint(input_message, 2);
@@ -1400,7 +1400,7 @@ exec_bind_message(StringInfo input_message)
/* sizeof(ParamListInfoData) includes the first array element */
params = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (numParams - 1) * sizeof(ParamExternData));
+ (numParams - 1) *sizeof(ParamExternData));
params->numParams = numParams;
paramno = 0;
@@ -1473,7 +1473,7 @@ exec_bind_message(StringInfo input_message)
if (pstring && pstring != pbuf.data)
pfree(pstring);
}
- else if (pformat == 1) /* binary mode */
+ else if (pformat == 1) /* binary mode */
{
Oid typreceive;
Oid typioparam;
@@ -1513,6 +1513,7 @@ exec_bind_message(StringInfo input_message)
params->params[paramno].value = pval;
params->params[paramno].isnull = isNull;
+
/*
* We mark the params as CONST. This has no effect if we already
* did planning, but if we didn't, it licenses the planner to
@@ -1534,7 +1535,7 @@ exec_bind_message(StringInfo input_message)
numRFormats = pq_getmsgint(input_message, 2);
if (numRFormats > 0)
{
- int i;
+ int i;
rformats = (int16 *) palloc(numRFormats * sizeof(int16));
for (i = 0; i < numRFormats; i++)
@@ -1548,14 +1549,14 @@ exec_bind_message(StringInfo input_message)
* to make use of the concrete parameter values we now have. Because we
* use PARAM_FLAG_CONST, the plan is good only for this set of param
* values, and so we generate the plan in the portal's own memory context
- * where it will be thrown away after use. As in exec_parse_message,
- * we make no attempt to recover planner temporary memory until the end
- * of the operation.
+ * where it will be thrown away after use. As in exec_parse_message, we
+ * make no attempt to recover planner temporary memory until the end of
+ * the operation.
*
- * XXX because the planner has a bad habit of scribbling on its input,
- * we have to make a copy of the parse trees, just in case someone binds
- * and executes an unnamed statement multiple times; this also means that
- * the portal's queryContext becomes its own heap context rather than the
+ * XXX because the planner has a bad habit of scribbling on its input, we
+ * have to make a copy of the parse trees, just in case someone binds and
+ * executes an unnamed statement multiple times; this also means that the
+ * portal's queryContext becomes its own heap context rather than the
* prepared statement's context. FIXME someday
*/
if (pstmt->plan_list == NIL && pstmt->query_list != NIL)
@@ -1694,9 +1695,9 @@ exec_execute_message(const char *portal_name, long max_rows)
execute_is_fetch = !portal->atStart;
/*
- * We must copy the sourceText and prepStmtName into MessageContext
- * in case the portal is destroyed during finish_xact_command.
- * Can avoid the copy if it's not an xact command, though.
+ * We must copy the sourceText and prepStmtName into MessageContext in
+ * case the portal is destroyed during finish_xact_command. Can avoid the
+ * copy if it's not an xact command, though.
*/
if (is_xact_command)
{
@@ -1705,6 +1706,7 @@ exec_execute_message(const char *portal_name, long max_rows)
prepStmtName = pstrdup(portal->prepStmtName);
else
prepStmtName = "<unnamed>";
+
/*
* An xact command shouldn't have any parameters, which is a good
* thing because they wouldn't be around after finish_xact_command.
@@ -1925,10 +1927,9 @@ check_log_duration(char *msec_str, bool was_logged)
msecs = usecs / 1000;
/*
- * This odd-looking test for log_min_duration_statement being
- * exceeded is designed to avoid integer overflow with very
- * long durations: don't compute secs * 1000 until we've
- * verified it will fit in int.
+ * This odd-looking test for log_min_duration_statement being exceeded
+ * is designed to avoid integer overflow with very long durations:
+ * don't compute secs * 1000 until we've verified it will fit in int.
*/
exceeded = (log_min_duration_statement == 0 ||
(log_min_duration_statement > 0 &&
@@ -2026,7 +2027,7 @@ errdetail_params(ParamListInfo params)
appendStringInfoCharMacro(&param_str, '\'');
for (p = pstring; *p; p++)
{
- if (*p == '\'') /* double single quotes */
+ if (*p == '\'') /* double single quotes */
appendStringInfoCharMacro(&param_str, *p);
appendStringInfoCharMacro(&param_str, *p);
}
@@ -2082,7 +2083,7 @@ exec_describe_statement_message(const char *stmt_name)
/*
* If we are in aborted transaction state, we can't safely create a result
- * tupledesc, because that needs catalog accesses. Hence, refuse to
+ * tupledesc, because that needs catalog accesses. Hence, refuse to
* Describe statements that return data. (We shouldn't just refuse all
* Describes, since that might break the ability of some clients to issue
* COMMIT or ROLLBACK commands, if they use code that blindly Describes
@@ -2154,7 +2155,7 @@ exec_describe_portal_message(const char *portal_name)
/*
* If we are in aborted transaction state, we can't run
* SendRowDescriptionMessage(), because that needs catalog accesses.
- * Hence, refuse to Describe portals that return data. (We shouldn't just
+ * Hence, refuse to Describe portals that return data. (We shouldn't just
* refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
* blindly Describes whatever it does.)
@@ -2581,29 +2582,29 @@ set_debug_options(int debug_flag, GucContext context, GucSource source)
bool
set_plan_disabling_options(const char *arg, GucContext context, GucSource source)
{
- char *tmp = NULL;
+ char *tmp = NULL;
switch (arg[0])
{
- case 's': /* seqscan */
+ case 's': /* seqscan */
tmp = "enable_seqscan";
break;
- case 'i': /* indexscan */
+ case 'i': /* indexscan */
tmp = "enable_indexscan";
break;
- case 'b': /* bitmapscan */
+ case 'b': /* bitmapscan */
tmp = "enable_bitmapscan";
break;
- case 't': /* tidscan */
+ case 't': /* tidscan */
tmp = "enable_tidscan";
break;
- case 'n': /* nestloop */
+ case 'n': /* nestloop */
tmp = "enable_nestloop";
break;
- case 'm': /* mergejoin */
+ case 'm': /* mergejoin */
tmp = "enable_mergejoin";
break;
- case 'h': /* hashjoin */
+ case 'h': /* hashjoin */
tmp = "enable_hashjoin";
break;
}
@@ -2623,13 +2624,13 @@ get_stats_option_name(const char *arg)
switch (arg[0])
{
case 'p':
- if (optarg[1] == 'a') /* "parser" */
+ if (optarg[1] == 'a') /* "parser" */
return "log_parser_stats";
- else if (optarg[1] == 'l') /* "planner" */
+ else if (optarg[1] == 'l') /* "planner" */
return "log_planner_stats";
break;
- case 'e': /* "executor" */
+ case 'e': /* "executor" */
return "log_executor_stats";
break;
}
@@ -2834,6 +2835,7 @@ PostgresMain(int argc, char *argv[], const char *username)
break;
case 's':
+
/*
* Since log options are SUSET, we need to postpone unless
* still in secure context
@@ -2850,19 +2852,20 @@ PostgresMain(int argc, char *argv[], const char *username)
break;
case 't':
- {
- const char *tmp = get_stats_option_name(optarg);
- if (tmp)
{
- if (ctx == PGC_BACKEND)
- PendingConfigOption(tmp, "true");
+ const char *tmp = get_stats_option_name(optarg);
+
+ if (tmp)
+ {
+ if (ctx == PGC_BACKEND)
+ PendingConfigOption(tmp, "true");
+ else
+ SetConfigOption(tmp, "true", ctx, gucsource);
+ }
else
- SetConfigOption(tmp, "true", ctx, gucsource);
+ errs++;
+ break;
}
- else
- errs++;
- break;
- }
case 'v':
if (secure)
@@ -2875,6 +2878,7 @@ PostgresMain(int argc, char *argv[], const char *username)
case 'y':
+
/*
* y - special flag passed if backend was forked by a
* postmaster.
@@ -3090,10 +3094,10 @@ PostgresMain(int argc, char *argv[], const char *username)
}
/*
- * Create a per-backend PGPROC struct in shared memory, except in
- * the EXEC_BACKEND case where this was done in SubPostmasterMain.
- * We must do this before we can use LWLocks (and in the EXEC_BACKEND
- * case we already had to do some stuff with LWLocks).
+ * Create a per-backend PGPROC struct in shared memory, except in the
+ * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
+ * this before we can use LWLocks (and in the EXEC_BACKEND case we already
+ * had to do some stuff with LWLocks).
*/
#ifdef EXEC_BACKEND
if (!IsUnderPostmaster)
@@ -3154,8 +3158,8 @@ PostgresMain(int argc, char *argv[], const char *username)
on_proc_exit(log_disconnections, 0);
/*
- * process any libraries that should be preloaded at backend start
- * (this likewise can't be done until GUC settings are complete)
+ * process any libraries that should be preloaded at backend start (this
+ * likewise can't be done until GUC settings are complete)
*/
process_local_preload_libraries();
@@ -3290,7 +3294,7 @@ PostgresMain(int argc, char *argv[], const char *username)
PG_SETMASK(&UnBlockSig);
if (!ignore_till_sync)
- send_ready_for_query = true; /* initially, or after error */
+ send_ready_for_query = true; /* initially, or after error */
/*
* Non-error queries loop here.
@@ -3465,8 +3469,8 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* Note: we may at this point be inside an aborted
- * transaction. We can't throw error for that until
- * we've finished reading the function-call message, so
+ * transaction. We can't throw error for that until we've
+ * finished reading the function-call message, so
* HandleFunctionRequest() must check for it after doing so.
* Be careful not to do anything that assumes we're inside a
* valid transaction here.
@@ -3778,5 +3782,5 @@ log_disconnections(int code, Datum arg)
"user=%s database=%s host=%s%s%s",
hours, minutes, seconds, msecs,
port->user_name, port->database_name, port->remote_host,
- port->remote_port[0] ? " port=" : "", port->remote_port)));
+ port->remote_port[0] ? " port=" : "", port->remote_port)));
}
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 8738d91abd..62310f7c0c 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.110 2006/09/03 03:19:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.111 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -218,8 +218,8 @@ ChoosePortalStrategy(List *parseTrees)
/*
* PORTAL_ONE_SELECT and PORTAL_UTIL_SELECT need only consider the
- * single-Query-struct case, since there are no rewrite rules that
- * can add auxiliary queries to a SELECT or a utility command.
+ * single-Query-struct case, since there are no rewrite rules that can add
+ * auxiliary queries to a SELECT or a utility command.
*/
if (list_length(parseTrees) == 1)
{
@@ -244,8 +244,8 @@ ChoosePortalStrategy(List *parseTrees)
/*
* PORTAL_ONE_RETURNING has to allow auxiliary queries added by rewrite.
- * Choose PORTAL_ONE_RETURNING if there is exactly one canSetTag query
- * and it has a RETURNING list.
+ * Choose PORTAL_ONE_RETURNING if there is exactly one canSetTag query and
+ * it has a RETURNING list.
*/
nSetTag = 0;
foreach(lc, parseTrees)
@@ -256,9 +256,9 @@ ChoosePortalStrategy(List *parseTrees)
if (query->canSetTag)
{
if (++nSetTag > 1)
- return PORTAL_MULTI_QUERY; /* no need to look further */
+ return PORTAL_MULTI_QUERY; /* no need to look further */
if (query->returningList == NIL)
- return PORTAL_MULTI_QUERY; /* no need to look further */
+ return PORTAL_MULTI_QUERY; /* no need to look further */
}
}
if (nSetTag == 1)
@@ -418,7 +418,7 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
if (portal->cursorOptions & CURSOR_OPT_SCROLL)
eflags = EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD;
else
- eflags = 0; /* default run-to-completion flags */
+ eflags = 0; /* default run-to-completion flags */
/*
* Call ExecutorStart to prepare the plan for execution
@@ -447,8 +447,8 @@ PortalStart(Portal portal, ParamListInfo params, Snapshot snapshot)
case PORTAL_ONE_RETURNING:
/*
- * We don't start the executor until we are told to run
- * the portal. We do need to set up the result tupdesc.
+ * We don't start the executor until we are told to run the
+ * portal. We do need to set up the result tupdesc.
*/
portal->tupDesc =
ExecCleanTypeFromTL((PortalGetPrimaryQuery(portal))->returningList, false);
@@ -672,8 +672,8 @@ PortalRun(Portal portal, long count,
case PORTAL_UTIL_SELECT:
/*
- * If we have not yet run the command, do so,
- * storing its results in the portal's tuplestore.
+ * If we have not yet run the command, do so, storing its
+ * results in the portal's tuplestore.
*/
if (!portal->holdStore)
FillPortalStore(portal);
@@ -922,10 +922,11 @@ FillPortalStore(Portal portal)
switch (portal->strategy)
{
case PORTAL_ONE_RETURNING:
+
/*
- * Run the portal to completion just as for the default MULTI_QUERY
- * case, but send the primary query's output to the tuplestore.
- * Auxiliary query outputs are discarded.
+ * Run the portal to completion just as for the default
+ * MULTI_QUERY case, but send the primary query's output to the
+ * tuplestore. Auxiliary query outputs are discarded.
*/
PortalRunMulti(portal, treceiver, None_Receiver, completionTag);
/* Override default completion tag with actual command result */
@@ -1253,8 +1254,8 @@ PortalRunFetch(Portal portal,
case PORTAL_UTIL_SELECT:
/*
- * If we have not yet run the command, do so,
- * storing its results in the portal's tuplestore.
+ * If we have not yet run the command, do so, storing its
+ * results in the portal's tuplestore.
*/
if (!portal->holdStore)
FillPortalStore(portal);
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 868da2d5ff..448b0fba02 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.268 2006/09/07 22:52:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.269 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -632,7 +632,7 @@ ProcessUtility(Node *parsetree,
case T_CopyStmt:
{
- uint64 processed = DoCopy((CopyStmt *) parsetree);
+ uint64 processed = DoCopy((CopyStmt *) parsetree);
if (completionTag)
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
@@ -798,10 +798,10 @@ ProcessUtility(Node *parsetree,
stmt->unique,
stmt->primary,
stmt->isconstraint,
- false, /* is_alter_table */
- true, /* check_rights */
- false, /* skip_build */
- false, /* quiet */
+ false, /* is_alter_table */
+ true, /* check_rights */
+ false, /* skip_build */
+ false, /* quiet */
stmt->concurrent); /* concurrent */
}
break;
@@ -1883,7 +1883,7 @@ GetCommandLogLevel(Node *parsetree)
case T_SelectStmt:
if (((SelectStmt *) parsetree)->into)
- lev = LOGSTMT_DDL; /* CREATE AS, SELECT INTO */
+ lev = LOGSTMT_DDL; /* CREATE AS, SELECT INTO */
else
lev = LOGSTMT_ALL;
break;
@@ -2053,7 +2053,7 @@ GetCommandLogLevel(Node *parsetree)
case T_ExplainStmt:
{
- ExplainStmt *stmt = (ExplainStmt *) parsetree;
+ ExplainStmt *stmt = (ExplainStmt *) parsetree;
/* Look through an EXPLAIN ANALYZE to the contained stmt */
if (stmt->analyze)
@@ -2128,7 +2128,7 @@ GetCommandLogLevel(Node *parsetree)
break;
case T_ReindexStmt:
- lev = LOGSTMT_ALL; /* should this be DDL? */
+ lev = LOGSTMT_ALL; /* should this be DDL? */
break;
case T_CreateConversionStmt:
@@ -2153,7 +2153,7 @@ GetCommandLogLevel(Node *parsetree)
case T_PrepareStmt:
{
- PrepareStmt *stmt = (PrepareStmt *) parsetree;
+ PrepareStmt *stmt = (PrepareStmt *) parsetree;
/* Look through a PREPARE to the contained stmt */
return GetCommandLogLevel((Node *) stmt->query);
@@ -2162,9 +2162,9 @@ GetCommandLogLevel(Node *parsetree)
case T_ExecuteStmt:
{
- ExecuteStmt *stmt = (ExecuteStmt *) parsetree;
+ ExecuteStmt *stmt = (ExecuteStmt *) parsetree;
PreparedStatement *pstmt;
- ListCell *l;
+ ListCell *l;
/* Look through an EXECUTE to the referenced stmt(s) */
lev = LOGSTMT_ALL;
@@ -2173,7 +2173,7 @@ GetCommandLogLevel(Node *parsetree)
{
foreach(l, pstmt->query_list)
{
- Query *query = (Query *) lfirst(l);
+ Query *query = (Query *) lfirst(l);
LogStmtLevel stmt_lev;
stmt_lev = GetQueryLogLevel(query);
@@ -2188,10 +2188,11 @@ GetCommandLogLevel(Node *parsetree)
break;
case T_Query:
+
/*
* In complicated situations (eg, EXPLAIN ANALYZE in an extended
- * Query protocol), we might find an already-analyzed query
- * within a utility statement. Cope.
+ * Query protocol), we might find an already-analyzed query within
+ * a utility statement. Cope.
*/
lev = GetQueryLogLevel((Query *) parsetree);
break;
@@ -2224,7 +2225,7 @@ GetQueryLogLevel(Query *parsetree)
{
case CMD_SELECT:
if (parsetree->into != NULL)
- lev = LOGSTMT_DDL; /* CREATE AS, SELECT INTO */
+ lev = LOGSTMT_DDL; /* CREATE AS, SELECT INTO */
else
lev = LOGSTMT_ALL;
break;
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index dd1f1649fa..a29d31bd6e 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.132 2006/09/29 21:22:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.133 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -2050,21 +2050,21 @@ array_set(ArrayType *array,
dim[0] += addedbefore;
lb[0] = indx[0];
if (addedbefore > 1)
- newhasnulls = true; /* will insert nulls */
+ newhasnulls = true; /* will insert nulls */
}
if (indx[0] >= (dim[0] + lb[0]))
{
addedafter = indx[0] - (dim[0] + lb[0]) + 1;
dim[0] += addedafter;
if (addedafter > 1)
- newhasnulls = true; /* will insert nulls */
+ newhasnulls = true; /* will insert nulls */
}
}
else
{
/*
- * XXX currently we do not support extending multi-dimensional
- * arrays during assignment
+ * XXX currently we do not support extending multi-dimensional arrays
+ * during assignment
*/
for (i = 0; i < ndim; i++)
{
@@ -2338,7 +2338,7 @@ array_set_slice(ArrayType *array,
if (lowerIndx[0] < lb[0])
{
if (upperIndx[0] < lb[0] - 1)
- newhasnulls = true; /* will insert nulls */
+ newhasnulls = true; /* will insert nulls */
addedbefore = lb[0] - lowerIndx[0];
dim[0] += addedbefore;
lb[0] = lowerIndx[0];
@@ -2346,7 +2346,7 @@ array_set_slice(ArrayType *array,
if (upperIndx[0] >= (dim[0] + lb[0]))
{
if (lowerIndx[0] > (dim[0] + lb[0]))
- newhasnulls = true; /* will insert nulls */
+ newhasnulls = true; /* will insert nulls */
addedafter = upperIndx[0] - (dim[0] + lb[0]) + 1;
dim[0] += addedafter;
}
@@ -2354,15 +2354,15 @@ array_set_slice(ArrayType *array,
else
{
/*
- * XXX currently we do not support extending multi-dimensional
- * arrays during assignment
+ * XXX currently we do not support extending multi-dimensional arrays
+ * during assignment
*/
for (i = 0; i < nSubscripts; i++)
{
if (lowerIndx[i] > upperIndx[i])
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("upper bound cannot be less than lower bound")));
+ errmsg("upper bound cannot be less than lower bound")));
if (lowerIndx[i] < lb[i] ||
upperIndx[i] >= (dim[i] + lb[i]))
ereport(ERROR,
@@ -2377,7 +2377,7 @@ array_set_slice(ArrayType *array,
if (lowerIndx[i] > upperIndx[i])
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("upper bound cannot be less than lower bound")));
+ errmsg("upper bound cannot be less than lower bound")));
}
}
@@ -3417,8 +3417,8 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, bool matchall,
/*
* We arrange to look up the equality function only once per series of
* calls, assuming the element type doesn't change underneath us. The
- * typcache is used so that we have no memory leakage when being used
- * as an index support function.
+ * typcache is used so that we have no memory leakage when being used as
+ * an index support function.
*/
typentry = (TypeCacheEntry *) *fn_extra;
if (typentry == NULL ||
@@ -3429,8 +3429,8 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, bool matchall,
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
*fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
@@ -3486,9 +3486,9 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, bool matchall,
}
/*
- * We assume that the comparison operator is strict, so a NULL
- * can't match anything. XXX this diverges from the "NULL=NULL"
- * behavior of array_eq, should we act like that?
+ * We assume that the comparison operator is strict, so a NULL can't
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * array_eq, should we act like that?
*/
if (isnull1)
{
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 81f7e63650..fac642ddc9 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.171 2006/09/16 20:14:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.172 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,7 +97,7 @@ char *days[] = {"Sunday", "Monday", "Tuesday", "Wednesday",
*/
static datetkn *timezonetktbl = NULL;
-static int sztimezonetktbl = 0;
+static int sztimezonetktbl = 0;
static const datetkn datetktbl[] = {
/* text, token, lexval */
@@ -176,7 +176,7 @@ static const datetkn datetktbl[] = {
{YESTERDAY, RESERV, DTK_YESTERDAY}, /* yesterday midnight */
};
-static int szdatetktbl = sizeof datetktbl / sizeof datetktbl[0];
+static int szdatetktbl = sizeof datetktbl / sizeof datetktbl[0];
static datetkn deltatktbl[] = {
/* text, token, lexval */
@@ -246,7 +246,7 @@ static datetkn deltatktbl[] = {
{"yrs", UNITS, DTK_YEAR}, /* "years" relative */
};
-static int szdeltatktbl = sizeof deltatktbl / sizeof deltatktbl[0];
+static int szdeltatktbl = sizeof deltatktbl / sizeof deltatktbl[0];
static const datetkn *datecache[MAXDATEFIELDS] = {NULL};
@@ -562,12 +562,19 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
if (*cp == '/')
{
ftype[nf] = DTK_TZ;
- /* set the first character of the region to upper case
- * again*/
+
+ /*
+ * set the first character of the region to upper case
+ * again
+ */
field[nf][0] = pg_toupper((unsigned char) field[nf][0]);
- /* we have seen "Region/" of a POSIX timezone, continue to
- * read the City part */
- do {
+
+ /*
+ * we have seen "Region/" of a POSIX timezone, continue to
+ * read the City part
+ */
+ do
+ {
APPEND_CHAR(bufp, bufend, *cp++);
/* there is for example America/New_York */
} while (isalpha((unsigned char) *cp) || *cp == '_');
@@ -1303,13 +1310,14 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (zicTzFnum != -1)
{
- Datum tsTz;
- Timestamp timestamp;
+ Datum tsTz;
+ Timestamp timestamp;
+
tm2timestamp(tm, *fsec, NULL, &timestamp);
tsTz = DirectFunctionCall2(timestamp_zone,
- DirectFunctionCall1(textin,
- CStringGetDatum(field[zicTzFnum])),
- TimestampGetDatum(timestamp));
+ DirectFunctionCall1(textin,
+ CStringGetDatum(field[zicTzFnum])),
+ TimestampGetDatum(timestamp));
timestamp2tm(DatumGetTimestampTz(tsTz), tzp, tm, fsec, NULL, NULL);
fmask &= ~DTK_M(TZ);
}
@@ -2920,7 +2928,8 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm * tm,
tm->tm_mday += val * 7;
if (fval != 0)
{
- int extra_days;
+ int extra_days;
+
fval *= 7;
extra_days = (int32) fval;
tm->tm_mday += extra_days;
@@ -2928,6 +2937,7 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm * tm,
if (fval != 0)
{
int sec;
+
fval *= SECS_PER_DAY;
sec = fval;
tm->tm_sec += sec;
@@ -2945,7 +2955,8 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm * tm,
tm->tm_mon += val;
if (fval != 0)
{
- int day;
+ int day;
+
fval *= DAYS_PER_MONTH;
day = fval;
tm->tm_mday += day;
@@ -2953,6 +2964,7 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm * tm,
if (fval != 0)
{
int sec;
+
fval *= SECS_PER_DAY;
sec = fval;
tm->tm_sec += sec;
@@ -3808,7 +3820,7 @@ CheckDateTokenTables(void)
void
InstallTimeZoneAbbrevs(tzEntry *abbrevs, int n)
{
- datetkn *newtbl;
+ datetkn *newtbl;
int i;
/*
@@ -3844,29 +3856,28 @@ InstallTimeZoneAbbrevs(tzEntry *abbrevs, int n)
Datum
pg_timezone_abbrevs(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- int *pindex;
- Datum result;
- HeapTuple tuple;
- Datum values[3];
- bool nulls[3];
- char buffer[TOKMAXLEN + 1];
- unsigned char *p;
- struct pg_tm tm;
- Interval *resInterval;
+ FuncCallContext *funcctx;
+ int *pindex;
+ Datum result;
+ HeapTuple tuple;
+ Datum values[3];
+ bool nulls[3];
+ char buffer[TOKMAXLEN + 1];
+ unsigned char *p;
+ struct pg_tm tm;
+ Interval *resInterval;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- TupleDesc tupdesc;
- MemoryContext oldcontext;
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -3901,8 +3912,8 @@ pg_timezone_abbrevs(PG_FUNCTION_ARGS)
MemSet(nulls, 0, sizeof(nulls));
/*
- * Convert name to text, using upcasing conversion that is the inverse
- * of what ParseDateTime() uses.
+ * Convert name to text, using upcasing conversion that is the inverse of
+ * what ParseDateTime() uses.
*/
strncpy(buffer, timezonetktbl[*pindex].token, TOKMAXLEN);
buffer[TOKMAXLEN] = '\0'; /* may not be null-terminated */
@@ -3936,32 +3947,31 @@ pg_timezone_abbrevs(PG_FUNCTION_ARGS)
Datum
pg_timezone_names(PG_FUNCTION_ARGS)
{
- MemoryContext oldcontext;
- FuncCallContext *funcctx;
- pg_tzenum *tzenum;
- pg_tz *tz;
- Datum result;
- HeapTuple tuple;
- Datum values[4];
- bool nulls[4];
+ MemoryContext oldcontext;
+ FuncCallContext *funcctx;
+ pg_tzenum *tzenum;
+ pg_tz *tz;
+ Datum result;
+ HeapTuple tuple;
+ Datum values[4];
+ bool nulls[4];
int tzoff;
- struct pg_tm tm;
+ struct pg_tm tm;
fsec_t fsec;
char *tzn;
- Interval *resInterval;
- struct pg_tm itm;
+ Interval *resInterval;
+ struct pg_tm itm;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- TupleDesc tupdesc;
+ TupleDesc tupdesc;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -4021,7 +4031,7 @@ pg_timezone_names(PG_FUNCTION_ARGS)
MemSet(nulls, 0, sizeof(nulls));
values[0] = DirectFunctionCall1(textin,
- CStringGetDatum(pg_get_timezone_name(tz)));
+ CStringGetDatum(pg_get_timezone_name(tz)));
values[1] = DirectFunctionCall1(textin,
CStringGetDatum(tzn ? tzn : ""));
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index 2126c6b87b..590c561589 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -25,7 +25,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/domains.c,v 1.3 2006/08/04 21:33:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/domains.c,v 1.4 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -146,8 +146,8 @@ domain_check_input(Datum value, bool isnull, DomainIOData *my_extra)
/*
* Set up value to be returned by CoerceToDomainValue
* nodes. Unlike ExecEvalCoerceToDomain, this econtext
- * couldn't be shared with anything else, so no need
- * to save and restore fields.
+ * couldn't be shared with anything else, so no need to
+ * save and restore fields.
*/
econtext->domainValue_datum = value;
econtext->domainValue_isNull = isnull;
@@ -174,8 +174,8 @@ domain_check_input(Datum value, bool isnull, DomainIOData *my_extra)
/*
* Before exiting, call any shutdown callbacks and reset econtext's
- * per-tuple memory. This avoids leaking non-memory resources,
- * if anything in the expression(s) has any.
+ * per-tuple memory. This avoids leaking non-memory resources, if
+ * anything in the expression(s) has any.
*/
if (econtext)
ReScanExprContext(econtext);
@@ -194,9 +194,9 @@ domain_in(PG_FUNCTION_ARGS)
Datum value;
/*
- * Since domain_in is not strict, we have to check for null inputs.
- * The typioparam argument should never be null in normal system usage,
- * but it could be null in a manual invocation --- if so, just return null.
+ * Since domain_in is not strict, we have to check for null inputs. The
+ * typioparam argument should never be null in normal system usage, but it
+ * could be null in a manual invocation --- if so, just return null.
*/
if (PG_ARGISNULL(0))
string = NULL;
@@ -207,8 +207,8 @@ domain_in(PG_FUNCTION_ARGS)
domainType = PG_GETARG_OID(1);
/*
- * We arrange to look up the needed info just once per series of
- * calls, assuming the domain type doesn't change underneath us.
+ * We arrange to look up the needed info just once per series of calls,
+ * assuming the domain type doesn't change underneath us.
*/
my_extra = (DomainIOData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
@@ -254,9 +254,9 @@ domain_recv(PG_FUNCTION_ARGS)
Datum value;
/*
- * Since domain_recv is not strict, we have to check for null inputs.
- * The typioparam argument should never be null in normal system usage,
- * but it could be null in a manual invocation --- if so, just return null.
+ * Since domain_recv is not strict, we have to check for null inputs. The
+ * typioparam argument should never be null in normal system usage, but it
+ * could be null in a manual invocation --- if so, just return null.
*/
if (PG_ARGISNULL(0))
buf = NULL;
@@ -267,8 +267,8 @@ domain_recv(PG_FUNCTION_ARGS)
domainType = PG_GETARG_OID(1);
/*
- * We arrange to look up the needed info just once per series of
- * calls, assuming the domain type doesn't change underneath us.
+ * We arrange to look up the needed info just once per series of calls,
+ * assuming the domain type doesn't change underneath us.
*/
my_extra = (DomainIOData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 1f8d081c8b..c4fbe89455 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.128 2006/07/28 18:33:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.129 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,11 +78,12 @@
#define HAVE_FINITE 1
#endif
-/* Visual C++ etc lacks NAN, and won't accept 0.0/0.0. NAN definition from
+/* Visual C++ etc lacks NAN, and won't accept 0.0/0.0. NAN definition from
* http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrfNotNumberNANItems.asp
*/
#if defined(WIN32) && !defined(NAN)
static const uint32 nan[2] = {0xffffffff, 0x7fffffff};
+
#define NAN (*(const double *) nan)
#endif
@@ -2137,7 +2138,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS)
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
@@ -2150,7 +2151,12 @@ float8_regr_accum(PG_FUNCTION_ARGS)
float8 newvalY = PG_GETARG_FLOAT8(1);
float8 newvalX = PG_GETARG_FLOAT8(2);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumY2, sumXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumY2,
+ sumXY;
transvalues = check_float8_array(transarray, "float8_regr_accum", 6);
N = transvalues[0];
@@ -2265,7 +2271,11 @@ float8_regr_sxy(PG_FUNCTION_ARGS)
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumY, sumXY, numerator;
+ float8 N,
+ sumX,
+ sumY,
+ sumXY,
+ numerator;
transvalues = check_float8_array(transarray, "float8_regr_sxy", 6);
N = transvalues[0];
@@ -2327,7 +2337,11 @@ float8_covar_pop(PG_FUNCTION_ARGS)
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumY, sumXY, numerator;
+ float8 N,
+ sumX,
+ sumY,
+ sumXY,
+ numerator;
transvalues = check_float8_array(transarray, "float8_covar_pop", 6);
N = transvalues[0];
@@ -2349,7 +2363,11 @@ float8_covar_samp(PG_FUNCTION_ARGS)
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumY, sumXY, numerator;
+ float8 N,
+ sumX,
+ sumY,
+ sumXY,
+ numerator;
transvalues = check_float8_array(transarray, "float8_covar_samp", 6);
N = transvalues[0];
@@ -2371,8 +2389,15 @@ float8_corr(PG_FUNCTION_ARGS)
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumY2, sumXY, numeratorX,
- numeratorY, numeratorXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumY2,
+ sumXY,
+ numeratorX,
+ numeratorY,
+ numeratorXY;
transvalues = check_float8_array(transarray, "float8_corr", 6);
N = transvalues[0];
@@ -2391,7 +2416,7 @@ float8_corr(PG_FUNCTION_ARGS)
numeratorXY = N * sumXY - sumX * sumY;
if (numeratorX <= 0 || numeratorY <= 0)
PG_RETURN_NULL();
-
+
PG_RETURN_FLOAT8(sqrt((numeratorXY * numeratorXY) /
(numeratorX * numeratorY)));
}
@@ -2401,8 +2426,15 @@ float8_regr_r2(PG_FUNCTION_ARGS)
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumY2, sumXY, numeratorX,
- numeratorY, numeratorXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumY2,
+ sumXY,
+ numeratorX,
+ numeratorY,
+ numeratorXY;
transvalues = check_float8_array(transarray, "float8_regr_r2", 6);
N = transvalues[0];
@@ -2434,8 +2466,13 @@ float8_regr_slope(PG_FUNCTION_ARGS)
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumXY, numeratorX,
- numeratorXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumXY,
+ numeratorX,
+ numeratorXY;
transvalues = check_float8_array(transarray, "float8_regr_slope", 6);
N = transvalues[0];
@@ -2452,7 +2489,7 @@ float8_regr_slope(PG_FUNCTION_ARGS)
numeratorXY = N * sumXY - sumX * sumY;
if (numeratorX <= 0)
PG_RETURN_NULL();
-
+
PG_RETURN_FLOAT8(numeratorXY / numeratorX);
}
@@ -2461,8 +2498,13 @@ float8_regr_intercept(PG_FUNCTION_ARGS)
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumXY, numeratorX,
- numeratorXXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumXY,
+ numeratorX,
+ numeratorXXY;
transvalues = check_float8_array(transarray, "float8_regr_intercept", 6);
N = transvalues[0];
@@ -2479,7 +2521,7 @@ float8_regr_intercept(PG_FUNCTION_ARGS)
numeratorXXY = sumY * sumX2 - sumX * sumXY;
if (numeratorX <= 0)
PG_RETURN_NULL();
-
+
PG_RETURN_FLOAT8(numeratorXXY / numeratorX);
}
@@ -2744,13 +2786,13 @@ cbrt(double x)
double tmpres = pow(absx, (double) 1.0 / (double) 3.0);
/*
- * The result is somewhat inaccurate --- not really pow()'s fault,
- * as the exponent it's handed contains roundoff error. We can improve
- * the accuracy by doing one iteration of Newton's formula. Beware of
- * zero input however.
+ * The result is somewhat inaccurate --- not really pow()'s fault, as the
+ * exponent it's handed contains roundoff error. We can improve the
+ * accuracy by doing one iteration of Newton's formula. Beware of zero
+ * input however.
*/
if (tmpres > 0.0)
- tmpres -= (tmpres - absx/(tmpres*tmpres)) / (double) 3.0;
+ tmpres -= (tmpres - absx / (tmpres * tmpres)) / (double) 3.0;
return isneg ? -tmpres : tmpres;
}
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 53cf15319d..d97c1bf405 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.112 2006/09/10 22:54:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.113 2006/10/04 00:29:59 momjian Exp $
*
*
* Portions Copyright (c) 1999-2006, PostgreSQL Global Development Group
@@ -83,7 +83,7 @@
#include "utils/numeric.h"
#include "utils/pg_locale.h"
-#define _(x) gettext((x))
+#define _(x) gettext((x))
/* ----------
* Routines type
@@ -1708,7 +1708,7 @@ strdigits_len(char *str)
len = strspace_len(str);
p += len;
-
+
while (*p && isdigit((unsigned char) *p) && len <= DCH_MAX_ITEM_SIZ)
{
len++;
@@ -1828,7 +1828,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
{
sprintf(inout, "%0*d", S_FM(suf) ? 0 : 2,
tm->tm_hour % (HOURS_PER_DAY / 2) == 0 ? 12 :
- tm->tm_hour % (HOURS_PER_DAY / 2));
+ tm->tm_hour % (HOURS_PER_DAY / 2));
if (S_THth(suf))
str_numth(p_inout, inout, 0);
return strlen(p_inout);
@@ -2084,8 +2084,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
tmfc = (TmFromChar *) data;
/*
- * In the FROM-char there is no difference between "January" or "JANUARY" or
- * "january", all is before search convert to "first-upper". This
+ * In the FROM-char there is no difference between "January" or "JANUARY"
+ * or "january", all is before search convert to "first-upper". This
* convention is used for MONTH, MON, DAY, DY
*/
if (!is_to_char)
@@ -2863,7 +2863,7 @@ datetime_to_char_body(TmToChar *tmtc, text *fmt, bool is_interval)
static char *
localize_month_full(int index)
{
- char *m = NULL;
+ char *m = NULL;
switch (index)
{
@@ -2911,7 +2911,7 @@ localize_month_full(int index)
static char *
localize_month(int index)
{
- char *m = NULL;
+ char *m = NULL;
switch (index)
{
@@ -2959,7 +2959,7 @@ localize_month(int index)
static char *
localize_day_full(int index)
{
- char *d = NULL;
+ char *d = NULL;
switch (index)
{
@@ -2992,7 +2992,7 @@ localize_day_full(int index)
static char *
localize_day(int index)
{
- char *d = NULL;
+ char *d = NULL;
switch (index)
{
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 0a93354697..3970bda83d 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.74 2006/06/12 16:28:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.75 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -209,10 +209,10 @@ int2vectorrecv(PG_FUNCTION_ARGS)
int2vector *result;
/*
- * Normally one would call array_recv() using DirectFunctionCall3,
- * but that does not work since array_recv wants to cache some data
- * using fcinfo->flinfo->fn_extra. So we need to pass it our own
- * flinfo parameter.
+ * Normally one would call array_recv() using DirectFunctionCall3, but
+ * that does not work since array_recv wants to cache some data using
+ * fcinfo->flinfo->fn_extra. So we need to pass it our own flinfo
+ * parameter.
*/
InitFunctionCallInfoData(locfcinfo, fcinfo->flinfo, 3, NULL, NULL);
@@ -736,9 +736,10 @@ int4mul(PG_FUNCTION_ARGS)
int32 result;
#ifdef WIN32
+
/*
- * Win32 doesn't throw a catchable exception for
- * SELECT -2147483648 * (-1); -- INT_MIN
+ * Win32 doesn't throw a catchable exception for SELECT -2147483648 *
+ * (-1); -- INT_MIN
*/
if (arg2 == -1 && arg1 == INT_MIN)
ereport(ERROR,
@@ -782,9 +783,10 @@ int4div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
#ifdef WIN32
+
/*
- * Win32 doesn't throw a catchable exception for
- * SELECT -2147483648 / (-1); -- INT_MIN
+ * Win32 doesn't throw a catchable exception for SELECT -2147483648 /
+ * (-1); -- INT_MIN
*/
if (arg2 == -1 && arg1 == INT_MIN)
ereport(ERROR,
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index 00432994c5..903d415327 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.61 2006/07/28 18:33:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.62 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -696,7 +696,7 @@ int8inc(PG_FUNCTION_ARGS)
/*
* These functions are exactly like int8inc but are used for aggregates that
- * count only non-null values. Since the functions are declared strict,
+ * count only non-null values. Since the functions are declared strict,
* the null checks happen before we ever get here, and all we need do is
* increment the state value. We could actually make these pg_proc entries
* point right at int8inc, but then the opr_sanity regression test would
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index e2026218bf..4223bffb18 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.65 2006/09/04 18:32:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.66 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -66,12 +66,12 @@ wchareq(char *p1, char *p2)
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*
* Note that MBMatchText and MBMatchTextIC do exactly the same thing now.
* Is it worth refactoring to avoid duplicated code? They might become
@@ -286,12 +286,12 @@ nameiclike(PG_FUNCTION_ARGS)
else
{
/* Force inputs to lower case to achieve case insensitivity */
- text *strtext;
+ text *strtext;
strtext = DatumGetTextP(DirectFunctionCall1(name_text,
NameGetDatum(str)));
strtext = DatumGetTextP(DirectFunctionCall1(lower,
- PointerGetDatum(strtext)));
+ PointerGetDatum(strtext)));
pat = DatumGetTextP(DirectFunctionCall1(lower,
PointerGetDatum(pat)));
@@ -327,12 +327,12 @@ nameicnlike(PG_FUNCTION_ARGS)
else
{
/* Force inputs to lower case to achieve case insensitivity */
- text *strtext;
+ text *strtext;
strtext = DatumGetTextP(DirectFunctionCall1(name_text,
NameGetDatum(str)));
strtext = DatumGetTextP(DirectFunctionCall1(lower,
- PointerGetDatum(strtext)));
+ PointerGetDatum(strtext)));
pat = DatumGetTextP(DirectFunctionCall1(lower,
PointerGetDatum(pat)));
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index a9d8b8b88f..8e0a7bcf98 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -6,7 +6,7 @@
* Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.26 2006/09/22 23:20:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.27 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -258,8 +258,8 @@ pg_lock_status(PG_FUNCTION_ARGS)
else
nulls[10] = 'n';
values[11] = DirectFunctionCall1(textin,
- CStringGetDatum(GetLockmodeName(LOCK_LOCKMETHOD(*lock),
- mode)));
+ CStringGetDatum(GetLockmodeName(LOCK_LOCKMETHOD(*lock),
+ mode)));
values[12] = BoolGetDatum(granted);
tuple = heap_formtuple(funcctx->tuple_desc, values, nulls);
@@ -361,7 +361,7 @@ pg_try_advisory_lock_shared_int8(PG_FUNCTION_ARGS)
}
/*
- * pg_advisory_unlock(int8) - release exclusive lock on an int8 key
+ * pg_advisory_unlock(int8) - release exclusive lock on an int8 key
*
* Returns true if successful, false if lock was not held
*/
@@ -473,7 +473,7 @@ pg_try_advisory_lock_shared_int4(PG_FUNCTION_ARGS)
}
/*
- * pg_advisory_unlock(int4, int4) - release exclusive lock on 2 int4 keys
+ * pg_advisory_unlock(int4, int4) - release exclusive lock on 2 int4 keys
*
* Returns true if successful, false if lock was not held
*/
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 22c8af482c..014d2c8bf6 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.53 2006/07/14 14:52:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.54 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -272,16 +272,16 @@ pg_sleep(PG_FUNCTION_ARGS)
float8 endtime;
/*
- * We break the requested sleep into segments of no more than 1 second,
- * to put an upper bound on how long it will take us to respond to a
- * cancel or die interrupt. (Note that pg_usleep is interruptible by
- * signals on some platforms but not others.) Also, this method avoids
- * exposing pg_usleep's upper bound on allowed delays.
+ * We break the requested sleep into segments of no more than 1 second, to
+ * put an upper bound on how long it will take us to respond to a cancel
+ * or die interrupt. (Note that pg_usleep is interruptible by signals on
+ * some platforms but not others.) Also, this method avoids exposing
+ * pg_usleep's upper bound on allowed delays.
*
- * By computing the intended stop time initially, we avoid accumulation
- * of extra delay across multiple sleeps. This also ensures we won't
- * delay less than the specified time if pg_usleep is interrupted
- * by other signals such as SIGHUP.
+ * By computing the intended stop time initially, we avoid accumulation of
+ * extra delay across multiple sleeps. This also ensures we won't delay
+ * less than the specified time if pg_usleep is interrupted by other
+ * signals such as SIGHUP.
*/
#ifdef HAVE_INT64_TIMESTAMP
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index 2624c203f7..bd7406bc2b 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for the INET and CIDR types.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.65 2006/02/11 20:39:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.66 2006/10/04 00:29:59 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
@@ -181,7 +181,7 @@ cidr_out(PG_FUNCTION_ARGS)
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
@@ -201,14 +201,14 @@ network_recv(StringInfo buf, bool is_cidr)
ip_family(addr) != PGSQL_AF_INET6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- /* translator: %s is inet or cidr */
+ /* translator: %s is inet or cidr */
errmsg("invalid address family in external \"%s\" value",
is_cidr ? "cidr" : "inet")));
bits = pq_getmsgbyte(buf);
if (bits < 0 || bits > ip_maxbits(addr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- /* translator: %s is inet or cidr */
+ /* translator: %s is inet or cidr */
errmsg("invalid bits in external \"%s\" value",
is_cidr ? "cidr" : "inet")));
ip_bits(addr) = bits;
@@ -217,7 +217,7 @@ network_recv(StringInfo buf, bool is_cidr)
if (nb != ip_addrsize(addr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- /* translator: %s is inet or cidr */
+ /* translator: %s is inet or cidr */
errmsg("invalid length in external \"%s\" value",
is_cidr ? "cidr" : "inet")));
VARATT_SIZEP(addr) = VARHDRSZ +
@@ -1262,9 +1262,9 @@ inetnot(PG_FUNCTION_ARGS)
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
{
- int nb = ip_addrsize(ip);
- unsigned char *pip = ip_addr(ip);
- unsigned char *pdst = ip_addr(dst);
+ int nb = ip_addrsize(ip);
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pdst = ip_addr(dst);
while (nb-- > 0)
pdst[nb] = ~pip[nb];
@@ -1295,10 +1295,10 @@ inetand(PG_FUNCTION_ARGS)
errmsg("cannot AND inet values of different sizes")));
else
{
- int nb = ip_addrsize(ip);
- unsigned char *pip = ip_addr(ip);
- unsigned char *pip2 = ip_addr(ip2);
- unsigned char *pdst = ip_addr(dst);
+ int nb = ip_addrsize(ip);
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pip2 = ip_addr(ip2);
+ unsigned char *pdst = ip_addr(dst);
while (nb-- > 0)
pdst[nb] = pip[nb] & pip2[nb];
@@ -1329,10 +1329,10 @@ inetor(PG_FUNCTION_ARGS)
errmsg("cannot OR inet values of different sizes")));
else
{
- int nb = ip_addrsize(ip);
- unsigned char *pip = ip_addr(ip);
- unsigned char *pip2 = ip_addr(ip2);
- unsigned char *pdst = ip_addr(dst);
+ int nb = ip_addrsize(ip);
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pip2 = ip_addr(ip2);
+ unsigned char *pdst = ip_addr(dst);
while (nb-- > 0)
pdst[nb] = pip[nb] | pip2[nb];
@@ -1356,32 +1356,34 @@ internal_inetpl(inet *ip, int64 addend)
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
{
- int nb = ip_addrsize(ip);
- unsigned char *pip = ip_addr(ip);
- unsigned char *pdst = ip_addr(dst);
- int carry = 0;
+ int nb = ip_addrsize(ip);
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pdst = ip_addr(dst);
+ int carry = 0;
while (nb-- > 0)
{
carry = pip[nb] + (int) (addend & 0xFF) + carry;
pdst[nb] = (unsigned char) (carry & 0xFF);
carry >>= 8;
+
/*
* We have to be careful about right-shifting addend because
- * right-shift isn't portable for negative values, while
- * simply dividing by 256 doesn't work (the standard rounding
- * is in the wrong direction, besides which there may be machines
- * out there that round the wrong way). So, explicitly clear
- * the low-order byte to remove any doubt about the correct
- * result of the division, and then divide rather than shift.
+ * right-shift isn't portable for negative values, while simply
+ * dividing by 256 doesn't work (the standard rounding is in the
+ * wrong direction, besides which there may be machines out there
+ * that round the wrong way). So, explicitly clear the low-order
+ * byte to remove any doubt about the correct result of the
+ * division, and then divide rather than shift.
*/
addend &= ~((int64) 0xFF);
addend /= 0x100;
}
+
/*
- * At this point we should have addend and carry both zero if
- * original addend was >= 0, or addend -1 and carry 1 if original
- * addend was < 0. Anything else means overflow.
+ * At this point we should have addend and carry both zero if original
+ * addend was >= 0, or addend -1 and carry 1 if original addend was <
+ * 0. Anything else means overflow.
*/
if (!((addend == 0 && carry == 0) ||
(addend == -1 && carry == 1)))
@@ -1403,8 +1405,8 @@ internal_inetpl(inet *ip, int64 addend)
Datum
inetpl(PG_FUNCTION_ARGS)
{
- inet *ip = PG_GETARG_INET_P(0);
- int64 addend = PG_GETARG_INT64(1);
+ inet *ip = PG_GETARG_INET_P(0);
+ int64 addend = PG_GETARG_INT64(1);
PG_RETURN_INET_P(internal_inetpl(ip, addend));
}
@@ -1413,8 +1415,8 @@ inetpl(PG_FUNCTION_ARGS)
Datum
inetmi_int8(PG_FUNCTION_ARGS)
{
- inet *ip = PG_GETARG_INET_P(0);
- int64 addend = PG_GETARG_INT64(1);
+ inet *ip = PG_GETARG_INET_P(0);
+ int64 addend = PG_GETARG_INT64(1);
PG_RETURN_INET_P(internal_inetpl(ip, -addend));
}
@@ -1434,20 +1436,20 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * We form the difference using the traditional complement,
- * increment, and add rule, with the increment part being handled
- * by starting the carry off at 1. If you don't think integer
- * arithmetic is done in two's complement, too bad.
+ * We form the difference using the traditional complement, increment,
+ * and add rule, with the increment part being handled by starting the
+ * carry off at 1. If you don't think integer arithmetic is done in
+ * two's complement, too bad.
*/
- int nb = ip_addrsize(ip);
- int byte = 0;
- unsigned char *pip = ip_addr(ip);
- unsigned char *pip2 = ip_addr(ip2);
- int carry = 1;
+ int nb = ip_addrsize(ip);
+ int byte = 0;
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pip2 = ip_addr(ip2);
+ int carry = 1;
while (nb-- > 0)
{
- int lobyte;
+ int lobyte;
carry = pip[nb] + (~pip2[nb] & 0xFF) + carry;
lobyte = carry & 0xFF;
@@ -1458,9 +1460,9 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * Input wider than int64: check for overflow. All bytes
- * to the left of what will fit should be 0 or 0xFF,
- * depending on sign of the now-complete result.
+ * Input wider than int64: check for overflow. All bytes to
+ * the left of what will fit should be 0 or 0xFF, depending on
+ * sign of the now-complete result.
*/
if ((res < 0) ? (lobyte != 0xFF) : (lobyte != 0))
ereport(ERROR,
@@ -1472,8 +1474,8 @@ inetmi(PG_FUNCTION_ARGS)
}
/*
- * If input is narrower than int64, overflow is not possible, but
- * we have to do proper sign extension.
+ * If input is narrower than int64, overflow is not possible, but we
+ * have to do proper sign extension.
*/
if (carry == 0 && byte < sizeof(int64))
res |= ((int64) -1) << (byte * 8);
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 6d77c49c12..35b0221b85 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -14,7 +14,7 @@
* Copyright (c) 1998-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.95 2006/10/03 21:25:55 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.96 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -253,10 +253,10 @@ static double numericvar_to_double_no_overflow(NumericVar *var);
static int cmp_numerics(Numeric num1, Numeric num2);
static int cmp_var(NumericVar *var1, NumericVar *var2);
-static int cmp_var_common(const NumericDigit *var1digits, int var1ndigits,
- int var1weight, int var1sign,
- const NumericDigit *var2digits, int var2ndigits,
- int var2weight, int var2sign);
+static int cmp_var_common(const NumericDigit *var1digits, int var1ndigits,
+ int var1weight, int var1sign,
+ const NumericDigit *var2digits, int var2ndigits,
+ int var2weight, int var2sign);
static void add_var(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -278,10 +278,10 @@ static void power_var_int(NumericVar *base, int exp, NumericVar *result,
int rscale);
static int cmp_abs(NumericVar *var1, NumericVar *var2);
-static int cmp_abs_common(const NumericDigit *var1digits, int var1ndigits,
- int var1weight,
- const NumericDigit *var2digits, int var2ndigits,
- int var2weight);
+static int cmp_abs_common(const NumericDigit *var1digits, int var1ndigits,
+ int var1weight,
+ const NumericDigit *var2digits, int var2ndigits,
+ int var2weight);
static void add_abs(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void sub_abs(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void round_var(NumericVar *var, int rscale);
@@ -2228,9 +2228,8 @@ numeric_stddev_internal(ArrayType *transarray,
set_var_from_num(N, &vN);
/*
- * Sample stddev and variance are undefined when N <= 1;
- * population stddev is undefined when N == 0. Return NULL in
- * either case.
+ * Sample stddev and variance are undefined when N <= 1; population stddev
+ * is undefined when N == 0. Return NULL in either case.
*/
if (sample)
comp = &const_one;
@@ -2257,7 +2256,7 @@ numeric_stddev_internal(ArrayType *transarray,
mul_var(&vsumX, &vsumX, &vsumX, rscale); /* vsumX = sumX * sumX */
mul_var(&vN, &vsumX2, &vsumX2, rscale); /* vsumX2 = N * sumX2 */
- sub_var(&vsumX2, &vsumX, &vsumX2); /* N * sumX2 - sumX * sumX */
+ sub_var(&vsumX2, &vsumX, &vsumX2); /* N * sumX2 - sumX * sumX */
if (cmp_var(&vsumX2, &const_zero) <= 0)
{
@@ -2266,11 +2265,11 @@ numeric_stddev_internal(ArrayType *transarray,
}
else
{
- mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
+ mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
- div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
+ div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
if (!variance)
- sqrt_var(&vsumX, &vsumX, rscale); /* stddev */
+ sqrt_var(&vsumX, &vsumX, rscale); /* stddev */
res = make_result(&vsumX);
}
@@ -2286,8 +2285,8 @@ numeric_stddev_internal(ArrayType *transarray,
Datum
numeric_var_samp(PG_FUNCTION_ARGS)
{
- Numeric res;
- bool is_null;
+ Numeric res;
+ bool is_null;
res = numeric_stddev_internal(PG_GETARG_ARRAYTYPE_P(0),
true, true, &is_null);
@@ -2301,8 +2300,8 @@ numeric_var_samp(PG_FUNCTION_ARGS)
Datum
numeric_stddev_samp(PG_FUNCTION_ARGS)
{
- Numeric res;
- bool is_null;
+ Numeric res;
+ bool is_null;
res = numeric_stddev_internal(PG_GETARG_ARRAYTYPE_P(0),
false, true, &is_null);
@@ -2316,8 +2315,8 @@ numeric_stddev_samp(PG_FUNCTION_ARGS)
Datum
numeric_var_pop(PG_FUNCTION_ARGS)
{
- Numeric res;
- bool is_null;
+ Numeric res;
+ bool is_null;
res = numeric_stddev_internal(PG_GETARG_ARRAYTYPE_P(0),
true, false, &is_null);
@@ -2331,8 +2330,8 @@ numeric_var_pop(PG_FUNCTION_ARGS)
Datum
numeric_stddev_pop(PG_FUNCTION_ARGS)
{
- Numeric res;
- bool is_null;
+ Numeric res;
+ bool is_null;
res = numeric_stddev_internal(PG_GETARG_ARRAYTYPE_P(0),
false, false, &is_null);
@@ -3219,7 +3218,7 @@ apply_typmod(NumericVar *var, int32 typmod)
errmsg("numeric field overflow"),
errdetail("A field with precision %d, scale %d must round to an absolute value less than %s%d.",
precision, scale,
- /* Display 10^0 as 1 */
+ /* Display 10^0 as 1 */
maxdigits ? "10^" : "",
maxdigits ? maxdigits : 1
)));
@@ -3415,8 +3414,8 @@ cmp_var(NumericVar *var1, NumericVar *var2)
/*
* cmp_var_common() -
*
- * Main routine of cmp_var(). This function can be used by both
- * NumericVar and Numeric.
+ * Main routine of cmp_var(). This function can be used by both
+ * NumericVar and Numeric.
*/
static int
cmp_var_common(const NumericDigit *var1digits, int var1ndigits,
@@ -4853,13 +4852,13 @@ cmp_abs(NumericVar *var1, NumericVar *var2)
/* ----------
* cmp_abs_common() -
*
- * Main routine of cmp_abs(). This function can be used by both
- * NumericVar and Numeric.
+ * Main routine of cmp_abs(). This function can be used by both
+ * NumericVar and Numeric.
* ----------
*/
static int
cmp_abs_common(const NumericDigit *var1digits, int var1ndigits, int var1weight,
- const NumericDigit *var2digits, int var2ndigits, int var2weight)
+ const NumericDigit *var2digits, int var2ndigits, int var2weight)
{
int i1 = 0;
int i2 = 0;
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 8ba769fafa..20142cb771 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.68 2006/03/05 15:58:43 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.69 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -258,10 +258,10 @@ oidvectorrecv(PG_FUNCTION_ARGS)
oidvector *result;
/*
- * Normally one would call array_recv() using DirectFunctionCall3,
- * but that does not work since array_recv wants to cache some data
- * using fcinfo->flinfo->fn_extra. So we need to pass it our own
- * flinfo parameter.
+ * Normally one would call array_recv() using DirectFunctionCall3, but
+ * that does not work since array_recv wants to cache some data using
+ * fcinfo->flinfo->fn_extra. So we need to pass it our own flinfo
+ * parameter.
*/
InitFunctionCallInfoData(locfcinfo, fcinfo->flinfo, 3, NULL, NULL);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index aa08c7bc8d..da81d79f39 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -4,7 +4,7 @@
*
* Portions Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.36 2006/06/03 17:36:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.37 2006/10/04 00:29:59 momjian Exp $
*
*-----------------------------------------------------------------------
*/
@@ -70,6 +70,7 @@ static bool CurrentLocaleConvValid = false;
static char lc_collate_envbuf[LC_ENV_BUFSIZE];
static char lc_ctype_envbuf[LC_ENV_BUFSIZE];
+
#ifdef LC_MESSAGES
static char lc_messages_envbuf[LC_ENV_BUFSIZE];
#endif
@@ -93,18 +94,19 @@ static char lc_time_envbuf[LC_ENV_BUFSIZE];
char *
pg_perm_setlocale(int category, const char *locale)
{
- char *result;
+ char *result;
const char *envvar;
- char *envbuf;
+ char *envbuf;
#ifndef WIN32
result = setlocale(category, locale);
#else
+
/*
- * On Windows, setlocale(LC_MESSAGES) does not work, so just assume
- * that the given value is good and set it in the environment variables.
- * We must ignore attempts to set to "", which means "keep using the
- * old environment value".
+ * On Windows, setlocale(LC_MESSAGES) does not work, so just assume that
+ * the given value is good and set it in the environment variables. We
+ * must ignore attempts to set to "", which means "keep using the old
+ * environment value".
*/
#ifdef LC_MESSAGES
if (category == LC_MESSAGES)
@@ -116,7 +118,7 @@ pg_perm_setlocale(int category, const char *locale)
else
#endif
result = setlocale(category, locale);
-#endif /* WIN32 */
+#endif /* WIN32 */
if (result == NULL)
return result; /* fall out immediately on failure */
@@ -156,12 +158,13 @@ pg_perm_setlocale(int category, const char *locale)
break;
}
- snprintf(envbuf, LC_ENV_BUFSIZE-1, "%s=%s", envvar, result);
+ snprintf(envbuf, LC_ENV_BUFSIZE - 1, "%s=%s", envvar, result);
#ifndef WIN32
if (putenv(envbuf))
return NULL;
#else
+
/*
* On Windows, we need to modify both the process environment and the
* cached version in msvcrt
@@ -251,8 +254,8 @@ locale_messages_assign(const char *value, bool doit, GucSource source)
/*
* LC_MESSAGES category does not exist everywhere, but accept it anyway
*
- * On Windows, we can't even check the value, so the non-doit case
- * is a no-op
+ * On Windows, we can't even check the value, so the non-doit case is a
+ * no-op
*/
#ifdef LC_MESSAGES
if (doit)
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index d14a4c8e2f..e2d302b5a6 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.33 2006/08/19 01:36:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.34 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -186,7 +186,7 @@ Datum
pg_stat_get_last_vacuum_time(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- TimestampTz result;
+ TimestampTz result;
PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
@@ -204,7 +204,7 @@ Datum
pg_stat_get_last_autovacuum_time(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- TimestampTz result;
+ TimestampTz result;
PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
@@ -222,7 +222,7 @@ Datum
pg_stat_get_last_analyze_time(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- TimestampTz result;
+ TimestampTz result;
PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
@@ -240,7 +240,7 @@ Datum
pg_stat_get_last_autoanalyze_time(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- TimestampTz result;
+ TimestampTz result;
PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index a7d11eaa6d..d249772064 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.65 2006/07/14 14:52:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.66 2006/10/04 00:29:59 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
@@ -554,7 +554,7 @@ similar_escape(PG_FUNCTION_ARGS)
* which is bizarre enough to require some explanation. "***:" is a
* director prefix to force the regex to be treated as an ARE regardless
* of the current regex_flavor setting. We need "^" and "$" to force
- * the pattern to match the entire input string as per SQL99 spec. The
+ * the pattern to match the entire input string as per SQL99 spec. The
* "(?:" and ")" are a non-capturing set of parens; we have to have
* parens in case the string contains "|", else the "^" and "$" will
* be bound into the first and last alternatives which is not what we
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index b58b99d2c8..747ef66bf7 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -17,7 +17,7 @@
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.88 2006/08/27 21:41:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.89 2006/10/04 00:29:59 momjian Exp $
*
* ----------
*/
@@ -214,17 +214,17 @@ RI_FKey_check(PG_FUNCTION_ARGS)
/*
* We should not even consider checking the row if it is no longer valid,
* since it was either deleted (so the deferred check should be skipped)
- * or updated (in which case only the latest version of the row should
- * be checked). Test its liveness with HeapTupleSatisfiesItself.
+ * or updated (in which case only the latest version of the row should be
+ * checked). Test its liveness with HeapTupleSatisfiesItself.
*
* NOTE: The normal coding rule is that one must acquire the buffer
- * content lock to call HeapTupleSatisfiesFOO. We can skip that here
+ * content lock to call HeapTupleSatisfiesFOO. We can skip that here
* because we know that AfterTriggerExecute just fetched the tuple
- * successfully, so there cannot be a VACUUM compaction in progress
- * on the page (either heap_fetch would have waited for the VACUUM,
- * or the VACUUM's LockBufferForCleanup would be waiting for us to drop
- * pin). And since this is a row inserted by our open transaction,
- * no one else can be entitled to change its xmin/xmax.
+ * successfully, so there cannot be a VACUUM compaction in progress on the
+ * page (either heap_fetch would have waited for the VACUUM, or the
+ * VACUUM's LockBufferForCleanup would be waiting for us to drop pin).
+ * And since this is a row inserted by our open transaction, no one else
+ * can be entitled to change its xmin/xmax.
*/
Assert(new_row_buf != InvalidBuffer);
if (!HeapTupleSatisfiesItself(new_row->t_data, new_row_buf))
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 821cc99e9f..f5b42524be 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -2,7 +2,7 @@
* ruleutils.c - Functions to convert stored expressions/querytrees
* back to source text
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.233 2006/10/01 17:23:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.234 2006/10/04 00:29:59 momjian Exp $
**********************************************************************/
#include "postgres.h"
@@ -141,7 +141,7 @@ static void get_utility_query_def(Query *query, deparse_context *context);
static void get_basic_select_query(Query *query, deparse_context *context,
TupleDesc resultDesc);
static void get_target_list(List *targetList, deparse_context *context,
- TupleDesc resultDesc);
+ TupleDesc resultDesc);
static void get_setop_query(Node *setOp, Query *query,
deparse_context *context,
TupleDesc resultDesc);
@@ -176,7 +176,7 @@ static void get_from_clause_coldeflist(List *names, List *types, List *typmods,
static void get_opclass_name(Oid opclass, Oid actual_datatype,
StringInfo buf);
static Node *processIndirection(Node *node, deparse_context *context,
- bool printit);
+ bool printit);
static void printSubscripts(ArrayRef *aref, deparse_context *context);
static char *generate_relation_name(Oid relid);
static char *generate_function_name(Oid funcid, int nargs, Oid *argtypes);
@@ -530,10 +530,11 @@ pg_get_triggerdef(PG_FUNCTION_ARGS)
{
if (i > 0)
appendStringInfo(&buf, ", ");
+
/*
* We form the string literal according to the prevailing setting
- * of standard_conforming_strings; we never use E''.
- * User is responsible for making sure result is used correctly.
+ * of standard_conforming_strings; we never use E''. User is
+ * responsible for making sure result is used correctly.
*/
appendStringInfoChar(&buf, '\'');
while (*p)
@@ -1017,7 +1018,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
if (fullCommand && OidIsValid(conForm->conrelid))
{
- char *options = flatten_reloptions(conForm->conrelid);
+ char *options = flatten_reloptions(conForm->conrelid);
if (options)
{
@@ -1467,9 +1468,9 @@ deparse_context_for_subplan(const char *name, Node *subplan)
RangeTblEntry *rte = makeNode(RangeTblEntry);
/*
- * We create an RTE_SPECIAL RangeTblEntry, and store the subplan in
- * its funcexpr field. RTE_SPECIAL nodes shouldn't appear in
- * deparse contexts otherwise.
+ * We create an RTE_SPECIAL RangeTblEntry, and store the subplan in its
+ * funcexpr field. RTE_SPECIAL nodes shouldn't appear in deparse contexts
+ * otherwise.
*/
rte->rtekind = RTE_SPECIAL;
rte->relid = InvalidOid;
@@ -1831,7 +1832,7 @@ get_values_def(List *values_lists, deparse_context *context)
appendStringInfoChar(buf, '(');
foreach(lc, sublist)
{
- Node *col = (Node *) lfirst(lc);
+ Node *col = (Node *) lfirst(lc);
if (first_col)
first_col = false;
@@ -1964,10 +1965,10 @@ get_basic_select_query(Query *query, deparse_context *context,
}
/*
- * If the query looks like SELECT * FROM (VALUES ...), then print just
- * the VALUES part. This reverses what transformValuesClause() did at
- * parse time. If the jointree contains just a single VALUES RTE,
- * we assume this case applies (without looking at the targetlist...)
+ * If the query looks like SELECT * FROM (VALUES ...), then print just the
+ * VALUES part. This reverses what transformValuesClause() did at parse
+ * time. If the jointree contains just a single VALUES RTE, we assume
+ * this case applies (without looking at the targetlist...)
*/
if (list_length(query->jointree->fromlist) == 1)
{
@@ -2083,10 +2084,10 @@ get_target_list(List *targetList, deparse_context *context,
colno++;
/*
- * We special-case Var nodes rather than using get_rule_expr.
- * This is needed because get_rule_expr will display a whole-row Var
- * as "foo.*", which is the preferred notation in most contexts, but
- * at the top level of a SELECT list it's not right (the parser will
+ * We special-case Var nodes rather than using get_rule_expr. This is
+ * needed because get_rule_expr will display a whole-row Var as
+ * "foo.*", which is the preferred notation in most contexts, but at
+ * the top level of a SELECT list it's not right (the parser will
* expand that notation into multiple columns, yielding behavior
* different from a whole-row Var). We want just "foo", instead.
*/
@@ -2287,8 +2288,8 @@ get_insert_query_def(Query *query, deparse_context *context)
List *strippedexprs;
/*
- * If it's an INSERT ... SELECT or VALUES (...), (...), ...
- * there will be a single RTE for the SELECT or VALUES.
+ * If it's an INSERT ... SELECT or VALUES (...), (...), ... there will be
+ * a single RTE for the SELECT or VALUES.
*/
foreach(l, query->rtable)
{
@@ -2300,7 +2301,7 @@ get_insert_query_def(Query *query, deparse_context *context)
elog(ERROR, "too many subquery RTEs in INSERT");
select_rte = rte;
}
-
+
if (rte->rtekind == RTE_VALUES)
{
if (values_rte)
@@ -2326,12 +2327,12 @@ get_insert_query_def(Query *query, deparse_context *context)
generate_relation_name(rte->relid));
/*
- * Add the insert-column-names list. To handle indirection properly,
- * we need to look for indirection nodes in the top targetlist (if it's
+ * Add the insert-column-names list. To handle indirection properly, we
+ * need to look for indirection nodes in the top targetlist (if it's
* INSERT ... SELECT or INSERT ... single VALUES), or in the first
- * expression list of the VALUES RTE (if it's INSERT ... multi VALUES).
- * We assume that all the expression lists will have similar indirection
- * in the latter case.
+ * expression list of the VALUES RTE (if it's INSERT ... multi VALUES). We
+ * assume that all the expression lists will have similar indirection in
+ * the latter case.
*/
if (values_rte)
values_cell = list_head((List *) linitial(values_rte->values_lists));
@@ -2589,10 +2590,10 @@ get_rte_for_var(Var *var, int levelsup, deparse_context *context,
/*
* Try to find the relevant RTE in this rtable. In a plan tree, it's
- * likely that varno is OUTER, INNER, or 0, in which case we try to
- * use varnoold instead. If the Var references an expression computed
- * by a subplan, varnoold will be 0, and we fall back to looking at the
- * special subplan RTEs.
+ * likely that varno is OUTER, INNER, or 0, in which case we try to use
+ * varnoold instead. If the Var references an expression computed by a
+ * subplan, varnoold will be 0, and we fall back to looking at the special
+ * subplan RTEs.
*/
if (var->varno >= 1 && var->varno <= list_length(dpns->rtable))
rte = rt_fetch(var->varno, dpns->rtable);
@@ -2692,10 +2693,10 @@ get_names_for_var(Var *var, int levelsup, deparse_context *context,
/*
* This case occurs during EXPLAIN when we are looking at a
* deparse context node set up by deparse_context_for_subplan().
- * If the subplan tlist provides a name, use it, but usually
- * we'll end up with "?columnN?".
+ * If the subplan tlist provides a name, use it, but usually we'll
+ * end up with "?columnN?".
*/
- List *tlist = ((Plan *) rte->funcexpr)->targetlist;
+ List *tlist = ((Plan *) rte->funcexpr)->targetlist;
TargetEntry *tle = get_tle_by_resno(tlist, attnum);
if (tle && tle->resname)
@@ -2704,7 +2705,7 @@ get_names_for_var(Var *var, int levelsup, deparse_context *context,
}
else
{
- char buf[32];
+ char buf[32];
snprintf(buf, sizeof(buf), "?column%d?", attnum);
*attname = pstrdup(buf);
@@ -2767,8 +2768,8 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* This case should not occur: a column of a table or values list
- * shouldn't have type RECORD. Fall through and fail
- * (most likely) at the bottom.
+ * shouldn't have type RECORD. Fall through and fail (most
+ * likely) at the bottom.
*/
break;
case RTE_SUBQUERY:
@@ -2836,7 +2837,7 @@ get_name_for_var_field(Var *var, int fieldno,
* that's not a Var, and then pass it to
* get_expr_result_type().
*/
- Plan *subplan = (Plan *) rte->funcexpr;
+ Plan *subplan = (Plan *) rte->funcexpr;
for (;;)
{
@@ -3484,7 +3485,7 @@ get_rule_expr(Node *node, deparse_context *context,
if (get_expr_result_type(arg, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
tupdesc = lookup_rowtype_tupdesc_copy(exprType(arg),
- exprTypmod(arg));
+ exprTypmod(arg));
Assert(tupdesc);
/* Got the tupdesc, so we can extract the field name */
Assert(fno >= 1 && fno <= tupdesc->natts);
@@ -3577,16 +3578,16 @@ get_rule_expr(Node *node, deparse_context *context,
if (caseexpr->arg)
{
/*
- * The parser should have produced WHEN clauses of
- * the form "CaseTestExpr = RHS"; we want to show
- * just the RHS. If the user wrote something silly
- * like "CASE boolexpr WHEN TRUE THEN ...", then
- * the optimizer's simplify_boolean_equality() may
- * have reduced this to just "CaseTestExpr" or
- * "NOT CaseTestExpr", for which we have to show
- * "TRUE" or "FALSE". Also, depending on context
- * the original CaseTestExpr might have been reduced
- * to a Const (but we won't see "WHEN Const").
+ * The parser should have produced WHEN clauses of the
+ * form "CaseTestExpr = RHS"; we want to show just the
+ * RHS. If the user wrote something silly like "CASE
+ * boolexpr WHEN TRUE THEN ...", then the optimizer's
+ * simplify_boolean_equality() may have reduced this
+ * to just "CaseTestExpr" or "NOT CaseTestExpr", for
+ * which we have to show "TRUE" or "FALSE". Also,
+ * depending on context the original CaseTestExpr
+ * might have been reduced to a Const (but we won't
+ * see "WHEN Const").
*/
if (IsA(w, OpExpr))
{
@@ -3719,17 +3720,18 @@ get_rule_expr(Node *node, deparse_context *context,
get_rule_expr(e, context, true);
sep = ", ";
}
+
/*
- * We assume that the name of the first-column operator
- * will do for all the rest too. This is definitely
- * open to failure, eg if some but not all operators
- * were renamed since the construct was parsed, but there
- * seems no way to be perfect.
+ * We assume that the name of the first-column operator will
+ * do for all the rest too. This is definitely open to
+ * failure, eg if some but not all operators were renamed
+ * since the construct was parsed, but there seems no way to
+ * be perfect.
*/
appendStringInfo(buf, ") %s ROW(",
- generate_operator_name(linitial_oid(rcexpr->opnos),
- exprType(linitial(rcexpr->largs)),
- exprType(linitial(rcexpr->rargs))));
+ generate_operator_name(linitial_oid(rcexpr->opnos),
+ exprType(linitial(rcexpr->largs)),
+ exprType(linitial(rcexpr->rargs))));
sep = "";
foreach(arg, rcexpr->rargs)
{
@@ -4052,7 +4054,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
}
appendStringInfo(buf, "%s(%s",
- generate_function_name(aggref->aggfnoid, nargs, argtypes),
+ generate_function_name(aggref->aggfnoid, nargs, argtypes),
aggref->aggdistinct ? "DISTINCT " : "");
/* aggstar can be set only in zero-argument aggregates */
if (aggref->aggstar)
@@ -4142,8 +4144,8 @@ get_const_expr(Const *constval, deparse_context *context)
/*
* We form the string literal according to the prevailing setting
- * of standard_conforming_strings; we never use E''.
- * User is responsible for making sure result is used correctly.
+ * of standard_conforming_strings; we never use E''. User is
+ * responsible for making sure result is used correctly.
*/
appendStringInfoChar(buf, '\'');
for (valptr = extval; *valptr; valptr++)
@@ -4205,18 +4207,18 @@ get_sublink_expr(SubLink *sublink, deparse_context *context)
appendStringInfoChar(buf, '(');
/*
- * Note that we print the name of only the first operator, when there
- * are multiple combining operators. This is an approximation that
- * could go wrong in various scenarios (operators in different schemas,
- * renamed operators, etc) but there is not a whole lot we can do about
- * it, since the syntax allows only one operator to be shown.
+ * Note that we print the name of only the first operator, when there are
+ * multiple combining operators. This is an approximation that could go
+ * wrong in various scenarios (operators in different schemas, renamed
+ * operators, etc) but there is not a whole lot we can do about it, since
+ * the syntax allows only one operator to be shown.
*/
if (sublink->testexpr)
{
if (IsA(sublink->testexpr, OpExpr))
{
/* single combining operator */
- OpExpr *opexpr = (OpExpr *) sublink->testexpr;
+ OpExpr *opexpr = (OpExpr *) sublink->testexpr;
get_rule_expr(linitial(opexpr->args), context, true);
opname = generate_operator_name(opexpr->opno,
@@ -4233,7 +4235,7 @@ get_sublink_expr(SubLink *sublink, deparse_context *context)
sep = "";
foreach(l, ((BoolExpr *) sublink->testexpr)->args)
{
- OpExpr *opexpr = (OpExpr *) lfirst(l);
+ OpExpr *opexpr = (OpExpr *) lfirst(l);
Assert(IsA(opexpr, OpExpr));
appendStringInfoString(buf, sep);
@@ -4255,7 +4257,7 @@ get_sublink_expr(SubLink *sublink, deparse_context *context)
get_rule_expr((Node *) rcexpr->largs, context, true);
opname = generate_operator_name(linitial_oid(rcexpr->opnos),
exprType(linitial(rcexpr->largs)),
- exprType(linitial(rcexpr->rargs)));
+ exprType(linitial(rcexpr->rargs)));
appendStringInfoChar(buf, ')');
}
else
@@ -5129,8 +5131,8 @@ flatten_reloptions(Oid relid)
Anum_pg_class_reloptions, &isnull);
if (!isnull)
{
- Datum sep,
- txt;
+ Datum sep,
+ txt;
/*
* We want to use array_to_text(reloptions, ', ') --- but
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 44879d20a2..4eed9619b7 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.213 2006/09/20 19:50:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.214 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -101,8 +101,8 @@
static double ineq_histogram_selectivity(VariableStatData *vardata,
- FmgrInfo *opproc, bool isgt,
- Datum constval, Oid consttype);
+ FmgrInfo *opproc, bool isgt,
+ Datum constval, Oid consttype);
static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
Datum lobound, Datum hibound, Oid boundstypid,
double *scaledlobound, double *scaledhibound);
@@ -128,7 +128,7 @@ static double convert_timevalue_to_scalar(Datum value, Oid typid);
static bool get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
Oid sortop, Datum *max);
static Selectivity prefix_selectivity(VariableStatData *vardata,
- Oid opclass, Const *prefixcon);
+ Oid opclass, Const *prefixcon);
static Selectivity pattern_selectivity(Const *patt, Pattern_Type ptype);
static Datum string_to_datum(const char *str, Oid datatype);
static Const *string_to_const(const char *str, Oid datatype);
@@ -315,10 +315,9 @@ eqsel(PG_FUNCTION_ARGS)
else
{
/*
- * No ANALYZE stats available, so make a guess using estimated
- * number of distinct values and assuming they are equally common.
- * (The guess is unlikely to be very good, but we do know a few
- * special cases.)
+ * No ANALYZE stats available, so make a guess using estimated number
+ * of distinct values and assuming they are equally common. (The guess
+ * is unlikely to be very good, but we do know a few special cases.)
*/
selec = 1.0 / get_variable_numdistinct(&vardata);
}
@@ -523,7 +522,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
@@ -618,20 +617,20 @@ ineq_histogram_selectivity(VariableStatData *vardata,
if (nvalues > 1)
{
/*
- * Use binary search to find proper location, ie, the first
- * slot at which the comparison fails. (If the given operator
- * isn't actually sort-compatible with the histogram, you'll
- * get garbage results ... but probably not any more garbage-y
- * than you would from the old linear search.)
+ * Use binary search to find proper location, ie, the first slot
+ * at which the comparison fails. (If the given operator isn't
+ * actually sort-compatible with the histogram, you'll get garbage
+ * results ... but probably not any more garbage-y than you would
+ * from the old linear search.)
*/
- double histfrac;
- int lobound = 0; /* first possible slot to search */
- int hibound = nvalues; /* last+1 slot to search */
+ double histfrac;
+ int lobound = 0; /* first possible slot to search */
+ int hibound = nvalues; /* last+1 slot to search */
while (lobound < hibound)
{
- int probe = (lobound + hibound) / 2;
- bool ltcmp;
+ int probe = (lobound + hibound) / 2;
+ bool ltcmp;
ltcmp = DatumGetBool(FunctionCall2(opproc,
values[probe],
@@ -688,10 +687,10 @@ ineq_histogram_selectivity(VariableStatData *vardata,
binfrac = (val - low) / (high - low);
/*
- * Watch out for the possibility that we got a NaN
- * or Infinity from the division. This can happen
- * despite the previous checks, if for example
- * "low" is -Infinity.
+ * Watch out for the possibility that we got a NaN or
+ * Infinity from the division. This can happen
+ * despite the previous checks, if for example "low"
+ * is -Infinity.
*/
if (isnan(binfrac) ||
binfrac < 0.0 || binfrac > 1.0)
@@ -701,20 +700,20 @@ ineq_histogram_selectivity(VariableStatData *vardata,
else
{
/*
- * Ideally we'd produce an error here, on the grounds
- * that the given operator shouldn't have scalarXXsel
- * registered as its selectivity func unless we can
- * deal with its operand types. But currently, all
- * manner of stuff is invoking scalarXXsel, so give a
- * default estimate until that can be fixed.
+ * Ideally we'd produce an error here, on the grounds that
+ * the given operator shouldn't have scalarXXsel
+ * registered as its selectivity func unless we can deal
+ * with its operand types. But currently, all manner of
+ * stuff is invoking scalarXXsel, so give a default
+ * estimate until that can be fixed.
*/
binfrac = 0.5;
}
/*
* Now, compute the overall selectivity across the values
- * represented by the histogram. We have i-1 full bins
- * and binfrac partial bin below the constant.
+ * represented by the histogram. We have i-1 full bins and
+ * binfrac partial bin below the constant.
*/
histfrac = (double) (i - 1) + binfrac;
histfrac /= (double) (nvalues - 1);
@@ -1093,7 +1092,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
@@ -1467,11 +1466,11 @@ scalararraysel(PlannerInfo *root,
RegProcedure oprsel;
FmgrInfo oprselproc;
Datum selarg4;
- Selectivity s1;
+ Selectivity s1;
/*
- * First, look up the underlying operator's selectivity estimator.
- * Punt if it hasn't got one.
+ * First, look up the underlying operator's selectivity estimator. Punt if
+ * it hasn't got one.
*/
if (is_join_clause)
{
@@ -1491,9 +1490,8 @@ scalararraysel(PlannerInfo *root,
* We consider three cases:
*
* 1. rightop is an Array constant: deconstruct the array, apply the
- * operator's selectivity function for each array element, and merge
- * the results in the same way that clausesel.c does for AND/OR
- * combinations.
+ * operator's selectivity function for each array element, and merge the
+ * results in the same way that clausesel.c does for AND/OR combinations.
*
* 2. rightop is an ARRAY[] construct: apply the operator's selectivity
* function for each element of the ARRAY[] construct, and merge.
@@ -1529,7 +1527,7 @@ scalararraysel(PlannerInfo *root,
s1 = useOr ? 0.0 : 1.0;
for (i = 0; i < num_elems; i++)
{
- List *args;
+ List *args;
Selectivity s2;
args = list_make2(leftop,
@@ -1562,7 +1560,7 @@ scalararraysel(PlannerInfo *root,
s1 = useOr ? 0.0 : 1.0;
foreach(l, arrayexpr->elements)
{
- List *args;
+ List *args;
Selectivity s2;
args = list_make2(leftop, lfirst(l));
@@ -1580,14 +1578,14 @@ scalararraysel(PlannerInfo *root,
else
{
CaseTestExpr *dummyexpr;
- List *args;
+ List *args;
Selectivity s2;
- int i;
+ int i;
/*
* We need a dummy rightop to pass to the operator selectivity
- * routine. It can be pretty much anything that doesn't look like
- * a constant; CaseTestExpr is a convenient choice.
+ * routine. It can be pretty much anything that doesn't look like a
+ * constant; CaseTestExpr is a convenient choice.
*/
dummyexpr = makeNode(CaseTestExpr);
dummyexpr->typeId = get_element_type(exprType(rightop));
@@ -1599,9 +1597,10 @@ scalararraysel(PlannerInfo *root,
PointerGetDatum(args),
selarg4));
s1 = useOr ? 0.0 : 1.0;
+
/*
- * Arbitrarily assume 10 elements in the eventual array value
- * (see also estimate_array_length)
+ * Arbitrarily assume 10 elements in the eventual array value (see
+ * also estimate_array_length)
*/
for (i = 0; i < 10; i++)
{
@@ -3050,14 +3049,19 @@ convert_string_datum(Datum value, Oid typid)
* == as you'd expect. Can't any of these people program their way
* out of a paper bag?
*/
-#if _MSC_VER == 1400 /* VS.Net 2005 */
- /* http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=99694 */
+#if _MSC_VER == 1400 /* VS.Net 2005 */
+
+ /*
+ * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx
+ * ?FeedbackID=99694
+ */
{
- char x[1];
+ char x[1];
+
xfrmlen = strxfrm(x, val, 0);
}
#else
- xfrmlen = strxfrm(NULL, val, 0);
+ xfrmlen = strxfrm(NULL, val, 0);
#endif
xfrmstr = (char *) palloc(xfrmlen + 1);
xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
@@ -3399,9 +3403,9 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (rte->inh)
{
/*
- * XXX This means the Var represents a column of an append relation.
- * Later add code to look at the member relations and try to derive
- * some kind of combined statistics?
+ * XXX This means the Var represents a column of an append
+ * relation. Later add code to look at the member relations and
+ * try to derive some kind of combined statistics?
*/
}
else if (rte->rtekind == RTE_RELATION)
@@ -4154,7 +4158,7 @@ prefix_selectivity(VariableStatData *vardata, Oid opclass, Const *prefixcon)
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
@@ -4162,8 +4166,8 @@ prefix_selectivity(VariableStatData *vardata, Oid opclass, Const *prefixcon)
/*
* A zero or negative prefixsel should be converted into a small
- * positive value; we probably are dealing with a very tight range
- * and got a bogus result due to roundoff errors.
+ * positive value; we probably are dealing with a very tight range and
+ * got a bogus result due to roundoff errors.
*/
if (prefixsel <= 0.0)
prefixsel = 1.0e-10;
@@ -4640,8 +4644,8 @@ genericcostestimate(PlannerInfo *root,
selectivityQuals = indexQuals;
/*
- * Check for ScalarArrayOpExpr index quals, and estimate the number
- * of index scans that will be performed.
+ * Check for ScalarArrayOpExpr index quals, and estimate the number of
+ * index scans that will be performed.
*/
num_sa_scans = 1;
foreach(l, indexQuals)
@@ -4651,7 +4655,7 @@ genericcostestimate(PlannerInfo *root,
if (IsA(rinfo->clause, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
- int alength = estimate_array_length(lsecond(saop->args));
+ int alength = estimate_array_length(lsecond(saop->args));
if (alength > 1)
num_sa_scans *= alength;
@@ -4679,8 +4683,8 @@ genericcostestimate(PlannerInfo *root,
numIndexTuples = rint(numIndexTuples / num_sa_scans);
/*
- * We can bound the number of tuples by the index size in any case.
- * Also, always estimate at least one tuple is touched, even when
+ * We can bound the number of tuples by the index size in any case. Also,
+ * always estimate at least one tuple is touched, even when
* indexSelectivity estimate is tiny.
*/
if (numIndexTuples > index->tuples)
@@ -4691,12 +4695,11 @@ genericcostestimate(PlannerInfo *root,
/*
* Estimate the number of index pages that will be retrieved.
*
- * We use the simplistic method of taking a pro-rata fraction of the
- * total number of index pages. In effect, this counts only leaf pages
- * and not any overhead such as index metapage or upper tree levels.
- * In practice this seems a better approximation than charging for
- * access to the upper levels, perhaps because those tend to stay in
- * cache under load.
+ * We use the simplistic method of taking a pro-rata fraction of the total
+ * number of index pages. In effect, this counts only leaf pages and not
+ * any overhead such as index metapage or upper tree levels. In practice
+ * this seems a better approximation than charging for access to the upper
+ * levels, perhaps because those tend to stay in cache under load.
*/
if (index->pages > 1 && index->tuples > 1)
numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
@@ -4706,19 +4709,19 @@ genericcostestimate(PlannerInfo *root,
/*
* Now compute the disk access costs.
*
- * The above calculations are all per-index-scan. However, if we are
- * in a nestloop inner scan, we can expect the scan to be repeated (with
+ * The above calculations are all per-index-scan. However, if we are in a
+ * nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This
- * creates the potential for cache effects to reduce the number of
- * disk page fetches needed. We want to estimate the average per-scan
- * I/O cost in the presence of caching.
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * the potential for cache effects to reduce the number of disk page
+ * fetches needed. We want to estimate the average per-scan I/O cost in
+ * the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
* estimate the total number of page fetches that occur. While this
* wasn't what it was designed for, it seems a reasonable model anyway.
- * Note that we are counting pages not tuples anymore, so we take
- * N = T = index size, as if there were one "tuple" per page.
+ * Note that we are counting pages not tuples anymore, so we take N = T =
+ * index size, as if there were one "tuple" per page.
*/
if (outer_rel != NULL && outer_rel->rows > 1)
{
@@ -4745,9 +4748,9 @@ genericcostestimate(PlannerInfo *root,
root);
/*
- * Now compute the total disk access cost, and then report a
- * pro-rated share for each outer scan. (Don't pro-rate for
- * ScalarArrayOpExpr, since that's internal to the indexscan.)
+ * Now compute the total disk access cost, and then report a pro-rated
+ * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
+ * since that's internal to the indexscan.)
*/
*indexTotalCost = (pages_fetched * random_page_cost) / num_outer_scans;
}
@@ -4761,20 +4764,20 @@ genericcostestimate(PlannerInfo *root,
}
/*
- * A difficulty with the leaf-pages-only cost approach is that for
- * small selectivities (eg, single index tuple fetched) all indexes
- * will look equally attractive because we will estimate exactly 1
- * leaf page to be fetched. All else being equal, we should prefer
- * physically smaller indexes over larger ones. (An index might be
- * smaller because it is partial or because it contains fewer columns;
- * presumably the other columns in the larger index aren't useful to
- * the query, or the larger index would have better selectivity.)
+ * A difficulty with the leaf-pages-only cost approach is that for small
+ * selectivities (eg, single index tuple fetched) all indexes will look
+ * equally attractive because we will estimate exactly 1 leaf page to be
+ * fetched. All else being equal, we should prefer physically smaller
+ * indexes over larger ones. (An index might be smaller because it is
+ * partial or because it contains fewer columns; presumably the other
+ * columns in the larger index aren't useful to the query, or the larger
+ * index would have better selectivity.)
*
* We can deal with this by adding a very small "fudge factor" that
* depends on the index size. The fudge factor used here is one
- * random_page_cost per 100000 index pages, which should be small
- * enough to not alter index-vs-seqscan decisions, but will prevent
- * indexes of different sizes from looking exactly equally attractive.
+ * random_page_cost per 100000 index pages, which should be small enough
+ * to not alter index-vs-seqscan decisions, but will prevent indexes of
+ * different sizes from looking exactly equally attractive.
*/
*indexTotalCost += index->pages * random_page_cost / 100000.0;
@@ -4841,8 +4844,8 @@ btcostestimate(PG_FUNCTION_ARGS)
* For a RowCompareExpr, we consider only the first column, just as
* rowcomparesel() does.
*
- * If there's a ScalarArrayOpExpr in the quals, we'll actually perform
- * N index scans not one, but the ScalarArrayOpExpr's operator can be
+ * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
+ * index scans not one, but the ScalarArrayOpExpr's operator can be
* considered to act the same as it normally does.
*/
indexBoundQuals = NIL;
@@ -4960,9 +4963,9 @@ btcostestimate(PG_FUNCTION_ARGS)
* ordering, but don't negate it entirely. Before 8.0 we divided the
* correlation by the number of columns, but that seems too strong.)
*
- * We can skip all this if we found a ScalarArrayOpExpr, because then
- * the call must be for a bitmap index scan, and the caller isn't going
- * to care what the index correlation is.
+ * We can skip all this if we found a ScalarArrayOpExpr, because then the
+ * call must be for a bitmap index scan, and the caller isn't going to
+ * care what the index correlation is.
*/
if (found_saop)
PG_RETURN_VOID();
diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c
index 1362d0a3f8..33b551aed8 100644
--- a/src/backend/utils/adt/tid.c
+++ b/src/backend/utils/adt/tid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tid.c,v 1.55 2006/08/25 04:06:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tid.c,v 1.56 2006/10/04 00:29:59 momjian Exp $
*
* NOTES
* input routine largely stolen from boxin().
@@ -164,7 +164,7 @@ tideq(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) == 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) == 0);
}
Datum
@@ -173,7 +173,7 @@ tidne(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) != 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) != 0);
}
Datum
@@ -182,7 +182,7 @@ tidlt(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) < 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) < 0);
}
Datum
@@ -191,7 +191,7 @@ tidle(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) <= 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) <= 0);
}
Datum
@@ -200,7 +200,7 @@ tidgt(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) > 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) > 0);
}
Datum
@@ -209,7 +209,7 @@ tidge(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) >= 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) >= 0);
}
Datum
@@ -227,7 +227,7 @@ tidlarger(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1,arg2) >= 0 ? arg1 : arg2);
+ PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1, arg2) >= 0 ? arg1 : arg2);
}
Datum
@@ -236,7 +236,7 @@ tidsmaller(PG_FUNCTION_ARGS)
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1,arg2) <= 0 ? arg1 : arg2);
+ PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1, arg2) <= 0 ? arg1 : arg2);
}
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index fcd90432af..e06fa99dc1 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.167 2006/09/05 01:13:39 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.168 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1029,10 +1029,10 @@ timestamptz_to_time_t(TimestampTz t)
#ifdef HAVE_INT64_TIMESTAMP
result = (time_t) (t / USECS_PER_SEC +
- ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
+ ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#else
result = (time_t) (t +
- ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
+ ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#endif
return result;
@@ -2015,7 +2015,7 @@ timestamp_mi(PG_FUNCTION_ARGS)
* test=> SET timezone = 'EST5EDT';
* test=> SELECT
* test-> ('2005-10-30 13:22:00-05'::timestamptz -
- * test(> '2005-10-29 13:22:00-04'::timestamptz);
+ * test(> '2005-10-29 13:22:00-04'::timestamptz);
* ?column?
* ----------------
* 1 day 01:00:00
@@ -2027,7 +2027,7 @@ timestamp_mi(PG_FUNCTION_ARGS)
* test-> ('2005-10-29 13:22:00-04'::timestamptz +
* test(> ('2005-10-30 13:22:00-05'::timestamptz -
* test(> '2005-10-29 13:22:00-04'::timestamptz)) at time zone 'EST';
- * timezone
+ * timezone
* --------------------
* 2005-10-30 14:22:00
* (1 row)
@@ -2040,23 +2040,23 @@ timestamp_mi(PG_FUNCTION_ARGS)
}
/*
- * interval_justify_interval()
+ * interval_justify_interval()
*
- * Adjust interval so 'month', 'day', and 'time' portions are within
- * customary bounds. Specifically:
+ * Adjust interval so 'month', 'day', and 'time' portions are within
+ * customary bounds. Specifically:
*
- * 0 <= abs(time) < 24 hours
- * 0 <= abs(day) < 30 days
+ * 0 <= abs(time) < 24 hours
+ * 0 <= abs(day) < 30 days
*
- * Also, the sign bit on all three fields is made equal, so either
- * all three fields are negative or all are positive.
+ * Also, the sign bit on all three fields is made equal, so either
+ * all three fields are negative or all are positive.
*/
Datum
interval_justify_interval(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
Interval *result;
-
+
#ifdef HAVE_INT64_TIMESTAMP
int64 wholeday;
#else
@@ -2087,7 +2087,7 @@ interval_justify_interval(PG_FUNCTION_ARGS)
result->month--;
}
else if (result->month < 0 &&
- (result->day > 0 || (result->day == 0 && result->time > 0)))
+ (result->day > 0 || (result->day == 0 && result->time > 0)))
{
result->day -= DAYS_PER_MONTH;
result->month++;
@@ -2103,7 +2103,7 @@ interval_justify_interval(PG_FUNCTION_ARGS)
result->day--;
}
else if (result->day < 0 && result->time > 0)
- {
+ {
#ifdef HAVE_INT64_TIMESTAMP
result->time -= USECS_PER_DAY;
#else
@@ -2492,8 +2492,10 @@ interval_mul(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder_days, sec_remainder;
- int32 orig_month = span->month, orig_day = span->day;
+ double month_remainder_days,
+ sec_remainder;
+ int32 orig_month = span->month,
+ orig_day = span->day;
Interval *result;
result = (Interval *) palloc(sizeof(Interval));
@@ -2512,28 +2514,28 @@ interval_mul(PG_FUNCTION_ARGS)
*/
/*
- * Fractional months full days into days.
+ * Fractional months full days into days.
*
- * Floating point calculation are inherently inprecise, so these
- * calculations are crafted to produce the most reliable result
- * possible. TSROUND() is needed to more accurately produce whole
- * numbers where appropriate.
+ * Floating point calculation are inherently inprecise, so these
+ * calculations are crafted to produce the most reliable result possible.
+ * TSROUND() is needed to more accurately produce whole numbers where
+ * appropriate.
*/
month_remainder_days = (orig_month * factor - result->month) * DAYS_PER_MONTH;
month_remainder_days = TSROUND(month_remainder_days);
sec_remainder = (orig_day * factor - result->day +
- month_remainder_days - (int)month_remainder_days) * SECS_PER_DAY;
+ month_remainder_days - (int) month_remainder_days) * SECS_PER_DAY;
sec_remainder = TSROUND(sec_remainder);
/*
- * Might have 24:00:00 hours due to rounding, or >24 hours because of
- * time cascade from months and days. It might still be >24 if the
- * combination of cascade and the seconds factor operation itself.
+ * Might have 24:00:00 hours due to rounding, or >24 hours because of time
+ * cascade from months and days. It might still be >24 if the combination
+ * of cascade and the seconds factor operation itself.
*/
if (Abs(sec_remainder) >= SECS_PER_DAY)
{
- result->day += (int)(sec_remainder / SECS_PER_DAY);
- sec_remainder -= (int)(sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
+ result->day += (int) (sec_remainder / SECS_PER_DAY);
+ sec_remainder -= (int) (sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
}
/* cascade units down */
@@ -2562,10 +2564,12 @@ interval_div(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder_days, sec_remainder;
- int32 orig_month = span->month, orig_day = span->day;
+ double month_remainder_days,
+ sec_remainder;
+ int32 orig_month = span->month,
+ orig_day = span->day;
Interval *result;
-
+
result = (Interval *) palloc(sizeof(Interval));
if (factor == 0.0)
@@ -2577,18 +2581,17 @@ interval_div(PG_FUNCTION_ARGS)
result->day = (int32) (span->day / factor);
/*
- * Fractional months full days into days. See comment in
- * interval_mul().
+ * Fractional months full days into days. See comment in interval_mul().
*/
month_remainder_days = (orig_month / factor - result->month) * DAYS_PER_MONTH;
month_remainder_days = TSROUND(month_remainder_days);
sec_remainder = (orig_day / factor - result->day +
- month_remainder_days - (int)month_remainder_days) * SECS_PER_DAY;
+ month_remainder_days - (int) month_remainder_days) * SECS_PER_DAY;
sec_remainder = TSROUND(sec_remainder);
if (Abs(sec_remainder) >= SECS_PER_DAY)
{
- result->day += (int)(sec_remainder / SECS_PER_DAY);
- sec_remainder -= (int)(sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
+ result->day += (int) (sec_remainder / SECS_PER_DAY);
+ sec_remainder -= (int) (sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
}
/* cascade units down */
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 5124bee765..937cf96ebe 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.118 2006/07/14 14:52:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.119 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -604,8 +604,8 @@ bpchareq(PG_FUNCTION_ARGS)
len2 = bcTruelen(arg2);
/*
- * Since we only care about equality or not-equality, we can avoid all
- * the expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all the
+ * expense of strcoll() here, and just do bitwise comparison.
*/
if (len1 != len2)
result = false;
@@ -631,8 +631,8 @@ bpcharne(PG_FUNCTION_ARGS)
len2 = bcTruelen(arg2);
/*
- * Since we only care about equality or not-equality, we can avoid all
- * the expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all the
+ * expense of strcoll() here, and just do bitwise comparison.
*/
if (len1 != len2)
result = true;
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 6215e3eda5..33f40b685c 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.150 2006/07/14 14:52:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.151 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -985,8 +985,8 @@ texteq(PG_FUNCTION_ARGS)
bool result;
/*
- * Since we only care about equality or not-equality, we can avoid all
- * the expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all the
+ * expense of strcoll() here, and just do bitwise comparison.
*/
if (VARSIZE(arg1) != VARSIZE(arg2))
result = false;
@@ -1008,8 +1008,8 @@ textne(PG_FUNCTION_ARGS)
bool result;
/*
- * Since we only care about equality or not-equality, we can avoid all
- * the expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all the
+ * expense of strcoll() here, and just do bitwise comparison.
*/
if (VARSIZE(arg1) != VARSIZE(arg2))
result = true;
@@ -2217,7 +2217,7 @@ replace_text_regexp(text *src_text, void *regexp,
text *ret_text;
regex_t *re = (regex_t *) regexp;
int src_text_len = VARSIZE(src_text) - VARHDRSZ;
- StringInfoData buf;
+ StringInfoData buf;
regmatch_t pmatch[REGEXP_REPLACE_BACKREF_CNT];
pg_wchar *data;
size_t data_len;
@@ -2236,7 +2236,7 @@ replace_text_regexp(text *src_text, void *regexp,
for (search_start = data_pos = 0; search_start <= data_len;)
{
- int regexec_result;
+ int regexec_result;
regexec_result = pg_regexec(re,
data,
@@ -2503,7 +2503,7 @@ array_to_text(PG_FUNCTION_ARGS)
int typlen;
bool typbyval;
char typalign;
- StringInfoData buf;
+ StringInfoData buf;
bool printed = false;
char *p;
bits8 *bitmap;
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 20b83e196a..0d1ae15b39 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.132 2006/07/31 20:09:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.133 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -786,8 +786,8 @@ InitCatCache(int id,
/*
* nbuckets is the number of hash buckets to use in this catcache.
- * Currently we just use a hard-wired estimate of an appropriate size
- * for each cache; maybe later make them dynamically resizable?
+ * Currently we just use a hard-wired estimate of an appropriate size for
+ * each cache; maybe later make them dynamically resizable?
*
* nbuckets must be a power of two. We check this via Assert rather than
* a full runtime check because the values will be coming from constant
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 247c6bf5a6..899c8e202b 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -80,7 +80,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.77 2006/07/14 14:52:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.78 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -633,11 +633,11 @@ AcceptInvalidationMessages(void)
* However, it also makes the system unbelievably slow --- the regression
* tests take about 100 times longer than normal.
*
- * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY.
- * This slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression tests that way. It's useful to
- * try a few simple tests, to make sure that cache reload isn't subject
- * to internal cache-flush hazards, but after you've done a few thousand
+ * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
+ * slows things by at least a factor of 10000, so I wouldn't suggest
+ * trying to run the entire regression tests that way. It's useful to try
+ * a few simple tests, to make sure that cache reload isn't subject to
+ * internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
*/
#if defined(CLOBBER_CACHE_ALWAYS)
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 53e3a5bf55..fdc31e0d97 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.137 2006/09/28 20:51:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.138 2006/10/04 00:30:00 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
@@ -206,9 +206,9 @@ get_op_btree_interpretation(Oid opno, List **opclasses, List **opstrats)
/*
* Get the nominal left-hand input type of the operator; we will ignore
- * opclasses that don't have that as the expected input datatype. This
- * is a kluge to avoid being confused by binary-compatible opclasses
- * (such as text_ops and varchar_ops, which share the same operators).
+ * opclasses that don't have that as the expected input datatype. This is
+ * a kluge to avoid being confused by binary-compatible opclasses (such as
+ * text_ops and varchar_ops, which share the same operators).
*/
op_input_types(opno, &lefttype, &righttype);
Assert(OidIsValid(lefttype));
@@ -219,14 +219,15 @@ get_op_btree_interpretation(Oid opno, List **opclasses, List **opstrats)
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(opno),
0, 0, 0);
+
/*
- * If we can't find any opclass containing the op, perhaps it is a
- * <> operator. See if it has a negator that is in an opclass.
+ * If we can't find any opclass containing the op, perhaps it is a <>
+ * operator. See if it has a negator that is in an opclass.
*/
op_negated = false;
if (catlist->n_members == 0)
{
- Oid op_negator = get_negator(opno);
+ Oid op_negator = get_negator(opno);
if (OidIsValid(op_negator))
{
@@ -1395,13 +1396,13 @@ get_type_io_data(Oid typid,
Form_pg_type typeStruct;
/*
- * In bootstrap mode, pass it off to bootstrap.c. This hack allows
- * us to use array_in and array_out during bootstrap.
+ * In bootstrap mode, pass it off to bootstrap.c. This hack allows us to
+ * use array_in and array_out during bootstrap.
*/
if (IsBootstrapProcessingMode())
{
- Oid typinput;
- Oid typoutput;
+ Oid typinput;
+ Oid typoutput;
boot_get_type_io_data(typid,
typlen,
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 190543e2bd..40e71d513d 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.248 2006/09/05 21:08:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.249 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -182,7 +182,7 @@ static void RelationReloadClassinfo(Relation relation);
static void RelationFlushRelation(Relation relation);
static bool load_relcache_init_file(void);
static void write_relcache_init_file(void);
-static void write_item(const void *data, Size len, FILE *fp);
+static void write_item(const void *data, Size len, FILE *fp);
static void formrdesc(const char *relationName, Oid relationReltype,
bool hasoids, int natts, FormData_pg_attribute *att);
@@ -298,14 +298,14 @@ AllocateRelationDesc(Relation relation, Form_pg_class relp)
/*
* Copy the relation tuple form
*
- * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
- * The variable-length fields (relacl, reloptions) are NOT stored in the
+ * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. The
+ * variable-length fields (relacl, reloptions) are NOT stored in the
* relcache --- there'd be little point in it, since we don't copy the
* tuple's nulls bitmap and hence wouldn't know if the values are valid.
- * Bottom line is that relacl *cannot* be retrieved from the relcache.
- * Get it from the syscache if you need it. The same goes for the
- * original form of reloptions (however, we do store the parsed form
- * of reloptions in rd_options).
+ * Bottom line is that relacl *cannot* be retrieved from the relcache. Get
+ * it from the syscache if you need it. The same goes for the original
+ * form of reloptions (however, we do store the parsed form of reloptions
+ * in rd_options).
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
@@ -355,9 +355,9 @@ RelationParseRelOptions(Relation relation, HeapTuple tuple)
}
/*
- * Fetch reloptions from tuple; have to use a hardwired descriptor
- * because we might not have any other for pg_class yet (consider
- * executing this code for pg_class itself)
+ * Fetch reloptions from tuple; have to use a hardwired descriptor because
+ * we might not have any other for pg_class yet (consider executing this
+ * code for pg_class itself)
*/
datum = fastgetattr(tuple,
Anum_pg_class_reloptions,
@@ -620,8 +620,8 @@ RelationBuildRuleLock(Relation relation)
*
* Note: since we scan the rules using RewriteRelRulenameIndexId, we will
* be reading the rules in name order, except possibly during
- * emergency-recovery operations (ie, IgnoreSystemIndexes). This in
- * turn ensures that rules will be fired in name order.
+ * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
+ * ensures that rules will be fired in name order.
*/
rewrite_desc = heap_open(RewriteRelationId, AccessShareLock);
rewrite_tupdesc = RelationGetDescr(rewrite_desc);
@@ -649,10 +649,10 @@ RelationBuildRuleLock(Relation relation)
rule->isInstead = rewrite_form->is_instead;
/*
- * Must use heap_getattr to fetch ev_action and ev_qual. Also,
- * the rule strings are often large enough to be toasted. To avoid
- * leaking memory in the caller's context, do the detoasting here
- * so we can free the detoasted version.
+ * Must use heap_getattr to fetch ev_action and ev_qual. Also, the
+ * rule strings are often large enough to be toasted. To avoid
+ * leaking memory in the caller's context, do the detoasting here so
+ * we can free the detoasted version.
*/
rule_datum = heap_getattr(rewrite_tuple,
Anum_pg_rewrite_ev_action,
@@ -686,16 +686,16 @@ RelationBuildRuleLock(Relation relation)
/*
* We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
* through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
* contains sublinks.
*
- * The reason for doing this when the rule is loaded, rather than
- * when it is stored, is that otherwise ALTER TABLE OWNER would have
- * to grovel through stored rules to update checkAsUser fields.
- * Scanning the rule tree during load is relatively cheap (compared
- * to constructing it in the first place), so we do it here.
+ * The reason for doing this when the rule is loaded, rather than when
+ * it is stored, is that otherwise ALTER TABLE OWNER would have to
+ * grovel through stored rules to update checkAsUser fields. Scanning
+ * the rule tree during load is relatively cheap (compared to
+ * constructing it in the first place), so we do it here.
*/
setRuleCheckAsUser((Node *) rule->actions, relation->rd_rel->relowner);
setRuleCheckAsUser(rule->qual, relation->rd_rel->relowner);
@@ -1626,14 +1626,14 @@ RelationClearRelation(Relation relation, bool rebuild)
* Even non-system indexes should not be blown away if they are open and
* have valid index support information. This avoids problems with active
* use of the index support information. As with nailed indexes, we
- * re-read the pg_class row to handle possible physical relocation of
- * the index.
+ * re-read the pg_class row to handle possible physical relocation of the
+ * index.
*/
if (relation->rd_rel->relkind == RELKIND_INDEX &&
relation->rd_refcnt > 0 &&
relation->rd_indexcxt != NULL)
{
- relation->rd_isvalid = false; /* needs to be revalidated */
+ relation->rd_isvalid = false; /* needs to be revalidated */
RelationReloadClassinfo(relation);
return;
}
@@ -2140,9 +2140,9 @@ RelationBuildLocalRelation(const char *relname,
/*
* check that hardwired list of shared rels matches what's in the
- * bootstrap .bki file. If you get a failure here during initdb,
- * you probably need to fix IsSharedRelation() to match whatever
- * you've done to the set of shared relations.
+ * bootstrap .bki file. If you get a failure here during initdb, you
+ * probably need to fix IsSharedRelation() to match whatever you've done
+ * to the set of shared relations.
*/
if (shared_relation != IsSharedRelation(relid))
elog(ERROR, "shared_relation flag for \"%s\" does not match IsSharedRelation(%u)",
@@ -2308,7 +2308,7 @@ RelationCacheInitialize(void)
* the system catalogs. We first try to read pre-computed relcache
* entries from the pg_internal.init file. If that's missing or
* broken, make phony entries for the minimum set of nailed-in-cache
- * relations. Then (unless bootstrapping) make sure we have entries
+ * relations. Then (unless bootstrapping) make sure we have entries
* for the critical system indexes. Once we've done all this, we
* have enough infrastructure to open any system catalog or use any
* catcache. The last step is to rewrite pg_internal.init if needed.
@@ -2319,7 +2319,7 @@ RelationCacheInitializePhase2(void)
HASH_SEQ_STATUS status;
RelIdCacheEnt *idhentry;
MemoryContext oldcxt;
- bool needNewCacheFile = false;
+ bool needNewCacheFile = false;
/*
* switch to cache memory context
@@ -2375,7 +2375,7 @@ RelationCacheInitializePhase2(void)
* RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
* in the same way as the others, because the critical catalogs don't
* (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion. However they are used during
+ * rebuilt without inducing recursion. However they are used during
* relcache load when a rel does have rules or triggers, so we choose to
* nail them for performance reasons.
*/
@@ -2505,7 +2505,7 @@ BuildHardcodedDescriptor(int natts, Form_pg_attribute attrs, bool hasoids)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
result = CreateTemplateTupleDesc(natts, hasoids);
- result->tdtypeid = RECORDOID; /* not right, but we don't care */
+ result->tdtypeid = RECORDOID; /* not right, but we don't care */
result->tdtypmod = -1;
for (i = 0; i < natts; i++)
@@ -3166,7 +3166,7 @@ load_relcache_init_file(void)
if ((nread = fread(rel->rd_options, 1, len, fp)) != len)
goto read_failed;
if (len != VARATT_SIZE(rel->rd_options))
- goto read_failed; /* sanity check */
+ goto read_failed; /* sanity check */
}
else
{
@@ -3457,7 +3457,7 @@ write_relcache_init_file(void)
/* finally, write the vector of support procedures */
write_item(rel->rd_support,
- relform->relnatts * (am->amsupport * sizeof(RegProcedure)),
+ relform->relnatts * (am->amsupport * sizeof(RegProcedure)),
fp);
}
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 2eaa9ee096..79aba4ebc8 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.106 2006/07/14 14:52:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.107 2006/10/04 00:30:00 momjian Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
@@ -516,7 +516,8 @@ static const struct cachedesc cacheinfo[] = {
}
};
-static CatCache *SysCache[lengthof(cacheinfo)];
+static CatCache *SysCache[
+ lengthof(cacheinfo)];
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index ee91c9fe41..c5a0272414 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.21 2006/07/14 14:52:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.22 2006/10/04 00:30:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -268,9 +268,9 @@ lookup_type_cache(Oid type_id, int flags)
Assert(rel->rd_rel->reltype == typentry->type_id);
/*
- * Link to the tupdesc and increment its refcount (we assert it's
- * a refcounted descriptor). We don't use IncrTupleDescRefCount()
- * for this, because the reference mustn't be entered in the current
+ * Link to the tupdesc and increment its refcount (we assert it's a
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for
+ * this, because the reference mustn't be entered in the current
* resource owner; it can outlive the current query.
*/
typentry->tupDesc = RelationGetDescr(rel);
@@ -372,7 +372,7 @@ lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
TupleDesc
lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
{
- TupleDesc tmp;
+ TupleDesc tmp;
tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
return CreateTupleDescCopyConstr(tmp);
@@ -501,9 +501,9 @@ flush_rowtype_cache(Oid type_id)
return; /* tupdesc hasn't been requested */
/*
- * Release our refcount and free the tupdesc if none remain.
- * (Can't use DecrTupleDescRefCount because this reference is not
- * logged in current resource owner.)
+ * Release our refcount and free the tupdesc if none remain. (Can't use
+ * DecrTupleDescRefCount because this reference is not logged in current
+ * resource owner.)
*/
Assert(typentry->tupDesc->tdrefcount > 0);
if (--typentry->tupDesc->tdrefcount == 0)
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index b12674026e..c3710c6211 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.90 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.91 2006/10/04 00:30:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,14 +27,14 @@
/* signatures for PostgreSQL-specific library init/fini functions */
-typedef void (*PG_init_t)(void);
-typedef void (*PG_fini_t)(void);
+typedef void (*PG_init_t) (void);
+typedef void (*PG_fini_t) (void);
/* hashtable entry for rendezvous variables */
typedef struct
-{
- char varName[NAMEDATALEN]; /* hash key (must be first) */
- void *varValue;
+{
+ char varName[NAMEDATALEN]; /* hash key (must be first) */
+ void *varValue;
} rendezvousHashEntry;
/*
@@ -168,7 +168,7 @@ lookup_external_function(void *filehandle, char *funcname)
/*
* Load the specified dynamic-link library file, unless it already is
- * loaded. Return the pg_dl* handle for the file.
+ * loaded. Return the pg_dl* handle for the file.
*
* Note: libname is expected to be an exact name for the library file.
*/
@@ -248,7 +248,7 @@ internal_load_library(const char *libname)
const Pg_magic_struct *magic_data_ptr = (*magic_func) ();
if (magic_data_ptr->len != magic_data.len ||
- memcmp(magic_data_ptr, &magic_data, magic_data.len) != 0)
+ memcmp(magic_data_ptr, &magic_data, magic_data.len) != 0)
{
/* copy data block before unlinking library */
Pg_magic_struct module_magic_data = *magic_data_ptr;
@@ -258,22 +258,22 @@ internal_load_library(const char *libname)
free((char *) file_scanner);
/*
- * Report suitable error. It's probably not worth writing
- * a separate error message for each field; only the most
- * common case of wrong major version gets its own message.
+ * Report suitable error. It's probably not worth writing a
+ * separate error message for each field; only the most common
+ * case of wrong major version gets its own message.
*/
if (module_magic_data.version != magic_data.version)
ereport(ERROR,
- (errmsg("incompatible library \"%s\": version mismatch",
- libname),
- errdetail("Server is version %d.%d, library is version %d.%d.",
- magic_data.version/100,
- magic_data.version % 100,
- module_magic_data.version/100,
- module_magic_data.version % 100)));
+ (errmsg("incompatible library \"%s\": version mismatch",
+ libname),
+ errdetail("Server is version %d.%d, library is version %d.%d.",
+ magic_data.version / 100,
+ magic_data.version % 100,
+ module_magic_data.version / 100,
+ module_magic_data.version % 100)));
ereport(ERROR,
- (errmsg("incompatible library \"%s\": magic block mismatch",
- libname)));
+ (errmsg("incompatible library \"%s\": magic block mismatch",
+ libname)));
}
}
else
@@ -283,9 +283,9 @@ internal_load_library(const char *libname)
free((char *) file_scanner);
/* complain */
ereport(ERROR,
- (errmsg("incompatible library \"%s\": missing magic block",
- libname),
- errhint("Extension libraries are now required to use the PG_MODULE_MAGIC macro.")));
+ (errmsg("incompatible library \"%s\": missing magic block",
+ libname),
+ errhint("Extension libraries are now required to use the PG_MODULE_MAGIC macro.")));
}
/*
@@ -293,7 +293,7 @@ internal_load_library(const char *libname)
*/
PG_init = (PG_init_t) pg_dlsym(file_scanner->handle, "_PG_init");
if (PG_init)
- (*PG_init)();
+ (*PG_init) ();
/* OK to link it into list */
if (file_list == NULL)
@@ -351,7 +351,7 @@ internal_unload_library(const char *libname)
*/
PG_fini = (PG_fini_t) pg_dlsym(file_scanner->handle, "_PG_fini");
if (PG_fini)
- (*PG_fini)();
+ (*PG_fini) ();
clear_external_function_hash(file_scanner->handle);
pg_dlclose(file_scanner->handle);
@@ -441,8 +441,8 @@ expand_dynamic_library_name(const char *name)
}
/*
- * If we can't find the file, just return the string as-is.
- * The ensuing load attempt will fail and report a suitable message.
+ * If we can't find the file, just return the string as-is. The ensuing
+ * load attempt will fail and report a suitable message.
*/
return pstrdup(name);
}
@@ -575,7 +575,7 @@ find_in_dynamic_libpath(const char *basename)
/*
- * Find (or create) a rendezvous variable that one dynamically
+ * Find (or create) a rendezvous variable that one dynamically
* loaded library can use to meet up with another.
*
* On the first call of this function for a particular varName,
@@ -589,22 +589,22 @@ find_in_dynamic_libpath(const char *basename)
* to find each other and share information: they just need to agree
* on the variable name and the data it will point to.
*/
-void **
+void **
find_rendezvous_variable(const char *varName)
{
- static HTAB *rendezvousHash = NULL;
+ static HTAB *rendezvousHash = NULL;
rendezvousHashEntry *hentry;
- bool found;
+ bool found;
/* Create a hashtable if we haven't already done so in this process */
if (rendezvousHash == NULL)
{
- HASHCTL ctl;
+ HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
- ctl.keysize = NAMEDATALEN;
- ctl.entrysize = sizeof(rendezvousHashEntry);
+ ctl.keysize = NAMEDATALEN;
+ ctl.entrysize = sizeof(rendezvousHashEntry);
rendezvousHash = hash_create("Rendezvous variable hash",
16,
&ctl,
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 4472b3fcc9..df4594b435 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.101 2006/05/30 21:21:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.102 2006/10/04 00:30:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1798,7 +1798,7 @@ ReceiveFunctionCall(FmgrInfo *flinfo, StringInfo buf,
*
* This is little more than window dressing for FunctionCall1, but its use is
* recommended anyway so that code invoking output functions can be identified
- * easily. Note however that it does guarantee a non-toasted result.
+ * easily. Note however that it does guarantee a non-toasted result.
*/
bytea *
SendFunctionCall(FmgrInfo *flinfo, Datum val)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index aa0a6b11f2..23b48c0b63 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -5,19 +5,19 @@
*
* dynahash.c supports both local-to-a-backend hash tables and hash tables in
* shared memory. For shared hash tables, it is the caller's responsibility
- * to provide appropriate access interlocking. The simplest convention is
- * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
+ * to provide appropriate access interlocking. The simplest convention is
+ * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
* hash_seq_search) need only shared lock, but any update requires exclusive
* lock. For heavily-used shared tables, the single-lock approach creates a
* concurrency bottleneck, so we also support "partitioned" locking wherein
* there are multiple LWLocks guarding distinct subsets of the table. To use
* a hash table in partitioned mode, the HASH_PARTITION flag must be given
- * to hash_create. This prevents any attempt to split buckets on-the-fly.
+ * to hash_create. This prevents any attempt to split buckets on-the-fly.
* Therefore, each hash bucket chain operates independently, and no fields
* of the hash header change after init except nentries and freeList.
* A partitioned table uses a spinlock to guard changes of those two fields.
* This lets any subset of the hash buckets be treated as a separately
- * lockable partition. We expect callers to use the low-order bits of a
+ * lockable partition. We expect callers to use the low-order bits of a
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.72 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.73 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,7 +124,7 @@ struct HASHHDR
/* These fields are fixed at hashtable creation */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
- long num_partitions; /* # partitions (must be power of 2), or 0 */
+ long num_partitions; /* # partitions (must be power of 2), or 0 */
long ffactor; /* target fill factor */
long max_dsize; /* 'dsize' limit if directory is fixed size */
long ssize; /* segment size --- must be power of 2 */
@@ -132,9 +132,10 @@ struct HASHHDR
int nelem_alloc; /* number of entries to allocate at once */
#ifdef HASH_STATISTICS
+
/*
- * Count statistics here. NB: stats code doesn't bother with mutex,
- * so counts could be corrupted a bit in a partitioned table.
+ * Count statistics here. NB: stats code doesn't bother with mutex, so
+ * counts could be corrupted a bit in a partitioned table.
*/
long accesses;
long collisions;
@@ -287,8 +288,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
hashp->hash = string_hash; /* default hash function */
/*
- * If you don't specify a match function, it defaults to string_compare
- * if you used string_hash (either explicitly or by default) and to memcmp
+ * If you don't specify a match function, it defaults to string_compare if
+ * you used string_hash (either explicitly or by default) and to memcmp
* otherwise. (Prior to PostgreSQL 7.4, memcmp was always used.)
*/
if (flags & HASH_COMPARE)
@@ -317,8 +318,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
/*
* ctl structure and directory are preallocated for shared memory
- * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set
- * as well.
+ * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
+ * well.
*/
hashp->hctl = info->hctl;
hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
@@ -413,8 +414,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
* This reduces problems with run-time out-of-shared-memory conditions.
*
* For a non-shared hash table, preallocate the requested number of
- * elements if it's less than our chosen nelem_alloc. This avoids
- * wasting space if the caller correctly estimates a small table size.
+ * elements if it's less than our chosen nelem_alloc. This avoids wasting
+ * space if the caller correctly estimates a small table size.
*/
if ((flags & HASH_SHARED_MEM) ||
nelem < hctl->nelem_alloc)
@@ -479,15 +480,16 @@ choose_nelem_alloc(Size entrysize)
elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
/*
- * The idea here is to choose nelem_alloc at least 32, but round up
- * so that the allocation request will be a power of 2 or just less.
- * This makes little difference for hash tables in shared memory,
- * but for hash tables managed by palloc, the allocation request
- * will be rounded up to a power of 2 anyway. If we fail to take
- * this into account, we'll waste as much as half the allocated space.
+ * The idea here is to choose nelem_alloc at least 32, but round up so
+ * that the allocation request will be a power of 2 or just less. This
+ * makes little difference for hash tables in shared memory, but for hash
+ * tables managed by palloc, the allocation request will be rounded up to
+ * a power of 2 anyway. If we fail to take this into account, we'll waste
+ * as much as half the allocated space.
*/
allocSize = 32 * 4; /* assume elementSize at least 8 */
- do {
+ do
+ {
allocSize <<= 1;
nelem_alloc = allocSize / elementSize;
} while (nelem_alloc < 32);
@@ -926,7 +928,7 @@ hash_search_with_hash_value(HTAB *hashp,
/* Check if it is time to split a bucket */
/* Can't split if running in partitioned mode */
if (!IS_PARTITIONED(hctl) &&
- hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor)
+ hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor)
{
/*
* NOTE: failure to expand table is not a fatal error, it just
diff --git a/src/backend/utils/hash/hashfn.c b/src/backend/utils/hash/hashfn.c
index af528881ef..2b0a30abe0 100644
--- a/src/backend/utils/hash/hashfn.c
+++ b/src/backend/utils/hash/hashfn.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.28 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.29 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,9 +32,9 @@ string_hash(const void *key, Size keysize)
* because when it is copied into the hash table it will be truncated at
* that length.
*/
- Size s_len = strlen((const char *) key);
+ Size s_len = strlen((const char *) key);
- s_len = Min(s_len, keysize-1);
+ s_len = Min(s_len, keysize - 1);
return DatumGetUInt32(hash_any((const unsigned char *) key,
(int) s_len));
}
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index 1295288b89..265a3a06be 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.98 2006/05/02 11:28:55 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.99 2006/10/04 00:30:02 momjian Exp $
*
* NOTES
* Globals used all over the place should be declared here and not
@@ -108,4 +108,4 @@ int VacuumCostDelay = 0;
int VacuumCostBalance = 0; /* working state for vacuum */
bool VacuumCostActive = false;
-int GinFuzzySearchLimit = 0;
+int GinFuzzySearchLimit = 0;
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 2d979afead..5ddb0b2e44 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.158 2006/08/16 04:32:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.159 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ static char socketLockFile[MAXPGPATH];
* ----------------------------------------------------------------
*/
-bool IgnoreSystemIndexes = false;
+bool IgnoreSystemIndexes = false;
/* ----------------------------------------------------------------
* system index reindexing support
@@ -1097,7 +1097,7 @@ ValidatePgVersion(const char *path)
*-------------------------------------------------------------------------
*/
-/*
+/*
* GUC variables: lists of library names to be preloaded at postmaster
* start and at backend start
*/
@@ -1146,7 +1146,7 @@ load_libraries(const char *libraries, const char *gucname, bool restricted)
/* If restricting, insert $libdir/plugins if not mentioned already */
if (restricted && first_dir_separator(filename) == NULL)
{
- char *expanded;
+ char *expanded;
expanded = palloc(strlen("$libdir/plugins/") + strlen(filename) + 1);
strcpy(expanded, "$libdir/plugins/");
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 6664755fd3..9ab8c9ba97 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.170 2006/09/18 22:40:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.171 2006/10/04 00:30:02 momjian Exp $
*
*
*-------------------------------------------------------------------------
@@ -132,9 +132,9 @@ CheckMyDatabase(const char *name, bool am_superuser)
/*
* Check permissions to connect to the database.
*
- * These checks are not enforced when in standalone mode, so that
- * there is a way to recover from disabling all access to all databases,
- * for example "UPDATE pg_database SET datallowconn = false;".
+ * These checks are not enforced when in standalone mode, so that there is
+ * a way to recover from disabling all access to all databases, for
+ * example "UPDATE pg_database SET datallowconn = false;".
*
* We do not enforce them for the autovacuum process either.
*/
@@ -150,9 +150,9 @@ CheckMyDatabase(const char *name, bool am_superuser)
name)));
/*
- * Check privilege to connect to the database. (The am_superuser
- * test is redundant, but since we have the flag, might as well
- * check it and save a few cycles.)
+ * Check privilege to connect to the database. (The am_superuser test
+ * is redundant, but since we have the flag, might as well check it
+ * and save a few cycles.)
*/
if (!am_superuser &&
pg_database_aclcheck(MyDatabaseId, GetUserId(),
@@ -294,8 +294,8 @@ InitPostgres(const char *dbname, const char *username)
char *fullpath;
/*
- * Set up the global variables holding database id and path. But note
- * we won't actually try to touch the database just yet.
+ * Set up the global variables holding database id and path. But note we
+ * won't actually try to touch the database just yet.
*
* We take a shortcut in the bootstrap case, otherwise we have to look up
* the db name in pg_database.
@@ -324,8 +324,8 @@ InitPostgres(const char *dbname, const char *username)
SetDatabasePath(fullpath);
/*
- * Finish filling in the PGPROC struct, and add it to the ProcArray.
- * (We need to know MyDatabaseId before we can do this, since it's entered
+ * Finish filling in the PGPROC struct, and add it to the ProcArray. (We
+ * need to know MyDatabaseId before we can do this, since it's entered
* into the PGPROC struct.)
*
* Once I have done this, I am visible to other backends!
@@ -360,8 +360,8 @@ InitPostgres(const char *dbname, const char *username)
/*
* Initialize the relation cache and the system catalog caches. Note that
* no catalog access happens here; we only set up the hashtable structure.
- * We must do this before starting a transaction because transaction
- * abort would try to touch these hashtables.
+ * We must do this before starting a transaction because transaction abort
+ * would try to touch these hashtables.
*/
RelationCacheInitialize();
InitCatalogCache();
@@ -388,20 +388,19 @@ InitPostgres(const char *dbname, const char *username)
/*
* Now that we have a transaction, we can take locks. Take a writer's
- * lock on the database we are trying to connect to. If there is
- * a concurrently running DROP DATABASE on that database, this will
- * block us until it finishes (and has updated the flat file copy
- * of pg_database).
+ * lock on the database we are trying to connect to. If there is a
+ * concurrently running DROP DATABASE on that database, this will block us
+ * until it finishes (and has updated the flat file copy of pg_database).
*
- * Note that the lock is not held long, only until the end of this
- * startup transaction. This is OK since we are already advertising
- * our use of the database in the PGPROC array; anyone trying a DROP
- * DATABASE after this point will see us there.
+ * Note that the lock is not held long, only until the end of this startup
+ * transaction. This is OK since we are already advertising our use of
+ * the database in the PGPROC array; anyone trying a DROP DATABASE after
+ * this point will see us there.
*
* Note: use of RowExclusiveLock here is reasonable because we envision
- * our session as being a concurrent writer of the database. If we had
- * a way of declaring a session as being guaranteed-read-only, we could
- * use AccessShareLock for such sessions and thereby not conflict against
+ * our session as being a concurrent writer of the database. If we had a
+ * way of declaring a session as being guaranteed-read-only, we could use
+ * AccessShareLock for such sessions and thereby not conflict against
* CREATE DATABASE.
*/
if (!bootstrap)
@@ -415,8 +414,8 @@ InitPostgres(const char *dbname, const char *username)
*/
if (!bootstrap)
{
- Oid dbid2;
- Oid tsid2;
+ Oid dbid2;
+ Oid tsid2;
if (!FindMyDatabase(dbname, &dbid2, &tsid2) ||
dbid2 != MyDatabaseId || tsid2 != MyDatabaseTableSpace)
@@ -424,12 +423,12 @@ InitPostgres(const char *dbname, const char *username)
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist",
dbname),
- errdetail("It seems to have just been dropped or renamed.")));
+ errdetail("It seems to have just been dropped or renamed.")));
}
/*
- * Now we should be able to access the database directory safely.
- * Verify it's there and looks reasonable.
+ * Now we should be able to access the database directory safely. Verify
+ * it's there and looks reasonable.
*/
if (!bootstrap)
{
@@ -440,8 +439,8 @@ InitPostgres(const char *dbname, const char *username)
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist",
dbname),
- errdetail("The database subdirectory \"%s\" is missing.",
- fullpath)));
+ errdetail("The database subdirectory \"%s\" is missing.",
+ fullpath)));
else
ereport(FATAL,
(errcode_for_file_access(),
@@ -493,10 +492,10 @@ InitPostgres(const char *dbname, const char *username)
initialize_acl();
/*
- * Read the real pg_database row for our database, check permissions
- * and set up database-specific GUC settings. We can't do this until all
- * the database-access infrastructure is up. (Also, it wants to know if
- * the user is a superuser, so the above stuff has to happen first.)
+ * Read the real pg_database row for our database, check permissions and
+ * set up database-specific GUC settings. We can't do this until all the
+ * database-access infrastructure is up. (Also, it wants to know if the
+ * user is a superuser, so the above stuff has to happen first.)
*/
if (!bootstrap)
CheckMyDatabase(dbname, am_superuser);
diff --git a/src/backend/utils/mb/conv.c b/src/backend/utils/mb/conv.c
index deaf912ed0..5a771f5f55 100644
--- a/src/backend/utils/mb/conv.c
+++ b/src/backend/utils/mb/conv.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.60 2006/05/21 20:05:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.61 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,7 +70,7 @@ mic2latin(const unsigned char *mic, unsigned char *p, int len,
}
else
{
- int l = pg_mic_mblen(mic);
+ int l = pg_mic_mblen(mic);
if (len < l)
report_invalid_encoding(PG_MULE_INTERNAL, (const char *) mic,
@@ -217,7 +217,7 @@ mic2latin_with_table(const unsigned char *mic,
}
else
{
- int l = pg_mic_mblen(mic);
+ int l = pg_mic_mblen(mic);
if (len < l)
report_invalid_encoding(PG_MULE_INTERNAL, (const char *) mic,
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c
index 0e964c4583..326d84238c 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c,v 1.15 2006/05/30 22:12:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c,v 1.16 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -173,7 +173,7 @@ euc_tw2mic(const unsigned char *euc, unsigned char *p, int len)
(const char *) euc, len);
if (c1 == SS2)
{
- c1 = euc[1]; /* plane No. */
+ c1 = euc[1]; /* plane No. */
if (c1 == 0xa1)
*p++ = LC_CNS11643_1;
else if (c1 == 0xa2)
@@ -187,7 +187,7 @@ euc_tw2mic(const unsigned char *euc, unsigned char *p, int len)
*p++ = euc[3];
}
else
- { /* CNS11643-1 */
+ { /* CNS11643-1 */
*p++ = LC_CNS11643_1;
*p++ = c1;
*p++ = euc[1];
@@ -302,7 +302,7 @@ big52mic(const unsigned char *big5, unsigned char *p, int len)
{
*p++ = 0x9d; /* LCPRV2 */
}
- *p++ = lc; /* Plane No. */
+ *p++ = lc; /* Plane No. */
*p++ = (cnsBuf >> 8) & 0x00ff;
*p++ = cnsBuf & 0x00ff;
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c b/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
index b9f6c46c22..c311c1d3af 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.16 2006/05/30 22:12:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.17 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,4 +69,3 @@ koi8r_to_utf8(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
-
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c b/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
index d65f64f1ef..b8cbe7c7c0 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.15 2006/05/30 22:12:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.16 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,7 +64,7 @@ utf8_to_gb18030(PG_FUNCTION_ARGS)
Assert(len >= 0);
UtfToLocal(src, dest, ULmapGB18030,
- sizeof(ULmapGB18030) / sizeof(pg_utf_to_local), PG_GB18030, len);
+ sizeof(ULmapGB18030) / sizeof(pg_utf_to_local), PG_GB18030, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
index a53f99985a..668758e746 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.21 2006/07/11 18:26:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.22 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -118,12 +118,12 @@ iso8859_to_utf8(PG_FUNCTION_ARGS)
unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
- int i;
+ int i;
Assert(PG_GETARG_INT32(1) == PG_UTF8);
Assert(len >= 0);
- for (i=0;i<sizeof(maps)/sizeof(pg_conv_map);i++)
+ for (i = 0; i < sizeof(maps) / sizeof(pg_conv_map); i++)
{
if (encoding == maps[i].encoding)
{
@@ -134,7 +134,7 @@ iso8859_to_utf8(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("unexpected encoding id %d for ISO-8859 charsets", encoding)));
+ errmsg("unexpected encoding id %d for ISO-8859 charsets", encoding)));
PG_RETURN_VOID();
}
@@ -146,12 +146,12 @@ utf8_to_iso8859(PG_FUNCTION_ARGS)
unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
- int i;
+ int i;
Assert(PG_GETARG_INT32(0) == PG_UTF8);
Assert(len >= 0);
- for (i=0;i<sizeof(maps)/sizeof(pg_conv_map);i++)
+ for (i = 0; i < sizeof(maps) / sizeof(pg_conv_map); i++)
{
if (encoding == maps[i].encoding)
{
@@ -162,7 +162,7 @@ utf8_to_iso8859(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("unexpected encoding id %d for ISO-8859 charsets", encoding)));
+ errmsg("unexpected encoding id %d for ISO-8859 charsets", encoding)));
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c
index 627573b0f5..a3746d754b 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c,v 1.17 2006/05/30 22:12:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c,v 1.18 2006/10/04 00:30:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ utf8_to_iso8859_1(PG_FUNCTION_ARGS)
}
else
{
- int l = pg_utf_mblen(src);
+ int l = pg_utf_mblen(src);
if (l > len || !pg_utf8_islegal(src, l))
report_invalid_encoding(PG_UTF8, (const char *) src, len);
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c b/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
index c95919fa0a..559367701b 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c,v 1.5 2006/07/11 18:26:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c,v 1.6 2006/10/04 00:30:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,37 +68,37 @@ typedef struct
static pg_conv_map maps[] = {
{PG_WIN866, LUmapWIN866, ULmapWIN866,
sizeof(LUmapWIN866) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN866) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN866) / sizeof(pg_utf_to_local)},
{PG_WIN874, LUmapWIN874, ULmapWIN874,
sizeof(LUmapWIN874) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN874) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN874) / sizeof(pg_utf_to_local)},
{PG_WIN1250, LUmapWIN1250, ULmapWIN1250,
sizeof(LUmapWIN1250) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1250) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1250) / sizeof(pg_utf_to_local)},
{PG_WIN1251, LUmapWIN1251, ULmapWIN1251,
sizeof(LUmapWIN1251) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1251) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1251) / sizeof(pg_utf_to_local)},
{PG_WIN1252, LUmapWIN1252, ULmapWIN1252,
sizeof(LUmapWIN1252) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1252) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1252) / sizeof(pg_utf_to_local)},
{PG_WIN1253, LUmapWIN1253, ULmapWIN1253,
sizeof(LUmapWIN1253) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1253) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1253) / sizeof(pg_utf_to_local)},
{PG_WIN1254, LUmapWIN1254, ULmapWIN1254,
sizeof(LUmapWIN1254) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1254) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1254) / sizeof(pg_utf_to_local)},
{PG_WIN1255, LUmapWIN1255, ULmapWIN1255,
sizeof(LUmapWIN1255) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1255) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1255) / sizeof(pg_utf_to_local)},
{PG_WIN1256, LUmapWIN1256, ULmapWIN1256,
sizeof(LUmapWIN1256) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1256) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1256) / sizeof(pg_utf_to_local)},
{PG_WIN1257, LUmapWIN1257, ULmapWIN1257,
sizeof(LUmapWIN1257) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1257) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1257) / sizeof(pg_utf_to_local)},
{PG_WIN1258, LUmapWIN1258, ULmapWIN1258,
sizeof(LUmapWIN1258) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1258) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1258) / sizeof(pg_utf_to_local)},
};
Datum
@@ -108,12 +108,12 @@ win_to_utf8(PG_FUNCTION_ARGS)
unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
- int i;
+ int i;
Assert(PG_GETARG_INT32(1) == PG_UTF8);
Assert(len >= 0);
- for (i=0;i<sizeof(maps)/sizeof(pg_conv_map);i++)
+ for (i = 0; i < sizeof(maps) / sizeof(pg_conv_map); i++)
{
if (encoding == maps[i].encoding)
{
@@ -136,12 +136,12 @@ utf8_to_win(PG_FUNCTION_ARGS)
unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
- int i;
+ int i;
Assert(PG_GETARG_INT32(0) == PG_UTF8);
Assert(len >= 0);
- for (i=0;i<sizeof(maps)/sizeof(pg_conv_map);i++)
+ for (i = 0; i < sizeof(maps) / sizeof(pg_conv_map); i++)
{
if (encoding == maps[i].encoding)
{
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index c45d3eb778..e91c8a2a58 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -4,7 +4,7 @@
* (currently mule internal code (mic) is used)
* Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.58 2006/07/14 14:52:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.59 2006/10/04 00:30:02 momjian Exp $
*/
#include "postgres.h"
@@ -133,9 +133,9 @@ SetClientEncoding(int encoding, bool doit)
else
{
/*
- * This is the first time through, so create the context. Make
- * it a child of TopMemoryContext so that these values survive
- * across transactions.
+ * This is the first time through, so create the context. Make it a
+ * child of TopMemoryContext so that these values survive across
+ * transactions.
*/
MbProcContext = AllocSetContextCreate(TopMemoryContext,
"MbProcContext",
@@ -378,28 +378,27 @@ pg_client_to_server(const char *s, int len)
{
/*
* No conversion is possible, but we must still validate the data,
- * because the client-side code might have done string escaping
- * using the selected client_encoding. If the client encoding is
- * ASCII-safe then we just do a straight validation under that
- * encoding. For an ASCII-unsafe encoding we have a problem:
- * we dare not pass such data to the parser but we have no way
- * to convert it. We compromise by rejecting the data if it
- * contains any non-ASCII characters.
+ * because the client-side code might have done string escaping using
+ * the selected client_encoding. If the client encoding is ASCII-safe
+ * then we just do a straight validation under that encoding. For an
+ * ASCII-unsafe encoding we have a problem: we dare not pass such data
+ * to the parser but we have no way to convert it. We compromise by
+ * rejecting the data if it contains any non-ASCII characters.
*/
if (PG_VALID_BE_ENCODING(ClientEncoding->encoding))
(void) pg_verify_mbstr(ClientEncoding->encoding, s, len, false);
else
{
- int i;
+ int i;
for (i = 0; i < len; i++)
{
if (s[i] == '\0' || IS_HIGHBIT_SET(s[i]))
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
- errmsg("invalid byte value for encoding \"%s\": 0x%02x",
- pg_enc2name_tbl[PG_SQL_ASCII].name,
- (unsigned char) s[i])));
+ errmsg("invalid byte value for encoding \"%s\": 0x%02x",
+ pg_enc2name_tbl[PG_SQL_ASCII].name,
+ (unsigned char) s[i])));
}
}
return (char *) s;
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index b33cbef49f..6828fce7d3 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1,7 +1,7 @@
/*
* conversion functions between pg_wchar and multibyte streams.
* Tatsuo Ishii
- * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.57 2006/08/22 12:11:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.58 2006/10/04 00:30:02 momjian Exp $
*
* WIN1250 client encoding updated by Pavel Behal
*
@@ -40,7 +40,7 @@
* SQL/ASCII
*/
static int
-pg_ascii2wchar_with_len
+ pg_ascii2wchar_with_len
(const unsigned char *from, pg_wchar *to, int len)
{
int cnt = 0;
@@ -68,7 +68,7 @@ pg_ascii_dsplen(const unsigned char *s)
return 0;
if (*s < 0x20 || *s == 0x7f)
return -1;
-
+
return 1;
}
@@ -82,7 +82,8 @@ static int pg_euc2wchar_with_len
while (len > 0 && *from)
{
- if (*from == SS2 && len >= 2) /* JIS X 0201 (so called "1 byte KANA") */
+ if (*from == SS2 && len >= 2) /* JIS X 0201 (so called "1 byte
+ * KANA") */
{
from++;
*to = (SS2 << 8) | *from++;
@@ -95,13 +96,14 @@ static int pg_euc2wchar_with_len
*to |= *from++;
len -= 3;
}
- else if (IS_HIGHBIT_SET(*from) && len >= 2) /* JIS X 0208 KANJI */
+ else if (IS_HIGHBIT_SET(*from) && len >= 2) /* JIS X 0208 KANJI */
{
*to = *from++ << 8;
*to |= *from++;
len -= 2;
}
- else /* must be ASCII */
+ else
+ /* must be ASCII */
{
*to = *from++;
len--;
@@ -222,7 +224,7 @@ static int pg_euccn2wchar_with_len
*to |= *from++;
len -= 3;
}
- else if (IS_HIGHBIT_SET(*from) && len >= 2) /* code set 1 */
+ else if (IS_HIGHBIT_SET(*from) && len >= 2) /* code set 1 */
{
*to = *from++ << 8;
*to |= *from++;
@@ -278,7 +280,7 @@ static int pg_euctw2wchar_with_len
if (*from == SS2 && len >= 4) /* code set 2 */
{
from++;
- *to = (SS2 << 24) | (*from++ << 16) ;
+ *to = (SS2 << 24) | (*from++ << 16);
*to |= *from++ << 8;
*to |= *from++;
len -= 4;
@@ -290,7 +292,7 @@ static int pg_euctw2wchar_with_len
*to |= *from++;
len -= 3;
}
- else if (IS_HIGHBIT_SET(*from) && len >= 2) /* code set 2 */
+ else if (IS_HIGHBIT_SET(*from) && len >= 2) /* code set 2 */
{
*to = *from++ << 8;
*to |= *from++;
@@ -455,7 +457,7 @@ struct mbinterval
/* auxiliary function for binary search in interval table */
static int
-mbbisearch(pg_wchar ucs, const struct mbinterval *table, int max)
+mbbisearch(pg_wchar ucs, const struct mbinterval * table, int max)
{
int min = 0;
int mid;
@@ -677,14 +679,14 @@ pg_mule_mblen(const unsigned char *s)
else if (IS_LCPRV2(*s))
len = 4;
else
- len = 1; /* assume ASCII */
+ len = 1; /* assume ASCII */
return len;
}
static int
pg_mule_dsplen(const unsigned char *s)
{
- int len;
+ int len;
if (IS_LC1(*s))
len = 1;
@@ -695,7 +697,7 @@ pg_mule_dsplen(const unsigned char *s)
else if (IS_LCPRV2(*s))
len = 2;
else
- len = 1; /* assume ASCII */
+ len = 1; /* assume ASCII */
return len;
}
@@ -739,11 +741,11 @@ pg_sjis_mblen(const unsigned char *s)
int len;
if (*s >= 0xa1 && *s <= 0xdf)
- len = 1; /* 1 byte kana? */
+ len = 1; /* 1 byte kana? */
else if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = 1; /* should be ASCII */
+ len = 1; /* should be ASCII */
return len;
}
@@ -753,11 +755,11 @@ pg_sjis_dsplen(const unsigned char *s)
int len;
if (*s >= 0xa1 && *s <= 0xdf)
- len = 1; /* 1 byte kana? */
+ len = 1; /* 1 byte kana? */
else if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = pg_ascii_dsplen(s); /* should be ASCII */
+ len = pg_ascii_dsplen(s); /* should be ASCII */
return len;
}
@@ -770,9 +772,9 @@ pg_big5_mblen(const unsigned char *s)
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = 1; /* should be ASCII */
+ len = 1; /* should be ASCII */
return len;
}
@@ -782,9 +784,9 @@ pg_big5_dsplen(const unsigned char *s)
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = pg_ascii_dsplen(s); /* should be ASCII */
+ len = pg_ascii_dsplen(s); /* should be ASCII */
return len;
}
@@ -797,9 +799,9 @@ pg_gbk_mblen(const unsigned char *s)
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = 1; /* should be ASCII */
+ len = 1; /* should be ASCII */
return len;
}
@@ -809,9 +811,9 @@ pg_gbk_dsplen(const unsigned char *s)
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = pg_ascii_dsplen(s); /* should be ASCII */
+ len = pg_ascii_dsplen(s); /* should be ASCII */
return len;
}
@@ -824,9 +826,9 @@ pg_uhc_mblen(const unsigned char *s)
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* 2byte? */
+ len = 2; /* 2byte? */
else
- len = 1; /* should be ASCII */
+ len = 1; /* should be ASCII */
return len;
}
@@ -836,9 +838,9 @@ pg_uhc_dsplen(const unsigned char *s)
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* 2byte? */
+ len = 2; /* 2byte? */
else
- len = pg_ascii_dsplen(s); /* should be ASCII */
+ len = pg_ascii_dsplen(s); /* should be ASCII */
return len;
}
@@ -852,7 +854,7 @@ pg_gb18030_mblen(const unsigned char *s)
int len;
if (!IS_HIGHBIT_SET(*s))
- len = 1; /* ASCII */
+ len = 1; /* ASCII */
else
{
if ((*(s + 1) >= 0x40 && *(s + 1) <= 0x7e) || (*(s + 1) >= 0x80 && *(s + 1) <= 0xfe))
@@ -873,7 +875,7 @@ pg_gb18030_dsplen(const unsigned char *s)
if (IS_HIGHBIT_SET(*s))
len = 2;
else
- len = pg_ascii_dsplen(s); /* ASCII */
+ len = pg_ascii_dsplen(s); /* ASCII */
return len;
}
@@ -906,13 +908,14 @@ static int
pg_eucjp_verifier(const unsigned char *s, int len)
{
int l;
- unsigned char c1, c2;
+ unsigned char c1,
+ c2;
c1 = *s++;
switch (c1)
{
- case SS2: /* JIS X 0201 */
+ case SS2: /* JIS X 0201 */
l = 2;
if (l > len)
return -1;
@@ -921,7 +924,7 @@ pg_eucjp_verifier(const unsigned char *s, int len)
return -1;
break;
- case SS3: /* JIS X 0212 */
+ case SS3: /* JIS X 0212 */
l = 3;
if (l > len)
return -1;
@@ -945,7 +948,8 @@ pg_eucjp_verifier(const unsigned char *s, int len)
if (!IS_EUC_RANGE_VALID(c2))
return -1;
}
- else /* must be ASCII */
+ else
+ /* must be ASCII */
{
l = 1;
}
@@ -959,7 +963,8 @@ static int
pg_euckr_verifier(const unsigned char *s, int len)
{
int l;
- unsigned char c1, c2;
+ unsigned char c1,
+ c2;
c1 = *s++;
@@ -974,7 +979,8 @@ pg_euckr_verifier(const unsigned char *s, int len)
if (!IS_EUC_RANGE_VALID(c2))
return -1;
}
- else /* must be ASCII */
+ else
+ /* must be ASCII */
{
l = 1;
}
@@ -989,13 +995,14 @@ static int
pg_euctw_verifier(const unsigned char *s, int len)
{
int l;
- unsigned char c1, c2;
+ unsigned char c1,
+ c2;
c1 = *s++;
switch (c1)
{
- case SS2: /* CNS 11643 Plane 1-7 */
+ case SS2: /* CNS 11643 Plane 1-7 */
l = 4;
if (l > len)
return -1;
@@ -1010,7 +1017,7 @@ pg_euctw_verifier(const unsigned char *s, int len)
return -1;
break;
- case SS3: /* unused */
+ case SS3: /* unused */
return -1;
default:
@@ -1024,7 +1031,8 @@ pg_euctw_verifier(const unsigned char *s, int len)
if (!IS_EUC_RANGE_VALID(c2))
return -1;
}
- else /* must be ASCII */
+ else
+ /* must be ASCII */
{
l = 1;
}
@@ -1036,7 +1044,8 @@ pg_euctw_verifier(const unsigned char *s, int len)
static int
pg_johab_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
unsigned char c;
l = mbl = pg_johab_mblen(s);
@@ -1059,7 +1068,8 @@ pg_johab_verifier(const unsigned char *s, int len)
static int
pg_mule_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
unsigned char c;
l = mbl = pg_mule_mblen(s);
@@ -1085,8 +1095,10 @@ pg_latin1_verifier(const unsigned char *s, int len)
static int
pg_sjis_verifier(const unsigned char *s, int len)
{
- int l, mbl;
- unsigned char c1, c2;
+ int l,
+ mbl;
+ unsigned char c1,
+ c2;
l = mbl = pg_sjis_mblen(s);
@@ -1106,7 +1118,8 @@ pg_sjis_verifier(const unsigned char *s, int len)
static int
pg_big5_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
l = mbl = pg_big5_mblen(s);
@@ -1125,7 +1138,8 @@ pg_big5_verifier(const unsigned char *s, int len)
static int
pg_gbk_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
l = mbl = pg_gbk_mblen(s);
@@ -1144,7 +1158,8 @@ pg_gbk_verifier(const unsigned char *s, int len)
static int
pg_uhc_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
l = mbl = pg_uhc_mblen(s);
@@ -1163,7 +1178,8 @@ pg_uhc_verifier(const unsigned char *s, int len)
static int
pg_gb18030_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
l = mbl = pg_gb18030_mblen(s);
@@ -1182,7 +1198,7 @@ pg_gb18030_verifier(const unsigned char *s, int len)
static int
pg_utf8_verifier(const unsigned char *s, int len)
{
- int l = pg_utf_mblen(s);
+ int l = pg_utf_mblen(s);
if (len < l)
return -1;
@@ -1270,45 +1286,45 @@ pg_utf8_islegal(const unsigned char *source, int length)
*-------------------------------------------------------------------
*/
pg_wchar_tbl pg_wchar_table[] = {
- {pg_ascii2wchar_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* 0; PG_SQL_ASCII */
- {pg_eucjp2wchar_with_len, pg_eucjp_mblen, pg_eucjp_dsplen, pg_eucjp_verifier, 3}, /* 1; PG_EUC_JP */
- {pg_euccn2wchar_with_len, pg_euccn_mblen, pg_euccn_dsplen, pg_euccn_verifier, 3}, /* 2; PG_EUC_CN */
- {pg_euckr2wchar_with_len, pg_euckr_mblen, pg_euckr_dsplen, pg_euckr_verifier, 3}, /* 3; PG_EUC_KR */
- {pg_euctw2wchar_with_len, pg_euctw_mblen, pg_euctw_dsplen, pg_euctw_verifier, 3}, /* 4; PG_EUC_TW */
- {pg_johab2wchar_with_len, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* 5; PG_JOHAB */
+ {pg_ascii2wchar_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* 0; PG_SQL_ASCII */
+ {pg_eucjp2wchar_with_len, pg_eucjp_mblen, pg_eucjp_dsplen, pg_eucjp_verifier, 3}, /* 1; PG_EUC_JP */
+ {pg_euccn2wchar_with_len, pg_euccn_mblen, pg_euccn_dsplen, pg_euccn_verifier, 3}, /* 2; PG_EUC_CN */
+ {pg_euckr2wchar_with_len, pg_euckr_mblen, pg_euckr_dsplen, pg_euckr_verifier, 3}, /* 3; PG_EUC_KR */
+ {pg_euctw2wchar_with_len, pg_euctw_mblen, pg_euctw_dsplen, pg_euctw_verifier, 3}, /* 4; PG_EUC_TW */
+ {pg_johab2wchar_with_len, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* 5; PG_JOHAB */
{pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, pg_utf8_verifier, 4}, /* 6; PG_UTF8 */
- {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, pg_mule_verifier, 3}, /* 7; PG_MULE_INTERNAL */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 8; PG_LATIN1 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 9; PG_LATIN2 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 10; PG_LATIN3 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 11; PG_LATIN4 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 12; PG_LATIN5 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 13; PG_LATIN6 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 14; PG_LATIN7 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 15; PG_LATIN8 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 16; PG_LATIN9 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 17; PG_LATIN10 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 18; PG_WIN1256 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 19; PG_WIN1258 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 20; PG_WIN874 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 21; PG_KOI8 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 22; PG_WIN1251 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 22; PG_WIN1252 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 23; PG_WIN866 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 24; ISO-8859-5 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 25; ISO-8859-6 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 26; ISO-8859-7 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 27; ISO-8859-8 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 28; PG_WIN1250 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 29; PG_WIN1253 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 30; PG_WIN1254 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 31; PG_WIN1255 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 32; PG_WIN1257 */
- {0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* 33; PG_SJIS */
- {0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* 34; PG_BIG5 */
+ {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, pg_mule_verifier, 3}, /* 7; PG_MULE_INTERNAL */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 8; PG_LATIN1 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 9; PG_LATIN2 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 10; PG_LATIN3 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 11; PG_LATIN4 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 12; PG_LATIN5 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 13; PG_LATIN6 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 14; PG_LATIN7 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 15; PG_LATIN8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 16; PG_LATIN9 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 17; PG_LATIN10 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 18; PG_WIN1256 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 19; PG_WIN1258 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 20; PG_WIN874 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 21; PG_KOI8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 22; PG_WIN1251 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 22; PG_WIN1252 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 23; PG_WIN866 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 24; ISO-8859-5 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 25; ISO-8859-6 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 26; ISO-8859-7 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 27; ISO-8859-8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 28; PG_WIN1250 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 29; PG_WIN1253 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 30; PG_WIN1254 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 31; PG_WIN1255 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 32; PG_WIN1257 */
+ {0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* 33; PG_SJIS */
+ {0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* 34; PG_BIG5 */
{0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2}, /* 35; PG_GBK */
{0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2}, /* 36; PG_UHC */
- {0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 2} /* 37; PG_GB18030 */
+ {0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 2} /* 37; PG_GB18030 */
};
/* returns the byte length of a word for mule internal code */
@@ -1358,8 +1374,8 @@ pg_encoding_verifymb(int encoding, const char *mbstr, int len)
return ((encoding >= 0 &&
encoding < sizeof(pg_wchar_table) / sizeof(pg_wchar_tbl)) ?
- ((*pg_wchar_table[encoding].mbverify) ((const unsigned char *) mbstr, len)) :
- ((*pg_wchar_table[PG_SQL_ASCII].mbverify) ((const unsigned char *) mbstr, len)));
+ ((*pg_wchar_table[encoding].mbverify) ((const unsigned char *) mbstr, len)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].mbverify) ((const unsigned char *) mbstr, len)));
}
/*
@@ -1487,9 +1503,9 @@ report_invalid_encoding(int encoding, const char *mbstr, int len)
errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
pg_enc2name_tbl[encoding].name,
buf),
- errhint("This error can also happen if the byte sequence does not "
- "match the encoding expected by the server, which is controlled "
- "by \"client_encoding\".")));
+ errhint("This error can also happen if the byte sequence does not "
+ "match the encoding expected by the server, which is controlled "
+ "by \"client_encoding\".")));
}
/*
@@ -1516,10 +1532,10 @@ report_untranslatable_char(int src_encoding, int dest_encoding,
ereport(ERROR,
(errcode(ERRCODE_UNTRANSLATABLE_CHARACTER),
- errmsg("character 0x%s of encoding \"%s\" has no equivalent in \"%s\"",
- buf,
- pg_enc2name_tbl[src_encoding].name,
- pg_enc2name_tbl[dest_encoding].name)));
+ errmsg("character 0x%s of encoding \"%s\" has no equivalent in \"%s\"",
+ buf,
+ pg_enc2name_tbl[src_encoding].name,
+ pg_enc2name_tbl[dest_encoding].name)));
}
#endif
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index d0eb77a4ad..74092a8f78 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -10,7 +10,7 @@
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.353 2006/10/03 21:11:54 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.354 2006/10/04 00:30:03 momjian Exp $
*
*--------------------------------------------------------------------
*/
@@ -204,6 +204,7 @@ static char *log_error_verbosity_str;
static char *log_statement_str;
static char *log_min_error_statement_str;
static char *log_destination_string;
+
#ifdef HAVE_SYSLOG
static char *syslog_facility_str;
static char *syslog_ident_str;
@@ -220,7 +221,7 @@ static char *locale_ctype;
static char *regex_flavor_string;
static char *server_encoding_string;
static char *server_version_string;
-static int server_version_num;
+static int server_version_num;
static char *timezone_string;
static char *timezone_abbreviations_string;
static char *XactIsoLevel_string;
@@ -992,9 +993,9 @@ static struct config_bool ConfigureNamesBool[] =
{
{"allow_system_table_mods", PGC_POSTMASTER, DEVELOPER_OPTIONS,
- gettext_noop("Allows modifications of the structure of system tables."),
- NULL,
- GUC_NOT_IN_SAMPLE
+ gettext_noop("Allows modifications of the structure of system tables."),
+ NULL,
+ GUC_NOT_IN_SAMPLE
},
&allowSystemTableMods,
false, NULL, NULL
@@ -1002,10 +1003,10 @@ static struct config_bool ConfigureNamesBool[] =
{
{"ignore_system_indexes", PGC_BACKEND, DEVELOPER_OPTIONS,
- gettext_noop("Disables reading from system indexes."),
- gettext_noop("It does not prevent updating the indexes, so it is safe "
- "to use. The worst consequence is slowness."),
- GUC_NOT_IN_SAMPLE
+ gettext_noop("Disables reading from system indexes."),
+ gettext_noop("It does not prevent updating the indexes, so it is safe "
+ "to use. The worst consequence is slowness."),
+ GUC_NOT_IN_SAMPLE
},
&IgnoreSystemIndexes,
false, NULL, NULL
@@ -1022,19 +1023,19 @@ static struct config_int ConfigureNamesInt[] =
{
{
{"archive_timeout", PGC_SIGHUP, WAL_SETTINGS,
- gettext_noop("Forces a switch to the next xlog file if a "
- "new file has not been started within N seconds."),
- NULL,
- GUC_UNIT_S
+ gettext_noop("Forces a switch to the next xlog file if a "
+ "new file has not been started within N seconds."),
+ NULL,
+ GUC_UNIT_S
},
&XLogArchiveTimeout,
0, 0, INT_MAX, NULL, NULL
},
{
{"post_auth_delay", PGC_BACKEND, DEVELOPER_OPTIONS,
- gettext_noop("Waits N seconds on connection startup after authentication."),
- gettext_noop("This allows attaching a debugger to the process."),
- GUC_NOT_IN_SAMPLE | GUC_UNIT_S
+ gettext_noop("Waits N seconds on connection startup after authentication."),
+ gettext_noop("This allows attaching a debugger to the process."),
+ GUC_NOT_IN_SAMPLE | GUC_UNIT_S
},
&PostAuthDelay,
0, 0, INT_MAX, NULL, NULL
@@ -1863,7 +1864,7 @@ static struct config_string ConfigureNamesString[] =
{"default_tablespace", PGC_USERSET, CLIENT_CONN_STATEMENT,
gettext_noop("Sets the default tablespace to create tables and indexes in."),
gettext_noop("An empty string selects the database's default tablespace."),
- GUC_IS_NAME
+ GUC_IS_NAME
},
&default_tablespace,
"", assign_default_tablespace, NULL
@@ -2295,7 +2296,7 @@ static void ReportGUCOption(struct config_generic * record);
static void ShowGUCConfigOption(const char *name, DestReceiver *dest);
static void ShowAllGUCConfig(DestReceiver *dest);
static char *_ShowOption(struct config_generic * record, bool use_units);
-static bool is_newvalue_equal(struct config_generic *record, const char *newvalue);
+static bool is_newvalue_equal(struct config_generic * record, const char *newvalue);
/*
@@ -3584,7 +3585,7 @@ parse_int(const char *value, int *result, int flags)
if ((flags & GUC_UNIT_MEMORY) && endptr != value)
{
- bool used = false;
+ bool used = false;
while (*endptr == ' ')
endptr++;
@@ -3612,10 +3613,10 @@ parse_int(const char *value, int *result, int flags)
switch (flags & GUC_UNIT_MEMORY)
{
case GUC_UNIT_BLOCKS:
- val /= (BLCKSZ/1024);
+ val /= (BLCKSZ / 1024);
break;
case GUC_UNIT_XBLOCKS:
- val /= (XLOG_BLCKSZ/1024);
+ val /= (XLOG_BLCKSZ / 1024);
break;
}
}
@@ -3623,7 +3624,7 @@ parse_int(const char *value, int *result, int flags)
if ((flags & GUC_UNIT_TIME) && endptr != value)
{
- bool used = false;
+ bool used = false;
while (*endptr == ' ')
endptr++;
@@ -3662,12 +3663,12 @@ parse_int(const char *value, int *result, int flags)
{
switch (flags & GUC_UNIT_TIME)
{
- case GUC_UNIT_S:
- val /= MS_PER_S;
- break;
- case GUC_UNIT_MIN:
- val /= MS_PER_MIN;
- break;
+ case GUC_UNIT_S:
+ val /= MS_PER_S;
+ break;
+ case GUC_UNIT_MIN:
+ val /= MS_PER_MIN;
+ break;
}
}
}
@@ -4173,9 +4174,10 @@ set_config_option(const char *name, const char *value,
newval = guc_strdup(elevel, value);
if (newval == NULL)
return false;
+
/*
- * The only sort of "parsing" check we need to do is
- * apply truncation if GUC_IS_NAME.
+ * The only sort of "parsing" check we need to do is apply
+ * truncation if GUC_IS_NAME.
*/
if (conf->gen.flags & GUC_IS_NAME)
truncate_identifier(newval, strlen(newval), true);
@@ -4988,11 +4990,11 @@ GetConfigOptionByNum(int varnum, const char **values, bool *noshow)
values[2] = "kB";
break;
case GUC_UNIT_BLOCKS:
- snprintf(buf, sizeof(buf), "%dkB", BLCKSZ/1024);
+ snprintf(buf, sizeof(buf), "%dkB", BLCKSZ / 1024);
values[2] = buf;
break;
case GUC_UNIT_XBLOCKS:
- snprintf(buf, sizeof(buf), "%dkB", XLOG_BLCKSZ/1024);
+ snprintf(buf, sizeof(buf), "%dkB", XLOG_BLCKSZ / 1024);
values[2] = buf;
break;
case GUC_UNIT_MS:
@@ -5271,18 +5273,18 @@ _ShowOption(struct config_generic * record, bool use_units)
val = (*conf->show_hook) ();
else
{
- char unit[4];
- int result = *conf->variable;
+ char unit[4];
+ int result = *conf->variable;
if (use_units && result > 0 && (record->flags & GUC_UNIT_MEMORY))
{
switch (record->flags & GUC_UNIT_MEMORY)
{
case GUC_UNIT_BLOCKS:
- result *= BLCKSZ/1024;
+ result *= BLCKSZ / 1024;
break;
case GUC_UNIT_XBLOCKS:
- result *= XLOG_BLCKSZ/1024;
+ result *= XLOG_BLCKSZ / 1024;
break;
}
@@ -5342,7 +5344,7 @@ _ShowOption(struct config_generic * record, bool use_units)
strcpy(unit, "");
snprintf(buffer, sizeof(buffer), "%d%s",
- (int)result, unit);
+ (int) result, unit);
val = buffer;
}
}
@@ -5387,37 +5389,37 @@ _ShowOption(struct config_generic * record, bool use_units)
static bool
-is_newvalue_equal(struct config_generic *record, const char *newvalue)
+is_newvalue_equal(struct config_generic * record, const char *newvalue)
{
switch (record->vartype)
{
case PGC_BOOL:
- {
- struct config_bool *conf = (struct config_bool *) record;
- bool newval;
+ {
+ struct config_bool *conf = (struct config_bool *) record;
+ bool newval;
- return parse_bool(newvalue, &newval) && *conf->variable == newval;
- }
+ return parse_bool(newvalue, &newval) && *conf->variable == newval;
+ }
case PGC_INT:
- {
- struct config_int *conf = (struct config_int *) record;
- int newval;
+ {
+ struct config_int *conf = (struct config_int *) record;
+ int newval;
- return parse_int(newvalue, &newval, record->flags) && *conf->variable == newval;
- }
+ return parse_int(newvalue, &newval, record->flags) && *conf->variable == newval;
+ }
case PGC_REAL:
- {
- struct config_real *conf = (struct config_real *) record;
- double newval;
+ {
+ struct config_real *conf = (struct config_real *) record;
+ double newval;
- return parse_real(newvalue, &newval) && *conf->variable == newval;
- }
+ return parse_real(newvalue, &newval) && *conf->variable == newval;
+ }
case PGC_STRING:
- {
- struct config_string *conf = (struct config_string *) record;
+ {
+ struct config_string *conf = (struct config_string *) record;
- return strcmp(*conf->variable, newvalue) == 0;
- }
+ return strcmp(*conf->variable, newvalue) == 0;
+ }
}
return false;
@@ -6251,7 +6253,7 @@ assign_debug_assertions(bool newval, bool doit, GucSource source)
if (newval)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("assertion checking is not supported by this build")));
+ errmsg("assertion checking is not supported by this build")));
#endif
return true;
}
@@ -6338,12 +6340,11 @@ static const char *
assign_backslash_quote(const char *newval, bool doit, GucSource source)
{
BackslashQuoteType bq;
- bool bqbool;
+ bool bqbool;
/*
- * Although only "on", "off", and "safe_encoding" are documented,
- * we use parse_bool so we can accept all the likely variants of
- * "on" and "off".
+ * Although only "on", "off", and "safe_encoding" are documented, we use
+ * parse_bool so we can accept all the likely variants of "on" and "off".
*/
if (pg_strcasecmp(newval, "safe_encoding") == 0)
bq = BACKSLASH_QUOTE_SAFE_ENCODING;
@@ -6367,14 +6368,14 @@ assign_timezone_abbreviations(const char *newval, bool doit, GucSource source)
* The powerup value shown above for timezone_abbreviations is "UNKNOWN".
* When we see this we just do nothing. If this value isn't overridden
* from the config file then pg_timezone_abbrev_initialize() will
- * eventually replace it with "Default". This hack has two purposes:
- * to avoid wasting cycles loading values that might soon be overridden
- * from the config file, and to avoid trying to read the timezone abbrev
- * files during InitializeGUCOptions(). The latter doesn't work in an
- * EXEC_BACKEND subprocess because my_exec_path hasn't been set yet and
- * so we can't locate PGSHAREDIR. (Essentially the same hack is used
- * to delay initializing TimeZone ... if we have any more, we should
- * try to clean up and centralize this mechanism ...)
+ * eventually replace it with "Default". This hack has two purposes: to
+ * avoid wasting cycles loading values that might soon be overridden from
+ * the config file, and to avoid trying to read the timezone abbrev files
+ * during InitializeGUCOptions(). The latter doesn't work in an
+ * EXEC_BACKEND subprocess because my_exec_path hasn't been set yet and so
+ * we can't locate PGSHAREDIR. (Essentially the same hack is used to
+ * delay initializing TimeZone ... if we have any more, we should try to
+ * clean up and centralize this mechanism ...)
*/
if (strcmp(newval, "UNKNOWN") == 0)
{
@@ -6385,11 +6386,11 @@ assign_timezone_abbreviations(const char *newval, bool doit, GucSource source)
if (timezone_abbreviations_string == NULL ||
strcmp(timezone_abbreviations_string, newval) != 0)
{
- int elevel;
+ int elevel;
/*
* If reading config file, only the postmaster should bleat loudly
- * about problems. Otherwise, it's just this one process doing it,
+ * about problems. Otherwise, it's just this one process doing it,
* and we use WARNING message level.
*/
if (source == PGC_S_FILE)
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index f6c4c588b2..38d98634f1 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -5,7 +5,7 @@
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.32 2006/09/27 18:40:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.33 2006/10/04 00:30:04 momjian Exp $
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
* various details abducted from various places
@@ -31,7 +31,7 @@
#include "utils/ps_status.h"
extern char **environ;
-bool update_process_title = true;
+bool update_process_title = true;
/*
@@ -287,7 +287,7 @@ set_ps_display(const char *activity, bool force)
if (!force && !update_process_title)
return;
-
+
#ifndef PS_USE_NONE
/* no ps display for stand-alone backend */
if (!IsUnderPostmaster)
@@ -336,8 +336,8 @@ set_ps_display(const char *activity, bool force)
#ifdef PS_USE_WIN32
{
/*
- * Win32 does not support showing any changed arguments. To make it
- * at all possible to track which backend is doing what, we create a
+ * Win32 does not support showing any changed arguments. To make it at
+ * all possible to track which backend is doing what, we create a
* named object that can be viewed with for example Process Explorer.
*/
static HANDLE ident_handle = INVALID_HANDLE_VALUE;
@@ -351,7 +351,6 @@ set_ps_display(const char *activity, bool force)
ident_handle = CreateEvent(NULL, TRUE, FALSE, name);
}
#endif /* PS_USE_WIN32 */
-
#endif /* not PS_USE_NONE */
}
diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c
index 1a092ca242..1eff2e401b 100644
--- a/src/backend/utils/misc/tzparser.c
+++ b/src/backend/utils/misc/tzparser.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/tzparser.c,v 1.1 2006/07/25 03:51:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/tzparser.c,v 1.2 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,11 +35,11 @@ static int tz_elevel; /* to avoid passing this around a lot */
static bool validateTzEntry(tzEntry *tzentry);
static bool splitTzLine(const char *filename, int lineno,
- char *line, tzEntry *tzentry);
-static int addToArray(tzEntry **base, int *arraysize, int n,
- tzEntry *entry, bool override);
-static int ParseTzFile(const char *filename, int depth,
- tzEntry **base, int *arraysize, int n);
+ char *line, tzEntry *tzentry);
+static int addToArray(tzEntry **base, int *arraysize, int n,
+ tzEntry *entry, bool override);
+static int ParseTzFile(const char *filename, int depth,
+ tzEntry **base, int *arraysize, int n);
/*
@@ -53,7 +53,8 @@ validateTzEntry(tzEntry *tzentry)
unsigned char *p;
/*
- * Check restrictions imposed by datetkntbl storage format (see datetime.c)
+ * Check restrictions imposed by datetkntbl storage format (see
+ * datetime.c)
*/
if (strlen(tzentry->abbrev) > TOKMAXLEN)
{
@@ -77,8 +78,8 @@ validateTzEntry(tzEntry *tzentry)
/*
* Sanity-check the offset: shouldn't exceed 14 hours
*/
- if (tzentry->offset > 14*60*60 ||
- tzentry->offset < -14*60*60)
+ if (tzentry->offset > 14 * 60 * 60 ||
+ tzentry->offset < -14 * 60 * 60)
{
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -105,11 +106,11 @@ validateTzEntry(tzEntry *tzentry)
static bool
splitTzLine(const char *filename, int lineno, char *line, tzEntry *tzentry)
{
- char *abbrev;
- char *offset;
- char *offset_endptr;
- char *remain;
- char *is_dst;
+ char *abbrev;
+ char *offset;
+ char *offset_endptr;
+ char *remain;
+ char *is_dst;
tzentry->lineno = lineno;
tzentry->filename = filename;
@@ -130,8 +131,8 @@ splitTzLine(const char *filename, int lineno, char *line, tzEntry *tzentry)
{
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("missing time zone offset in time zone file \"%s\", line %d",
- filename, lineno)));
+ errmsg("missing time zone offset in time zone file \"%s\", line %d",
+ filename, lineno)));
return false;
}
tzentry->offset = strtol(offset, &offset_endptr, 10);
@@ -157,7 +158,7 @@ splitTzLine(const char *filename, int lineno, char *line, tzEntry *tzentry)
remain = is_dst;
}
- if (!remain) /* no more non-whitespace chars */
+ if (!remain) /* no more non-whitespace chars */
return true;
if (remain[0] != '#') /* must be a comment */
@@ -186,23 +187,23 @@ static int
addToArray(tzEntry **base, int *arraysize, int n,
tzEntry *entry, bool override)
{
- tzEntry* arrayptr;
+ tzEntry *arrayptr;
int low;
int high;
/*
- * Search the array for a duplicate; as a useful side effect, the array
- * is maintained in sorted order. We use strcmp() to ensure we match
- * the sort order datetime.c expects.
+ * Search the array for a duplicate; as a useful side effect, the array is
+ * maintained in sorted order. We use strcmp() to ensure we match the
+ * sort order datetime.c expects.
*/
arrayptr = *base;
low = 0;
- high = n-1;
+ high = n - 1;
while (low <= high)
{
- int mid = (low + high) >> 1;
- tzEntry *midptr = arrayptr + mid;
- int cmp;
+ int mid = (low + high) >> 1;
+ tzEntry *midptr = arrayptr + mid;
+ int cmp;
cmp = strcmp(entry->abbrev, midptr->abbrev);
if (cmp < 0)
@@ -214,7 +215,7 @@ addToArray(tzEntry **base, int *arraysize, int n,
/*
* Found a duplicate entry; complain unless it's the same.
*/
- if (midptr->offset == entry->offset &&
+ if (midptr->offset == entry->offset &&
midptr->is_dst == entry->is_dst)
{
/* return unchanged array */
@@ -230,8 +231,8 @@ addToArray(tzEntry **base, int *arraysize, int n,
/* same abbrev but something is different, complain */
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("time zone abbreviation \"%s\" is multiply defined",
- entry->abbrev),
+ errmsg("time zone abbreviation \"%s\" is multiply defined",
+ entry->abbrev),
errdetail("Time zone file \"%s\", line %d conflicts with file \"%s\", line %d.",
midptr->filename, midptr->lineno,
entry->filename, entry->lineno)));
@@ -257,7 +258,7 @@ addToArray(tzEntry **base, int *arraysize, int n,
/* Must dup the abbrev to ensure it survives */
arrayptr->abbrev = pstrdup(entry->abbrev);
- return n+1;
+ return n + 1;
}
/*
@@ -275,15 +276,15 @@ static int
ParseTzFile(const char *filename, int depth,
tzEntry **base, int *arraysize, int n)
{
- char share_path[MAXPGPATH];
- char file_path[MAXPGPATH];
- FILE *tzFile;
- char tzbuf[1024];
- char *line;
- tzEntry tzentry;
- int lineno = 0;
- bool override = false;
- const char *p;
+ char share_path[MAXPGPATH];
+ char file_path[MAXPGPATH];
+ FILE *tzFile;
+ char tzbuf[1024];
+ char *line;
+ tzEntry tzentry;
+ int lineno = 0;
+ bool override = false;
+ const char *p;
/*
* We enforce that the filename is all alpha characters. This may be
@@ -299,23 +300,23 @@ ParseTzFile(const char *filename, int depth,
if (depth > 0)
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid time zone file name \"%s\"",
- filename)));
+ errmsg("invalid time zone file name \"%s\"",
+ filename)));
return -1;
}
}
/*
- * The maximal recursion depth is a pretty arbitrary setting.
- * It is hard to imagine that someone needs more than 3 levels so stick
- * with this conservative setting until someone complains.
+ * The maximal recursion depth is a pretty arbitrary setting. It is hard
+ * to imagine that someone needs more than 3 levels so stick with this
+ * conservative setting until someone complains.
*/
if (depth > 3)
{
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("time zone file recursion limit exceeded in file \"%s\"",
- filename)));
+ errmsg("time zone file recursion limit exceeded in file \"%s\"",
+ filename)));
return -1;
}
@@ -350,13 +351,13 @@ ParseTzFile(const char *filename, int depth,
/* else we're at EOF after all */
break;
}
- if (strlen(tzbuf) == sizeof(tzbuf)-1)
+ if (strlen(tzbuf) == sizeof(tzbuf) - 1)
{
/* the line is too long for tzbuf */
ereport(tz_elevel,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("line is too long in time zone file \"%s\", line %d",
- filename, lineno)));
+ errmsg("line is too long in time zone file \"%s\", line %d",
+ filename, lineno)));
return -1;
}
@@ -365,15 +366,15 @@ ParseTzFile(const char *filename, int depth,
while (*line && isspace((unsigned char) *line))
line++;
- if (*line == '\0') /* empty line */
+ if (*line == '\0') /* empty line */
continue;
- if (*line == '#') /* comment line */
+ if (*line == '#') /* comment line */
continue;
if (pg_strncasecmp(line, "@INCLUDE", strlen("@INCLUDE")) == 0)
{
/* pstrdup so we can use filename in result data structure */
- char* includeFile = pstrdup(line + strlen("@INCLUDE"));
+ char *includeFile = pstrdup(line + strlen("@INCLUDE"));
includeFile = strtok(includeFile, WHITESPACE);
if (!includeFile || !*includeFile)
@@ -425,15 +426,15 @@ load_tzoffsets(const char *filename, bool doit, int elevel)
{
MemoryContext tmpContext;
MemoryContext oldContext;
- tzEntry *array;
+ tzEntry *array;
int arraysize;
int n;
tz_elevel = elevel;
/*
- * Create a temp memory context to work in. This makes it easy to
- * clean up afterwards.
+ * Create a temp memory context to work in. This makes it easy to clean
+ * up afterwards.
*/
tmpContext = AllocSetContextCreate(CurrentMemoryContext,
"TZParserMemory",
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 5762607b9e..7f525891f6 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.67 2006/06/28 22:05:37 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.68 2006/10/04 00:30:04 momjian Exp $
*
* NOTE:
* This is a new (Feb. 05, 1999) implementation of the allocation set
@@ -883,7 +883,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > ALLOC_CHUNK_LIMIT)
{
/*
- * The chunk must have been allocated as a single-chunk block. Find
+ * The chunk must have been allocated as a single-chunk block. Find
* the containing block and use realloc() to make it bigger with
* minimum space wastage.
*/
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 883f075eec..7fabe243ac 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.95 2006/09/27 18:40:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.96 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,7 +69,7 @@ do { \
PortalHashEnt *hentry; bool found; \
\
hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
- (NAME), HASH_ENTER, &found); \
+ (NAME), HASH_ENTER, &found); \
if (found) \
elog(ERROR, "duplicate portal name"); \
hentry->portal = PORTAL; \
@@ -145,11 +145,11 @@ GetPortalByName(const char *name)
* Get the "primary" Query within a portal, ie, the one marked canSetTag.
*
* Returns NULL if no such Query. If multiple Query structs within the
- * portal are marked canSetTag, returns the first one. Neither of these
+ * portal are marked canSetTag, returns the first one. Neither of these
* cases should occur in present usages of this function.
*
* Note: the reason this is just handed a List is so that prepared statements
- * can share the code. For use with a portal, use PortalGetPrimaryQuery
+ * can share the code. For use with a portal, use PortalGetPrimaryQuery
* rather than calling this directly.
*/
Query *
@@ -790,22 +790,21 @@ AtSubCleanup_Portals(SubTransactionId mySubid)
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- HASH_SEQ_STATUS *hash_seq;
- PortalHashEnt *hentry;
+ FuncCallContext *funcctx;
+ HASH_SEQ_STATUS *hash_seq;
+ PortalHashEnt *hentry;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- MemoryContext oldcontext;
- TupleDesc tupdesc;
+ MemoryContext oldcontext;
+ TupleDesc tupdesc;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -819,8 +818,8 @@ pg_cursor(PG_FUNCTION_ARGS)
funcctx->user_fctx = NULL;
/*
- * build tupdesc for result tuples. This must match the
- * definition of the pg_cursors view in system_views.sql
+ * build tupdesc for result tuples. This must match the definition of
+ * the pg_cursors view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(6, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
@@ -871,7 +870,7 @@ pg_cursor(PG_FUNCTION_ARGS)
nulls[1] = true;
else
values[1] = DirectFunctionCall1(textin,
- CStringGetDatum(portal->sourceText));
+ CStringGetDatum(portal->sourceText));
values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
@@ -884,4 +883,3 @@ pg_cursor(PG_FUNCTION_ARGS)
SRF_RETURN_DONE(funcctx);
}
-
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 18dc461778..d978fd135a 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -55,7 +55,7 @@
* To support the above policy of writing to the lowest free block,
* ltsGetFreeBlock sorts the list of free block numbers into decreasing
* order each time it is asked for a block and the list isn't currently
- * sorted. This is an efficient way to handle it because we expect cycles
+ * sorted. This is an efficient way to handle it because we expect cycles
* of releasing many blocks followed by re-using many blocks, due to
* tuplesort.c's "preread" behavior.
*
@@ -70,7 +70,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.21 2006/03/07 23:46:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.22 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -157,7 +157,7 @@ struct LogicalTapeSet
*
* If blocksSorted is true then the block numbers in freeBlocks are in
* *decreasing* order, so that removing the last entry gives us the lowest
- * free block. We re-sort the blocks whenever a block is demanded; this
+ * free block. We re-sort the blocks whenever a block is demanded; this
* should be reasonably efficient given the expected usage pattern.
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
@@ -171,7 +171,7 @@ struct LogicalTapeSet
* is of length nTapes.
*/
int nTapes; /* # of logical tapes in set */
- LogicalTape tapes[1]; /* must be last in struct! */
+ LogicalTape tapes[1]; /* must be last in struct! */
};
static void ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
@@ -303,12 +303,12 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
}
/*
- * Add blocknum to array, and mark the array unsorted if it's no longer
- * in decreasing order.
+ * Add blocknum to array, and mark the array unsorted if it's no longer in
+ * decreasing order.
*/
ndx = lts->nFreeBlocks++;
lts->freeBlocks[ndx] = blocknum;
- if (ndx > 0 && lts->freeBlocks[ndx-1] < blocknum)
+ if (ndx > 0 && lts->freeBlocks[ndx - 1] < blocknum)
lts->blocksSorted = false;
}
@@ -522,12 +522,12 @@ LogicalTapeSetCreate(int ntapes)
int i;
/*
- * Create top-level struct including per-tape LogicalTape structs.
- * First LogicalTape struct is already counted in sizeof(LogicalTapeSet).
+ * Create top-level struct including per-tape LogicalTape structs. First
+ * LogicalTape struct is already counted in sizeof(LogicalTapeSet).
*/
Assert(ntapes > 0);
lts = (LogicalTapeSet *) palloc(sizeof(LogicalTapeSet) +
- (ntapes - 1) * sizeof(LogicalTape));
+ (ntapes - 1) *sizeof(LogicalTape));
lts->pfile = BufFileCreateTemp(false);
lts->nFileBlocks = 0L;
lts->forgetFreeSpace = false;
@@ -540,7 +540,7 @@ LogicalTapeSetCreate(int ntapes)
/*
* Initialize per-tape structs. Note we allocate the I/O buffer and
* first-level indirect block for a tape only when it is first actually
- * written to. This avoids wasting memory space when tuplesort.c
+ * written to. This avoids wasting memory space when tuplesort.c
* overestimates the number of tapes needed.
*/
for (i = 0; i < ntapes; i++)
@@ -591,7 +591,7 @@ LogicalTapeSetClose(LogicalTapeSet *lts)
* Mark a logical tape set as not needing management of free space anymore.
*
* This should be called if the caller does not intend to write any more data
- * into the tape set, but is reading from un-frozen tapes. Since no more
+ * into the tape set, but is reading from un-frozen tapes. Since no more
* writes are planned, remembering free blocks is no longer useful. Setting
* this flag lets us avoid wasting time and space in ltsReleaseBlock(), which
* is not designed to handle large numbers of free blocks.
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 08e63e0756..652f9a2ff4 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -77,7 +77,7 @@
* grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
* to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
* tape drives are expensive beasts, and in particular that there will always
- * be many more runs than tape drives. In our implementation a "tape drive"
+ * be many more runs than tape drives. In our implementation a "tape drive"
* doesn't cost much more than a few Kb of memory buffers, so we can afford
* to have lots of them. In particular, if we can have as many tape drives
* as sorted runs, we can eliminate any repeated I/O at all. In the current
@@ -91,7 +91,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.69 2006/10/03 22:18:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.70 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,28 +119,28 @@ bool trace_sort = false;
/*
- * The objects we actually sort are SortTuple structs. These contain
+ * The objects we actually sort are SortTuple structs. These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
* can be freed by a simple pfree(). SortTuples also contain the tuple's
* first key column in Datum/nullflag format, and an index integer.
*
* Storing the first key column lets us save heap_getattr or index_getattr
- * calls during tuple comparisons. We could extract and save all the key
+ * calls during tuple comparisons. We could extract and save all the key
* columns not just the first, but this would increase code complexity and
* overhead, and wouldn't actually save any comparison cycles in the common
* case where the first key determines the comparison result. Note that
* for a pass-by-reference datatype, datum1 points into the "tuple" storage.
*
* When sorting single Datums, the data value is represented directly by
- * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
+ * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
* then datum1 points to a separately palloc'd data value that is also pointed
* to by the "tuple" pointer; otherwise "tuple" is NULL.
*
* While building initial runs, tupindex holds the tuple's run number. During
* merge passes, we re-use it to hold the input tape number that each tuple in
* the heap was read from, or to hold the index of the next tuple pre-read
- * from the same tape in the case of pre-read entries. tupindex goes unused
+ * from the same tape in the case of pre-read entries. tupindex goes unused
* if the sort occurs entirely in memory.
*/
typedef struct
@@ -205,7 +205,7 @@ struct Tuplesortstate
* qsort_arg_comparator.
*/
int (*comparetup) (const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+ Tuplesortstate *state);
/*
* Function to copy a supplied input tuple into palloc'd space and set up
@@ -223,19 +223,19 @@ struct Tuplesortstate
* state->availMem by the amount of memory space thereby released.
*/
void (*writetup) (Tuplesortstate *state, int tapenum,
- SortTuple *stup);
+ SortTuple *stup);
/*
* Function to read a stored tuple from tape back into memory. 'len' is
* the already-read length of the stored tuple. Create a palloc'd copy,
- * initialize tuple/datum1/isnull1 in the target SortTuple struct,
- * and decrease state->availMem by the amount of memory space consumed.
+ * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
+ * decrease state->availMem by the amount of memory space consumed.
*/
void (*readtup) (Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len);
+ int tapenum, unsigned int len);
/*
- * This array holds the tuples now in sort memory. If we are in state
+ * This array holds the tuples now in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
@@ -255,8 +255,8 @@ struct Tuplesortstate
int currentRun;
/*
- * Unless otherwise noted, all pointer variables below are pointers
- * to arrays of length maxTapes, holding per-tape data.
+ * Unless otherwise noted, all pointer variables below are pointers to
+ * arrays of length maxTapes, holding per-tape data.
*/
/*
@@ -280,7 +280,7 @@ struct Tuplesortstate
int *mergeavailslots; /* slots left for prereading each tape */
long *mergeavailmem; /* availMem for prereading each tape */
int mergefreelist; /* head of freelist of recycled slots */
- int mergefirstfree; /* first slot never used in this merge */
+ int mergefirstfree; /* first slot never used in this merge */
/*
* Variables for Algorithm D. Note that destTape is a "logical" tape
@@ -314,8 +314,8 @@ struct Tuplesortstate
* tuplesort_begin_heap and used only by the MinimalTuple routines.
*/
TupleDesc tupDesc;
- ScanKey scanKeys; /* array of length nKeys */
- SortFunctionKind *sortFnKinds; /* array of length nKeys */
+ ScanKey scanKeys; /* array of length nKeys */
+ SortFunctionKind *sortFnKinds; /* array of length nKeys */
/*
* These variables are specific to the IndexTuple case; they are set by
@@ -346,7 +346,7 @@ struct Tuplesortstate
};
#define COMPARETUP(state,a,b) ((*(state)->comparetup) (a, b, state))
-#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
+#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup) ((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
#define LACKMEM(state) ((state)->availMem < 0)
@@ -411,26 +411,26 @@ static void tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex);
static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
static void markrunend(Tuplesortstate *state, int tapenum);
static int comparetup_heap(const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+ Tuplesortstate *state);
static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_heap(Tuplesortstate *state, int tapenum,
- SortTuple *stup);
+ SortTuple *stup);
static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len);
+ int tapenum, unsigned int len);
static int comparetup_index(const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+ Tuplesortstate *state);
static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_index(Tuplesortstate *state, int tapenum,
- SortTuple *stup);
+ SortTuple *stup);
static void readtup_index(Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len);
+ int tapenum, unsigned int len);
static int comparetup_datum(const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+ Tuplesortstate *state);
static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_datum(Tuplesortstate *state, int tapenum,
- SortTuple *stup);
+ SortTuple *stup);
static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len);
+ int tapenum, unsigned int len);
/*
@@ -460,8 +460,8 @@ tuplesort_begin_common(int workMem, bool randomAccess)
MemoryContext oldcontext;
/*
- * Create a working memory context for this sort operation.
- * All data needed by the sort will live inside this context.
+ * Create a working memory context for this sort operation. All data
+ * needed by the sort will live inside this context.
*/
sortcontext = AllocSetContextCreate(CurrentMemoryContext,
"TupleSort",
@@ -470,8 +470,8 @@ tuplesort_begin_common(int workMem, bool randomAccess)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Make the Tuplesortstate within the per-sort context. This way,
- * we don't need a separate pfree() operation for it at shutdown.
+ * Make the Tuplesortstate within the per-sort context. This way, we
+ * don't need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(sortcontext);
@@ -680,8 +680,8 @@ tuplesort_end(Tuplesortstate *state)
/*
* Delete temporary "tape" files, if any.
*
- * Note: want to include this in reported total cost of sort, hence
- * need for two #ifdef TRACE_SORT sections.
+ * Note: want to include this in reported total cost of sort, hence need
+ * for two #ifdef TRACE_SORT sections.
*/
if (state->tapeset)
LogicalTapeSetClose(state->tapeset);
@@ -701,8 +701,8 @@ tuplesort_end(Tuplesortstate *state)
MemoryContextSwitchTo(oldcontext);
/*
- * Free the per-sort memory context, thereby releasing all working
- * memory, including the Tuplesortstate struct itself.
+ * Free the per-sort memory context, thereby releasing all working memory,
+ * including the Tuplesortstate struct itself.
*/
MemoryContextDelete(state->sortcontext);
}
@@ -721,15 +721,16 @@ grow_memtuples(Tuplesortstate *state)
{
/*
* We need to be sure that we do not cause LACKMEM to become true, else
- * the space management algorithm will go nuts. We assume here that
- * the memory chunk overhead associated with the memtuples array is
- * constant and so there will be no unexpected addition to what we ask
- * for. (The minimum array size established in tuplesort_begin_common
- * is large enough to force palloc to treat it as a separate chunk, so
- * this assumption should be good. But let's check it.)
+ * the space management algorithm will go nuts. We assume here that the
+ * memory chunk overhead associated with the memtuples array is constant
+ * and so there will be no unexpected addition to what we ask for. (The
+ * minimum array size established in tuplesort_begin_common is large
+ * enough to force palloc to treat it as a separate chunk, so this
+ * assumption should be good. But let's check it.)
*/
if (state->availMem <= (long) (state->memtupsize * sizeof(SortTuple)))
return false;
+
/*
* On a 64-bit machine, allowedMem could be high enough to get us into
* trouble with MaxAllocSize, too.
@@ -804,8 +805,8 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
SortTuple stup;
/*
- * If it's a pass-by-reference value, copy it into memory we control,
- * and decrease availMem. Then call the common code.
+ * If it's a pass-by-reference value, copy it into memory we control, and
+ * decrease availMem. Then call the common code.
*/
if (isNull || state->datumTypeByVal)
{
@@ -837,10 +838,10 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
case TSS_INITIAL:
/*
- * Save the tuple into the unsorted array. First, grow the
- * array as needed. Note that we try to grow the array when there
- * is still one free slot remaining --- if we fail, there'll still
- * be room to store the incoming tuple, and then we'll switch to
+ * Save the tuple into the unsorted array. First, grow the array
+ * as needed. Note that we try to grow the array when there is
+ * still one free slot remaining --- if we fail, there'll still be
+ * room to store the incoming tuple, and then we'll switch to
* tape-based operation.
*/
if (state->memtupcount >= state->memtupsize - 1)
@@ -869,14 +870,14 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
case TSS_BUILDRUNS:
/*
- * Insert the tuple into the heap, with run number
- * currentRun if it can go into the current run, else run number
- * currentRun+1. The tuple can go into the current run if it is
- * >= the first not-yet-output tuple. (Actually, it could go into
- * the current run if it is >= the most recently output tuple ...
- * but that would require keeping around the tuple we last output,
- * and it's simplest to let writetup free each tuple as soon as
- * it's written.)
+ * Insert the tuple into the heap, with run number currentRun if
+ * it can go into the current run, else run number currentRun+1.
+ * The tuple can go into the current run if it is >= the first
+ * not-yet-output tuple. (Actually, it could go into the current
+ * run if it is >= the most recently output tuple ... but that
+ * would require keeping around the tuple we last output, and it's
+ * simplest to let writetup free each tuple as soon as it's
+ * written.)
*
* Note there will always be at least one tuple in the heap at
* this point; see dumptuples.
@@ -1262,14 +1263,14 @@ tuplesort_merge_order(long allowedMem)
int mOrder;
/*
- * We need one tape for each merge input, plus another one for the
- * output, and each of these tapes needs buffer space. In addition
- * we want MERGE_BUFFER_SIZE workspace per input tape (but the output
- * tape doesn't count).
+ * We need one tape for each merge input, plus another one for the output,
+ * and each of these tapes needs buffer space. In addition we want
+ * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
+ * count).
*
* Note: you might be thinking we need to account for the memtuples[]
- * array in this calculation, but we effectively treat that as part of
- * the MERGE_BUFFER_SIZE workspace.
+ * array in this calculation, but we effectively treat that as part of the
+ * MERGE_BUFFER_SIZE workspace.
*/
mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
@@ -1298,8 +1299,8 @@ inittapes(Tuplesortstate *state)
/*
* We must have at least 2*maxTapes slots in the memtuples[] array, else
- * we'd not have room for merge heap plus preread. It seems unlikely
- * that this case would ever occur, but be safe.
+ * we'd not have room for merge heap plus preread. It seems unlikely that
+ * this case would ever occur, but be safe.
*/
maxTapes = Min(maxTapes, state->memtupsize / 2);
@@ -1314,12 +1315,12 @@ inittapes(Tuplesortstate *state)
/*
* Decrease availMem to reflect the space needed for tape buffers; but
- * don't decrease it to the point that we have no room for tuples.
- * (That case is only likely to occur if sorting pass-by-value Datums;
- * in all other scenarios the memtuples[] array is unlikely to occupy
- * more than half of allowedMem. In the pass-by-value case it's not
- * important to account for tuple space, so we don't care if LACKMEM
- * becomes inaccurate.)
+ * don't decrease it to the point that we have no room for tuples. (That
+ * case is only likely to occur if sorting pass-by-value Datums; in all
+ * other scenarios the memtuples[] array is unlikely to occupy more than
+ * half of allowedMem. In the pass-by-value case it's not important to
+ * account for tuple space, so we don't care if LACKMEM becomes
+ * inaccurate.)
*/
tapeSpace = maxTapes * TAPE_BUFFER_OVERHEAD;
if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
@@ -1435,7 +1436,7 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as the
- * finished output, rather than doing a useless merge. (This obvious
+ * finished output, rather than doing a useless merge. (This obvious
* optimization is not in Knuth's algorithm.)
*/
if (state->currentRun == 1)
@@ -1666,7 +1667,7 @@ beginmerge(Tuplesortstate *state)
memset(state->mergelast, 0,
state->maxTapes * sizeof(*state->mergelast));
state->mergefreelist = 0; /* nothing in the freelist */
- state->mergefirstfree = activeTapes; /* 1st slot avail for preread */
+ state->mergefirstfree = activeTapes; /* 1st slot avail for preread */
/*
* Initialize space allocation to let each active input tape have an equal
@@ -1966,7 +1967,7 @@ tuplesort_restorepos(Tuplesortstate *state)
/*
* Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
*
- * Compare two SortTuples. If checkIndex is true, use the tuple index
+ * Compare two SortTuples. If checkIndex is true, use the tuple index
* as the front of the sort key; otherwise, no.
*/
@@ -1977,7 +1978,7 @@ tuplesort_restorepos(Tuplesortstate *state)
/*
* Insert a new tuple into an empty or existing heap, maintaining the
- * heap invariant. Caller is responsible for ensuring there's room.
+ * heap invariant. Caller is responsible for ensuring there's room.
*
* Note: we assume *tuple is a temporary variable that can be scribbled on.
* For some callers, tuple actually points to a memtuples[] entry above the
@@ -1993,10 +1994,10 @@ tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
int j;
/*
- * Save the tupleindex --- see notes above about writing on *tuple.
- * It's a historical artifact that tupleindex is passed as a separate
- * argument and not in *tuple, but it's notationally convenient so
- * let's leave it that way.
+ * Save the tupleindex --- see notes above about writing on *tuple. It's a
+ * historical artifact that tupleindex is passed as a separate argument
+ * and not in *tuple, but it's notationally convenient so let's leave it
+ * that way.
*/
tuple->tupindex = tupleindex;
@@ -2432,8 +2433,8 @@ comparetup_index(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
/*
* This is similar to _bt_tuplecompare(), but we have already done the
- * index_getattr calls for the first column, and we need to keep track
- * of whether any null fields are present. Also see the special treatment
+ * index_getattr calls for the first column, and we need to keep track of
+ * whether any null fields are present. Also see the special treatment
* for equal keys at the end.
*/
ScanKey scanKey = state->indexScanKey;
@@ -2686,7 +2687,7 @@ readtup_datum(Tuplesortstate *state, SortTuple *stup,
}
else
{
- void *raddr = palloc(tuplen);
+ void *raddr = palloc(tuplen);
if (LogicalTapeRead(state->tapeset, tapenum, raddr,
tuplen) != tuplen)
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index a2ed330ccc..dcf68de335 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.28 2006/06/27 02:51:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.29 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -347,6 +347,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple)
switch (state->status)
{
case TSS_INMEM:
+
/*
* Grow the array as needed. Note that we try to grow the array
* when there is still one free slot remaining --- if we fail,
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index a3b52fa7eb..eedc6222be 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -32,7 +32,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.97 2006/09/15 16:39:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.98 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1326,7 +1326,7 @@ XidInSnapshot(TransactionId xid, Snapshot snapshot)
/*
* Make a quick range check to eliminate most XIDs without looking at the
- * xip arrays. Note that this is OK even if we convert a subxact XID to
+ * xip arrays. Note that this is OK even if we convert a subxact XID to
* its parent below, because a subxact with XID < xmin has surely also got
* a parent with XID < xmin, while one with XID >= xmax must belong to a
* parent that was not yet committed at the time of this snapshot.
@@ -1342,7 +1342,7 @@ XidInSnapshot(TransactionId xid, Snapshot snapshot)
/*
* If the snapshot contains full subxact data, the fastest way to check
* things is just to compare the given XID against both subxact XIDs and
- * top-level XIDs. If the snapshot overflowed, we have to use pg_subtrans
+ * top-level XIDs. If the snapshot overflowed, we have to use pg_subtrans
* to convert a subxact XID to its parent XID, but then we need only look
* at top-level XIDs not subxacts.
*/
@@ -1365,8 +1365,8 @@ XidInSnapshot(TransactionId xid, Snapshot snapshot)
xid = SubTransGetTopmostTransaction(xid);
/*
- * If xid was indeed a subxact, we might now have an xid < xmin,
- * so recheck to avoid an array scan. No point in rechecking xmax.
+ * If xid was indeed a subxact, we might now have an xid < xmin, so
+ * recheck to avoid an array scan. No point in rechecking xmax.
*/
if (TransactionIdPrecedes(xid, snapshot->xmin))
return false;